author    Alan Korr <alkorr@rockbox.org>  2002-04-21 12:21:14 +0000
committer Alan Korr <alkorr@rockbox.org>  2002-04-21 12:21:14 +0000
commit    b7cf0602fd08f6a367d42f0b6adadb8322b3d35d (patch)
tree      abbfb87b615f4c01a5f56eedacd75acbd2e52b87 /firmware/test/memory/memory-slab.c
parent    257d17da6d64d2e265df3c80192a01f47e1dd2b7 (diff)
removing all that stuff permanently.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@159 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'firmware/test/memory/memory-slab.c')
-rw-r--r--  firmware/test/memory/memory-slab.c  409
1 file changed, 0 insertions, 409 deletions
diff --git a/firmware/test/memory/memory-slab.c b/firmware/test/memory/memory-slab.c
deleted file mode 100644
index 35ab96f787..0000000000
--- a/firmware/test/memory/memory-slab.c
+++ /dev/null
@@ -1,409 +0,0 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Alan Korr
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#if 0
#include <memory.h>
#include "memory-page.h"
#include "memory-slab.h"

///////////////////////////////////////////////////////////////////////////////
// MEMORY SLAB :
////////////////
//
// helpers maintaining the doubly-linked lists of slabs : 'less' points
// towards the head of a list, 'more' towards its tail, and a list head
// always has 'less' == 0.
//

static inline struct memory_slab *__memory_push_slab (struct memory_slab *head,struct memory_slab *node)
{
    /* push 'node' at the head of the list and return it as the new head */
    node->less = 0;
    node->more = head;
    if (head)
        head->less = node;
    return node;
}

static inline struct memory_slab *__memory_pop_slab (struct memory_slab *head,struct memory_slab *node)
{
    /* unlink 'node' from the list and return the new head */
    if (node->less)
        node->less->more = node->more;
    if (node->more)
        node->more->less = node->less;
    return (node == head) ? node->more : head;
}

static inline struct memory_slab *__memory_move_slab (struct memory_slab **from,struct memory_slab **to)
{
    /* move the head of the 'from' list onto the head of the 'to' list */
    struct memory_slab *head = *from;
    *from = (*from)->more;
    if (*from)
        (*from)->less = 0;
    head->less = 0;
    head->more = (*to);
    if (*to)
        (*to)->less = head;
    *to = head;
    return head;
}

//
///////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////////////////////////////////////
// MEMORY CACHE :
/////////////////
//
// each cache keeps three slab lists (free, used and reap) ; all caches sit
// in a splay tree ordered by 'left', the number of slabs on their reap list.
//

static struct memory_cache *cache_tree; /* splay tree of all caches, keyed by 'left' */

static inline int __memory_get_order (unsigned size)
{
    int order = 0;
    /* round up to a multiple of the free-block header, then count bits */
    size = (size + sizeof (struct memory_free_block) - 1) & -sizeof (struct memory_free_block);
    while (size > 0)
    {
        ++order; size >>= 1;
    }
    return order;
}
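
// example (illustrative sizes) : __memory_get_order (24) rounds 24 up to a
// multiple of sizeof (struct memory_free_block), then counts the significant
// bits : 24 is 11000 in binary, so five shifts clear it and the order is 5,
// selecting a 1 << 5 = 32-byte class.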

static inline struct memory_slab *__memory_get_slab (struct memory_cache *cache,void *address)
{
    /* the slab descriptor is stored at the end of the page holding 'address' */
#ifdef TEST
    return (struct memory_slab *)((((unsigned)address + cache->page_size) & -cache->page_size) - sizeof (struct memory_slab));
#else
    return (struct memory_slab *)((free_page + (((unsigned)address - free_page + cache->page_size) & -cache->page_size) - sizeof (struct memory_slab)));
#endif
}
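
// example for the TEST variant (illustrative addresses) : with 512-byte
// pages, any address inside the page at 0x1000 gives
//     ((address + 0x200) & -0x200) - sizeof (struct memory_slab)
//         == 0x1200 - sizeof (struct memory_slab),
// i.e. the descriptor stored at the end of that page.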

static struct memory_cache *__memory_splay_cache (struct memory_cache *root,unsigned int left)
{
    /* classic top-down splay : bring the cache whose key is closest to
       'left' up to the root */
    struct memory_cache *down;
    struct memory_cache *less;
    struct memory_cache *more;
    struct memory_cache head;
    head.less =
    head.more = 0;
    less =
    more = &head;
    while (1)
    {
        if (left < root->left)
        {
            if ((down = root->less))
            {
                if (left < down->left)
                {
                    /* rotate right */
                    root->less = down->more;
                    down->more = root;
                    root = down;
                    if (!root->less)
                        break;
                }
                /* link right */
                more->less = root;
                more = root;
                root = root->less;
                continue;
            }
            break;
        }
        if (root->left < left)
        {
            if ((down = root->more))
            {
                if (down->left < left)
                {
                    /* rotate left */
                    root->more = down->less;
                    down->less = root;
                    root = down;
                    if (!root->more)
                        break;
                }
                /* link left */
                less->more = root;
                less = root;
                root = root->more;
                continue;
            }
        }
        break;
    }
    /* reassemble */
    less->more = root->less;
    more->less = root->more;
    root->less = head.more;
    root->more = head.less;
    return root;
}

static inline struct memory_cache *__memory_insert_cache (struct memory_cache *root,struct memory_cache *node)
{
    node->less =
    node->more =
    node->same = 0;
    if (root)
    {
        if (node->left == (root = __memory_splay_cache (root,node->left))->left)
        {
            /* same key : take root's place and chain it through 'same'/'less' */
            node->less = root->less;
            node->more = root->more;
            node->same = root;
            root->less = node;
        }
        else if (node->left < root->left)
        {
            node->less = root->less;
            node->more = root;
            root->less = 0;
        }
        else
        {
            node->less = root;
            node->more = root->more;
            root->more = 0;
        }
    }
    return node;
}

static inline struct memory_cache *__memory_remove_cache (struct memory_cache *root,struct memory_cache *node)
{
    if (root)
    {
        root = __memory_splay_cache (root,node->left);
        if (root != node)
        {
            /* 'node' hangs off the splayed root's 'same' chain : unlink it */
            node->less->same = node->same;
            if (node->same)
                node->same->less = node->less;
            return root;
        }
        if (root->same)
        {
            /* promote the next cache holding the same key */
            node = root->same;
            node->less = root->less;
            node->more = root->more;
        }
        else if (root->less)
        {
            node = __memory_splay_cache (root->less,root->left);
            node->more = root->more;
        }
        else
            node = root->more;
        root = node;
    }
    return root;
}

static inline struct memory_cache *__memory_move_cache (struct memory_cache *root,struct memory_cache *node,int delta)
{
    /* adjust the key of 'node' and re-insert it, even when it was the
       last cache in the tree */
    root = __memory_remove_cache (root,node);
    node->left += delta;
    return __memory_insert_cache (root,node);
}

//
/////////////////////
// PUBLIC FUNCTIONS :
/////////////////////
//
// - memory_grow_cache : allocate a new slab for a cache
// - memory_shrink_cache : release free slabs from a cache
// - memory_create_cache : create a new cache of fixed-size blocks
// - memory_destroy_cache : destroy the cache and release all the slabs
// - memory_cache_allocate : allocate a block from the cache
// - memory_cache_release : release a block in the cache
//
// (see the usage sketch below)
//

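// The sketch below shows how these entry points fit together. It is
// illustrative only : 'struct item', the alignment and flag values are made
// up, and MEMORY_RETURN_FAILURE is assumed to compare equal to a null
// pointer for the calls returning pointers.

#ifdef MEMORY_SLAB_USAGE_EXAMPLE /* hypothetical guard : not defined anywhere */
struct item { struct item *next; int value; };

static void memory_cache_usage_example (void)
{
    struct memory_cache *cache = memory_create_cache (sizeof (struct item),4,0);
    if (cache)
    {
        struct item *item = memory_cache_allocate (cache);
        if (item)
        {
            item->value = 42;
            memory_cache_release (cache,item);
        }
        memory_shrink_cache (cache,1 /* release all free slabs */);
        memory_destroy_cache (cache);
    }
}
#endif
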
struct memory_slab *memory_grow_cache (struct memory_cache *cache)
{
    struct memory_slab *slab;
    unsigned int page;
    if (cache)
    {
        page = (unsigned int)memory_allocate_page (cache->page_order);
        if (page)
        {
            struct memory_free_block *block,**link;
            /* the slab descriptor sits at the end of the new page */
            slab = (struct memory_slab *)(page + cache->page_size - sizeof (struct memory_slab));
            slab->free = 0;
            slab->left = 0;
            /* carve the page into blocks chained on the slab's free list */
            link = &slab->free;
            for (block = (struct memory_free_block *)page;
                 (unsigned int)block + cache->size < (unsigned int)slab;
                 block = (struct memory_free_block *)((unsigned int)block + cache->size))
            {
                *link = block;
                link = &block->link;
                ++slab->left;
            }
            *link = 0;
            cache->blocks_per_slab = slab->left;
            cache->reap = __memory_push_slab (cache->reap,slab);
            cache_tree = __memory_move_cache (cache_tree,cache,+1);
            return slab;
        }
    }
    return MEMORY_RETURN_FAILURE;
}

static int __memory_shrink_cache (struct memory_cache *cache,int all,int move)
{
    struct memory_slab *slab;
    unsigned int slabs = 0;
    if (cache)
    {
        /* release one free slab, or all of them */
        while ((slab = cache->reap))
        {
            ++slabs;
            cache->reap = __memory_pop_slab (cache->reap,slab);
            memory_release_page ((void *)slab);
            if (!all)
                break;
        }
        if (slabs > 0)
        {
            if (move)
                cache_tree = __memory_move_cache (cache_tree,cache,-(int)slabs);
            return MEMORY_RETURN_SUCCESS;
        }
    }
    return MEMORY_RETURN_FAILURE;
}

int memory_shrink_cache (struct memory_cache *cache,int all)
{
    return __memory_shrink_cache (cache,all,1 /* move cache in cache_tree */);
}

struct memory_cache *memory_create_cache (unsigned int size,int align,int flags)
{
    struct memory_cache *cache;
    unsigned int waste = 0,blocks_per_slab;
    int page_order;
    unsigned int page_size;
    unsigned int original_size = size;

    // Align 'size' on 'align' bytes ('align' should equal 1<<n) ;
    // if 'align' is less than 4, 32-bit word alignment is applied by default.
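    // e.g. (illustrative values) : size 10, align 8 -> (10 + 7) & -8 = 16 ;
    //      size 10, align <= 4   -> (10 + 3) & -4 = 12.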
    size = (align > 4) ? ((size + align - 1) & -align) : ((size + sizeof (int) - 1) & -sizeof (int));
    if (!(cache = memory_cache_allocate (&cache_cache)))
        return MEMORY_RETURN_FAILURE;

    cache->flags =
    cache->left = 0;

    cache->used =
    cache->free =
    cache->reap = 0;

    cache->original_size = original_size;
    cache->size = size;

    /* start from the smallest slab, one minimal page
       (assuming page order 0 corresponds to MEMORY_PAGE_MINIMAL_SIZE bytes) */
    page_order = 0;
    page_size = MEMORY_PAGE_MINIMAL_SIZE;

    // Try to determine the best number of pages per slab
    for (;; ++page_order,page_size <<= 1)
    {
        if (page_order >= MEMORY_MAXIMUM_PAGE_ORDER_PER_SLAB)
        {
            memory_cache_release (&cache_cache,cache);
            return MEMORY_RETURN_FAILURE;
        }

        waste = page_size;
        waste -= sizeof (struct memory_slab);

        blocks_per_slab = waste / size;
        waste -= blocks_per_slab * size;

        if (blocks_per_slab < MEMORY_MINIMUM_BLOCKS_PER_SLAB)
            continue;

        // below 3% of lost space is correct :
        // (waste << 16) / page_size < 1967 means waste/page_size < 1967/65536 ~ 3.0%
        if (((waste << 16) / page_size) < 1967)
            break;
    }

    cache->page_size = page_size;
    cache->page_order = page_order;

    cache_tree = __memory_insert_cache (cache_tree,cache);

    return cache;
}

int memory_destroy_cache (struct memory_cache *cache)
{
    /* FIX ME : this function shouldn't be called if there are still used blocks */
    if (cache && !cache->free && !cache->used)
    {
        cache_tree = __memory_remove_cache (cache_tree,cache);
        if (__memory_shrink_cache (cache,1 /* release all free slabs */,0 /* don't move in cache_tree */))
            return memory_cache_release (&cache_cache,cache);
    }
    return MEMORY_RETURN_FAILURE;
}

void *memory_cache_allocate (struct memory_cache *cache)
{
    if (cache)
    {
        do
        {
            struct memory_slab *slab;
            struct memory_free_block *block;
            if ((slab = cache->free) && slab->left > 0)
                goto ok;
            if (cache->reap)
            {
                /* wake a fully-free slab from the reap list */
                slab = __memory_move_slab (&cache->reap,&cache->free);
                cache_tree = __memory_move_cache (cache_tree,cache,-1);
                goto ok;
            }
            continue;
ok:
            block = slab->free;
            slab->free = block->link;
            if (--slab->left == 0)
                /* slab is now full : move it to the used list */
                __memory_move_slab (&cache->free,&cache->used);
            return block;
        }
        while (memory_grow_cache (cache));
    }
    return MEMORY_RETURN_FAILURE;
}

int memory_cache_release (struct memory_cache *cache,void *address)
{
    struct memory_slab *slab = __memory_get_slab (cache,address);
    ((struct memory_free_block *)address)->link = slab->free;
    slab->free = (struct memory_free_block *)address;
    if (slab->left++ == 0)
    {
        /* slab was full : move it from the used list to the free list */
        cache->used = __memory_pop_slab (cache->used,slab);
        cache->free = __memory_push_slab (cache->free,slab);
    }
    else if (slab->left == cache->blocks_per_slab)
    {
        /* slab is entirely free : park it on the reap list */
        cache->free = __memory_pop_slab (cache->free,slab);
        cache->reap = __memory_push_slab (cache->reap,slab);
        cache_tree = __memory_move_cache (cache_tree,cache,+1);
    }
    return MEMORY_RETURN_SUCCESS;
}

//
///////////////////////////////////////////////////////////////////////////////

#endif