diff options
author | Alan Korr <alkorr@rockbox.org> | 2002-04-17 15:00:28 +0000 |
---|---|---|
committer | Alan Korr <alkorr@rockbox.org> | 2002-04-17 15:00:28 +0000 |
commit | c25510f944553681d7898e5b9d8e132f6e3c431b (patch) | |
tree | 5665e58628f634374dada65da74b5b854a6e70b4 /firmware/test/memory/memory-slab.c | |
parent | 454be44f8d0c7cc1a6f77c0e694c8a32f2cd6099 (diff) | |
download | rockbox-c25510f944553681d7898e5b9d8e132f6e3c431b.tar.gz rockbox-c25510f944553681d7898e5b9d8e132f6e3c431b.zip |
Now memory-page and memory-misc compile fine (others are in stage-development)
Conventions :
* Public headers :
memory.h,config.h,defines.h,inlines.h,types.h,functions.h
* Private headers :
memory-page.h,memory-slab.h (here you can find
function prototypes or structures we want to share
only between memory-page.c, memory-slab.c,
memory-block.c, memory-misc.c).
* Public or private code in :
memory-page.c,memory-slab.c,memory-block.c,memory-misc.c
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@126 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'firmware/test/memory/memory-slab.c')
-rw-r--r-- | firmware/test/memory/memory-slab.c | 111 |
1 files changed, 35 insertions, 76 deletions
diff --git a/firmware/test/memory/memory-slab.c b/firmware/test/memory/memory-slab.c index 2c9e1f231f..35ab96f787 100644 --- a/firmware/test/memory/memory-slab.c +++ b/firmware/test/memory/memory-slab.c | |||
@@ -16,17 +16,10 @@ | |||
16 | * KIND, either express or implied. | 16 | * KIND, either express or implied. |
17 | * | 17 | * |
18 | ****************************************************************************/ | 18 | ****************************************************************************/ |
19 | #ifndef __LIBRARY_MEMORY_C__ | 19 | #if 0 |
20 | # error "This header file must be included ONLY from memory.c." | 20 | #include <memory.h> |
21 | #endif | 21 | #include "memory-page.h" |
22 | #ifndef __LIBRARY_MEMORY_PAGE_H__ | 22 | #include "memory-slab.h" |
23 | # define __LIBRARY_MEMORY_PAGE_H__ | ||
24 | |||
25 | struct memory_free_block | ||
26 | { | ||
27 | struct memory_free_block | ||
28 | *link; | ||
29 | }; | ||
30 | 23 | ||
31 | /////////////////////////////////////////////////////////////////////////////// | 24 | /////////////////////////////////////////////////////////////////////////////// |
32 | // MEMORY SLAB : | 25 | // MEMORY SLAB : |
@@ -34,17 +27,7 @@ struct memory_free_block | |||
34 | // | 27 | // |
35 | // | 28 | // |
36 | 29 | ||
37 | struct memory_slab | 30 | static inline struct memory_slab *__memory_push_slab (struct memory_slab *head,struct memory_slab *node) |
38 | { | ||
39 | struct memory_slab | ||
40 | *less,*more; | ||
41 | unsigned int // left == number of free blocks left | ||
42 | left; | ||
43 | struct memory_free_block | ||
44 | *free; | ||
45 | }; | ||
46 | |||
47 | static inline struct memory_slab *push_slab (struct memory_slab *head,struct memory_slab *node) | ||
48 | { | 31 | { |
49 | node->less = head; | 32 | node->less = head; |
50 | if (head) | 33 | if (head) |
@@ -57,14 +40,14 @@ static inline struct memory_slab *push_slab (struct memory_slab *head,struct mem | |||
57 | return node; | 40 | return node; |
58 | } | 41 | } |
59 | 42 | ||
60 | static inline struct memory_slab *pop_slab (struct memory_slab *head,struct memory_slab *node) | 43 | static inline struct memory_slab *__memory_pop_slab (struct memory_slab *head,struct memory_slab *node) |
61 | { | 44 | { |
62 | if (head) | 45 | if (head) |
63 | head->more = node->more; | 46 | head->more = node->more; |
64 | return node->more; | 47 | return node->more; |
65 | } | 48 | } |
66 | 49 | ||
67 | static inline struct memory_slab *move_slab (struct memory_slab **from,struct memory_slab **to) | 50 | static inline struct memory_slab *__memory_move_slab (struct memory_slab **from,struct memory_slab **to) |
68 | { | 51 | { |
69 | struct memory_slab *head = *from; | 52 | struct memory_slab *head = *from; |
70 | *from = (*from)->more; | 53 | *from = (*from)->more; |
@@ -87,33 +70,9 @@ static inline struct memory_slab *move_slab (struct memory_slab **from,struct me | |||
87 | // | 70 | // |
88 | // | 71 | // |
89 | 72 | ||
90 | struct memory_cache | ||
91 | { | ||
92 | struct memory_cache | ||
93 | *less,*more,*same; | ||
94 | unsigned int | ||
95 | left; // number of free slabs | ||
96 | struct memory_slab | ||
97 | *used; | ||
98 | struct memory_slab | ||
99 | *free; | ||
100 | struct memory_slab | ||
101 | *reap; | ||
102 | unsigned int | ||
103 | size,original_size; | ||
104 | unsigned int | ||
105 | page_size; | ||
106 | unsigned int | ||
107 | blocks_per_slab; | ||
108 | int | ||
109 | page_order; | ||
110 | unsigned int | ||
111 | flags; | ||
112 | }; | ||
113 | |||
114 | static struct memory_cache *cache_tree; | 73 | static struct memory_cache *cache_tree; |
115 | 74 | ||
116 | static inline int get_order (unsigned size) | 75 | static inline int __memory_get_order (unsigned size) |
117 | { | 76 | { |
118 | int order = 0; | 77 | int order = 0; |
119 | size = (size + sizeof(struct memory_free_block) - 1) & - sizeof(struct memory_free_block); | 78 | size = (size + sizeof(struct memory_free_block) - 1) & - sizeof(struct memory_free_block); |
@@ -124,7 +83,7 @@ static inline int get_order (unsigned size) | |||
124 | return order; | 83 | return order; |
125 | } | 84 | } |
126 | 85 | ||
127 | static inline struct memory_slab *get_slab (struct memory_cache *cache,void *address) | 86 | static inline struct memory_slab *__memory_get_slab (struct memory_cache *cache,void *address) |
128 | { | 87 | { |
129 | #ifdef TEST | 88 | #ifdef TEST |
130 | return (struct memory_slab *)((((unsigned)address + cache->page_size) & -cache->page_size) - sizeof (struct memory_slab)); | 89 | return (struct memory_slab *)((((unsigned)address + cache->page_size) & -cache->page_size) - sizeof (struct memory_slab)); |
@@ -133,7 +92,7 @@ static inline struct memory_slab *get_slab (struct memory_cache *cache,void *add | |||
133 | #endif | 92 | #endif |
134 | } | 93 | } |
135 | 94 | ||
136 | static struct memory_cache *splay_cache (struct memory_cache *root,unsigned int left) | 95 | static struct memory_cache *__memory_splay_cache (struct memory_cache *root,unsigned int left) |
137 | { | 96 | { |
138 | struct memory_cache *down; | 97 | struct memory_cache *down; |
139 | struct memory_cache *less; | 98 | struct memory_cache *less; |
@@ -191,14 +150,14 @@ static struct memory_cache *splay_cache (struct memory_cache *root,unsigned int | |||
191 | return root; | 150 | return root; |
192 | } | 151 | } |
193 | 152 | ||
194 | static inline struct memory_cache *insert_cache (struct memory_cache *root,struct memory_cache *node) | 153 | static inline struct memory_cache *__memory_insert_cache (struct memory_cache *root,struct memory_cache *node) |
195 | { | 154 | { |
196 | node->less = | 155 | node->less = |
197 | node->more = | 156 | node->more = |
198 | node->same = 0; | 157 | node->same = 0; |
199 | if (root) | 158 | if (root) |
200 | { | 159 | { |
201 | if (node->left == ((root = splay_cache (root,node))->left)) | 160 | if (node->left == ((root = __memory_splay_cache (root,node))->left)) |
202 | { | 161 | { |
203 | node->less = root.less; | 162 | node->less = root.less; |
204 | node->more = root.more; | 163 | node->more = root.more; |
@@ -221,11 +180,11 @@ static inline struct memory_cache *insert_cache (struct memory_cache *root,struc | |||
221 | return node; | 180 | return node; |
222 | } | 181 | } |
223 | 182 | ||
224 | static inline struct memory_cache *remove_cache (struct memory_cache *root,struct memory_cache *node) | 183 | static inline struct memory_cache *__memory_remove_cache (struct memory_cache *root,struct memory_cache *node) |
225 | { | 184 | { |
226 | if (root) | 185 | if (root) |
227 | { | 186 | { |
228 | root = splay_cache (root,node); | 187 | root = __memory_splay_cache (root,node); |
229 | if (root != node) | 188 | if (root != node) |
230 | { | 189 | { |
231 | node->less->same = node->same; | 190 | node->less->same = node->same; |
@@ -235,7 +194,7 @@ static inline struct memory_cache *remove_cache (struct memory_cache *root,struc | |||
235 | } | 194 | } |
236 | if (root->less) | 195 | if (root->less) |
237 | { | 196 | { |
238 | node = splay_page (root->less,node); | 197 | node = __memory_splay_page (root->less,node); |
239 | node->more = root->more; | 198 | node->more = root->more; |
240 | } | 199 | } |
241 | else | 200 | else |
@@ -244,12 +203,12 @@ static inline struct memory_cache *remove_cache (struct memory_cache *root,struc | |||
244 | return root; | 203 | return root; |
245 | } | 204 | } |
246 | 205 | ||
247 | static inline struct memory_cache *move_cache (struct memory_cache *root,struct memory_cache *node,int delta) | 206 | static inline struct memory_cache *__memory_move_cache (struct memory_cache *root,struct memory_cache *node,int delta) |
248 | { | 207 | { |
249 | if ((root = remove_cache (root,node))) | 208 | if ((root = __memory_remove_cache (root,node))) |
250 | { | 209 | { |
251 | node->left += delta; | 210 | node->left += delta; |
252 | root = insert_cache (root,node); | 211 | root = __memory_insert_cache (root,node); |
253 | } | 212 | } |
254 | return root; | 213 | return root; |
255 | } | 214 | } |
@@ -291,15 +250,15 @@ struct memory_slab *memory_grow_cache (struct memory_cache *cache) | |||
291 | } | 250 | } |
292 | *link = 0; | 251 | *link = 0; |
293 | cache->blocks_per_slab = slab->free; | 252 | cache->blocks_per_slab = slab->free; |
294 | cache->reap = push_slab (cache->reap,slab); | 253 | cache->reap = __memory_push_slab (cache->reap,slab); |
295 | cache_tree = move_cache (cache_tree,cache,+1); | 254 | cache_tree = __memory_move_cache (cache_tree,cache,+1); |
296 | return slab; | 255 | return slab; |
297 | } | 256 | } |
298 | } | 257 | } |
299 | return MEMORY_RETURN_FAILURE; | 258 | return MEMORY_RETURN_FAILURE; |
300 | } | 259 | } |
301 | 260 | ||
302 | static int shrink_cache (struct memory_cache *cache,int all,int move) | 261 | static int __memory_shrink_cache (struct memory_cache *cache,int all,int move) |
303 | { | 262 | { |
304 | struct memory_slab *slab; | 263 | struct memory_slab *slab; |
305 | unsigned int slabs = 0; | 264 | unsigned int slabs = 0; |
@@ -308,12 +267,12 @@ static int shrink_cache (struct memory_cache *cache,int all,int move) | |||
308 | while ((slab = cache->reap)) | 267 | while ((slab = cache->reap)) |
309 | { | 268 | { |
310 | ++slabs; | 269 | ++slabs; |
311 | cache->reap = pop_slab (cache->reap,slab); | 270 | cache->reap = __memory_pop_slab (cache->reap,slab); |
312 | memory_release_page ((void *)slab); | 271 | memory_release_page ((void *)slab); |
313 | if (all) | 272 | if (all) |
314 | continue; | 273 | continue; |
315 | if (move) | 274 | if (move) |
316 | cache_tree = move_cache (cache_tree,cache,-slabs); | 275 | cache_tree = __memory_move_cache (cache_tree,cache,-slabs); |
317 | return MEMORY_RETURN_SUCCESS; | 276 | return MEMORY_RETURN_SUCCESS; |
318 | } | 277 | } |
319 | } | 278 | } |
@@ -322,7 +281,7 @@ static int shrink_cache (struct memory_cache *cache,int all,int move) | |||
322 | 281 | ||
323 | int memory_shrink_cache (struct memory_cache *cache,int all) | 282 | int memory_shrink_cache (struct memory_cache *cache,int all) |
324 | { | 283 | { |
325 | return shrink_cache (cache,all,1 /* move cache in cache_tree */); | 284 | return __memory_shrink_cache (cache,all,1 /* move cache in cache_tree */); |
326 | } | 285 | } |
327 | 286 | ||
328 | struct memory_cache *memory_create_cache (unsigned int size,int align,int flags) | 287 | struct memory_cache *memory_create_cache (unsigned int size,int align,int flags) |
@@ -382,7 +341,7 @@ struct memory_cache *memory_create_cache (unsigned int size,int align,int flags) | |||
382 | cache->page_size = page_size; | 341 | cache->page_size = page_size; |
383 | cache->page_order = page_order; | 342 | cache->page_order = page_order; |
384 | 343 | ||
385 | cache_tree = insert_cache (cache_tree,cache); | 344 | cache_tree = __memory_insert_cache (cache_tree,cache); |
386 | 345 | ||
387 | return cache; | 346 | return cache; |
388 | } | 347 | } |
@@ -392,8 +351,8 @@ int memory_destroy_cache (struct memory_cache *cache) | |||
392 | /* FIX ME : this function shouldn't be called if there are still used blocks */ | 351 | /* FIX ME : this function shouldn't be called if there are still used blocks */ |
393 | if (cache && !cache->free && !cache->used) | 352 | if (cache && !cache->free && !cache->used) |
394 | { | 353 | { |
395 | cache_tree = remove_cache (cache_tree,cache); | 354 | cache_tree = __memory_remove_cache (cache_tree,cache); |
396 | if (shrink_cache (cache,1 /* release all free slabs */,0 /* don't move in cache_tree */)) | 355 | if (__memory_shrink_cache (cache,1 /* release all free slabs */,0 /* don't move in cache_tree */)) |
397 | return memory_cache_release (&cache_cache,cache); | 356 | return memory_cache_release (&cache_cache,cache); |
398 | } | 357 | } |
399 | return MEMORY_RETURN_FAILURE; | 358 | return MEMORY_RETURN_FAILURE; |
@@ -413,33 +372,33 @@ void *memory_cache_allocate (struct memory_cache *cache) | |||
413 | ok: struct memory_free_block *block = slab->free; | 372 | ok: struct memory_free_block *block = slab->free; |
414 | slab->free = block->link; | 373 | slab->free = block->link; |
415 | if (--slab->left == 0) | 374 | if (--slab->left == 0) |
416 | move_slab (&cache->free,&cache->used); | 375 | __memory_move_slab (&cache->free,&cache->used); |
417 | return block; | 376 | return block; |
418 | } | 377 | } |
419 | } | 378 | } |
420 | if (cache->reap) | 379 | if (cache->reap) |
421 | { | 380 | { |
422 | slab = move_slab (&cache->reap,&cache->free); | 381 | slab = __memory_move_slab (&cache->reap,&cache->free); |
423 | cache_tree = move_cache (cache_tree,cache,-1); | 382 | cache_tree = __memory_move_cache (cache_tree,cache,-1); |
424 | goto ok; | 383 | goto ok; |
425 | } | 384 | } |
426 | } | 385 | } |
427 | while (grow_cache (cache)); | 386 | while (__memory_grow_cache (cache)); |
428 | } | 387 | } |
429 | return MEMORY_RETURN_FAILURE; | 388 | return MEMORY_RETURN_FAILURE; |
430 | } | 389 | } |
431 | 390 | ||
432 | int memory_cache_release (struct memory_cache *cache,void *address) | 391 | int memory_cache_release (struct memory_cache *cache,void *address) |
433 | { | 392 | { |
434 | struct memory_slab *slab = get_slab (cache,address); | 393 | struct memory_slab *slab = __memory_get_slab (cache,address); |
435 | ((struct memory_free_block *)address)->link = slab->free; | 394 | ((struct memory_free_block *)address)->link = slab->free; |
436 | slab->free = (struct memory_free_block *)address; | 395 | slab->free = (struct memory_free_block *)address; |
437 | if (slab->left++ == 0) | 396 | if (slab->left++ == 0) |
438 | move_slab (&cache->used,&cache->free); | 397 | __memory_move_slab (&cache->used,&cache->free); |
439 | else if (slab->left == cache->blocks_per_slab) | 398 | else if (slab->left == cache->blocks_per_slab) |
440 | { | 399 | { |
441 | move_slab (&cache->free,&cache->reap); | 400 | __memory_move_slab (&cache->free,&cache->reap); |
442 | cache_tree = move_cache (cache_tree,cache,+1); | 401 | cache_tree = __memory_move_cache (cache_tree,cache,+1); |
443 | } | 402 | } |
444 | return MEMORY_RETURN_SUCCESS; | 403 | return MEMORY_RETURN_SUCCESS; |
445 | } | 404 | } |