diff options
Diffstat (limited to 'firmware/test/alkorr')
-rw-r--r-- | firmware/test/alkorr/memory.c | 844 | ||||
-rw-r--r-- | firmware/test/alkorr/memory.h | 113 |
2 files changed, 0 insertions, 957 deletions
diff --git a/firmware/test/alkorr/memory.c b/firmware/test/alkorr/memory.c deleted file mode 100644 index b58902f4d5..0000000000 --- a/firmware/test/alkorr/memory.c +++ /dev/null | |||
@@ -1,844 +0,0 @@ | |||
1 | /* | ||
2 | ////////////////////////////////////////////////////////////////////////////////// | ||
3 | // __________ __ ___. // | ||
4 | // Open \______ \ ____ ____ | | _\_ |__ _______ ___ // | ||
5 | // Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / // | ||
6 | // Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < // | ||
7 | // Software |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ // | ||
8 | // \/ \/ \/ \/ \/ // | ||
9 | ////////////////////////////////////////////////////////////////////////////////// | ||
10 | // | ||
11 | // $Id$ | ||
12 | // | ||
13 | ///////////////////////////////////// | ||
14 | // Copyright (C) 2002 by Alan Korr // | ||
15 | ///////////////////////////////////// | ||
16 | // | ||
17 | // All files in this archive are subject to the GNU General Public License. | ||
18 | // See the file COPYING in the source tree root for full license agreement. | ||
19 | // | ||
20 | // This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, | ||
21 | // either express or implied. | ||
22 | // | ||
23 | ////////////////////////////////////////////////////////////////////////////////// | ||
24 | */ | ||
25 | |||
26 | #include "memory.h" | ||
27 | |||
28 | #define MEMORY_PAGE_USE_SPLAY_TREE | ||
29 | |||
30 | /* | ||
31 | ///////////////////////////////////////////////////////////////////// | ||
32 | // MEMORY PAGE // | ||
33 | ///////////////// | ||
34 | // | ||
35 | // | ||
36 | */ | ||
37 | |||
38 | struct __memory_free_page | ||
39 | { | ||
40 | struct __memory_free_page *less,*more; | ||
41 | char reserved[MEMORY_PAGE_MINIMAL_SIZE - 2*sizeof (struct memory_free_page *)]; | ||
42 | }; | ||
43 | |||
#define LESS -1
#define MORE +1

/* the managed DRAM area viewed as an array of minimal-size pages; the
   "dram" symbol is expected to come from the linker script */
extern struct __memory_free_page __memory_free_page[MEMORY_TOTAL_PAGES] asm("dram");

/* per-page tag: the buddy order while the page heads a free block, or the
   bitwise complement of the order (negative) while it is allocated */
char __memory_free_page_order[MEMORY_TOTAL_PAGES];
/* one free tree/list root per buddy order */
struct __memory_free_page *__memory_free_page_bin[MEMORY_TOTAL_ORDERS];
51 | |||
52 | static inline unsigned int __memory_get_size (int order) | ||
53 | /* | ||
54 | SH1 has very poor shift instructions (only <<1,>>1,<<2,>>2,<<8, | ||
55 | >>8,<<16 and >>16), so we should use a lookup table to speedup. | ||
56 | */ | ||
57 | { | ||
58 | return | ||
59 | ( | ||
60 | (unsigned short [MEMORY_TOTAL_ORDERS]) | ||
61 | { | ||
62 | 1<<MEMORY_PAGE_MINIMAL_ORDER, | ||
63 | 2<<MEMORY_PAGE_MINIMAL_ORDER, | ||
64 | 4<<MEMORY_PAGE_MINIMAL_ORDER, | ||
65 | 8<<MEMORY_PAGE_MINIMAL_ORDER, | ||
66 | 16<<MEMORY_PAGE_MINIMAL_ORDER, | ||
67 | 32<<MEMORY_PAGE_MINIMAL_ORDER, | ||
68 | 64<<MEMORY_PAGE_MINIMAL_ORDER, | ||
69 | 128<<MEMORY_PAGE_MINIMAL_ORDER, | ||
70 | 256<<MEMORY_PAGE_MINIMAL_ORDER, | ||
71 | 512<<MEMORY_PAGE_MINIMAL_ORDER, | ||
72 | 1024<<MEMORY_PAGE_MINIMAL_ORDER, | ||
73 | 2048<<MEMORY_PAGE_MINIMAL_ORDER, | ||
74 | 4096<<MEMORY_PAGE_MINIMAL_ORDER | ||
75 | } | ||
76 | )[order]; | ||
77 | } | ||
78 | |||
/* Return the buddy of _node_: the equally sized block whose address
   differs only in the bit that selects between the two halves of their
   common parent block (address XOR size). */
static inline struct __memory_free_page *__memory_get_neighbour (struct __memory_free_page *node,unsigned int size)
{
    unsigned int address = (unsigned int)node;
    address ^= size;
    return (struct __memory_free_page *)address;
}
83 | |||
/* Read the recorded order tag of the page starting at _node_ (negative,
   i.e. ~order, while the page is allocated). */
static inline int __memory_get_order (struct __memory_free_page *node)
{
    return __memory_free_page_order[node - __memory_free_page];
}

/* Record _order_ for the page starting at _node_. */
static inline void __memory_set_order (struct __memory_free_page *node,int order)
{
    __memory_free_page_order[node - __memory_free_page] = order;
}
93 | |||
94 | #ifdef MEMORY_PAGE_USE_SPLAY_TREE | ||
95 | |||
96 | static struct __memory_free_page *__memory_splay_page (struct __memory_free_page *root,struct __memory_free_page *node) | ||
97 | { | ||
98 | struct __memory_free_page *down; | ||
99 | struct __memory_free_page *less; | ||
100 | struct __memory_free_page *more; | ||
101 | struct __memory_free_page *head[2]; | ||
102 | ((struct __memory_free_page *)head)->less = | ||
103 | ((struct __memory_free_page *)head)->more = 0; | ||
104 | less = | ||
105 | more = &head; | ||
106 | while (1) | ||
107 | { | ||
108 | if (node < root) | ||
109 | { | ||
110 | if ((down = root->less)) | ||
111 | { | ||
112 | if (node < down) | ||
113 | { | ||
114 | root->less = down->more; | ||
115 | down->more = root; | ||
116 | root = down; | ||
117 | if (!root->less) | ||
118 | break; | ||
119 | } | ||
120 | more->less = root; | ||
121 | more = root; | ||
122 | root = root->less; | ||
123 | continue; | ||
124 | } | ||
125 | break; | ||
126 | } | ||
127 | if (root < node) | ||
128 | { | ||
129 | if ((down = root->more)) | ||
130 | { | ||
131 | if (root < node) | ||
132 | { | ||
133 | root->more = down->less; | ||
134 | down->less = root; | ||
135 | root = down; | ||
136 | if (!root->more) | ||
137 | break; | ||
138 | } | ||
139 | less->more = root; | ||
140 | less = root; | ||
141 | root = root->more; | ||
142 | continue; | ||
143 | } | ||
144 | } | ||
145 | break; | ||
146 | } | ||
147 | less->more = root->less; | ||
148 | more->less = root->more; | ||
149 | root->less = ((struct __memory_free_page *)head)->more; | ||
150 | root->more = ((struct __memory_free_page *)head)->less; | ||
151 | return root; | ||
152 | } | ||
153 | |||
154 | static inline void __memory_insert_page (int order,struct __memory_free_page *node) | ||
155 | { | ||
156 | struct __memory_free_page *root = __memory_free_page_bin[order]; | ||
157 | if (!root) | ||
158 | { | ||
159 | node->less = | ||
160 | node->more = 0; | ||
161 | } | ||
162 | else if (node < (root = __memory_splay_page (root,node))) | ||
163 | { | ||
164 | node->less = root->less; | ||
165 | node->more = root; | ||
166 | root->less = 0; | ||
167 | } | ||
168 | else if (node > root) | ||
169 | { | ||
170 | node->less = root; | ||
171 | node->more = root->more; | ||
172 | node->more = 0; | ||
173 | } | ||
174 | __memory_free_page_bin[order] = node; | ||
175 | __memory_set_order (node,order); | ||
176 | return; | ||
177 | } | ||
178 | |||
static inline struct __memory_free_page *__memory_pop_page (int order,int want)
/*
   Detach and return the lowest-addressed free page of bin _order_, or 0
   when the bin is empty.  The page is tagged ~_want_ (negative) to mark
   it as allocated at order _want_.
*/
{
    struct __memory_free_page *root = __memory_free_page_bin[order];
    if (root)
    {
        /* splaying towards the array base brings the minimum address to
           the root, which then has no left child */
        root = __memory_splay_page (root,__memory_free_page);
        __memory_free_page_bin[order] = root->more;
        __memory_set_order (root,~want);
    }
    return root;
}
190 | |||
static inline void __memory_remove_page (int order,struct __memory_free_page *node)
/*
   Remove _node_ (known to be in bin _order_) by splaying it to the root
   and joining its two subtrees.  The caller re-tags the page's order.
*/
{
    struct __memory_free_page *root = __memory_free_page_bin[order];
    root = __memory_splay_page (root,node);
    if (root->less)
    {
        /* splay node's predecessor to the top of the left subtree; it
           then has no right child, so the right subtree hangs there */
        node = __memory_splay_page (root->less,node);
        node->more = root->more;
    }
    else
        node = root->more;
    __memory_free_page_bin[order] = node;
}
204 | |||
205 | #else | ||
206 | |||
/* Non-splay variant: push _node_ at the head of the doubly linked free
   list of bin _order_ and record its order. */
static inline void __memory_insert_page (int order,struct __memory_free_page *node)
{
    struct __memory_free_page *head = __memory_free_page_bin[order];
    node->less = 0;
    node->more = head;
    if (head)
        head->less = node;
    __memory_free_page_bin[order] = node;
    __memory_set_order (node,order);
}
217 | |||
218 | static inline struct __memory_free_page *pop_page (int order,int want) | ||
219 | { | ||
220 | struct __memory_free_page *node = __memory_free_page_bin[order]; | ||
221 | if (node) | ||
222 | { | ||
223 | __memory_free_page_bin[order] = node->more; | ||
224 | if (node->more) | ||
225 | node->more->less = 0; | ||
226 | __memory_set_order (node,~want); | ||
227 | } | ||
228 | return node; | ||
229 | } | ||
230 | |||
/* Non-splay variant: unlink _node_ from the doubly linked free list of
   bin _order_. */
static inline void __memory_remove_page (int order,struct __memory_free_page *node)
{
    if (node->less)
        node->less->more = node->more;
    else
        __memory_free_page_bin[order] = node->more;
    if (node->more)
        node->more->less = node->less;
}
240 | |||
241 | #endif | ||
242 | |||
static inline void __memory_push_page (int order,struct __memory_free_page *node)
/*
   Make _node_ the sole entry of bin _order_.  Only valid while the bin
   is known to be empty — as in __memory_allocate_page, which only
   splits after every smaller bin failed to pop; a non-empty bin would
   be leaked here.
*/
{
    node->less = 0;
    node->more = 0;
    __memory_free_page_bin[order] = node;
    __memory_set_order (node,order);
}
250 | |||
static struct __memory_free_page *__memory_allocate_page (unsigned int size,int order)
/*
   Buddy allocation: find the smallest free page of order >= _order_,
   split it back down to _order_ (pushing the unused halves into their
   bins) and return it.  _size_ must equal __memory_get_size(order).
   Returns MEMORY_RETURN_FAILURE (0, a null pointer) when no page is
   large enough.
*/
{
    struct __memory_free_page *node;
    int min = order;
    while ((unsigned)order <= (MEMORY_TOTAL_ORDERS - 1))
    /* order is valid ? */
    {
        if (!(node = __memory_pop_page (order,min)))
        /* no free page of this order ? */
        {
            ++order; size <<= 1;
            continue;
        }
        while (order > min)
        /* split our larger page in smaller pages */
        {
            --order; size >>= 1;
            __memory_push_page (order,(struct __memory_free_page *)((unsigned int)node + size));
        }
        return node;
    }
    return MEMORY_RETURN_FAILURE;
}
274 | |||
275 | static inline void __memory_release_page (struct __memory_free_page *node,unsigned int size,int order) | ||
276 | { | ||
277 | struct __memory_free_page *neighbour; | ||
278 | while ((order <= (MEMORY_TOTAL_ORDERS - 1)) && | ||
279 | ((neighbour = __memory_get_neighbour (node,size)), | ||
280 | (__memory_get_order (neighbour) == order))) | ||
281 | /* merge our released page with its contiguous page into a larger page */ | ||
282 | { | ||
283 | __memory_remove_page (order,neighbour); | ||
284 | ++order; size <<= 1; | ||
285 | if (neighbour < node) | ||
286 | node = neighbour; | ||
287 | } | ||
288 | __memory_insert_page (order,node); | ||
289 | } | ||
290 | |||
291 | void *memory_page_allocate (int order) | ||
292 | { | ||
293 | if ((unsigned)order < MEMORY_TOTAL_ORDERS) | ||
294 | return MEMORY_RETURN_FAILURE; | ||
295 | return __memory_allocate_page (__memory_get_size (order),order); | ||
296 | } | ||
297 | |||
298 | int memory_page_release (void *address) | ||
299 | { | ||
300 | struct __memory_free_page *node = (struct __memory_free_page *)address; | ||
301 | int order = ~__memory_get_order (node); | ||
302 | if ((unsigned)order < MEMORY_TOTAL_ORDERS) | ||
303 | return MEMORY_RETURN_FAILURE; | ||
304 | __memory_release_page (node,__memory_get_size (order),order); | ||
305 | return MEMORY_RETURN_SUCCESS; | ||
306 | } | ||
307 | |||
308 | void memory_page_release_range (unsigned int start,unsigned int end) | ||
309 | { | ||
310 | start = ((start + MEMORY_PAGE_MINIMAL_SIZE - 1) & -MEMORY_PAGE_MINIMAL_SIZE; | ||
311 | end = ((end ) & -MEMORY_PAGE_MINIMAL_SIZE; | ||
312 | /* release pages between _start_ and _end_ (each must be 512 bytes long) */ | ||
313 | for (; start < end; start += MEMORY_PAGE_MINIMAL_SIZE) | ||
314 | memory_page_release (start); | ||
315 | } | ||
316 | |||
static inline void __memory_page_setup (void)
/*
   Initialise the page allocator: every page starts out "allocated"
   (order byte ~0); memory_page_release_range() is then used to donate
   the actually free areas.  The bins live in BSS and are assumed to be
   zeroed already, hence the disabled memory_set below.
*/
{
#if 0
    memory_set (__memory_free_page_bin,0,MEMORY_TOTAL_ORDERS *sizeof (struct memory_free_page *));
#endif
    /* all pages are marked as used (no free page) */
    memory_set (__memory_free_page_order,~0,MEMORY_TOTAL_PAGES);
}
325 | |||
326 | /* | ||
327 | // | ||
328 | // | ||
329 | ///////////////////////////////////////////////////////////////////// | ||
330 | |||
331 | ///////////////////////////////////////////////////////////////////// | ||
332 | // MEMORY CACHE // | ||
333 | ////////////////// | ||
334 | // | ||
335 | // | ||
336 | */ | ||
337 | |||
338 | #if 0 | ||
339 | |||
340 | |||
/* slabs are carved from page-allocator pages of relative order 0..5
   (512 bytes up to 16 Kbytes) and must hold at least 4 blocks */
#define MEMORY_MAX_PAGE_ORDER_PER_SLAB (5)
#define MEMORY_MAX_PAGE_SIZE_PER_SLAB (MEMORY_PAGE_MINIMAL_SIZE << MEMORY_MAX_PAGE_ORDER_PER_SLAB)
#define MEMORY_MIN_BLOCKS_PER_SLAB (4)

/* a free block inside a slab: while free, its first word links it to
   the next free block of the same slab */
struct __memory_free_block
{
    struct __memory_free_block *link;
};

/* slab header; __memory_get_slab() locates it at the END of the slab's
   page run */
struct __memory_slab
{
    struct __memory_slab *less,*more;            /* slab list links */
    unsigned int free_blocks_left;               /* blocks still free in this slab */
    struct __memory_free_block *free_block_list; /* those free blocks, chained */
};

/* indices into memory_cache.slab_list, by slab fill state */
#define WITH_NO_FREE_BLOCK 0
#define WITH_SOME_FREE_BLOCKS 1
#define WITH_ONLY_FREE_BLOCKS 2

/* a cache of fixed-size blocks (slab allocator) */
struct memory_cache
{
    struct memory_cache *less;       /* splay tree keyed on free_slabs_left */
    struct memory_cache *more;
    struct memory_cache *prev;       /* chain of caches sharing the same key */
    struct memory_cache *next;
    struct __memory_slab *slab_list[3]; /* full / partial / empty slabs */
    unsigned int page_size;          /* bytes per slab page run */
    unsigned int free_slabs_left;
    unsigned short size;             /* block size after alignment rounding */
    unsigned short original_size;    /* block size as requested */
    int page_order;                  /* page-allocator order of one slab */
    unsigned int blocks_per_slab;
    struct __memory_slab cache_slab; /* only used for __memory_cache_cache ! */
};
376 | |||
static inline struct __memory_slab *__memory_push_slab (struct __memory_slab *head,struct __memory_slab *node)
/*
   Link _node_ into the list currently headed by _head_ and return the
   new head (_node_).
   NOTE(review): when _head_ exists, the links set here ("node->more =
   head->more; head->more = node") splice node AFTER head while also
   returning node as the new head — this looks inconsistent with the
   head-removal in __memory_move_slab; verify before enabling this
   #if 0 code.
*/
{
    node->less = head;
    if (head)
    {
        node->more = head->more;
        head->more = node;
    }
    else
        node->more = 0;
    return node;
}
389 | |||
static inline struct __memory_slab *__memory_pop_slab (struct __memory_slab *head,struct __memory_slab *node)
/*
   Unlink _node_ and return the following slab as the new head.
   NOTE(review): every caller in this file passes node == head; for any
   other node this does not actually unlink it from the middle of the
   list — confirm before reuse.
*/
{
    if (head)
        head->more = node->more;
    return node->more;
}
396 | |||
397 | static inline struct __memory_slab *__memory_move_slab (struct memory_cache *cache,int from,int to) | ||
398 | { | ||
399 | struct __memory_slab *head = cache->slab_list[from]; | ||
400 | cache->slab_list[from] = head->more; | ||
401 | if (head->more) | ||
402 | head->more->less = 0; | ||
403 | head->more = cache->slab_list[to]; | ||
404 | if (head->more) | ||
405 | head->more->prev = head; | ||
406 | cache->slab_list[to] = head; | ||
407 | return head; | ||
408 | } | ||
409 | |||
/* root of the splay tree of all caches, keyed on free_slabs_left */
static struct memory_cache *__memory_cache_tree;
/* the cache that memory_cache structures themselves are allocated from */
static struct memory_cache *__memory_cache_cache;
412 | |||
413 | static inline int __memory_get_order (unsigned size) | ||
414 | { | ||
415 | int order = 0; | ||
416 | size = (size + MEMORY_PAGE_MINIMAL_SIZE - 1) & -MEMORY_PAGE_MINIMAL_SIZE; | ||
417 | while (size > MEMORY_PAGE_MINIMAL_SIZE) | ||
418 | { | ||
419 | ++order; size <<= 1; | ||
420 | } | ||
421 | return order; | ||
422 | } | ||
423 | |||
static inline struct __memory_slab *__memory_get_slab (struct memory_cache *cache,void *address)
/*
   Map a block address to its slab header: slab page runs are
   page_size-aligned (page_size is a power of two) and the header sits
   in the last bytes of the run.
*/
{
    return (struct __memory_slab *)((((unsigned)address + cache->page_size) & -cache->page_size) - sizeof (struct __memory_slab));
}
428 | |||
429 | static struct memory_cache *__memory_splay_cache (struct memory_cache *root,unsigned int left) | ||
430 | { | ||
431 | struct memory_cache *down; | ||
432 | struct memory_cache *less; | ||
433 | struct memory_cache *more; | ||
434 | struct memory_cache *head[2]; | ||
435 | ((struct memory_cache *)head->less = | ||
436 | ((struct memory_cache *)head->more = 0; | ||
437 | less = | ||
438 | more = &head; | ||
439 | while (1) | ||
440 | { | ||
441 | if (left < root->free_slabs_left) | ||
442 | { | ||
443 | if ((down = root->less)) | ||
444 | { | ||
445 | if (left < down->free_slabs_left) | ||
446 | { | ||
447 | root->less = down->more; | ||
448 | down->more = root; | ||
449 | root = down; | ||
450 | if (!root->less) | ||
451 | break; | ||
452 | } | ||
453 | more->less = root; | ||
454 | more = root; | ||
455 | root = root->less; | ||
456 | continue; | ||
457 | } | ||
458 | break; | ||
459 | } | ||
460 | if (root->free_slabs_left < left) | ||
461 | { | ||
462 | if ((down = root->more)) | ||
463 | { | ||
464 | if (root->free_slabs_left < left) | ||
465 | { | ||
466 | root->more = down->less; | ||
467 | down->less = root; | ||
468 | root = down; | ||
469 | if (!root->more) | ||
470 | break; | ||
471 | } | ||
472 | less->more = root; | ||
473 | less = root; | ||
474 | root = root->more; | ||
475 | continue; | ||
476 | } | ||
477 | } | ||
478 | break; | ||
479 | } | ||
480 | less->more = root->less; | ||
481 | more->less = root->more; | ||
482 | root->less = ((struct memory_cache *)head->more; | ||
483 | root->more = ((struct memory_cache *)head->less; | ||
484 | return root; | ||
485 | } | ||
486 | |||
487 | static inline struct memory_cache *__memory_insert_cache (struct memory_cache *root,struct memory_cache *node) | ||
488 | { | ||
489 | node->less = | ||
490 | node->more = | ||
491 | node->prev = 0; | ||
492 | node->next = 0; | ||
493 | if (root) | ||
494 | { | ||
495 | if (node->free_slabs_left == ((root = __memory_splay_cache (root,node))->free_slabs_left)) | ||
496 | { | ||
497 | node->less = root.less; | ||
498 | node->more = root.more; | ||
499 | node->next = root; | ||
500 | root->prev = node; | ||
501 | } | ||
502 | else if (node < root) | ||
503 | { | ||
504 | node->less = root->less; | ||
505 | node->more = root; | ||
506 | root->less = 0; | ||
507 | } | ||
508 | else | ||
509 | { | ||
510 | node->less = root; | ||
511 | node->more = root->more; | ||
512 | node->more = 0; | ||
513 | } | ||
514 | } | ||
515 | return node; | ||
516 | } | ||
517 | |||
518 | static inline struct memory_cache *__memory_remove_cache (struct memory_cache *root,struct memory_cache *node) | ||
519 | { | ||
520 | if (root) | ||
521 | { | ||
522 | if (node->prev) | ||
523 | { | ||
524 | node->prev->next = node->next; | ||
525 | if (node->next) | ||
526 | node->next->prev = node->prev; | ||
527 | return node->prev; | ||
528 | } | ||
529 | root = __memory_splay_cache (root,node); | ||
530 | if (root->less) | ||
531 | { | ||
532 | node = __memory_splay_page (root->less,node); | ||
533 | node->more = root->more; | ||
534 | } | ||
535 | else | ||
536 | node = root->more; | ||
537 | } | ||
538 | return node; | ||
539 | } | ||
540 | |||
static inline struct memory_cache *__memory_move_cache (struct memory_cache *root,struct memory_cache *node,int delta)
/*
   Re-key _node_ by _delta_ free slabs: detach it from the tree, adjust
   free_slabs_left, and re-insert it.  Returns the new root.
   NOTE(review): when the removal yields a null root the adjustment is
   skipped entirely — verify that is the intended behaviour.
*/
{
    if ((root = __memory_remove_cache (root,node)))
    {
        node->free_slabs_left += delta;
        root = __memory_insert_cache (root,node);
    }
    return root;
}
550 | |||
551 | static struct __memory_slab *__memory_grow_cache (struct memory_cache *cache) | ||
552 | { | ||
553 | struct __memory_slab *slab; | ||
554 | unsigned int page; | ||
555 | if (cache) | ||
556 | { | ||
557 | page = (unsigned int)memory_allocate_page (cache->page_order); | ||
558 | if (page) | ||
559 | { | ||
560 | struct __memory_free_block *block,**link; | ||
561 | slab = (struct __memory_slab *)(page + cache->page_size - sizeof (struct __memory_slab)); | ||
562 | slab->free_blocks_left = 0; | ||
563 | link = &slab->free_block_list; | ||
564 | for ((unsigned int)block = page; | ||
565 | (unsigned int)block + cache->size < (unsigned int)slab; | ||
566 | (unsigned int)block += cache->size) | ||
567 | { | ||
568 | *link = block; | ||
569 | link = &block->link; | ||
570 | ++slab->free_blocks_left; | ||
571 | } | ||
572 | *link = 0; | ||
573 | cache->blocks_per_slab = slab->free_blocks_left; | ||
574 | cache->slab_list[WITH_ONLY_FREE_BLOCKS] = | ||
575 | __memory_push_slab (cache->slab_list[WITH_ONLY_FREE_BLOCKS],slab); | ||
576 | __memory_cache_tree = __memory_move_cache (__memory_cache_tree,cache,+1); | ||
577 | return slab; | ||
578 | } | ||
579 | } | ||
580 | return MEMORY_RETURN_FAILURE; | ||
581 | } | ||
582 | |||
583 | static int __memory_shrink_cache (struct memory_cache *cache,int all,int move) | ||
584 | { | ||
585 | struct __memory_slab *slab; | ||
586 | unsigned int slabs = 0; | ||
587 | if (cache) | ||
588 | { | ||
589 | while ((slab = cache->slab_list[WITH_ONLY_FREE_BLOCKS])) | ||
590 | { | ||
591 | ++slabs; | ||
592 | cache->slab_list[WITH_ONLY_FREE_BLOCKS] = | ||
593 | __memory_pop_slab (cache->slab_list[WITH_ONLY_FREE_BLOCKS],slab); | ||
594 | memory_release_page ((void *)((unsigned int)slab & -cache->page_size)); | ||
595 | if (all) | ||
596 | continue; | ||
597 | if (move) | ||
598 | __memory_cache_tree = __memory_move_cache (__memory_cache_tree,cache,-slabs); | ||
599 | return MEMORY_RETURN_SUCCESS; | ||
600 | } | ||
601 | } | ||
602 | return MEMORY_RETURN_FAILURE; | ||
603 | } | ||
604 | |||
605 | struct memory_cache *memory_cache_create (unsigned int size,int align) | ||
606 | { | ||
607 | struct memory_cache *cache; | ||
608 | unsigned int waste = 0,blocks_per_page; | ||
609 | int page_order; | ||
610 | unsigned int page_size; | ||
611 | unsigned int original_size = size; | ||
612 | |||
613 | size = (align > 4) ? ((size + align - 1) & -align) : ((size + sizeof (int) - 1) & -sizeof (int)); | ||
614 | |||
615 | if ((size >= MEMORY_MAX_PAGE_SIZE_PER_SLAB) || | ||
616 | (!(cache = memory_cache_allocate (__memory_cache_cache))) | ||
617 | return MEMORY_RETURN_FAILURE; | ||
618 | |||
619 | cache->free_slabs_left = 0; | ||
620 | |||
621 | cache->slab_list[ WITH_NO_FREE_BLOCK ] = | ||
622 | cache->slab_list[WITH_SOME_FREE_BLOCKS] = | ||
623 | cache->slab_list[WITH_ONLY_FREE_BLOCKS] = 0; | ||
624 | |||
625 | cache->original_size = original_size; | ||
626 | cache->size = size; | ||
627 | |||
628 | page_size = 0; | ||
629 | page_order = MEMORY_PAGE_MINIMAL_SIZE; | ||
630 | |||
631 | for (;; ++order,(page_size <<= 1)) | ||
632 | { | ||
633 | if (page_order >= MEMORY_MAX_PAGE_ORDER_PER_SLAB) | ||
634 | break; | ||
635 | |||
636 | waste = page_size; | ||
637 | waste -= sizeof (struct __memory_slab); | ||
638 | |||
639 | blocks_per_slab = waste / size; | ||
640 | waste -= block_per_slab * size; | ||
641 | |||
642 | if (blocks_per_slab < MEMORY_MIN_BLOCKS_PER_SLAB) | ||
643 | { | ||
644 | ++page_order; page_size <<= 1; | ||
645 | continue; | ||
646 | } | ||
647 | |||
648 | /* below 5% of lost space is correct */ | ||
649 | if ((waste << 16) / page_size) < 3276) | ||
650 | break; | ||
651 | ++page_order; page_size <<= 1; | ||
652 | } | ||
653 | |||
654 | cache->page_size = page_size; | ||
655 | cache->page_order = page_order; | ||
656 | |||
657 | cache_tree = __memory_insert_cache (cache_tree,cache); | ||
658 | |||
659 | return cache; | ||
660 | } | ||
661 | |||
int memory_cache_destroy (struct memory_cache *cache)
/*
   Destroy an empty cache: detach it from the cache tree, return all of
   its (fully free) slabs to the page allocator, and give the cache
   structure back to __memory_cache_cache.
*/
{
    /* this function shouldn't be called if there are still used blocks */
    if (cache && !cache->slab_list[WITH_NO_FREE_BLOCK] && !cache->slab_list[WITH_SOME_FREE_BLOCKS])
    {
        __memory_cache_tree = __memory_remove_cache (__memory_cache_tree,cache);
        if (__memory_shrink_cache (cache,1 /* release all free slabs */,0 /* don't move in cache_tree */))
            return memory_cache_release (__memory_cache_cache,cache);
    }
    return MEMORY_RETURN_FAILURE;
}
673 | |||
674 | |||
675 | void *memory_cache_allocate (struct memory_cache *cache) | ||
676 | { | ||
677 | if (cache) | ||
678 | { | ||
679 | do | ||
680 | { | ||
681 | struct __memory_slab *slab; | ||
682 | if ((slab = cache->slab_list[WITH_SOME_FREE_BLOCKS])) | ||
683 | { | ||
684 | if (slab->free_blocks_left > 0) | ||
685 | { | ||
686 | ok: struct __memory_free_block *block = slab->free_block_list; | ||
687 | slab->free_block_list = block->link; | ||
688 | if (--slab->free_blocks_left == 0) | ||
689 | __memory_move_slab (WITH_SOME_FREE_BLOCKS,WITH_NO_FREE_BLOCK); | ||
690 | return block; | ||
691 | } | ||
692 | } | ||
693 | if (cache->slab_list[WITH_FULL_FREE_BLOCKS]) | ||
694 | { | ||
695 | slab = __memory_move_slab (WITH_ONLY_FREE_BLOCKS,WITH_SOME_FREE_BLOCKS); | ||
696 | __memory_cache_tree = __memory_move_cache (__memory_cache_tree,cache,-1); | ||
697 | goto ok; | ||
698 | } | ||
699 | } | ||
700 | while (__memory_grow_cache (cache)); | ||
701 | } | ||
702 | return MEMORY_RETURN_FAILURE; | ||
703 | } | ||
704 | |||
705 | int memory_cache_release (struct memory_cache *cache,void *address) | ||
706 | { | ||
707 | struct __memory_slab *slab = __memory_get_slab (cache,address); | ||
708 | ((struct __memory_free_block *)address)->link = slab->free_block_list; | ||
709 | slab->free_block_list = (struct __memory_free_block *)address; | ||
710 | if (slab->free_blocks_left++ == 0) | ||
711 | __memory_move_slab (WITH_NO_FREE_BLOCK,WITH_SOME_FREE_BLOCKS); | ||
712 | else if (slab->free_blocks_left == cache->blocks_per_slab) | ||
713 | { | ||
714 | __memory_move_slab (WITH_SOME_FREE_BLOCKS,WITH_ONLY_FREE_BLOCKS); | ||
715 | __memory_cache_tree = __memory_move_cache (__memory_cache_tree,cache,+1); | ||
716 | } | ||
717 | return MEMORY_RETURN_SUCCESS; | ||
718 | } | ||
719 | |||
720 | static inline void __memory_cache_setup (void) | ||
721 | { | ||
722 | struct memory_cache *cache; | ||
723 | struct __memory_slab *slab; | ||
724 | struct __memory_free_block *block,**link; | ||
725 | |||
726 | cache = (struct memory_cache *)__memory_free_page; | ||
727 | cache->original_size = sizeof (*cache); | ||
728 | cache->size = sizeof (*cache); | ||
729 | cache->page_size = MEMORY_PAGE_MINIMAL_SIZE; | ||
730 | cache->page_order = MEMORY_PAGE_MINIMAL_ORDER; | ||
731 | cache->free_slabs_left = 0; | ||
732 | |||
733 | slab = __memory_get_slab (cache,(void *)cache); | ||
734 | |||
735 | cache->slab_list[ WITH_NO_FREE_BLOCK ] = | ||
736 | cache->slab_list[WITH_SOME_FREE_BLOCKS] = | ||
737 | cache->slab_list[WITH_ONLY_FREE_BLOCKS] = 0; | ||
738 | |||
739 | link = &slab->free_block_list; | ||
740 | for ((unsigned int)block = (unsigned int)cache; | ||
741 | (unsigned int)block + sizeof (*cache) < (unsigned int)slab; | ||
742 | (unsigned int)block += sizeof (*cache)) | ||
743 | { | ||
744 | *link = block; | ||
745 | link = &block->link; | ||
746 | ++slab->free_blocks_left; | ||
747 | } | ||
748 | *link = 0; | ||
749 | cache->blocks_per_slab = slab->free_blocks_left + 1; | ||
750 | cache->slab_list[WITH_SOME_FREE_BLOCKS] = | ||
751 | __memory_push_slab (cache->slab_list[WITH_SOME_FREE_BLOCKS],slab); | ||
752 | |||
753 | __memory_cache_tree = __memory_insert_cache (__memory_cache_tree,cache); | ||
754 | } | ||
755 | |||
756 | /* | ||
757 | // | ||
758 | // | ||
759 | ///////////////////////////////////////////////////////////////////// | ||
760 | |||
761 | ///////////////////////////////////////////////////////////////////// | ||
762 | // MEMORY BLOCK // | ||
763 | ////////////////// | ||
764 | // | ||
765 | // | ||
766 | */ | ||
767 | |||
768 | static struct memory_cache *__memory_free_block_cache[MEMORY_PAGE_MINIMAL_ORDER - 2]; | ||
769 | |||
770 | static inline void *__memory_allocate_block (int order) | ||
771 | { | ||
772 | struct memory_cache *cache = __memory_free_block_cache[order - 2]; | ||
773 | do | ||
774 | { | ||
775 | if (cache) | ||
776 | return memory_cache_allocate (cache); | ||
777 | } | ||
778 | while ((__memory_free_block_cache[order] = cache = memory_create_cache (size,0,0))); | ||
779 | return MEMORY_RETURN_FAILURE; | ||
780 | } | ||
781 | |||
static inline int __memory_release_block (int order,void *address)
/*
   Return a 2^order-byte block to its cache.  Fails when the cache was
   never created (nothing of this order was ever allocated).
*/
{
    struct memory_cache *cache = __memory_free_block_cache[order - 2];
    if (cache)
        return memory_cache_release (cache,address);
    return MEMORY_RETURN_FAILURE;
}
789 | |||
790 | void *memory_block_allocate (int order) | ||
791 | { | ||
792 | if (order < 2) /* minimal size is 4 bytes */ | ||
793 | order = 2; | ||
794 | if (order < MEMORY_PAGE_MINIMAL_ORDER) | ||
795 | return __memory_allocate_block (order); | ||
796 | return MEMORY_RETURN_FAILURE; | ||
797 | } | ||
798 | |||
799 | int memory_block_release (int order,void *address) | ||
800 | { | ||
801 | if (order < 2) /* minimal size is 4 bytes */ | ||
802 | order = 2; | ||
803 | if (order < MEMORY_PAGE_MINIMAL_ORDER) | ||
804 | return __memory_release_block (order,address); | ||
805 | return MEMORY_RETURN_FAILURE; | ||
806 | } | ||
807 | |||
808 | /* | ||
809 | // | ||
810 | // | ||
811 | ///////////////////////////////////////////////////////////////////// | ||
812 | |||
813 | ///////////////////////////////////////////////////////////////////// | ||
814 | // MEMORY // | ||
815 | //////////// | ||
816 | // | ||
817 | // | ||
818 | */ | ||
819 | |||
820 | #endif | ||
821 | |||
822 | #if 0 | ||
/* NOT VERY OPTIMIZED AT ALL BUT WE WILL DO IT WHEN PRIORITY COMES */
/*
   Byte-wise memcpy replacement (regions must not overlap).  Fixed:
   "*((char *)target)++" applied ++ to a cast expression, which is not
   an lvalue in C — properly typed local pointers are used instead.
*/
void memory_copy (void *target,void const *source,unsigned int count)
{
    char *to = target;
    char const *from = source;
    while (count--)
        *to++ = *from++;
}
829 | |||
/* NOT VERY OPTIMIZED AT ALL BUT WE WILL DO IT WHEN PRIORITY COMES */
/*
   Byte-wise memset replacement: fill _count_ bytes at _target_ with the
   low byte of _byte_.  Fixed: the cast-as-lvalue increment, invalid C,
   is replaced by a typed local pointer.
*/
void memory_set (void *target,int byte,unsigned int count)
{
    char *to = target;
    while (count--)
        *to++ = (char)byte;
}
836 | #endif | ||
837 | |||
void memory_setup (void)
/*
   One-time allocator initialisation: set up the page layer; the slab
   cache layer is still disabled (#if 0) above.
*/
{
    __memory_page_setup ();
#if 0
    __memory_cache_setup ();
#endif
}
diff --git a/firmware/test/alkorr/memory.h b/firmware/test/alkorr/memory.h deleted file mode 100644 index 37ec4c8c14..0000000000 --- a/firmware/test/alkorr/memory.h +++ /dev/null | |||
@@ -1,113 +0,0 @@ | |||
1 | /* | ||
2 | ////////////////////////////////////////////////////////////////////////////////// | ||
3 | // __________ __ ___. // | ||
4 | // Open \______ \ ____ ____ | | _\_ |__ _______ ___ // | ||
5 | // Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / // | ||
6 | // Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < // | ||
7 | // Software |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ // | ||
8 | // \/ \/ \/ \/ \/ // | ||
9 | ////////////////////////////////////////////////////////////////////////////////// | ||
10 | // | ||
11 | // $Id$ | ||
12 | // | ||
13 | ///////////////////////////////////// | ||
14 | // Copyright (C) 2002 by Alan Korr // | ||
15 | ///////////////////////////////////// | ||
16 | // | ||
17 | // All files in this archive are subject to the GNU General Public License. | ||
18 | // See the file COPYING in the source tree root for full license agreement. | ||
19 | // | ||
20 | // This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, | ||
21 | // either express or implied. | ||
22 | // | ||
23 | ////////////////////////////////////////////////////////////////////////////////// | ||
24 | */ | ||
25 | |||
26 | #ifndef __MEMORY_H__ | ||
27 | #define __MEMORY_H__ | ||
28 | |||
/* common return codes; failure is zero so pointer and int results can
   be tested the same way */
enum
{
    MEMORY_RETURN_SUCCESS = 1,
    MEMORY_RETURN_FAILURE = 0
};
34 | |||
35 | /* | ||
36 | ///////////////////////////////////////////////////////////////////// | ||
37 | // MEMORY PAGE // | ||
38 | ///////////////// | ||
39 | // | ||
40 | // | ||
41 | */ | ||
42 | |||
#define MEMORY_PAGE_MINIMAL_ORDER ( 9) /* 512 bytes by default */
#define MEMORY_PAGE_MAXIMAL_ORDER (21) /* 2 Mbytes by default */
#define MEMORY_PAGE_MINIMAL_SIZE (1 << MEMORY_PAGE_MINIMAL_ORDER)
#define MEMORY_PAGE_MAXIMAL_SIZE (1 << MEMORY_PAGE_MAXIMAL_ORDER)

/* number of minimal pages in the managed area, and of valid orders */
#define MEMORY_TOTAL_PAGES (MEMORY_PAGE_MAXIMAL_SIZE / MEMORY_PAGE_MINIMAL_SIZE)
#define MEMORY_TOTAL_ORDERS (1 + MEMORY_PAGE_MAXIMAL_ORDER - MEMORY_PAGE_MINIMAL_ORDER)

/* page orders here are relative: 0 is a minimal (512-byte) page,
   MEMORY_TOTAL_ORDERS-1 the whole area */
extern void *memory_page_allocate (int order);
extern int memory_page_release (void *address);
extern void memory_page_release_range (unsigned int start,unsigned int end);
54 | |||
55 | /* | ||
56 | // | ||
57 | ///////////////////////////////////////////////////////////////////// | ||
58 | |||
59 | ///////////////////////////////////////////////////////////////////// | ||
60 | // MEMORY CACHE // | ||
61 | ////////////////// | ||
62 | // | ||
63 | // | ||
64 | */ | ||
65 | |||
/* opaque cache of fixed-size blocks (slab-style allocator) */
struct memory_cache;

/* create a cache of _size_-byte blocks aligned to _align_; destroy only
   once every block has been released */
extern struct memory_cache *memory_cache_create (unsigned int size,int align);
extern int memory_cache_destroy (struct memory_cache *cache);
extern void *memory_cache_allocate (struct memory_cache *cache);
extern int memory_cache_release (struct memory_cache *cache,void *address);
72 | |||
73 | /* | ||
74 | // | ||
75 | // | ||
76 | ///////////////////////////////////////////////////////////////////// | ||
77 | |||
78 | ///////////////////////////////////////////////////////////////////// | ||
79 | // MEMORY BLOCK // | ||
80 | ////////////////// | ||
81 | // | ||
82 | // | ||
83 | */ | ||
84 | |||
/* small-block interface: _order_ is log2 of the block size (clamped to
   at least 2, i.e. 4 bytes; must stay below MEMORY_PAGE_MINIMAL_ORDER) */
extern void *memory_block_allocate (int order);
extern int memory_block_release (int order,void *address);
87 | |||
88 | /* | ||
89 | // | ||
90 | // | ||
91 | ///////////////////////////////////////////////////////////////////// | ||
92 | |||
93 | ///////////////////////////////////////////////////////////////////// | ||
94 | // MEMORY // | ||
95 | //////////// | ||
96 | // | ||
97 | // | ||
98 | */ | ||
99 | |||
#define MEMORY_TOTAL_BYTES (MEMORY_PAGE_MAXIMAL_SIZE)

/* minimal memcpy/memset replacements (this build has no libc) */
extern void memory_copy (void *target,void const *source,unsigned int count);
extern void memory_set (void *target,int byte,unsigned int count);

/* must be called once before any other memory_* function */
extern void memory_setup (void);
106 | |||
107 | /* | ||
108 | // | ||
109 | // | ||
110 | ///////////////////////////////////////////////////////////////////// | ||
111 | */ | ||
112 | |||
113 | #endif | ||