summaryrefslogtreecommitdiff
path: root/firmware
diff options
context:
space:
mode:
Diffstat (limited to 'firmware')
-rw-r--r--firmware/test/memory/memory-block.h77
-rw-r--r--firmware/test/memory/memory-page.h425
-rw-r--r--firmware/test/memory/memory-slab.h450
-rw-r--r--firmware/test/memory/memory.c50
4 files changed, 1002 insertions, 0 deletions
diff --git a/firmware/test/memory/memory-block.h b/firmware/test/memory/memory-block.h
new file mode 100644
index 0000000000..2c29d255e2
--- /dev/null
+++ b/firmware/test/memory/memory-block.h
@@ -0,0 +1,77 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id:
9 *
10 * Copyright (C) 2002 by Alan Korr
11 *
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
14 *
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
17 *
18 ****************************************************************************/
19#ifndef __LIBRARY_MEMORY_C__
20# error "This header file must be included ONLY from memory.c."
21#endif
22#ifndef __LIBRARY_MEMORY_BLOCK_H__
23# define __LIBRARY_MEMORY_BLOCK_H__
24
25static struct memory_cache *free_block_cache[MEMORY_PAGE_MINIMAL_ORDER - 2];
26
27///////////////////////////////////////////////////////////////////////////////
28// MEMORY BLOCK :
29/////////////////
30//
31// - memory_allocate_block : allocate a power-of-2-sized block (or a page)
32// - memory_release_block : release a power-of-2-sized block (or a page)
33//
34
35static inline void *allocate_small_block (int order)
36 {
37 struct memory_cache *cache = free_block_cache[order - 2];
38 do
39 {
40 if (cache)
41 return memory_cache_allocate (cache);
42 }
43 while ((free_block_cache[order] = cache = memory_create_cache (size,0,0)));
44 return MEMORY_RETURN_FAILURE;
45 }
46
47void *memory_allocate_block (int order)
48 {
49 if (order < 2)
50 order = 2;
51 if (order < MEMORY_PAGE_MINIMAL_ORDER)
52 return allocate_small_block (order);
53 if (order < MEMORY_PAGE_MAXIMAL_ORDER)
54 return memory_allocate_page (order);
55 return MEMORY_RETURN_FAILURE;
56 }
57
58static inline int release_block (int order,void *address)
59 {
60 struct memory_cache *cache = free_block_cache[order - 2];
61 if (cache)
62 return memory_cache_release (cache,address);
63 return MEMORY_RETURN_FAILURE;
64 }
65
66int memory_release_block (int order,void *address)
67 {
68 if (order < 2)
69 order = 2;
70 if (order < MEMORY_PAGE_MINIMAL_ORDER)
71 return release_block (order);
72 if (order < MEMORY_PAGE_MAXIMAL_ORDER)
73 return memory_release_page (address);
74 return MEMORY_RETURN_FAILURE;
75 }
76
77#endif
diff --git a/firmware/test/memory/memory-page.h b/firmware/test/memory/memory-page.h
new file mode 100644
index 0000000000..438fd63669
--- /dev/null
+++ b/firmware/test/memory/memory-page.h
@@ -0,0 +1,425 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id:
9 *
10 * Copyright (C) 2002 by Alan Korr
11 *
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
14 *
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
17 *
18 ****************************************************************************/
19#ifndef __LIBRARY_MEMORY_C__
20# error "This header file must be included ONLY from memory.c."
21#endif
22#ifndef __LIBRARY_MEMORY_PAGE_H__
23# define __LIBRARY_MEMORY_PAGE_H__
24
// A free page in the buddy pool.  The first two pointers live inside the
// (otherwise unused) page itself; 'reserved' pads the struct to exactly
// MEMORY_PAGE_MINIMAL_SIZE so the whole pool is an array of minimal pages.
struct memory_free_page
    {
        struct memory_free_page
            *less,*more;
        char
            reserved[MEMORY_PAGE_MINIMAL_SIZE - 2*sizeof (struct memory_free_page *)];
    };

// Direction tags for tree walks.
#define LESS -1
#define MORE +1
35
#ifdef TEST

// Host-test build: the page pool is an ordinary array.
struct memory_free_page free_page[MEMORY_TOTAL_PAGES];

// offset of order n = 2 << n (matches the lookup table in the target build)
static inline unsigned int get_offset (int order)
    {
        return (2 << order);
    }

// IA32 has no problem with shift operation
static inline unsigned int get_size (int order)
    {
        return (MEMORY_PAGE_MINIMAL_SIZE << order);
    }

// Arghhhh ! I cannot align 'free_page' on 512-byte boundary (max is 16-byte for Cygwin)
// so the buddy address is computed relative to the pool base instead of
// by XORing the page address directly.
static inline struct memory_free_page *get_neighbour (struct memory_free_page *node,unsigned int size)
    {
        return ((struct memory_free_page *)((unsigned)free_page + (((unsigned)node - (unsigned)free_page) ^ size)));
    }

#else

// Target build: the pool is the DRAM region, named by the linker.
extern struct memory_free_page free_page[MEMORY_TOTAL_PAGES] asm("dram");

static inline unsigned int get_offset (int order)
    {
        static unsigned short offset [MEMORY_TOTAL_ORDERS] =
            { 2,4,8,16,32,64,128,256,512,1024,2048,4096,8192 };
        return offset[order];
    }

// SH1 has very poor shift instructions (only <<1,>>1,<<2,>>2,<<8,>>8,<<16 and >>16).
// so we should use a lookup table to speedup.
static inline unsigned int get_size (int order)
    {
        return (get_offset (order))<<8;
    }

// On target 'dram' is assumed suitably aligned, so the buddy of a page
// is found by XORing its address with the page size directly.
static inline struct memory_free_page *get_neighbour (struct memory_free_page *node,unsigned int size)
    {
        return ((struct memory_free_page *)((unsigned)node ^ size));
    }

#endif
81
// Per-page order bookkeeping: the stored value is order (>= 0) while the
// page is free and ~order (< 0) while it is allocated.
// free_page_bin[k] heads the free structure (splay tree or list) of order k.
static char free_page_order[MEMORY_TOTAL_PAGES];
static struct memory_free_page *free_page_bin[MEMORY_TOTAL_ORDERS];

// Read the recorded order of 'node' (index = page number in the pool).
static inline int get_order (struct memory_free_page *node)
    {
        return free_page_order[node - free_page];
    }
// Record the order of 'node'.
static inline void set_order (struct memory_free_page *node,int order)
    {
        free_page_order[node - free_page] = order;
    }
93
94#if MEMORY_PAGE_USE_SPLAY_TREE
95
96# include <stdio.h>
97
98static struct memory_free_page *splay_page (struct memory_free_page *root,struct memory_free_page *node)
99 {
100 struct memory_free_page *down;
101 struct memory_free_page *less;
102 struct memory_free_page *more;
103 struct memory_free_page head;
104 head.less =
105 head.more = 0;
106 less =
107 more = &head;
108 while (1)
109 {
110 if (node < root)
111 {
112 if ((down = root->less))
113 {
114 if (node < down)
115 {
116 root->less = down->more;
117 down->more = root;
118 root = down;
119 if (!root->less)
120 break;
121 }
122 more->less = root;
123 more = root;
124 root = root->less;
125 continue;
126 }
127 break;
128 }
129 if (root < node)
130 {
131 if ((down = root->more))
132 {
133 if (root < node)
134 {
135 root->more = down->less;
136 down->less = root;
137 root = down;
138 if (!root->more)
139 break;
140 }
141 less->more = root;
142 less = root;
143 root = root->more;
144 continue;
145 }
146 }
147 break;
148 }
149 less->more = root->less;
150 more->less = root->more;
151 root->less = head.more;
152 root->more = head.less;
153 return root;
154 }
155
156static inline void insert_page (int order,struct memory_free_page *node)
157 {
158 struct memory_free_page *root = free_page_bin[order];
159 if (!root)
160 {
161 node->less =
162 node->more = 0;
163 }
164 else if (node < (root = splay_page (root,node)))
165 {
166 node->less = root->less;
167 node->more = root;
168 root->less = 0;
169 }
170 else if (node > root)
171 {
172 node->less = root;
173 node->more = root->more;
174 node->more = 0;
175 }
176 free_page_bin[order] = node;
177 set_order (node,order);
178 return;
179 }
180
// Detach the lowest-addressed free page of the given order (if any),
// mark it as allocated at order 'want' and return it.
static inline struct memory_free_page *pop_page (int order,int want)
    {
        struct memory_free_page *root = free_page_bin[order];
        if (root)
            {
                // splaying towards 'free_page' (the lowest possible address)
                // brings the minimum to the root, so its left subtree is empty
                root = splay_page (root,free_page);
                free_page_bin[order] = root->more;
                // ~want < 0 marks the page as allocated (see memory_release_page)
                set_order (root,~want);
            }
        return root;
    }
192
// Unlink a specific free page from the splay tree of its order, joining
// its two subtrees back together.
static inline void remove_page (int order,struct memory_free_page *node)
    {
        struct memory_free_page *root = free_page_bin[order];
        // splay 'node' to the root so both subtrees hang off it
        root = splay_page (root,node);
        if (root->less)
            {
                // join: splay node's predecessor to the top of the left subtree
                // (its right child is then empty) and attach the right subtree
                node = splay_page (root->less,node);
                node->more = root->more;
            }
        else
            node = root->more;
        free_page_bin[order] = node;
    }
206
207#else
208
209static inline void insert_page (int order,struct memory_free_page *node)
210 {
211 struct memory_free_page *head = free_page_bin[order];
212 node->less = 0;
213 node->more = head;
214 if (head)
215 head->less = node;
216 free_page_bin[order] = node;
217 set_order (node,order);
218 }
219
220static inline struct memory_free_page *pop_page (int order,int want)
221 {
222 struct memory_free_page *node = free_page_bin[order];
223 if (node)
224 {
225 free_page_bin[order] = node->more;
226 if (node->more)
227 node->more->less = 0;
228 set_order (node,~want);
229 }
230 return node;
231 }
232
233static inline void remove_page (int order,struct memory_free_page *node)
234 {
235 if (node->less)
236 node->less->more = node->more;
237 else
238 free_page_bin[order] = node->more;
239 if (node->more)
240 node->more->less = node->less;
241 }
242
243#endif
244
// Make 'node' the sole entry of the free bin for 'order'.
// NOTE(review): this overwrites free_page_bin[order] without linking any
// previous head; the only caller (allocate_page, while splitting) pushes
// into bins it just found empty — confirm before reusing elsewhere.
static inline void push_page (int order,struct memory_free_page *node)
    {
        node->less = 0;
        node->more = 0;
        free_page_bin[order] = node;
        set_order (node,order);
    }
252
// Buddy allocation: find the smallest free page of order >= min, then
// split it down, pushing each upper half into the lower-order bins.
// 'size' must equal get_size (order) on entry, and is kept in lockstep
// with 'order' throughout.
static struct memory_free_page *allocate_page (unsigned int size,int order)
    {
        struct memory_free_page *node;
        int min = order;
        while ((unsigned)order <= (MEMORY_TOTAL_ORDERS - 1))
            // order is valid ?
            {
                if (!(node = pop_page (order,min)))
                    // no free page of this order ?
                    {
                        ++order; size <<= 1;
                        continue;
                    }
                while (order > min)
                    // split our larger page in smaller pages
                    {
                        --order; size >>= 1;
                        push_page (order,(struct memory_free_page *)((unsigned int)node + size));
                    }
                return node;
            }
        return MEMORY_RETURN_FAILURE;
    }
276
277static inline void release_page (struct memory_free_page *node,unsigned int size,int order)
278 {
279 struct memory_free_page *neighbour;
280 while ((order <= (MEMORY_TOTAL_ORDERS - 1)) &&
281 ((neighbour = get_neighbour (node,size)),
282 (get_order (neighbour) == order)))
283 // merge our released page with its contiguous page into a larger page
284 {
285 remove_page (order,neighbour);
286 ++order; size <<= 1;
287 if (neighbour < node)
288 node = neighbour;
289 }
290 insert_page (order,node);
291 }
292
293
294/*****************************************************************************/
295/* PUBLIC FUNCTIONS */
296/*****************************************************************************/
297
298void *memory_allocate_page (int order)
299 {
300 if (order < 0)
301 return MEMORY_RETURN_FAILURE;
302 return allocate_page (get_size (order),order);
303 }
304
305// release a page :
306// when called, 'address' MUST be a valid address pointing
307// to &dram[i], where i ranges from 0 to MEMORY_TOTAL_PAGES - 1.
308// FAILURE if block is already freed.
309int memory_release_page (void *address)
310 {
311 struct memory_free_page *node = (struct memory_free_page *)address;
312 int order = ~get_order (node);
313 if (order < 0)
314 return MEMORY_RETURN_FAILURE;
315 release_page (node,get_size (order),order);
316 return MEMORY_RETURN_SUCCESS;
317 }
318
319
320#ifdef TEST
321# include <stdio.h>
322# include <stdlib.h>
323# if MEMORY_PAGE_USE_SPLAY_TREE
324
325static void dump_splay_node (struct memory_free_page *node,int level)
326 {
327 if (!node)
328 return;
329 dump_splay_node (node->less,level+1);
330 printf ("\n%*s[%d-%d]",level,"",(node - free_page),(node - free_page) + (1 << get_order (node)) - 1);
331 dump_splay_node (node->more,level+1);
332 }
333
// Print a whole free-page splay tree, then flush stdout.
static void dump_splay_tree (struct memory_free_page *root)
    {
        dump_splay_node (root,2);
        fflush (stdout);
    }
338
339# endif
340
// Print the status (used/free), order and byte size of the page at
// 'address'; silently ignores a null address.
void memory_spy_page (void *address)
    {
        struct memory_free_page *node = (struct memory_free_page *)address;
        if (!node)
            return;
        int order = get_order (node);
        int used = (order < 0);
        if (used)
            order = ~order;
        printf("\n(%s,%2d,%7d)",(used ? "used" : "free"),order,get_size (order));
    }
354
355void memory_dump (int order)
356 {
357 struct memory_free_page *node = free_page_bin[order];
358 printf("\n(%s,%2d,%7d)",node ? "free" : "none",order,get_size (order));
359# if MEMORY_PAGE_USE_SPLAY_TREE
360 dump_splay_tree (node);
361# else
362 while (node)
363 {
364 printf("[%d-%d]",(node - free_page),(node - free_page) + (1<<order) - 1);
365 node = node->more;
366 }
367# endif
368
369 }
370
// Stress test: allocate pages of pseudo-random orders until the pool is
// exhausted, release them in pseudo-random order, then verify the whole
// pool coalesced back into one top-order page.
// NOTE(review): the inner 'j -= order' loop never terminates when
// order == 0 and j > 0, and 'swap = node[k++]' outside the #if 0 block
// overwrites 'swap' before the release phase uses it — confirm this
// harness still behaves as intended.
void memory_check (int order)
    {
        struct memory_free_page *node[4096],*swap;
        unsigned int i = 0,j = 0;
        // dump initial state of every bin
        while (i <= 12)
            memory_dump (i++);
        i = 0;
        printf ("\nallocating...\n");
        while (order >= 0)
            {
                j = order;
                // grab pages of pseudo-random order <= the starting order
                while ((swap = memory_allocate_page (j)))
                    {
                        node[i++] = swap;
                        printf("[%d-%d]",(swap - free_page),(swap - free_page) + ((1 << j)-1));
                        for (j += (rand () & 15); j > (unsigned int)order; j -= order);
                    }
                --order;
            }
        node[i] = 0;
        while (j <= 12)
            memory_dump (j++);
        j = 0;
        printf ("\nreleasing...");
        --i;
        // release in pseudo-random order, compacting the array as we go
        while (i > 0)
            {
                unsigned int k = 0;
#if 0
                printf ("\n");
#endif
                swap = node[k++];
#if 0
                while (swap)
                    {
                        printf("[%d-%d]",(swap - free_page),(swap - free_page) + ((1 << ~get_order (swap))-1));
                        swap = node[k++];
                    }
#endif
                for (j += 1 + (rand () & 15); j >= i; j -= i);
                swap = node[j];
                node[j] = node[i];
                memory_release_page (swap);
                node[i] = 0;
                --i;
            }
        memory_release_page (node[0]);
        i = 0;
        while (i <= 12)
            memory_dump (i++);
        // the pool is healthy iff page 0 is free again at the top order
        printf("\n\n%s !",(get_order (free_page) == 12) ? "SUCCESS" : "FAILURE");
    }
423
424#endif
425#endif \ No newline at end of file
diff --git a/firmware/test/memory/memory-slab.h b/firmware/test/memory/memory-slab.h
new file mode 100644
index 0000000000..29dc609b47
--- /dev/null
+++ b/firmware/test/memory/memory-slab.h
@@ -0,0 +1,450 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id:
9 *
10 * Copyright (C) 2002 by Alan Korr
11 *
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
14 *
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
17 *
18 ****************************************************************************/
19#ifndef __LIBRARY_MEMORY_C__
20# error "This header file must be included ONLY from memory.c."
21#endif
22#ifndef __LIBRARY_MEMORY_PAGE_H__
23# define __LIBRARY_MEMORY_PAGE_H__
24
// A free block inside a slab: the block's own storage holds the link to
// the next free block (intrusive singly-linked free list).
struct memory_free_block
    {
        struct memory_free_block
            *link;
    };
30
///////////////////////////////////////////////////////////////////////////////
// MEMORY SLAB :
////////////////
//
// A slab descriptor lives at the END of its page run; the blocks occupy
// the space before it (see get_slab / memory_grow_cache).
//

struct memory_slab
    {
        struct memory_slab
            *less,*more;        // neighbours in the cache's used/free/reap lists
        unsigned int // left == number of free blocks left
            left;
        struct memory_free_block
            *free;              // head of this slab's free-block list
    };
46
// Make 'node' the new head of a slab list and return it.
// NOTE(review): node->less is set to the old head rather than 0, and the
// node is also spliced after 'head' (head->more = node); the resulting
// links look asymmetric for a doubly-linked list — verify against
// move_slab before relying on traversal through ->less.
static inline struct memory_slab *push_slab (struct memory_slab *head,struct memory_slab *node)
    {
        node->less = head;
        if (head)
            {
                node->more = head->more;
                head->more = node;
            }
        else
            node->more = 0;
        return node;
    }
59
// Detach 'node' from a slab list headed by 'head' and return node's
// successor.
// NOTE(review): neither node->less nor the successor's ->less pointer is
// updated here — confirm callers only ever pop at the list head.
static inline struct memory_slab *pop_slab (struct memory_slab *head,struct memory_slab *node)
    {
        if (head)
            head->more = node->more;
        return node->more;
    }
66
67static inline struct memory_slab *move_slab (struct memory_slab **from,struct memory_slab **to)
68 {
69 struct memory_slab *head = *from;
70 *from = (*from)->more;
71 if (*from)
72 (*from)->less = head->less;
73 head->less = 0;
74 head->more = (*to);
75 if (*to)
76 (*to)->prev = head;
77 *to = head;
78 return head;
79 }
80
81//
82///////////////////////////////////////////////////////////////////////////////
83
///////////////////////////////////////////////////////////////////////////////
// MEMORY CACHE :
/////////////////
//
// A cache hands out fixed-size blocks carved from slabs; slabs are kept
// on three lists according to how full they are (used / free / reap).
//

struct memory_cache
    {
        struct memory_cache
            *less,*more,*same;  // splay-tree links; 'same' chains caches with equal 'left'
        unsigned int
            left; // number of free slabs
        struct memory_slab
            *used;              // slabs with no free block left
        struct memory_slab
            *free;              // slabs with some blocks still free
        struct memory_slab
            *reap;              // slabs that are entirely free
        unsigned int
            size,original_size; // aligned block size / size requested by the creator
        unsigned int
            page_size;          // bytes in one slab's page run
        unsigned int
            blocks_per_slab;
        int
            page_order;         // page-allocator order backing one slab
        unsigned int
            flags;
    };

// Splay tree of all caches, keyed by their number of free slabs ('left').
static struct memory_cache *cache_tree;
115
116static inline int get_order (unsigned size)
117 {
118 int order = 0;
119 size = (size + sizeof(struct memory_free_block) - 1) & - sizeof(struct memory_free_block);
120 while (size > 0)
121 {
122 ++order; size <<= 1;
123 }
124 return order;
125 }
126
// Given a block address, recover the slab descriptor stored at the end
// of the enclosing page run (page_size is a power of two).
static inline struct memory_slab *get_slab (struct memory_cache *cache,void *address)
    {
#ifdef TEST
        // round up to the next page-run boundary, then back off the descriptor
        return (struct memory_slab *)((((unsigned)address + cache->page_size) & -cache->page_size) - sizeof (struct memory_slab));
#else
        // the target pool may not be page-aligned: compute relative to free_page
        return (struct memory_slab *)((free_page + (((unsigned)address - free_page + cache->page_size) & -cache->page_size) - sizeof (struct memory_slab)));
#endif
    }
135
136static struct memory_cache *splay_cache (struct memory_cache *root,unsigned int left)
137 {
138 struct memory_cache *down;
139 struct memory_cache *less;
140 struct memory_cache *more;
141 struct memory_cache head;
142 head.less =
143 head.more = 0;
144 less =
145 more = &head;
146 while (1)
147 {
148 if (left < root->left)
149 {
150 if ((down = root->less))
151 {
152 if (left < down->left)
153 {
154 root->less = down->more;
155 down->more = root;
156 root = down;
157 if (!root->less)
158 break;
159 }
160 more->less = root;
161 more = root;
162 root = root->less;
163 continue;
164 }
165 break;
166 }
167 if (root->left < left)
168 {
169 if ((down = root->more))
170 {
171 if (root->left < left)
172 {
173 root->more = down->less;
174 down->less = root;
175 root = down;
176 if (!root->more)
177 break;
178 }
179 less->more = root;
180 less = root;
181 root = root->more;
182 continue;
183 }
184 }
185 break;
186 }
187 less->more = root->less;
188 more->less = root->more;
189 root->less = head.more;
190 root->more = head.less;
191 return root;
192 }
193
194static inline struct memory_cache *insert_cache (struct memory_cache *root,struct memory_cache *node)
195 {
196 node->less =
197 node->more =
198 node->same = 0;
199 if (root)
200 {
201 if (node->left == ((root = splay_cache (root,node))->left))
202 {
203 node->less = root.less;
204 node->more = root.more;
205 node->same = root;
206 root->less = node;
207 }
208 else if (node < root)
209 {
210 node->less = root->less;
211 node->more = root;
212 root->less = 0;
213 }
214 else
215 {
216 node->less = root;
217 node->more = root->more;
218 node->more = 0;
219 }
220 }
221 return node;
222 }
223
224static inline struct memory_cache *remove_cache (struct memory_cache *root,struct memory_cache *node)
225 {
226 if (root)
227 {
228 root = splay_cache (root,node);
229 if (root != node)
230 {
231 node->less->same = node->same;
232 if (node->same)
233 node->same->less = node->less;
234 return root;
235 }
236 if (root->less)
237 {
238 node = splay_page (root->less,node);
239 node->more = root->more;
240 }
241 else
242 node = root->more;
243 }
244 return root;
245 }
246
247static inline struct memory_cache *move_cache (struct memory_cache *root,struct memory_cache *node,int delta)
248 {
249 if ((root = remove_cache (root,node)))
250 {
251 node->left += delta;
252 root = insert_cache (root,node);
253 }
254 return root;
255 }
256
257//
258/////////////////////
259// PUBLIC FUNCTIONS :
260/////////////////////
261//
262// - memory_grow_cache : allocate a new slab for a cache
263// - memory_shrink_cache : release free slabs from a cache
264// - memory_create_cache : create a new cache of size-fixed blocks
265// - memory_destroy_cache : destroy the cache and release all the slabs
266// - memory_cache_allocate : allocate a block from the cache
267// - memory_cache_release : release a block in the cache
268//
269
270struct memory_slab *memory_grow_cache (struct memory_cache *cache)
271 {
272 struct memory_slab *slab;
273 unsigned int page;
274 if (cache)
275 {
276 page = (unsigned int)memory_allocate_page (cache->page_order);
277 if (page)
278 {
279 struct memory_free_block *block,**link;
280 slab = (struct memory_slab *)(page + cache->page_size - sizeof (struct memory_slab));
281 slab->free = 0;
282 slab->left = 0;
283 link = &slab->free;
284 for ((unsigned int)block = page;
285 (unsigned int)block + cache->size < (unsigned int)slab;
286 (unsigned int)block += cache->size)
287 {
288 *link = block;
289 link = &block->link;
290 ++slab->free;
291 }
292 *link = 0;
293 cache->blocks_per_slab = slab->free;
294 cache->reap = push_slab (cache->reap,slab);
295 cache_tree = move_cache (cache_tree,cache,+1);
296 return slab;
297 }
298 }
299 return MEMORY_RETURN_FAILURE;
300 }
301
302static int shrink_cache (struct memory_cache *cache,int all,int move)
303 {
304 struct memory_slab *slab;
305 unsigned int slabs = 0;
306 if (cache)
307 {
308 while ((slab = cache->reap))
309 {
310 ++slabs;
311 cache->reap = pop_slab (cache->reap,slab);
312 memory_release_page ((void *)slab);
313 if (all)
314 continue;
315 if (move)
316 cache_tree = move_cache (cache_tree,cache,-slabs);
317 return MEMORY_RETURN_SUCCESS;
318 }
319 }
320 return MEMORY_RETURN_FAILURE;
321 }
322
// Public wrapper around shrink_cache that keeps cache_tree up to date.
// all != 0 releases every fully-free slab, otherwise just one.
int memory_shrink_cache (struct memory_cache *cache,int all)
    {
        return shrink_cache (cache,all,1 /* move cache in cache_tree */);
    }
327
328struct memory_cache *memory_create_cache (unsigned int size,int align,int flags)
329 {
330 struct memory_cache *cache;
331 unsigned int waste = 0,blocks_per_page;
332 int page_order;
333 unsigned int page_size;
334 unsigned int original_size = size;
335
336 // Align size on 'align' bytes ('align' should equal 1<<n)
337 // if 'align' is inferior to 4, 32-bit word alignment is done by default.
338 size = (align > 4) ? ((size + align - 1) & -align) : ((size + sizeof (int) - 1) & -sizeof (int));
339 if (!(cache = memory_cache_allocate (&cache_cache))
340 return MEMORY_RETURN_FAILURE;
341
342 cache->flags =
343 cache->left = 0;
344
345 cache->used =
346 cache->free =
347 cache->reap = 0;
348
349 cache->original_size = original_size;
350 cache->size = size;
351
352 page_size = 0;
353 page_order = MEMORY_PAGE_MINIMAL_SIZE;;
354
355 // Trying to determine what is the best number of pages per slab
356 for (;; ++order,(page_size <<= 1))
357 {
358 if (page_order >= MEMORY_MAXIMUM_PAGE_ORDER_PER_SLAB)
359 {
360 memory_cache_release (&cache_cache,cache);
361 return MEMORY_RETURN_FAILURE;
362 }
363
364 waste = page_size;
365 waste -= sizeof (struct memory_slab);
366
367 blocks_per_slab = waste / size;
368 waste -= block_per_slab * size;
369
370 if (blocks_per_slab < MEMORY_MINIMUM_BLOCKS_PER_SLAB)
371 {
372 ++page_order; page_size <<= 1;
373 continue;
374 }
375
376 // below 3% of lost space is correct
377 if ((waste << 16) / page_size) < 1967)
378 break;
379 ++page_order; page_size <<= 1;
380 }
381
382 cache->page_size = page_size;
383 cache->page_order = page_order;
384
385 cache_tree = insert_cache (cache_tree,cache);
386
387 return cache;
388 }
389
390int memory_destroy_cache (struct memory_cache *cache)
391 {
392 /* FIX ME : this function shouldn't be called if there are still used blocks */
393 if (cache && !cache->free && !cache->used)
394 {
395 cache_tree = remove_cache (cache_tree,cache);
396 if (shrink_cache (cache,1 /* release all free slabs */,0 /* don't move in cache_tree */))
397 return memory_cache_release (&cache_cache,cache);
398 }
399 return MEMORY_RETURN_FAILURE;
400 }
401
402void *memory_cache_allocate (struct memory_cache *cache)
403 {
404 if (cache)
405 {
406 do
407 {
408 struct memory_slab *slab;
409 if ((slab = cache->free))
410 {
411 if (slab->left > 0)
412 {
413ok: struct memory_free_block *block = slab->free;
414 slab->free = block->link;
415 if (--slab->left == 0)
416 move_slab (&cache->free,&cache->used);
417 return block;
418 }
419 }
420 if (cache->reap)
421 {
422 slab = move_slab (&cache->reap,&cache->free);
423 cache_tree = move_cache (cache_tree,cache,-1);
424 goto ok;
425 }
426 }
427 while (grow_cache (cache));
428 }
429 return MEMORY_RETURN_FAILURE;
430 }
431
432int memory_cache_release (struct memory_cache *cache,void *address)
433 {
434 struct memory_slab *slab = get_slab (cache,address);
435 ((struct memory_free_block *)address)->link = slab->free;
436 slab->free = (struct memory_free_block *)address;
437 if (slab->left++ == 0)
438 move_slab (&cache->used,&cache->free);
439 else if (slab->left == cache->blocks_per_slab)
440 {
441 move_slab (&cache->free,&cache->reap);
442 cache_tree = move_cache (cache_tree,cache,+1);
443 }
444 return MEMORY_RETURN_SUCCESS;
445 }
446
447//
448///////////////////////////////////////////////////////////////////////////////
449
450#endif
diff --git a/firmware/test/memory/memory.c b/firmware/test/memory/memory.c
new file mode 100644
index 0000000000..19944d5fb4
--- /dev/null
+++ b/firmware/test/memory/memory.c
@@ -0,0 +1,50 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id:
9 *
10 * Copyright (C) 2002 by Alan Korr
11 *
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
14 *
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
17 *
18 ****************************************************************************/
19#define __LIBRARY_MEMORY_C__
20#include <memory.h>
21#include "memory-page.h"
22#if 0
23# include "memory-slab.h"
24# include "memory-block.h"
25#endif
26
/* NOT VERY OPTIMIZED AT ALL BUT WE WILL DO IT WHEN PRIORITY COMES */
// Copy 'count' bytes from 'source' to 'target' (regions must not overlap).
void memory_copy (void *target,void const *source,unsigned int count)
    {
        // FIX: original incremented casted pointers (*((char *)target)++),
        // which is not a valid lvalue in ISO C.
        char *to = target;
        char const *from = source;
        while (count--)
            *to++ = *from++;
    }
33
/* NOT VERY OPTIMIZED AT ALL BUT WE WILL DO IT WHEN PRIORITY COMES */
// Fill 'count' bytes at 'target' with the low byte of 'byte'.
void memory_set (void *target,int byte,unsigned int count)
    {
        // FIX: same invalid cast-as-lvalue increment as memory_copy had.
        char *to = target;
        while (count--)
            *to++ = (char)byte;
    }
40
41void memory_setup (void)
42 {
43#if 1
44 memory_set (free_page,0,MEMORY_TOTAL_BYTES);
45 memory_set (free_page_bin,0,MEMORY_TOTAL_ORDERS *sizeof (struct memory_free_page *));
46 memory_set (free_page_order + 1,0,MEMORY_TOTAL_PAGES);
47#endif
48 free_page_order[0] = MEMORY_TOTAL_ORDERS - 1;
49 free_page_bin[MEMORY_TOTAL_ORDERS - 1] = free_page;
50 }