summaryrefslogtreecommitdiff
path: root/firmware/test/memory/memory-slab.h
diff options
context:
space:
mode:
Diffstat (limited to 'firmware/test/memory/memory-slab.h')
-rw-r--r--firmware/test/memory/memory-slab.h450
1 file changed, 450 insertions, 0 deletions
diff --git a/firmware/test/memory/memory-slab.h b/firmware/test/memory/memory-slab.h
new file mode 100644
index 0000000000..29dc609b47
--- /dev/null
+++ b/firmware/test/memory/memory-slab.h
@@ -0,0 +1,450 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id:
9 *
10 * Copyright (C) 2002 by Alan Korr
11 *
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
14 *
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
17 *
18 ****************************************************************************/
19#ifndef __LIBRARY_MEMORY_C__
20# error "This header file must be included ONLY from memory.c."
21#endif
22#ifndef __LIBRARY_MEMORY_PAGE_H__
23# define __LIBRARY_MEMORY_PAGE_H__
24
/* A free block inside a slab.  While a block is unallocated, its first
   word is reused to chain it on the owning slab's singly-linked free list,
   so blocks need no extra bookkeeping space. */
struct memory_free_block
    {
        struct memory_free_block
            *link; /* next free block in the same slab, 0 terminates */
    };
30
31///////////////////////////////////////////////////////////////////////////////
32// MEMORY SLAB :
33////////////////
34//
35//
36
/* Per-slab descriptor, stored at the END of the slab's page(s) (see
   memory_grow_cache/get_slab).  Slabs are chained on doubly-linked lists
   (used/free/reap) owned by their memory_cache. */
struct memory_slab
    {
        struct memory_slab
            *less,*more; /* previous / next slab on the owning list */
        unsigned int // left == number of free blocks left
            left;
        struct memory_free_block
            *free; /* head of this slab's free-block list */
    };
46
47static inline struct memory_slab *push_slab (struct memory_slab *head,struct memory_slab *node)
48 {
49 node->less = head;
50 if (head)
51 {
52 node->more = head->more;
53 head->more = node;
54 }
55 else
56 node->more = 0;
57 return node;
58 }
59
60static inline struct memory_slab *pop_slab (struct memory_slab *head,struct memory_slab *node)
61 {
62 if (head)
63 head->more = node->more;
64 return node->more;
65 }
66
67static inline struct memory_slab *move_slab (struct memory_slab **from,struct memory_slab **to)
68 {
69 struct memory_slab *head = *from;
70 *from = (*from)->more;
71 if (*from)
72 (*from)->less = head->less;
73 head->less = 0;
74 head->more = (*to);
75 if (*to)
76 (*to)->prev = head;
77 *to = head;
78 return head;
79 }
80
81//
82///////////////////////////////////////////////////////////////////////////////
83
84///////////////////////////////////////////////////////////////////////////////
85// MEMORY CACHE :
86/////////////////
87//
88//
89
/* A cache of fixed-size blocks, carved out of slabs.  Caches form a splay
   tree (cache_tree) keyed on 'left' so the allocator can find caches with
   free slabs quickly; caches sharing a key hang off the 'same' chain. */
struct memory_cache
    {
        struct memory_cache
            *less,*more,*same; /* splay-tree children; 'same' chains equal keys */
        unsigned int
            left; // number of free slabs
        struct memory_slab
            *used; /* slabs with no free block left */
        struct memory_slab
            *free; /* partially used slabs (allocation source) */
        struct memory_slab
            *reap; /* completely free slabs, reclaimable by shrink */
        unsigned int
            size,original_size; /* aligned block size / size as requested */
        unsigned int
            page_size; /* bytes per slab (a power of two) */
        unsigned int
            blocks_per_slab;
        int
            page_order; /* log2 of page_size, passed to memory_allocate_page */
        unsigned int
            flags;
    };

/* Root of the splay tree holding every live cache. */
static struct memory_cache *cache_tree;
115
116static inline int get_order (unsigned size)
117 {
118 int order = 0;
119 size = (size + sizeof(struct memory_free_block) - 1) & - sizeof(struct memory_free_block);
120 while (size > 0)
121 {
122 ++order; size <<= 1;
123 }
124 return order;
125 }
126
/* Maps a block address back to its slab descriptor: rounds the address up
   to the next page_size boundary (page_size is a power of two) and steps
   back over the descriptor stored at the end of the page. */
static inline struct memory_slab *get_slab (struct memory_cache *cache,void *address)
    {
#ifdef TEST
        return (struct memory_slab *)((((unsigned)address + cache->page_size) & -cache->page_size) - sizeof (struct memory_slab));
#else
        /* NOTE(review): the firmware build rebases on the global 'free_page'
           pool (declared elsewhere) before masking -- assumes pages are
           aligned relative to free_page, not to address 0; confirm. */
        return (struct memory_slab *)((free_page + (((unsigned)address - free_page + cache->page_size) & -cache->page_size) - sizeof (struct memory_slab)));
#endif
    }
135
136static struct memory_cache *splay_cache (struct memory_cache *root,unsigned int left)
137 {
138 struct memory_cache *down;
139 struct memory_cache *less;
140 struct memory_cache *more;
141 struct memory_cache head;
142 head.less =
143 head.more = 0;
144 less =
145 more = &head;
146 while (1)
147 {
148 if (left < root->left)
149 {
150 if ((down = root->less))
151 {
152 if (left < down->left)
153 {
154 root->less = down->more;
155 down->more = root;
156 root = down;
157 if (!root->less)
158 break;
159 }
160 more->less = root;
161 more = root;
162 root = root->less;
163 continue;
164 }
165 break;
166 }
167 if (root->left < left)
168 {
169 if ((down = root->more))
170 {
171 if (root->left < left)
172 {
173 root->more = down->less;
174 down->less = root;
175 root = down;
176 if (!root->more)
177 break;
178 }
179 less->more = root;
180 less = root;
181 root = root->more;
182 continue;
183 }
184 }
185 break;
186 }
187 less->more = root->less;
188 more->less = root->more;
189 root->less = head.more;
190 root->more = head.less;
191 return root;
192 }
193
194static inline struct memory_cache *insert_cache (struct memory_cache *root,struct memory_cache *node)
195 {
196 node->less =
197 node->more =
198 node->same = 0;
199 if (root)
200 {
201 if (node->left == ((root = splay_cache (root,node))->left))
202 {
203 node->less = root.less;
204 node->more = root.more;
205 node->same = root;
206 root->less = node;
207 }
208 else if (node < root)
209 {
210 node->less = root->less;
211 node->more = root;
212 root->less = 0;
213 }
214 else
215 {
216 node->less = root;
217 node->more = root->more;
218 node->more = 0;
219 }
220 }
221 return node;
222 }
223
224static inline struct memory_cache *remove_cache (struct memory_cache *root,struct memory_cache *node)
225 {
226 if (root)
227 {
228 root = splay_cache (root,node);
229 if (root != node)
230 {
231 node->less->same = node->same;
232 if (node->same)
233 node->same->less = node->less;
234 return root;
235 }
236 if (root->less)
237 {
238 node = splay_page (root->less,node);
239 node->more = root->more;
240 }
241 else
242 node = root->more;
243 }
244 return root;
245 }
246
247static inline struct memory_cache *move_cache (struct memory_cache *root,struct memory_cache *node,int delta)
248 {
249 if ((root = remove_cache (root,node)))
250 {
251 node->left += delta;
252 root = insert_cache (root,node);
253 }
254 return root;
255 }
256
257//
258/////////////////////
259// PUBLIC FUNCTIONS :
260/////////////////////
261//
262// - memory_grow_cache : allocate a new slab for a cache
263// - memory_shrink_cache : release free slabs from a cache
264// - memory_create_cache : create a new cache of size-fixed blocks
265// - memory_destroy_cache : destroy the cache and release all the slabs
266// - memory_cache_allocate : allocate a block from the cache
267// - memory_cache_release : release a block in the cache
268//
269
270struct memory_slab *memory_grow_cache (struct memory_cache *cache)
271 {
272 struct memory_slab *slab;
273 unsigned int page;
274 if (cache)
275 {
276 page = (unsigned int)memory_allocate_page (cache->page_order);
277 if (page)
278 {
279 struct memory_free_block *block,**link;
280 slab = (struct memory_slab *)(page + cache->page_size - sizeof (struct memory_slab));
281 slab->free = 0;
282 slab->left = 0;
283 link = &slab->free;
284 for ((unsigned int)block = page;
285 (unsigned int)block + cache->size < (unsigned int)slab;
286 (unsigned int)block += cache->size)
287 {
288 *link = block;
289 link = &block->link;
290 ++slab->free;
291 }
292 *link = 0;
293 cache->blocks_per_slab = slab->free;
294 cache->reap = push_slab (cache->reap,slab);
295 cache_tree = move_cache (cache_tree,cache,+1);
296 return slab;
297 }
298 }
299 return MEMORY_RETURN_FAILURE;
300 }
301
302static int shrink_cache (struct memory_cache *cache,int all,int move)
303 {
304 struct memory_slab *slab;
305 unsigned int slabs = 0;
306 if (cache)
307 {
308 while ((slab = cache->reap))
309 {
310 ++slabs;
311 cache->reap = pop_slab (cache->reap,slab);
312 memory_release_page ((void *)slab);
313 if (all)
314 continue;
315 if (move)
316 cache_tree = move_cache (cache_tree,cache,-slabs);
317 return MEMORY_RETURN_SUCCESS;
318 }
319 }
320 return MEMORY_RETURN_FAILURE;
321 }
322
/* Public entry point: release free slabs from 'cache' ('all' != 0 frees
   every reap slab, else one) and re-key the cache in cache_tree. */
int memory_shrink_cache (struct memory_cache *cache,int all)
    {
        return shrink_cache (cache,all,1 /* move cache in cache_tree */);
    }
327
328struct memory_cache *memory_create_cache (unsigned int size,int align,int flags)
329 {
330 struct memory_cache *cache;
331 unsigned int waste = 0,blocks_per_page;
332 int page_order;
333 unsigned int page_size;
334 unsigned int original_size = size;
335
336 // Align size on 'align' bytes ('align' should equal 1<<n)
337 // if 'align' is inferior to 4, 32-bit word alignment is done by default.
338 size = (align > 4) ? ((size + align - 1) & -align) : ((size + sizeof (int) - 1) & -sizeof (int));
339 if (!(cache = memory_cache_allocate (&cache_cache))
340 return MEMORY_RETURN_FAILURE;
341
342 cache->flags =
343 cache->left = 0;
344
345 cache->used =
346 cache->free =
347 cache->reap = 0;
348
349 cache->original_size = original_size;
350 cache->size = size;
351
352 page_size = 0;
353 page_order = MEMORY_PAGE_MINIMAL_SIZE;;
354
355 // Trying to determine what is the best number of pages per slab
356 for (;; ++order,(page_size <<= 1))
357 {
358 if (page_order >= MEMORY_MAXIMUM_PAGE_ORDER_PER_SLAB)
359 {
360 memory_cache_release (&cache_cache,cache);
361 return MEMORY_RETURN_FAILURE;
362 }
363
364 waste = page_size;
365 waste -= sizeof (struct memory_slab);
366
367 blocks_per_slab = waste / size;
368 waste -= block_per_slab * size;
369
370 if (blocks_per_slab < MEMORY_MINIMUM_BLOCKS_PER_SLAB)
371 {
372 ++page_order; page_size <<= 1;
373 continue;
374 }
375
376 // below 3% of lost space is correct
377 if ((waste << 16) / page_size) < 1967)
378 break;
379 ++page_order; page_size <<= 1;
380 }
381
382 cache->page_size = page_size;
383 cache->page_order = page_order;
384
385 cache_tree = insert_cache (cache_tree,cache);
386
387 return cache;
388 }
389
/* Destroys an empty cache: detaches it from cache_tree, releases all of
   its free slabs and returns the descriptor to cache_cache.  Refuses
   (MEMORY_RETURN_FAILURE) while any slab is still on 'free' or 'used'. */
int memory_destroy_cache (struct memory_cache *cache)
    {
        /* FIX ME : this function shouldn't be called if there are still used blocks */
        if (cache && !cache->free && !cache->used)
            {
                cache_tree = remove_cache (cache_tree,cache);
                if (shrink_cache (cache,1 /* release all free slabs */,0 /* don't move in cache_tree */))
                    return memory_cache_release (&cache_cache,cache);
            }
        return MEMORY_RETURN_FAILURE;
    }
401
402void *memory_cache_allocate (struct memory_cache *cache)
403 {
404 if (cache)
405 {
406 do
407 {
408 struct memory_slab *slab;
409 if ((slab = cache->free))
410 {
411 if (slab->left > 0)
412 {
413ok: struct memory_free_block *block = slab->free;
414 slab->free = block->link;
415 if (--slab->left == 0)
416 move_slab (&cache->free,&cache->used);
417 return block;
418 }
419 }
420 if (cache->reap)
421 {
422 slab = move_slab (&cache->reap,&cache->free);
423 cache_tree = move_cache (cache_tree,cache,-1);
424 goto ok;
425 }
426 }
427 while (grow_cache (cache));
428 }
429 return MEMORY_RETURN_FAILURE;
430 }
431
/* Returns one block to its slab: pushes it on the slab's free-block list
   and migrates the slab between the used/free/reap lists as it refills. */
int memory_cache_release (struct memory_cache *cache,void *address)
    {
        struct memory_slab *slab = get_slab (cache,address);
        ((struct memory_free_block *)address)->link = slab->free;
        slab->free = (struct memory_free_block *)address;
        if (slab->left++ == 0)
            /* NOTE(review): move_slab moves the HEAD of cache->used, which is
               not necessarily 'slab' -- confirm slabs are meant to be
               unlinked by identity here. */
            move_slab (&cache->used,&cache->free);
        else if (slab->left == cache->blocks_per_slab)
            {
                /* slab fully free again: park it on 'reap' and re-key cache */
                /* NOTE(review): same head-vs-identity concern as above. */
                move_slab (&cache->free,&cache->reap);
                cache_tree = move_cache (cache_tree,cache,+1);
            }
        return MEMORY_RETURN_SUCCESS;
    }
446
447//
448///////////////////////////////////////////////////////////////////////////////
449
450#endif