path: root/apps/plugins/pdbox/TLSF-2.4.4/src/tlsf.c
Diffstat (limited to 'apps/plugins/pdbox/TLSF-2.4.4/src/tlsf.c')
-rw-r--r--  apps/plugins/pdbox/TLSF-2.4.4/src/tlsf.c  995
1 files changed, 995 insertions, 0 deletions
diff --git a/apps/plugins/pdbox/TLSF-2.4.4/src/tlsf.c b/apps/plugins/pdbox/TLSF-2.4.4/src/tlsf.c
new file mode 100644
index 0000000000..47b461bac5
--- /dev/null
+++ b/apps/plugins/pdbox/TLSF-2.4.4/src/tlsf.c
@@ -0,0 +1,995 @@
1/*
2 * Two Levels Segregate Fit memory allocator (TLSF)
3 * Version 2.4.4
4 *
5 * Written by Miguel Masmano Tello <mimastel@doctor.upv.es>
6 *
7 * Thanks to Ismael Ripoll for his suggestions and reviews
8 *
9 * Copyright (C) 2008, 2007, 2006, 2005, 2004
10 *
11 * This code is released using a dual license strategy: GPL/LGPL
12 * You can choose the licence that better fits your requirements.
13 *
14 * Released under the terms of the GNU General Public License Version 2.0
15 * Released under the terms of the GNU Lesser General Public License Version 2.1
16 *
17 */
18
19/*
20 * Code contributions:
21 *
22 * (Jul 28 2007) Herman ten Brugge <hermantenbrugge@home.nl>:
23 *
24 * - Add 64 bit support. It now runs on x86_64 and solaris64.
25 * - I also tested this on vxworks/32 and solaris/32 and i386/32 processors.
26 * - Remove assembly code. I could not measure any performance difference
27 * on my core2 processor. This also makes the code more portable.
28 * - Moved defines/typedefs from tlsf.h to tlsf.c
29 * - Changed MIN_BLOCK_SIZE to sizeof (free_ptr_t) and BHDR_OVERHEAD to
30 * (sizeof (bhdr_t) - MIN_BLOCK_SIZE). This does not change the fact
31 * that the minimum size is still sizeof
32 * (bhdr_t).
33 * - Changed all C++ comment style to C style. (// -> /.* ... *./)
34 * - Used ls_bit instead of ffs and ms_bit instead of fls. I did this to
35 * avoid confusion with the standard ffs function which returns
36 * different values.
37 * - Created set_bit/clear_bit functions because they are not present
38 * on x86_64.
39 * - Added locking support + extra file target.h to show how to use it.
40 * - Added get_used_size function (REMOVED in 2.4)
41 * - Added rtl_realloc and rtl_calloc function
42 * - Implemented realloc clever support.
43 * - Added some test code in the example directory.
44 *
45 *
46 * (Oct 23 2006) Adam Scislowicz:
47 *
48 * - Support for ARMv5 implemented
49 *
50 */
51
52/*#define USE_SBRK (0) */
53/*#define USE_MMAP (0) */
54
55#include <stdio.h>
56#include <string.h>
57
58#ifndef TLSF_USE_LOCKS
59#define TLSF_USE_LOCKS (0)
60#endif
61
62#ifndef TLSF_STATISTIC
63#define TLSF_STATISTIC (0)
64#endif
65
66#ifndef USE_MMAP
67#define USE_MMAP (0)
68#endif
69
70#ifndef USE_SBRK
71#define USE_SBRK (0)
72#endif
73
74
75#if TLSF_USE_LOCKS
76#include "target.h"
77#else
78#define TLSF_CREATE_LOCK(_unused_) do{}while(0)
79#define TLSF_DESTROY_LOCK(_unused_) do{}while(0)
80#define TLSF_ACQUIRE_LOCK(_unused_) do{}while(0)
81#define TLSF_RELEASE_LOCK(_unused_) do{}while(0)
82#endif
83
84#if TLSF_STATISTIC
85#define TLSF_ADD_SIZE(tlsf, b) do { \
86 tlsf->used_size += (b->size & BLOCK_SIZE) + BHDR_OVERHEAD; \
87 if (tlsf->used_size > tlsf->max_size) \
88 tlsf->max_size = tlsf->used_size; \
89 } while(0)
90
91#define TLSF_REMOVE_SIZE(tlsf, b) do { \
92 tlsf->used_size -= (b->size & BLOCK_SIZE) + BHDR_OVERHEAD; \
93 } while(0)
94#else
95#define TLSF_ADD_SIZE(tlsf, b) do{}while(0)
96#define TLSF_REMOVE_SIZE(tlsf, b) do{}while(0)
97#endif
98
99#if USE_MMAP || USE_SBRK
100#include <unistd.h>
101#endif
102
103#if USE_MMAP
104#include <sys/mman.h>
105#endif
106
107#include "tlsf.h"
108
109#if !defined(__GNUC__)
110#ifndef __inline__
111#define __inline__
112#endif
113#endif
114
115/* The debug functions can only be used when _DEBUG_TLSF_ is set. */
116#ifndef _DEBUG_TLSF_
117#define _DEBUG_TLSF_ (0)
118#endif
119
120/*************************************************************************/
121/* Definition of the structures used by TLSF */
122
123
124/* Some IMPORTANT TLSF parameters */
125/* Unlike previous TLSF versions, these parameters are now static */
126#define BLOCK_ALIGN (sizeof(void *) * 2)
127
128#define MAX_FLI (30)
129#define MAX_LOG2_SLI (5)
130#define MAX_SLI (1 << MAX_LOG2_SLI) /* MAX_SLI = 2^MAX_LOG2_SLI */
131
132#define FLI_OFFSET (6) /* the TLSF structure will only manage blocks bigger */
133/* than 128 bytes */
134#define SMALL_BLOCK (128)
135#define REAL_FLI (MAX_FLI - FLI_OFFSET)
136#define MIN_BLOCK_SIZE (sizeof (free_ptr_t))
137#define BHDR_OVERHEAD (sizeof (bhdr_t) - MIN_BLOCK_SIZE)
138#define TLSF_SIGNATURE (0x2A59FA59)
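/* Illustrative header arithmetic, assuming a 32-bit target with 4-byte
 * pointers and a 4-byte size_t (a sketch only; the code also builds on
 * 64-bit targets, where these values double):
 *   sizeof(free_ptr_t) = 8   ->  MIN_BLOCK_SIZE = 8
 *   sizeof(bhdr_t)     = 16  ->  BHDR_OVERHEAD  = 16 - 8 = 8
 * The free-list pointers live inside the payload of free blocks, so a used
 * block only pays BHDR_OVERHEAD bytes (prev_hdr + size) of bookkeeping. */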
139
140#define PTR_MASK (sizeof(void *) - 1)
141#define BLOCK_SIZE (0xFFFFFFFF - PTR_MASK)
142
143#define GET_NEXT_BLOCK(_addr, _r) ((bhdr_t *) ((char *) (_addr) + (_r)))
144#define MEM_ALIGN ((BLOCK_ALIGN) - 1)
145#define ROUNDUP_SIZE(_r) (((_r) + MEM_ALIGN) & ~MEM_ALIGN)
146#define ROUNDDOWN_SIZE(_r) ((_r) & ~MEM_ALIGN)
147#define ROUNDUP(_x, _v) ((((~(_x)) + 1) & ((_v)-1)) + (_x))
148
149#define BLOCK_STATE (0x1)
150#define PREV_STATE (0x2)
151
152/* bit 0 of the block size */
153#define FREE_BLOCK (0x1)
154#define USED_BLOCK (0x0)
155
156/* bit 1 of the block size */
157#define PREV_FREE (0x2)
158#define PREV_USED (0x0)
159
160
161#define DEFAULT_AREA_SIZE (1024*10)
162
163#ifdef USE_MMAP
164#define PAGE_SIZE (getpagesize())
165#endif
166
167#define PRINT_MSG(fmt, args...) printf(fmt, ## args)
168#define ERROR_MSG(fmt, args...) printf(fmt, ## args)
169
170typedef unsigned int u32_t; /* NOTE: Make sure that this type is 4 bytes long on your computer */
171typedef unsigned char u8_t; /* NOTE: Make sure that this type is 1 byte on your computer */
172
173typedef struct free_ptr_struct {
174 struct bhdr_struct *prev;
175 struct bhdr_struct *next;
176} free_ptr_t;
177
178typedef struct bhdr_struct {
179 /* This pointer is only valid if the previous block is free (PREV_FREE bit of size set) */
180 struct bhdr_struct *prev_hdr;
181 /* The size is stored in bytes */
182 size_t size; /* bit 0 indicates whether the block is used and */
183 /* bit 1 indicates whether the previous block is free */
184 union {
185 struct free_ptr_struct free_ptr;
186 u8_t buffer[1]; /*sizeof(struct free_ptr_struct)]; */
187 } ptr;
188} bhdr_t;
189
190/* This structure is embedded at the beginning of each area, giving us
191 * enough information to cope with a set of areas */
192
193typedef struct area_info_struct {
194 bhdr_t *end;
195 struct area_info_struct *next;
196} area_info_t;
197
198typedef struct TLSF_struct {
199 /* the TLSF's structure signature */
200 u32_t tlsf_signature;
201
202#if TLSF_USE_LOCKS
203 TLSF_MLOCK_T lock;
204#endif
205
206#if TLSF_STATISTIC
207 /* These cannot be calculated outside TLSF because we
208 * do not know the sizes when freeing/reallocating memory. */
209 size_t used_size;
210 size_t max_size;
211#endif
212
213 /* A linked list holding all the existing areas */
214 area_info_t *area_head;
215
216 /* the first-level bitmap */
217 /* This bitmap needs REAL_FLI bits (it fits in a single word) */
218 u32_t fl_bitmap;
219
220 /* the second-level bitmap */
221 u32_t sl_bitmap[REAL_FLI];
222
223 bhdr_t *matrix[REAL_FLI][MAX_SLI];
224} tlsf_t;
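/* Rough footprint of this control structure (illustrative, assuming 32-bit
 * pointers): with REAL_FLI = 30 - 6 = 24 and MAX_SLI = 32, the matrix alone
 * holds 24 * 32 = 768 block pointers, about 3 KiB, which is why
 * init_memory_pool() below rejects pools smaller than sizeof(tlsf_t) plus a
 * small safety margin. */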
225
226
227/******************************************************************/
228/************** Helper functions ***************************/
229/******************************************************************/
230static __inline__ void set_bit(int nr, u32_t * addr);
231static __inline__ void clear_bit(int nr, u32_t * addr);
232static __inline__ int ls_bit(int x);
233static __inline__ int ms_bit(int x);
234static __inline__ void MAPPING_SEARCH(size_t * _r, int *_fl, int *_sl);
235static __inline__ void MAPPING_INSERT(size_t _r, int *_fl, int *_sl);
236static __inline__ bhdr_t *FIND_SUITABLE_BLOCK(tlsf_t * _tlsf, int *_fl, int *_sl);
237static __inline__ bhdr_t *process_area(void *area, size_t size);
238#if USE_SBRK || USE_MMAP
239static __inline__ void *get_new_area(size_t * size);
240#endif
241
242static const int table[] = {
243 -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
244 4, 4,
245 4, 4, 4, 4, 4, 4, 4,
246 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
247 5,
248 5, 5, 5, 5, 5, 5, 5,
249 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
250 6,
251 6, 6, 6, 6, 6, 6, 6,
252 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
253 6,
254 6, 6, 6, 6, 6, 6, 6,
255 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
256 7,
257 7, 7, 7, 7, 7, 7, 7,
258 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
259 7,
260 7, 7, 7, 7, 7, 7, 7,
261 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
262 7,
263 7, 7, 7, 7, 7, 7, 7,
264 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
265 7,
266 7, 7, 7, 7, 7, 7, 7
267};
268
269static __inline__ int ls_bit(int i)
270{
271 unsigned int a;
272 unsigned int x = i & -i;
273
274 a = x <= 0xffff ? (x <= 0xff ? 0 : 8) : (x <= 0xffffff ? 16 : 24);
275 return table[x >> a] + a;
276}
277
278static __inline__ int ms_bit(int i)
279{
280 unsigned int a;
281 unsigned int x = (unsigned int) i;
282
283 a = x <= 0xffff ? (x <= 0xff ? 0 : 8) : (x <= 0xffffff ? 16 : 24);
284 return table[x >> a] + a;
285}
286
287static __inline__ void set_bit(int nr, u32_t * addr)
288{
289 addr[nr >> 5] |= 1 << (nr & 0x1f);
290}
291
292static __inline__ void clear_bit(int nr, u32_t * addr)
293{
294 addr[nr >> 5] &= ~(1 << (nr & 0x1f));
295}
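/* A few sample values for the table-driven bit scans above (no hardware
 * ffs/fls needed):
 *   ls_bit(0x00000001) = 0     ms_bit(0x00000001) = 0
 *   ls_bit(0x00001800) = 11    ms_bit(0x00001800) = 12
 *   ls_bit(0)          = -1    ms_bit(0)          = -1   (no bit set) */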
296
297static __inline__ void MAPPING_SEARCH(size_t * _r, int *_fl, int *_sl)
298{
299 int _t;
300
301 if (*_r < SMALL_BLOCK) {
302 *_fl = 0;
303 *_sl = *_r / (SMALL_BLOCK / MAX_SLI);
304 } else {
305 _t = (1 << (ms_bit(*_r) - MAX_LOG2_SLI)) - 1;
306 *_r = *_r + _t;
307 *_fl = ms_bit(*_r);
308 *_sl = (*_r >> (*_fl - MAX_LOG2_SLI)) - MAX_SLI;
309 *_fl -= FLI_OFFSET;
310 /*if ((*_fl -= FLI_OFFSET) < 0) // FL will always be >0!
311 *_fl = *_sl = 0;
312 */
313 *_r &= ~_t;
314 }
315}
316
317static __inline__ void MAPPING_INSERT(size_t _r, int *_fl, int *_sl)
318{
319 if (_r < SMALL_BLOCK) {
320 *_fl = 0;
321 *_sl = _r / (SMALL_BLOCK / MAX_SLI);
322 } else {
323 *_fl = ms_bit(_r);
324 *_sl = (_r >> (*_fl - MAX_LOG2_SLI)) - MAX_SLI;
325 *_fl -= FLI_OFFSET;
326 }
327}
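/* Worked example of the two-level mapping (SMALL_BLOCK = 128, MAX_SLI = 32,
 * FLI_OFFSET = 6):
 *   MAPPING_INSERT(100):  100 < 128, so fl = 0 and sl = 100 / 4 = 25.
 *   MAPPING_INSERT(1000): ms_bit(1000) = 9, sl = (1000 >> 4) - 32 = 30,
 *                         fl = 9 - 6 = 3.
 *   MAPPING_SEARCH(1000): first rounds the request up to 1008 and maps it
 *                         to fl = 3, sl = 31, so any block found in
 *                         matrix[3][31] is guaranteed to be big enough. */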
328
329
330static __inline__ bhdr_t *FIND_SUITABLE_BLOCK(tlsf_t * _tlsf, int *_fl, int *_sl)
331{
332 u32_t _tmp = _tlsf->sl_bitmap[*_fl] & (~0 << *_sl);
333 bhdr_t *_b = NULL;
334
335 if (_tmp) {
336 *_sl = ls_bit(_tmp);
337 _b = _tlsf->matrix[*_fl][*_sl];
338 } else {
339 *_fl = ls_bit(_tlsf->fl_bitmap & (~0 << (*_fl + 1)));
340 if (*_fl > 0) { /* likely */
341 *_sl = ls_bit(_tlsf->sl_bitmap[*_fl]);
342 _b = _tlsf->matrix[*_fl][*_sl];
343 }
344 }
345 return _b;
346}
347
348
349#define EXTRACT_BLOCK_HDR(_b, _tlsf, _fl, _sl) do { \
350 _tlsf -> matrix [_fl] [_sl] = _b -> ptr.free_ptr.next; \
351 if (_tlsf -> matrix[_fl][_sl]) \
352 _tlsf -> matrix[_fl][_sl] -> ptr.free_ptr.prev = NULL; \
353 else { \
354 clear_bit (_sl, &_tlsf -> sl_bitmap [_fl]); \
355 if (!_tlsf -> sl_bitmap [_fl]) \
356 clear_bit (_fl, &_tlsf -> fl_bitmap); \
357 } \
358 _b -> ptr.free_ptr.prev = NULL; \
359 _b -> ptr.free_ptr.next = NULL; \
360 }while(0)
361
362
363#define EXTRACT_BLOCK(_b, _tlsf, _fl, _sl) do { \
364 if (_b -> ptr.free_ptr.next) \
365 _b -> ptr.free_ptr.next -> ptr.free_ptr.prev = _b -> ptr.free_ptr.prev; \
366 if (_b -> ptr.free_ptr.prev) \
367 _b -> ptr.free_ptr.prev -> ptr.free_ptr.next = _b -> ptr.free_ptr.next; \
368 if (_tlsf -> matrix [_fl][_sl] == _b) { \
369 _tlsf -> matrix [_fl][_sl] = _b -> ptr.free_ptr.next; \
370 if (!_tlsf -> matrix [_fl][_sl]) { \
371 clear_bit (_sl, &_tlsf -> sl_bitmap[_fl]); \
372 if (!_tlsf -> sl_bitmap [_fl]) \
373 clear_bit (_fl, &_tlsf -> fl_bitmap); \
374 } \
375 } \
376 _b -> ptr.free_ptr.prev = NULL; \
377 _b -> ptr.free_ptr.next = NULL; \
378 } while(0)
379
380#define INSERT_BLOCK(_b, _tlsf, _fl, _sl) do { \
381 _b -> ptr.free_ptr.prev = NULL; \
382 _b -> ptr.free_ptr.next = _tlsf -> matrix [_fl][_sl]; \
383 if (_tlsf -> matrix [_fl][_sl]) \
384 _tlsf -> matrix [_fl][_sl] -> ptr.free_ptr.prev = _b; \
385 _tlsf -> matrix [_fl][_sl] = _b; \
386 set_bit (_sl, &_tlsf -> sl_bitmap [_fl]); \
387 set_bit (_fl, &_tlsf -> fl_bitmap); \
388 } while(0)
389
390#if USE_SBRK || USE_MMAP
391static __inline__ void *get_new_area(size_t * size)
392{
393 void *area;
394
395#if USE_SBRK
396 area = (void *)sbrk(0);
397 if (((void *)sbrk(*size)) != ((void *) -1))
398 return area;
399#endif
400
401#if USE_MMAP
402 *size = ROUNDUP(*size, PAGE_SIZE);
403 if ((area = mmap(0, *size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)) != MAP_FAILED)
404 return area;
405#endif
406 return ((void *) ~0);
407}
408#endif
409
410static __inline__ bhdr_t *process_area(void *area, size_t size)
411{
412 bhdr_t *b, *lb, *ib;
413 area_info_t *ai;
414
415 ib = (bhdr_t *) area;
416 ib->size =
417 (sizeof(area_info_t) <
418 MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(sizeof(area_info_t)) | USED_BLOCK | PREV_USED;
419 b = (bhdr_t *) GET_NEXT_BLOCK(ib->ptr.buffer, ib->size & BLOCK_SIZE);
420 b->size = ROUNDDOWN_SIZE(size - 3 * BHDR_OVERHEAD - (ib->size & BLOCK_SIZE)) | USED_BLOCK | PREV_USED;
421 b->ptr.free_ptr.prev = b->ptr.free_ptr.next = 0;
422 lb = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
423 lb->prev_hdr = b;
424 lb->size = 0 | USED_BLOCK | PREV_FREE;
425 ai = (area_info_t *) ib->ptr.buffer;
426 ai->next = 0;
427 ai->end = lb;
428 return ib;
429}
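/* Layout produced by process_area() (sketch, not to scale):
 *
 *   ib -> small used block whose payload holds the area_info_t
 *   b  -> one large block covering the rest of the area; it is created
 *         marked USED and is handed back to the pool by the caller
 *         (init_memory_pool()/add_new_area() call free_ex() on it)
 *   lb -> zero-sized sentinel at the very end, always kept "used" and
 *         referenced by ai->end; it stops coalescing at the area boundary */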
430
431/******************************************************************/
432/******************** Begin of the allocator code *****************/
433/******************************************************************/
434
435static char *mp = NULL; /* Default memory pool. */
436
437/******************************************************************/
438size_t init_memory_pool(size_t mem_pool_size, void *mem_pool)
439{
440/******************************************************************/
441 tlsf_t *tlsf;
442 bhdr_t *b, *ib;
443
444 if (!mem_pool || !mem_pool_size || mem_pool_size < sizeof(tlsf_t) + BHDR_OVERHEAD * 8) {
445 ERROR_MSG("init_memory_pool (): memory_pool invalid\n");
446 return -1;
447 }
448
449 if (((unsigned long) mem_pool & PTR_MASK)) {
450 ERROR_MSG("init_memory_pool (): mem_pool must be aligned to a word\n");
451 return -1;
452 }
453 tlsf = (tlsf_t *) mem_pool;
454 /* Check if already initialised */
455 if (tlsf->tlsf_signature == TLSF_SIGNATURE) {
456 mp = mem_pool;
457 b = GET_NEXT_BLOCK(mp, ROUNDUP_SIZE(sizeof(tlsf_t)));
458 return b->size & BLOCK_SIZE;
459 }
460
461 mp = mem_pool;
462
463 /* Zero only the TLSF control structure at the start of the pool */
464 memset(mem_pool, 0, sizeof(tlsf_t));
465
466 tlsf->tlsf_signature = TLSF_SIGNATURE;
467
468 TLSF_CREATE_LOCK(&tlsf->lock);
469
470 ib = process_area(GET_NEXT_BLOCK
471 (mem_pool, ROUNDUP_SIZE(sizeof(tlsf_t))), ROUNDDOWN_SIZE(mem_pool_size - sizeof(tlsf_t)));
472 b = GET_NEXT_BLOCK(ib->ptr.buffer, ib->size & BLOCK_SIZE);
473 free_ex(b->ptr.buffer, tlsf);
474 tlsf->area_head = (area_info_t *) ib->ptr.buffer;
475
476#if TLSF_STATISTIC
477 tlsf->used_size = mem_pool_size - (b->size & BLOCK_SIZE);
478 tlsf->max_size = tlsf->used_size;
479#endif
480
481 return (b->size & BLOCK_SIZE);
482}
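/* Minimal usage sketch for the per-pool interface (illustrative only; the
 * buffer name and size are made up here, and the buffer must satisfy the
 * word-alignment check above):
 *
 *   static double pool_storage[8 * 1024];             // aligned backing store
 *
 *   size_t free_bytes = init_memory_pool(sizeof(pool_storage), pool_storage);
 *   if (free_bytes == (size_t) -1)
 *       return;                                        // too small/misaligned
 *   void *p = malloc_ex(100, pool_storage);            // allocate from pool
 *   free_ex(p, pool_storage);                          // give it back
 *   destroy_memory_pool(pool_storage);
 *
 * The tlsf_malloc()/tlsf_free() wrappers below perform the same calls on the
 * default pool 'mp', taking the optional lock around each one. */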
483
484/******************************************************************/
485size_t add_new_area(void *area, size_t area_size, void *mem_pool)
486{
487/******************************************************************/
488 tlsf_t *tlsf = (tlsf_t *) mem_pool;
489 area_info_t *ptr, *ptr_prev, *ai;
490 bhdr_t *ib0, *b0, *lb0, *ib1, *b1, *lb1, *next_b;
491
492 memset(area, 0, area_size);
493 ptr = tlsf->area_head;
494 ptr_prev = 0;
495
496 ib0 = process_area(area, area_size);
497 b0 = GET_NEXT_BLOCK(ib0->ptr.buffer, ib0->size & BLOCK_SIZE);
498 lb0 = GET_NEXT_BLOCK(b0->ptr.buffer, b0->size & BLOCK_SIZE);
499
500 /* Before inserting the new area, we have to merge this area with the
501 already existing ones */
502
503 while (ptr) {
504 ib1 = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);
505 b1 = GET_NEXT_BLOCK(ib1->ptr.buffer, ib1->size & BLOCK_SIZE);
506 lb1 = ptr->end;
507
508 /* Merging the new area with the next physically contiguous one */
509 if ((unsigned long) ib1 == (unsigned long) lb0 + BHDR_OVERHEAD) {
510 if (tlsf->area_head == ptr) {
511 tlsf->area_head = ptr->next;
512 ptr = ptr->next;
513 } else {
514 ptr_prev->next = ptr->next;
515 ptr = ptr->next;
516 }
517
518 b0->size =
519 ROUNDDOWN_SIZE((b0->size & BLOCK_SIZE) +
520 (ib1->size & BLOCK_SIZE) + 2 * BHDR_OVERHEAD) | USED_BLOCK | PREV_USED;
521
522 b1->prev_hdr = b0;
523 lb0 = lb1;
524
525 continue;
526 }
527
528 /* Merging the new area with the previous physically contiguous
529 one */
530 if ((unsigned long) lb1->ptr.buffer == (unsigned long) ib0) {
531 if (tlsf->area_head == ptr) {
532 tlsf->area_head = ptr->next;
533 ptr = ptr->next;
534 } else {
535 ptr_prev->next = ptr->next;
536 ptr = ptr->next;
537 }
538
539 lb1->size =
540 ROUNDDOWN_SIZE((b0->size & BLOCK_SIZE) +
541 (ib0->size & BLOCK_SIZE) + 2 * BHDR_OVERHEAD) | USED_BLOCK | (lb1->size & PREV_STATE);
542 next_b = GET_NEXT_BLOCK(lb1->ptr.buffer, lb1->size & BLOCK_SIZE);
543 next_b->prev_hdr = lb1;
544 b0 = lb1;
545 ib0 = ib1;
546
547 continue;
548 }
549 ptr_prev = ptr;
550 ptr = ptr->next;
551 }
552
553 /* Inserting the area in the list of linked areas */
554 ai = (area_info_t *) ib0->ptr.buffer;
555 ai->next = tlsf->area_head;
556 ai->end = lb0;
557 tlsf->area_head = ai;
558 free_ex(b0->ptr.buffer, mem_pool);
559 return (b0->size & BLOCK_SIZE);
560}
561
562
563/******************************************************************/
564size_t get_used_size(void *mem_pool)
565{
566/******************************************************************/
567#if TLSF_STATISTIC
568 return ((tlsf_t *) mem_pool)->used_size;
569#else
570 return 0;
571#endif
572}
573
574/******************************************************************/
575size_t get_max_size(void *mem_pool)
576{
577/******************************************************************/
578#if TLSF_STATISTIC
579 return ((tlsf_t *) mem_pool)->max_size;
580#else
581 return 0;
582#endif
583}
584
585/******************************************************************/
586void destroy_memory_pool(void *mem_pool)
587{
588/******************************************************************/
589 tlsf_t *tlsf = (tlsf_t *) mem_pool;
590
591 tlsf->tlsf_signature = 0;
592
593 TLSF_DESTROY_LOCK(&tlsf->lock);
594
595}
596
597
598/******************************************************************/
599void *tlsf_malloc(size_t size)
600{
601/******************************************************************/
602 void *ret;
603
604#if USE_MMAP || USE_SBRK
605 if (!mp) {
606 size_t area_size;
607 void *area;
608
609 area_size = sizeof(tlsf_t) + BHDR_OVERHEAD * 8; /* Just a safety constant */
610 area_size = (area_size > DEFAULT_AREA_SIZE) ? area_size : DEFAULT_AREA_SIZE;
611 area = get_new_area(&area_size);
612 if (area == ((void *) ~0))
613 return NULL; /* Not enough system memory */
614 init_memory_pool(area_size, area);
615 }
616#endif
617
618 TLSF_ACQUIRE_LOCK(&((tlsf_t *)mp)->lock);
619
620 ret = malloc_ex(size, mp);
621
622 TLSF_RELEASE_LOCK(&((tlsf_t *)mp)->lock);
623
624 return ret;
625}
626
627/******************************************************************/
628void tlsf_free(void *ptr)
629{
630/******************************************************************/
631
632 TLSF_ACQUIRE_LOCK(&((tlsf_t *)mp)->lock);
633
634 free_ex(ptr, mp);
635
636 TLSF_RELEASE_LOCK(&((tlsf_t *)mp)->lock);
637
638}
639
640/******************************************************************/
641void *tlsf_realloc(void *ptr, size_t size)
642{
643/******************************************************************/
644 void *ret;
645
646#if USE_MMAP || USE_SBRK
647 if (!mp) {
648 return tlsf_malloc(size);
649 }
650#endif
651
652 TLSF_ACQUIRE_LOCK(&((tlsf_t *)mp)->lock);
653
654 ret = realloc_ex(ptr, size, mp);
655
656 TLSF_RELEASE_LOCK(&((tlsf_t *)mp)->lock);
657
658 return ret;
659}
660
661/******************************************************************/
662void *tlsf_calloc(size_t nelem, size_t elem_size)
663{
664/******************************************************************/
665 void *ret;
666
667 TLSF_ACQUIRE_LOCK(&((tlsf_t *)mp)->lock);
668
669 ret = calloc_ex(nelem, elem_size, mp);
670
671 TLSF_RELEASE_LOCK(&((tlsf_t *)mp)->lock);
672
673 return ret;
674}
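/* Sketch of the default-pool wrappers above (illustrative only). With
 * USE_MMAP and USE_SBRK both 0, as configured here, init_memory_pool() must
 * have been called first; otherwise 'mp' is still NULL and these calls would
 * dereference an uninitialised pool:
 *
 *   int *v = tlsf_calloc(16, sizeof(*v));     // zero-initialised block
 *   v = tlsf_realloc(v, 32 * sizeof(*v));     // may grow in place
 *   tlsf_free(v);
 */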
675
676/******************************************************************/
677void *malloc_ex(size_t size, void *mem_pool)
678{
679/******************************************************************/
680 tlsf_t *tlsf = (tlsf_t *) mem_pool;
681 bhdr_t *b, *b2, *next_b;
682 int fl, sl;
683 size_t tmp_size;
684
685 size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size);
686
687 /* Rounding up the requested size and calculating fl and sl */
688 MAPPING_SEARCH(&size, &fl, &sl);
689
690 /* Search for a free block; note that this function changes the values of fl and sl,
691 so they are no longer valid if the function fails */
692 b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
693
694#if USE_MMAP || USE_SBRK
695 if (!b) {
696 size_t area_size;
697 void *area;
698 /* Growing the pool size when needed */
699 area_size = size + BHDR_OVERHEAD * 8; /* size plus enough room for the required headers. */
700 area_size = (area_size > DEFAULT_AREA_SIZE) ? area_size : DEFAULT_AREA_SIZE;
701 area = get_new_area(&area_size); /* Call sbrk or mmap */
702 if (area == ((void *) ~0))
703 return NULL; /* Not enough system memory */
704 add_new_area(area, area_size, mem_pool);
705 /* Rounding up the requested size and calculating fl and sl */
706 MAPPING_SEARCH(&size, &fl, &sl);
707 /* Searching a free block */
708 b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
709 }
710#endif
711 if (!b)
712 return NULL; /* Not found */
713
714 EXTRACT_BLOCK_HDR(b, tlsf, fl, sl);
715
716 /*-- found: */
717 next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
718 /* Should the block be split? */
719 tmp_size = (b->size & BLOCK_SIZE) - size;
720 if (tmp_size >= sizeof(bhdr_t)) {
721 tmp_size -= BHDR_OVERHEAD;
722 b2 = GET_NEXT_BLOCK(b->ptr.buffer, size);
723 b2->size = tmp_size | FREE_BLOCK | PREV_USED;
724 next_b->prev_hdr = b2;
725 MAPPING_INSERT(tmp_size, &fl, &sl);
726 INSERT_BLOCK(b2, tlsf, fl, sl);
727
728 b->size = size | (b->size & PREV_STATE);
729 } else {
730 next_b->size &= (~PREV_FREE);
731 b->size &= (~FREE_BLOCK); /* Now it's used */
732 }
733
734 TLSF_ADD_SIZE(tlsf, b);
735
736 return (void *) b->ptr.buffer;
737}
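/* Split example for malloc_ex(): if the chosen free block holds 256 bytes
 * and the (rounded) request is 96, the remainder 256 - 96 = 160 is at least
 * sizeof(bhdr_t), so a new free block of 160 - BHDR_OVERHEAD bytes is carved
 * off at offset 96 of the payload and reinserted into the matrix; otherwise
 * the whole block is handed out and the slack is left inside it. */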
738
739/******************************************************************/
740void free_ex(void *ptr, void *mem_pool)
741{
742/******************************************************************/
743 tlsf_t *tlsf = (tlsf_t *) mem_pool;
744 bhdr_t *b, *tmp_b;
745 int fl = 0, sl = 0;
746
747 if (!ptr) {
748 return;
749 }
750 b = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);
751 b->size |= FREE_BLOCK;
752
753 TLSF_REMOVE_SIZE(tlsf, b);
754
755 b->ptr.free_ptr.prev = NULL;
756 b->ptr.free_ptr.next = NULL;
757 tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
758 if (tmp_b->size & FREE_BLOCK) {
759 MAPPING_INSERT(tmp_b->size & BLOCK_SIZE, &fl, &sl);
760 EXTRACT_BLOCK(tmp_b, tlsf, fl, sl);
761 b->size += (tmp_b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
762 }
763 if (b->size & PREV_FREE) {
764 tmp_b = b->prev_hdr;
765 MAPPING_INSERT(tmp_b->size & BLOCK_SIZE, &fl, &sl);
766 EXTRACT_BLOCK(tmp_b, tlsf, fl, sl);
767 tmp_b->size += (b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
768 b = tmp_b;
769 }
770 MAPPING_INSERT(b->size & BLOCK_SIZE, &fl, &sl);
771 INSERT_BLOCK(b, tlsf, fl, sl);
772
773 tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
774 tmp_b->size |= PREV_FREE;
775 tmp_b->prev_hdr = b;
776}
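/* Coalescing in free_ex(), step by step, for a block B being freed:
 *   1. If the physically following block is free, it is extracted from its
 *      matrix[fl][sl] list and absorbed into B (its size plus one header).
 *   2. If PREV_FREE is set in B, the physically previous block is extracted
 *      and absorbs B in the same way, becoming the block to insert.
 *   3. The merged block is reinserted and the next header is updated
 *      (PREV_FREE set, prev_hdr pointing back) so later frees can merge
 *      with it in turn. */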
777
778/******************************************************************/
779void *realloc_ex(void *ptr, size_t new_size, void *mem_pool)
780{
781/******************************************************************/
782 tlsf_t *tlsf = (tlsf_t *) mem_pool;
783 void *ptr_aux;
784 unsigned int cpsize;
785 bhdr_t *b, *tmp_b, *next_b;
786 int fl, sl;
787 size_t tmp_size;
788
789 if (!ptr) {
790 if (new_size)
791 return (void *) malloc_ex(new_size, mem_pool);
792 if (!new_size)
793 return NULL;
794 } else if (!new_size) {
795 free_ex(ptr, mem_pool);
796 return NULL;
797 }
798
799 b = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);
800 next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
801 new_size = (new_size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(new_size);
802 tmp_size = (b->size & BLOCK_SIZE);
803 if (new_size <= tmp_size) {
804 TLSF_REMOVE_SIZE(tlsf, b);
805 if (next_b->size & FREE_BLOCK) {
806 MAPPING_INSERT(next_b->size & BLOCK_SIZE, &fl, &sl);
807 EXTRACT_BLOCK(next_b, tlsf, fl, sl);
808 tmp_size += (next_b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
809 next_b = GET_NEXT_BLOCK(next_b->ptr.buffer, next_b->size & BLOCK_SIZE);
810 /* We always re-enter this free block because tmp_size will
811 be greater than sizeof (bhdr_t) */
812 }
813 tmp_size -= new_size;
814 if (tmp_size >= sizeof(bhdr_t)) {
815 tmp_size -= BHDR_OVERHEAD;
816 tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, new_size);
817 tmp_b->size = tmp_size | FREE_BLOCK | PREV_USED;
818 next_b->prev_hdr = tmp_b;
819 next_b->size |= PREV_FREE;
820 MAPPING_INSERT(tmp_size, &fl, &sl);
821 INSERT_BLOCK(tmp_b, tlsf, fl, sl);
822 b->size = new_size | (b->size & PREV_STATE);
823 }
824 TLSF_ADD_SIZE(tlsf, b);
825 return (void *) b->ptr.buffer;
826 }
827 if ((next_b->size & FREE_BLOCK)) {
828 if (new_size <= (tmp_size + (next_b->size & BLOCK_SIZE))) {
829 TLSF_REMOVE_SIZE(tlsf, b);
830 MAPPING_INSERT(next_b->size & BLOCK_SIZE, &fl, &sl);
831 EXTRACT_BLOCK(next_b, tlsf, fl, sl);
832 b->size += (next_b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
833 next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
834 next_b->prev_hdr = b;
835 next_b->size &= ~PREV_FREE;
836 tmp_size = (b->size & BLOCK_SIZE) - new_size;
837 if (tmp_size >= sizeof(bhdr_t)) {
838 tmp_size -= BHDR_OVERHEAD;
839 tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, new_size);
840 tmp_b->size = tmp_size | FREE_BLOCK | PREV_USED;
841 next_b->prev_hdr = tmp_b;
842 next_b->size |= PREV_FREE;
843 MAPPING_INSERT(tmp_size, &fl, &sl);
844 INSERT_BLOCK(tmp_b, tlsf, fl, sl);
845 b->size = new_size | (b->size & PREV_STATE);
846 }
847 TLSF_ADD_SIZE(tlsf, b);
848 return (void *) b->ptr.buffer;
849 }
850 }
851
852 ptr_aux = malloc_ex(new_size, mem_pool);
853
854 cpsize = ((b->size & BLOCK_SIZE) > new_size) ? new_size : (b->size & BLOCK_SIZE);
855
856 memcpy(ptr_aux, ptr, cpsize);
857
858 free_ex(ptr, mem_pool);
859 return ptr_aux;
860}
861
862
863/******************************************************************/
864void *calloc_ex(size_t nelem, size_t elem_size, void *mem_pool)
865{
866/******************************************************************/
867 void *ptr;
868
869 if (nelem <= 0 || elem_size <= 0)
870 return NULL;
871
872 if (!(ptr = malloc_ex(nelem * elem_size, mem_pool)))
873 return NULL;
874 memset(ptr, 0, nelem * elem_size);
875
876 return ptr;
877}
878
879
880
881#if _DEBUG_TLSF_
882
883/*************** DEBUG FUNCTIONS **************/
884
885/* The following functions have been designed to ease the debugging of */
886/* the TLSF structure. Outside of development they are of little */
887/* use. To enable them, _DEBUG_TLSF_ must be set. */
888
889extern void dump_memory_region(unsigned char *mem_ptr, unsigned int size);
890extern void print_block(bhdr_t * b);
891extern void print_tlsf(tlsf_t * tlsf);
892void print_all_blocks(tlsf_t * tlsf);
893
894void dump_memory_region(unsigned char *mem_ptr, unsigned int size)
895{
896
897 unsigned long begin = (unsigned long) mem_ptr;
898 unsigned long end = (unsigned long) mem_ptr + size;
899 int column = 0;
900
901 begin >>= 2;
902 begin <<= 2;
903
904 end >>= 2;
905 end++;
906 end <<= 2;
907
908 PRINT_MSG("\nMemory region dumped: 0x%lx - 0x%lx\n\n", begin, end);
909
910 column = 0;
911 PRINT_MSG("0x%lx ", begin);
912
913 while (begin < end) {
914 if (((unsigned char *) begin)[0] == 0)
915 PRINT_MSG("00");
916 else
917 PRINT_MSG("%02x", ((unsigned char *) begin)[0]);
918 if (((unsigned char *) begin)[1] == 0)
919 PRINT_MSG("00 ");
920 else
921 PRINT_MSG("%02x ", ((unsigned char *) begin)[1]);
922 begin += 2;
923 column++;
924 if (column == 8) {
925 PRINT_MSG("\n0x%lx ", begin);
926 column = 0;
927 }
928
929 }
930 PRINT_MSG("\n\n");
931}
932
933void print_block(bhdr_t * b)
934{
935 if (!b)
936 return;
937 PRINT_MSG(">> [%p] (", b);
938 if ((b->size & BLOCK_SIZE))
939 PRINT_MSG("%lu bytes, ", (unsigned long) (b->size & BLOCK_SIZE));
940 else
941 PRINT_MSG("sentinel, ");
942 if ((b->size & BLOCK_STATE) == FREE_BLOCK)
943 PRINT_MSG("free [%p, %p], ", b->ptr.free_ptr.prev, b->ptr.free_ptr.next);
944 else
945 PRINT_MSG("used, ");
946 if ((b->size & PREV_STATE) == PREV_FREE)
947 PRINT_MSG("prev. free [%p])\n", b->prev_hdr);
948 else
949 PRINT_MSG("prev used)\n");
950}
951
952void print_tlsf(tlsf_t * tlsf)
953{
954 bhdr_t *next;
955 int i, j;
956
957 PRINT_MSG("\nTLSF at %p\n", tlsf);
958
959 PRINT_MSG("FL bitmap: 0x%x\n\n", (unsigned) tlsf->fl_bitmap);
960
961 for (i = 0; i < REAL_FLI; i++) {
962 if (tlsf->sl_bitmap[i])
963 PRINT_MSG("SL bitmap 0x%x\n", (unsigned) tlsf->sl_bitmap[i]);
964 for (j = 0; j < MAX_SLI; j++) {
965 next = tlsf->matrix[i][j];
966 if (next)
967 PRINT_MSG("-> [%d][%d]\n", i, j);
968 while (next) {
969 print_block(next);
970 next = next->ptr.free_ptr.next;
971 }
972 }
973 }
974}
975
976void print_all_blocks(tlsf_t * tlsf)
977{
978 area_info_t *ai;
979 bhdr_t *next;
980 PRINT_MSG("\nTLSF at %p\nALL BLOCKS\n\n", tlsf);
981 ai = tlsf->area_head;
982 while (ai) {
983 next = (bhdr_t *) ((char *) ai - BHDR_OVERHEAD);
984 while (next) {
985 print_block(next);
986 if ((next->size & BLOCK_SIZE))
987 next = GET_NEXT_BLOCK(next->ptr.buffer, next->size & BLOCK_SIZE);
988 else
989 next = NULL;
990 }
991 ai = ai->next;
992 }
993}
994
995#endif