summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAidan MacDonald <amachronic@protonmail.com>2023-01-02 21:58:57 +0000
committerAidan MacDonald <amachronic@protonmail.com>2023-01-15 10:04:13 +0000
commitf2f198663edc01a1f19e35b8a0c302f8ee47ae5e (patch)
treed018722172d0bbd7277e84c4550b280ff47bccfa
parentf995c26de92eadbf3d961ad3e0fb233410798dd2 (diff)
downloadrockbox-f2f198663edc01a1f19e35b8a0c302f8ee47ae5e.tar.gz
rockbox-f2f198663edc01a1f19e35b8a0c302f8ee47ae5e.zip
buflib: Move the API back into buflib.h
To minimize code duplication between buflib backends move the public part of the API to buflib.h. Also rewrote documentation for the whole API. Change-Id: I4d7ed6d02084d7130cb41511e63c25ec45b51703
-rw-r--r--firmware/include/buflib.h359
-rw-r--r--firmware/include/buflib_mempool.h325
2 files changed, 361 insertions, 323 deletions
diff --git a/firmware/include/buflib.h b/firmware/include/buflib.h
index 36d171963a..32a5a6abe0 100644
--- a/firmware/include/buflib.h
+++ b/firmware/include/buflib.h
@@ -7,6 +7,8 @@
7 * \/ \/ \/ \/ \/ 7 * \/ \/ \/ \/ \/
8 * $Id$ 8 * $Id$
9 * 9 *
10 * Copyright (C) 2009 Andrew Mahone
11 * Copyright (C) 2011 Thomas Martitz
10 * Copyright (C) 2023 Aidan MacDonald 12 * Copyright (C) 2023 Aidan MacDonald
11 * 13 *
12 * This program is free software; you can redistribute it and/or 14 * This program is free software; you can redistribute it and/or
@@ -22,9 +24,366 @@
22#define _BUFLIB_H_ 24#define _BUFLIB_H_
23 25
24#include "config.h" 26#include "config.h"
27#include <stdint.h>
28#include <stdbool.h>
29#include <string.h>
30
31/* Add extra checks to buflib_get_data to catch bad handles */
32//#define BUFLIB_DEBUG_GET_DATA
33
34/* Support integrity check */
35//#define BUFLIB_DEBUG_CHECK_VALID
36
37/* Support debug printing of memory blocks */
38//#define BUFLIB_DEBUG_PRINT
39
40/* Defined by the backend header. */
41struct buflib_context;
42
43/* Buflib callback return codes. */
44#define BUFLIB_CB_OK 0
45#define BUFLIB_CB_CANNOT_MOVE 1
46#define BUFLIB_CB_CANNOT_SHRINK 1
47
48/* Buflib shrink hints. */
49#define BUFLIB_SHRINK_SIZE_MASK (~BUFLIB_SHRINK_POS_MASK)
50#define BUFLIB_SHRINK_POS_FRONT (1u<<31)
51#define BUFLIB_SHRINK_POS_BACK (1u<<30)
52#define BUFLIB_SHRINK_POS_MASK (BUFLIB_SHRINK_POS_FRONT|BUFLIB_SHRINK_POS_BACK)
53
54/**
55 * Callbacks run by buflib to manage an allocation.
56 */
57struct buflib_callbacks
58{
59 /**
60 * \brief Called when buflib wants to move the buffer
61 * \param handle Handle being moved
62 * \param current Current address of the buffer
63 * \param new New address the buffer would have after moving
64 * \return BUFLIB_CB_OK - Allow the buffer to be moved.
65 * \return BUFLIB_CB_CANNOT_MOVE - Do not allow the buffer to be moved.
66 *
67 * This callback allows you to fix up any pointers that might
68 * be pointing to the buffer before it is moved. The task of
69 * actually moving the buffer contents is performed by buflib
70 * after the move callback returns, if movement is allowed.
71 *
72 * Care must be taken to ensure that the buffer is not accessed
73 * from outside the move callback until the move is complete. If
74 * this is a concern, eg. due to multi-threaded access, then you
75 * must implement a sync_callback() and guard any access to the
76 * buffer with a lock.
77 *
78 * If the move callback is NULL then buflib will never move
79 * the allocation, as if you returned BUFLIB_CB_CANNOT_MOVE.
80 */
81 int (*move_callback)(int handle, void* current, void* new);
82
83 /**
84 * \brief Called when buflib wants to shrink the buffer
85 * \param handle Handle to shrink
86 * \param hints Hints regarding the shrink request
87 * \param start Current address of the buffer
88 * \param size Current size of the buffer as seen by buflib.
89 * This may be rounded up compared to the nominal
90 * allocation size due to alignment requirements.
91 * \return BUFLIB_CB_OK - Was able to shrink the buffer.
92 * \return BUFLIB_CB_CANNOT_SHRINK - Buffer cannot shrink.
93 *
94 * This callback is run by buflib when it runs out of memory
95 * and starts a compaction run. Buflib will not actually try
96 * to shrink or move memory, you must do that yourself and
97 * call buflib_shrink() to report the new start address and
98 * size of the buffer.
99 *
100 * If the shrink callback is NULL then buflib will regard the
101 * buffer as non-shrinkable.
102 */
103 int (*shrink_callback)(int handle, unsigned hints,
104 void *start, size_t size);
105
106 /**
107 * \brief Called before and after attempting to move the buffer
108 * \param handle Handle being moved
109 * \param lock True to lock, false to unlock
110 *
111 * The purpose of this callback is to block access to the buffer
112 * from other threads while a buffer is being moved, using a lock
113 * such as a mutex.
114 *
115 * It is called with `sync_callback(handle, true)` before running
116 * the move callback and `sync_callback(handle, false)` after the
117 * move is complete, regardless of whether the buffer was actually
118 * moved or not.
119 */
120 void (*sync_callback)(int handle, bool lock);
121};
122
123/**
124 * A set of all NULL callbacks for use with allocations that need to stay
125 * locked in RAM and not moved or shrunk. These type of allocations should
126 * be avoided as much as possible to avoid memory fragmentation but it can
127 * suitable for short-lived allocations.
128 *
129 * \note Use of this is discouraged. Prefer to use normal moveable
130 * allocations and pin them.
131 */
132extern struct buflib_callbacks buflib_ops_locked;
133
134/**
 135 * \brief Initialize a buflib context
136 * \param ctx Context to initialize
137 * \param buf Buffer which will be used as the context's memory pool
138 * \param size Size of the buffer
139 */
140void buflib_init(struct buflib_context *ctx, void *buf, size_t size);
141
142/**
143 * Returns the amount of unallocated bytes. It does not mean this amount
144 * can be actually allocated because they might not be contiguous.
145 */
146size_t buflib_available(struct buflib_context *ctx);
147
148/**
149 * Returns the size of the largest possible contiguous allocation, given
150 * the current state of the memory pool. A larger allocation may still
151 * succeed if compaction is able to create a larger contiguous area.
152 */
153size_t buflib_allocatable(struct buflib_context *ctx);
154
155/**
156 * \brief Relocate the buflib memory pool to a new address
157 * \param ctx Context to relocate
158 * \param buf New memory pool address
159 * \return True if relocation should proceed, false if it cannot.
160 *
161 * Updates all pointers inside the buflib context to point to a new pool
162 * address. You must call this function before moving the pool and move
163 * the data manually afterwards only if this function returns true.
164 *
 165 * This is intended to be called from a move_callback() in buflib-on-buflib scenarios,
166 * where the memory pool of the "inner" buflib is allocated from an "outer"
167 * buflib.
168 *
169 * \warning This does not run any move callbacks, so it is not safe to
170 * use if any allocations require them.
171 */
172bool buflib_context_relocate(struct buflib_context *ctx, void *buf);
173
174/**
175 * \brief Allocate memory from buflib
176 * \param ctx Context to allocate from
177 * \param size Allocation size
178 * \return Handle for the allocation (> 0) or a negative value on error
179 *
180 * This is the same as calling buflib_alloc_ex() with a NULL callbacks
181 * struct. The resulting allocation can be moved by buflib; use pinning
182 * if you need to prevent moves.
183 *
184 * Note that zero is not a valid handle, and will never be returned by
185 * this function. However, this may change, and you should treat a zero
186 * or negative return value as an allocation failure.
187 */
188int buflib_alloc(struct buflib_context *ctx, size_t size);
189
190/**
191 * \brief Allocate memory from buflib with custom buffer ops
192 * \param ctx Context to allocate from
193 * \param size Allocation size
194 * \param ops Pointer to ops struct or NULL if no ops are needed.
195 * \return Handle for the allocation (> 0) or a negative value on error.
196 *
197 * Use this if you need to pass custom callbacks for responding to buflib
198 * move or shrink operations. Passing a NULL ops pointer means the buffer
199 * can be moved by buflib at any time.
200 *
201 * Note that zero is not a valid handle, and will never be returned by
202 * this function. However, this may change, and you should treat a zero
203 * or negative return value as an allocation failure.
204 */
205int buflib_alloc_ex(struct buflib_context *ctx, size_t size,
206 struct buflib_callbacks *ops);
207
208/**
209 * \brief Attempt a maximum size allocation
210 * \param ctx Context to allocate from
211 * \param size Size of the allocation will be written here on success.
212 * \param ops Pointer to ops struct or NULL if no ops are needed.
213 * \return Handle for the allocation (> 0) or a negative value on error.
214 *
215 * Buflib will attempt to compact and shrink other allocations as much as
 216 * possible and then allocate the largest contiguous free area. Since this
217 * will consume effectively *all* available memory, future allocations are
218 * likely to fail.
219 *
220 * \note There is rarely any justification to use this with the core_alloc
221 * context due to the impact it has on the entire system. You should
222 * change your code if you think you need this. Of course, if you are
223 * using a private buflib context then this warning does not apply.
224 */
225int buflib_alloc_maximum(struct buflib_context *ctx,
226 size_t *size, struct buflib_callbacks *ops);
227
228/**
229 * \brief Reduce the size of a buflib allocation
230 * \param ctx Buflib context of the allocation
231 * \param handle Handle identifying the allocation
232 * \param newstart New start address. Must be within the current bounds
233 * of the allocation, as returned by buflib_get_data().
234 * \param new_size New size of the buffer.
235 * \return True if shrinking was successful; otherwise, returns false and
236 * does not modify the allocation.
237 *
238 * Shrinking always succeeds provided the new allocation is contained
239 * within the current allocation. A failure is always a programming
240 * error, so you need not check for it and in the future the failure
241 * case may be changed to a panic or undefined behavior with no return
242 * code.
243 *
244 * The new start address and size need not have any particular alignment,
245 * however buflib cannot work with unaligned addresses so there is rarely
246 * any purpose to creating unaligned allocations.
247 *
248 * Shrinking is typically done from a shrink_callback(), but can be done
249 * at any time if you want to reduce the size of a buflib allocation.
250 */
251bool buflib_shrink(struct buflib_context *ctx, int handle,
252 void *newstart, size_t new_size);
253
254/**
255 * \brief Increment an allocation's pin count
256 * \param ctx Buflib context of the allocation
257 * \param handle Handle identifying the allocation
258 *
259 * The pin count acts like a reference count. Buflib will not attempt to
260 * move any buffer with a positive pin count, nor invoke any move or sync
261 * callbacks. Hence, when pinned, it is safe to hold pointers to a buffer
262 * across yields or use them for I/O.
263 *
264 * Note that shrink callbacks can still be invoked for pinned handles.
265 */
266void buflib_pin(struct buflib_context *ctx, int handle);
267
268/**
269 * \brief Decrement an allocation's pin count
270 * \param ctx Buflib context of the allocation
271 * \param handle Handle identifying the allocation
272 */
273void buflib_unpin(struct buflib_context *ctx, int handle);
274
275/**
276 * \brief Return the pin count of an allocation
277 * \param ctx Buflib context of the allocation
278 * \param handle Handle identifying the allocation
279 * \return Current pin count; zero means the handle is not pinned.
280 */
281unsigned buflib_pin_count(struct buflib_context *ctx, int handle);
282
283/**
284 * \brief Free an allocation and return its memory to the pool
285 * \param ctx Buflib context of the allocation
286 * \param handle Handle identifying the allocation
287 * \return Always returns zero (zero is not a valid handle, so this can
288 * be used to invalidate the variable containing the handle).
289 */
290int buflib_free(struct buflib_context *context, int handle);
291
292/**
293 * \brief Get a pointer to the buffer for an allocation
294 * \param ctx Buflib context of the allocation
295 * \param handle Handle identifying the allocation
296 * \return Pointer to the allocation's memory.
297 *
298 * Note that buflib can move allocations in order to free up space when
299 * making new allocations. For this reason, it's unsafe to hold a pointer
300 * to a buffer across a yield() or any other operation that can cause a
301 * context switch. This includes any function that may block, and even
302 * some functions that might not block -- eg. if a low priority thread
303 * acquires a mutex, calling mutex_unlock() may trigger a context switch
304 * to a higher-priority thread.
305 *
306 * buflib_get_data() is a very cheap operation, however, costing only
307 * a few pointer lookups. Don't hesitate to use it extensively.
308 *
309 * If you need to hold a pointer across a possible context switch, pin
310 * the handle with buflib_pin() to prevent the buffer from being moved.
311 * This is required when doing I/O into buflib allocations, for example.
312 */
313#ifdef BUFLIB_DEBUG_GET_DATA
314void *buflib_get_data(struct buflib_context *ctx, int handle);
315#else
316static inline void *buflib_get_data(struct buflib_context *ctx, int handle);
317#endif
318
319/**
320 * \brief Shift allocations up to free space at the start of the pool
321 * \param ctx Context to operate on
322 * \param size Indicates number of bytes to free up, or 0 to free
323 * up as much as possible. On return, the actual number
324 * of bytes freed is written here.
325 * \return Pointer to the start of the free area
326 *
327 * If `*size` is non-zero, the actual amount of space freed up might
328 * be less than `*size`.
329 *
330 * \warning This will move data around in the pool without calling any
331 * move callbacks!
332 * \warning This function is deprecated and will eventually be removed.
333 */
334void* buflib_buffer_out(struct buflib_context *ctx, size_t *size);
335
336/**
337 * \brief Shift allocations down into free space below the pool
338 * \param ctx Context to operate on
339 * \param size Number of bytes to add to the pool.
340 *
341 * This operation should only be used to return memory that was previously
342 * taken from the pool with buflib_buffer_out(), by passing the same size
343 * that you got from that function.
344 *
345 * \warning This will move data around in the pool without calling any
346 * move callbacks!
347 * \warning This function is deprecated and will eventually be removed.
348 */
349void buflib_buffer_in(struct buflib_context *ctx, int size);
350
351#ifdef BUFLIB_DEBUG_PRINT
352/**
353 * Return the number of blocks in the buffer, allocated or unallocated.
354 *
355 * Only available if BUFLIB_DEBUG_PRINT is defined.
356 */
357int buflib_get_num_blocks(struct buflib_context *ctx);
358
359/**
360 * Write a string describing the block at index block_num to the
361 * provided buffer. The buffer will always be null terminated and
362 * there is no provision to detect truncation. (A 40-byte buffer
363 * is enough to contain any returned string.)
364 *
365 * Returns false if the block index is out of bounds, and writes
366 * an empty string.
367 *
368 * Only available if BUFLIB_DEBUG_PRINT is defined.
369 */
370bool buflib_print_block_at(struct buflib_context *ctx, int block_num,
371 char *buf, size_t bufsize);
372#endif
373
374#ifdef BUFLIB_DEBUG_CHECK_VALID
375/**
376 * Check integrity of given buflib context
377 */
378void buflib_check_valid(struct buflib_context *ctx);
379#endif
25 380
26#if CONFIG_BUFLIB_BACKEND == BUFLIB_BACKEND_MEMPOOL 381#if CONFIG_BUFLIB_BACKEND == BUFLIB_BACKEND_MEMPOOL
27#include "buflib_mempool.h" 382#include "buflib_mempool.h"
28#endif 383#endif
29 384
385#ifndef BUFLIB_ALLOC_OVERHEAD
386# define BUFLIB_ALLOC_OVERHEAD 0
387#endif
388
30#endif /* _BUFLIB_H_ */ 389#endif /* _BUFLIB_H_ */
diff --git a/firmware/include/buflib_mempool.h b/firmware/include/buflib_mempool.h
index f261d4abda..4b01b629c3 100644
--- a/firmware/include/buflib_mempool.h
+++ b/firmware/include/buflib_mempool.h
@@ -30,19 +30,6 @@
30# error "include buflib.h instead" 30# error "include buflib.h instead"
31#endif 31#endif
32 32
33#include <stdint.h>
34#include <stdbool.h>
35#include <string.h>
36
37/* add extra checks to buflib_get_data to catch bad handles */
38//#define BUFLIB_DEBUG_GET_DATA
39
40/* support integrity check */
41//#define BUFLIB_DEBUG_CHECK_VALID
42
43/* support debug printing of memory blocks */
44//#define BUFLIB_DEBUG_PRINT
45
46union buflib_data 33union buflib_data
47{ 34{
48 intptr_t val; /* length of the block in n*sizeof(union buflib_data). 35 intptr_t val; /* length of the block in n*sizeof(union buflib_data).
@@ -65,321 +52,13 @@ struct buflib_context
65 bool compact; 52 bool compact;
66}; 53};
67 54
68/** 55#define BUFLIB_ALLOC_OVERHEAD (4 * sizeof(union buflib_data))
69 * This declares the minimal overhead that is required per alloc. These
70 * are bytes that are allocated from the context's pool in addition
71 * to the actually requested number of bytes.
72 *
73 * The total number of bytes consumed by an allocation is
74 * BUFLIB_ALLOC_OVERHEAD + requested bytes + pad to pointer size
75 */
76#define BUFLIB_ALLOC_OVERHEAD (4*sizeof(union buflib_data))
77
78/**
79 * Callbacks used by the buflib to inform allocation that compaction
80 * is happening (before data is moved)
81 *
82 * Note that buflib tries to move to satisfy new allocations before shrinking.
83 * So if you have something to resize try to do it outside of the callback.
84 *
85 * Regardless of the above, if the allocation is SHRINKABLE, but not
86 * MUST_NOT_MOVE buflib will move the allocation before even attempting to
87 * shrink.
88 */
89struct buflib_callbacks {
90 /**
91 * This is called before data is moved. Use this to fix up any cached
92 * pointers pointing to inside the allocation. The size is unchanged.
93 *
94 * This is not needed if you don't cache the data pointer (but always
95 * call buflib_get_data()) and don't pass pointer to the data to yielding
96 * functions.
97 *
98 * handle: The corresponding handle
99 * current: The current start of the allocation
100 * new: The new start of the allocation, after data movement
101 *
102 * Return: Return BUFLIB_CB_OK, or BUFLIB_CB_CANNOT_MOVE if movement
103 * is impossible at this moment.
104 *
105 * If NULL: this allocation must not be moved around
106 * by the buflib when compaction occurs. Attention: Don't confuse
107 * that with passing NULL for the whole callback structure
108 * to buflib_alloc_ex(). This would enable moving buffers by default.
109 * You have to pass NULL inside the "struct buflib_callbacks" structure.
110 */
111 int (*move_callback)(int handle, void* current, void* new);
112 /**
113 * This is called when the buflib desires to shrink a buffer
114 * in order to satisfy new allocation. This happens when buflib runs
115 * out of memory, e.g. because buflib_alloc_maximum() was called.
116 * Move data around as you need to make space and call core_shrink() as
117 * appropriate from within the callback to complete the shrink operation.
118 * buflib will not move data as part of shrinking.
119 *
120 * hint: bit mask containing hints on how shrinking is desired (see below)
121 * handle: The corresponding handle
122 * start: The old start of the allocation
123 *
124 * Return: Return BUFLIB_CB_OK, or BUFLIB_CB_CANNOT_SHRINK if shirinking
125 * is impossible at this moment.
126 *
127 * if NULL: this allocation cannot be resized.
128 * It is recommended that allocation that must not move are
129 * at least shrinkable
130 */
131 int (*shrink_callback)(int handle, unsigned hints, void* start, size_t old_size);
132 /**
133 * This is called when special steps must be taken for synchronization
134 * both before the move_callback is called and after the data has been
135 * moved.
136 */
137 void (*sync_callback)(int handle, bool sync_on);
138};
139
140/** A set of all NULL callbacks for use with allocations that need to stay
141 * locked in RAM and not moved or shrunk. These type of allocations should
142 * be avoided as much as possible to avoid memory fragmentation but it can
143 * suitable for short-lived allocations. */
144extern struct buflib_callbacks buflib_ops_locked;
145
146#define BUFLIB_SHRINK_SIZE_MASK (~BUFLIB_SHRINK_POS_MASK)
147#define BUFLIB_SHRINK_POS_FRONT (1u<<31)
148#define BUFLIB_SHRINK_POS_BACK (1u<<30)
149#define BUFLIB_SHRINK_POS_MASK (BUFLIB_SHRINK_POS_FRONT|BUFLIB_SHRINK_POS_BACK)
150
151/**
152 * Possible return values for the callbacks, some of them can cause
153 * compaction to fail and therefore new allocations to fail
154 */
155/* Everything alright */
156#define BUFLIB_CB_OK 0
157/* Tell buflib that moving failed. Buflib may retry to move at any point */
158#define BUFLIB_CB_CANNOT_MOVE 1
159/* Tell buflib that resizing failed, possibly future making allocations fail */
160#define BUFLIB_CB_CANNOT_SHRINK 1
161
162/**
163 * Initializes buflib with a caller allocated context instance and memory pool.
164 *
165 * The buflib_context instance needs to be passed to every other buflib
166 * function. It's should be considered opaque, even though it is not yet
167 * (that's to make inlining core_get_data() possible). The documentation
168 * of the other functions will not describe the context
169 * instance parameter further as it's obligatory.
170 *
171 * context: The new buflib instance to be initialized, allocated by the caller
172 * size: The size of the memory pool
173 */
174void buflib_init(struct buflib_context *context, void *buf, size_t size);
175
176
177/**
178 * Returns the amount of unallocated bytes. It does not mean this amount
179 * can be actually allocated because they might not be contiguous.
180 *
181 * Returns: The number of unallocated bytes in the memory pool.
182 */
183size_t buflib_available(struct buflib_context *ctx);
184
185/**
186 * Returns the biggest possible allocation that can be determined to succeed.
187 *
188 * Returns: The amount of bytes of the biggest unallocated, contiguous region.
189 */
190size_t buflib_allocatable(struct buflib_context *ctx);
191
192/**
193 * Relocates the fields in *ctx to the new buffer position pointed to by buf.
194 * This does _not_ move any data but updates the pointers. The data has
195 * to be moved afterwards manually and only if this function returned true.
196 *
197 * This is intended to be called from within a move_callback(), for
198 * buflib-on-buflib scenarios (i.e. a new buflib instance backed by a buffer
199 * that was allocated by another buflib instance). Be aware that if the parent
200 * move_callback() moves the underlying buffer _no_ move_callback() of the
201 * underlying buffer are called.
202 *
203 * Returns true of the relocation was successful. If it returns false no
204 * change to *ctx was made.
205 */
206bool buflib_context_relocate(struct buflib_context *ctx, void *buf);
207
208/**
209 * Allocates memory from buflib's memory pool
210 *
211 * size: How many bytes to allocate
212 *
213 * This function passes NULL for the callback structure "ops", so buffers
214 * are movable. Don't pass them to functions that yield().
215 *
216 * Returns: A positive integer handle identifying this allocation, or
217 * a negative value on error (0 is also not a valid handle)
218 */
219int buflib_alloc(struct buflib_context *context, size_t size);
220
221 56
222/** 57#ifndef BUFLIB_DEBUG_GET_DATA
223 * Allocates memory from the buflib's memory pool with additional callbacks
224 * and flags
225 *
226 * size: How many bytes to allocate
227 * ops: a struct with pointers to callback functions (see above).
228 * if "ops" is NULL: Buffer is movable.
229 *
230 * Returns: A positive integer handle identifying this allocation, or
231 * a negative value on error (0 is also not a valid handle)
232 */
233int buflib_alloc_ex(struct buflib_context *ctx, size_t size,
234 struct buflib_callbacks *ops);
235
236
237/**
238 * Gets all available memory from buflib, for temporary use.
239 *
240 * Since this effectively makes all future allocations fail (unless
241 * another allocation is freed in the meantime), you should definitely provide
242 * a shrink callback if you plan to hold the buffer for a longer period. This
243 * will allow buflib to permit allocations by shrinking the buffer returned by
244 * this function.
245 *
246 * Note that this might return many more bytes than buflib_available() or
247 * buflib_allocatable() return, because it aggressively compacts the pool
248 * and even shrinks other allocations. However, do not depend on this behavior,
249 * it may change.
250 *
251 * size: The actual size will be returned into size
252 * ops: a struct with pointers to callback functions
253 *
254 * Returns: A positive integer handle identifying this allocation, or
255 * a negative value on error (0 is also not a valid handle)
256 */
257int buflib_alloc_maximum(struct buflib_context* ctx,
258 size_t *size, struct buflib_callbacks *ops);
259
260/**
261 * Queries the data pointer for the given handle. It's actually a cheap
262 * operation, don't hesitate using it extensively.
263 *
264 * Notice that you need to re-query after every direct or indirect yield(),
265 * because compaction can happen by other threads which may get your data
266 * moved around (or you can get notified about changes by callbacks,
267 * see further above).
268 *
269 * handle: The handle corresponding to the allocation
270 *
271 * Returns: The start pointer of the allocation
272 */
273#ifdef BUFLIB_DEBUG_GET_DATA
274void *buflib_get_data(struct buflib_context *ctx, int handle);
275#else
276static inline void *buflib_get_data(struct buflib_context *ctx, int handle) 58static inline void *buflib_get_data(struct buflib_context *ctx, int handle)
277{ 59{
278 return (void *)ctx->handle_table[-handle].alloc; 60 return (void *)ctx->handle_table[-handle].alloc;
279} 61}
280#endif 62#endif
281 63
282/**
283 * Shrink the memory allocation associated with the given handle
284 * Mainly intended to be used with the shrink callback, but it can also
285 * be called outside as well, e.g. to give back buffer space allocated
286 * with buflib_alloc_maximum().
287 *
288 * Note that you must move/copy data around yourself before calling this,
289 * buflib will not do this as part of shrinking.
290 *
291 * handle: The handle identifying this allocation
292 * new_start: the new start of the allocation
293 * new_size: the new size of the allocation
294 *
295 * Returns: true if shrinking was successful. Otherwise it returns false,
296 * without having modified memory.
297 *
298 */
299bool buflib_shrink(struct buflib_context *ctx, int handle, void* newstart, size_t new_size);
300
301/**
302 * Increment the pin count for a handle. When pinned the handle will not
303 * be moved and move callbacks will not be triggered, allowing a pointer
304 * to the buffer to be kept across yields or used for I/O.
305 *
306 * Note that shrink callbacks can still be invoked for pinned handles.
307 */
308void buflib_pin(struct buflib_context *ctx, int handle);
309
310/**
311 * Decrement the pin count for a handle.
312 */
313void buflib_unpin(struct buflib_context *ctx, int handle);
314
315/**
316 * Get the current pin count of a handle. Zero means the handle is not pinned.
317 */
318unsigned buflib_pin_count(struct buflib_context *ctx, int handle);
319
320/**
321 * Frees memory associated with the given handle
322 *
323 * Returns: 0 (to invalidate handles in one line, 0 is not a valid handle)
324 */
325int buflib_free(struct buflib_context *context, int handle);
326
327/**
328 * Moves the underlying buflib buffer up by size bytes (as much as
329 * possible for size == 0) without moving the end. This effectively
330 * reduces the available space by taking away manageable space from the
331 * front. This space is not available for new allocations anymore.
332 *
333 * To make space available in the front, everything is moved up.
334 * It does _NOT_ call the move callbacks
335 *
336 *
337 * size: size in bytes to move the buffer up (take away). The actual
338 * bytes moved is returned in this
339 * Returns: The new start of the underlying buflib buffer
340 */
341void* buflib_buffer_out(struct buflib_context *ctx, size_t *size);
342
343/**
344 * Moves the underlying buflib buffer down by size bytes without
345 * moving the end. This grows the buflib buffer by adding space to the front.
346 * The new bytes are available for new allocations.
347 *
348 * Everything is moved down, and the new free space will be in the middle.
349 * It does _NOT_ call the move callbacks.
350 *
351 * size: size in bytes to move the buffer down (new free space)
352 */
353void buflib_buffer_in(struct buflib_context *ctx, int size);
354
355#ifdef BUFLIB_DEBUG_PRINT
356/**
357 * Return the number of blocks in the buffer, allocated or unallocated.
358 *
359 * Only available if BUFLIB_DEBUG_PRINT is defined.
360 */
361int buflib_get_num_blocks(struct buflib_context *ctx);
362
363/**
364 * Write a string describing the block at index block_num to the
365 * provided buffer. The buffer will always be null terminated and
366 * there is no provision to detect truncation. (A 40-byte buffer
367 * is enough to contain any returned string.)
368 *
369 * Returns false if the block index is out of bounds, and writes
370 * an empty string.
371 *
372 * Only available if BUFLIB_DEBUG_PRINT is defined.
373 */
374bool buflib_print_block_at(struct buflib_context *ctx, int block_num,
375 char *buf, size_t bufsize);
376#endif
377
378#ifdef BUFLIB_DEBUG_CHECK_VALID
379/**
380 * Check integrity of given buflib context
381 */
382void buflib_check_valid(struct buflib_context *ctx);
383#endif
384
385#endif /* _BUFLIB_MEMPOOL_H_ */ 64#endif /* _BUFLIB_MEMPOOL_H_ */