summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAidan MacDonald <amachronic@protonmail.com>2023-01-14 18:20:59 +0000
committerAidan MacDonald <amachronic@protonmail.com>2023-01-15 11:06:27 +0000
commit800bc000a08b37e22d2b36d32fd448624712a881 (patch)
tree87149cae9acb83db4a98a873b81515a99200d790
parent92565e9246f3a47b90fea4a436ecfd8e7a1198b8 (diff)
downloadrockbox-800bc000a08b37e22d2b36d32fd448624712a881.tar.gz
rockbox-800bc000a08b37e22d2b36d32fd448624712a881.zip
buflib: Add pinned get/put data functions
These are more efficient than separate pin/unpin calls because pin count increment and decrement can be done cheaply when the data pointer is known. Secondly, pinned access can be made safe against preemption by hardware interrupts or other CPU cores; buflib_get_data() can't. This makes it more useful under different threading models and for SMP targets; both of which are not particularly relevant to Rockbox now, but might be in the future. Change-Id: I09284251b83bbbc59ef88a494c8fda26a7f7ef26
-rw-r--r--firmware/buflib_malloc.c12
-rw-r--r--firmware/buflib_mempool.c59
-rw-r--r--firmware/include/buflib.h26
-rw-r--r--firmware/include/buflib_malloc.h12
-rw-r--r--firmware/include/buflib_mempool.h35
5 files changed, 109 insertions, 35 deletions
diff --git a/firmware/buflib_malloc.c b/firmware/buflib_malloc.c
index fdc2b5b925..2ac3441ec6 100644
--- a/firmware/buflib_malloc.c
+++ b/firmware/buflib_malloc.c
@@ -168,6 +168,18 @@ unsigned buflib_pin_count(struct buflib_context *ctx, int handle)
168 return h->pin_count; 168 return h->pin_count;
169} 169}
170 170
171void _buflib_malloc_put_data_pinned(struct buflib_context *ctx, void *data)
172{
173 for (int i = 0; i < ctx->num_allocs; ++i)
174 {
175 if (ctx->allocs[i].user == data)
176 {
177 ctx->allocs[i].pin_count--;
178 break;
179 }
180 }
181}
182
171int buflib_free(struct buflib_context *ctx, int handle) 183int buflib_free(struct buflib_context *ctx, int handle)
172{ 184{
173 if (handle <= 0) 185 if (handle <= 0)
diff --git a/firmware/buflib_mempool.c b/firmware/buflib_mempool.c
index cb35290c03..9d1c055bb9 100644
--- a/firmware/buflib_mempool.c
+++ b/firmware/buflib_mempool.c
@@ -107,22 +107,14 @@
107 (PARANOIA_CHECK_LENGTH | \ 107 (PARANOIA_CHECK_LENGTH | \
108 PARANOIA_CHECK_BLOCK_HANDLE | PARANOIA_CHECK_PINNING) 108 PARANOIA_CHECK_BLOCK_HANDLE | PARANOIA_CHECK_PINNING)
109 109
110/* Indices used to access block fields as block[idx_XXX] */
111enum {
112 idx_LEN, /* length of the block, must come first */
113 idx_HANDLE, /* pointer to entry in the handle table */
114 idx_OPS, /* pointer to an ops struct */
115 idx_PIN, /* pin count */
116 BUFLIB_NUM_FIELDS,
117};
118
119struct buflib_callbacks buflib_ops_locked = { 110struct buflib_callbacks buflib_ops_locked = {
120 .move_callback = NULL, 111 .move_callback = NULL,
121 .shrink_callback = NULL, 112 .shrink_callback = NULL,
122 .sync_callback = NULL, 113 .sync_callback = NULL,
123}; 114};
124 115
125#define IS_MOVABLE(a) (!a[idx_OPS].ops || a[idx_OPS].ops->move_callback) 116#define IS_MOVABLE(a) \
117 (!a[BUFLIB_IDX_OPS].ops || a[BUFLIB_IDX_OPS].ops->move_callback)
126 118
127static union buflib_data* find_first_free(struct buflib_context *ctx); 119static union buflib_data* find_first_free(struct buflib_context *ctx);
128static union buflib_data* find_block_before(struct buflib_context *ctx, 120static union buflib_data* find_block_before(struct buflib_context *ctx,
@@ -281,8 +273,7 @@ union buflib_data* handle_to_block(struct buflib_context* ctx, int handle)
281 if (!ptr) 273 if (!ptr)
282 return NULL; 274 return NULL;
283 275
284 union buflib_data *data = ALIGN_DOWN(ptr, sizeof(*data)); 276 return _buflib_get_block_header(ptr);
285 return data - BUFLIB_NUM_FIELDS;
286} 277}
287 278
288/* Shrink the handle table, returning true if its size was reduced, false if 279/* Shrink the handle table, returning true if its size was reduced, false if
@@ -318,9 +309,9 @@ move_block(struct buflib_context* ctx, union buflib_data* block, int shift)
318 union buflib_data *new_block; 309 union buflib_data *new_block;
319 310
320 check_block_handle(ctx, block); 311 check_block_handle(ctx, block);
321 union buflib_data *h_entry = block[idx_HANDLE].handle; 312 union buflib_data *h_entry = block[BUFLIB_IDX_HANDLE].handle;
322 313
323 if (!IS_MOVABLE(block) || block[idx_PIN].pincount > 0) 314 if (!IS_MOVABLE(block) || block[BUFLIB_IDX_PIN].pincount > 0)
324 return false; 315 return false;
325 316
326 int handle = ctx->handle_table - h_entry; 317 int handle = ctx->handle_table - h_entry;
@@ -329,7 +320,7 @@ move_block(struct buflib_context* ctx, union buflib_data* block, int shift)
329 new_block = block + shift; 320 new_block = block + shift;
330 new_start = h_entry->alloc + shift*sizeof(union buflib_data); 321 new_start = h_entry->alloc + shift*sizeof(union buflib_data);
331 322
332 struct buflib_callbacks *ops = block[idx_OPS].ops; 323 struct buflib_callbacks *ops = block[BUFLIB_IDX_OPS].ops;
333 324
334 /* If move must be synchronized with use, user should have specified a 325 /* If move must be synchronized with use, user should have specified a
335 callback that handles this */ 326 callback that handles this */
@@ -459,12 +450,12 @@ buflib_compact_and_shrink(struct buflib_context *ctx, unsigned shrink_hints)
459 if (this->val < 0) 450 if (this->val < 0)
460 continue; 451 continue;
461 452
462 struct buflib_callbacks *ops = this[idx_OPS].ops; 453 struct buflib_callbacks *ops = this[BUFLIB_IDX_OPS].ops;
463 if (!ops || !ops->shrink_callback) 454 if (!ops || !ops->shrink_callback)
464 continue; 455 continue;
465 456
466 check_block_handle(ctx, this); 457 check_block_handle(ctx, this);
467 union buflib_data* h_entry = this[idx_HANDLE].handle; 458 union buflib_data* h_entry = this[BUFLIB_IDX_HANDLE].handle;
468 int handle = ctx->handle_table - h_entry; 459 int handle = ctx->handle_table - h_entry;
469 460
470 unsigned pos_hints = shrink_hints & BUFLIB_SHRINK_POS_MASK; 461 unsigned pos_hints = shrink_hints & BUFLIB_SHRINK_POS_MASK;
@@ -596,7 +587,7 @@ handle_alloc:
596 */ 587 */
597 union buflib_data* last_block = find_block_before(ctx, 588 union buflib_data* last_block = find_block_before(ctx,
598 ctx->alloc_end, false); 589 ctx->alloc_end, false);
599 struct buflib_callbacks* ops = last_block[idx_OPS].ops; 590 struct buflib_callbacks* ops = last_block[BUFLIB_IDX_OPS].ops;
600 unsigned hints = 0; 591 unsigned hints = 0;
601 if (!ops || !ops->shrink_callback) 592 if (!ops || !ops->shrink_callback)
602 { /* the last one isn't shrinkable 593 { /* the last one isn't shrinkable
@@ -666,10 +657,10 @@ buffer_alloc:
666 /* Set up the allocated block, by marking the size allocated, and storing 657 /* Set up the allocated block, by marking the size allocated, and storing
667 * a pointer to the handle. 658 * a pointer to the handle.
668 */ 659 */
669 block[idx_LEN].val = size; 660 block[BUFLIB_IDX_LEN].val = size;
670 block[idx_HANDLE].handle = handle; 661 block[BUFLIB_IDX_HANDLE].handle = handle;
671 block[idx_OPS].ops = ops; 662 block[BUFLIB_IDX_OPS].ops = ops;
672 block[idx_PIN].pincount = 0; 663 block[BUFLIB_IDX_PIN].pincount = 0;
673 664
674 handle->alloc = (char*)&block[BUFLIB_NUM_FIELDS]; 665 handle->alloc = (char*)&block[BUFLIB_NUM_FIELDS];
675 666
@@ -916,10 +907,10 @@ buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t ne
916 metadata_size.val = aligned_oldstart - block; 907 metadata_size.val = aligned_oldstart - block;
917 /* update val and the handle table entry */ 908 /* update val and the handle table entry */
918 new_block = aligned_newstart - metadata_size.val; 909 new_block = aligned_newstart - metadata_size.val;
919 block[idx_LEN].val = new_next_block - new_block; 910 block[BUFLIB_IDX_LEN].val = new_next_block - new_block;
920 911
921 check_block_handle(ctx, block); 912 check_block_handle(ctx, block);
922 block[idx_HANDLE].handle->alloc = newstart; 913 block[BUFLIB_IDX_HANDLE].handle->alloc = newstart;
923 if (block != new_block) 914 if (block != new_block)
924 { 915 {
925 /* move metadata over, i.e. pointer to handle table entry and name 916 /* move metadata over, i.e. pointer to handle table entry and name
@@ -964,7 +955,7 @@ void buflib_pin(struct buflib_context *ctx, int handle)
964 buflib_panic(ctx, "invalid handle pin: %d", handle); 955 buflib_panic(ctx, "invalid handle pin: %d", handle);
965 956
966 union buflib_data *data = handle_to_block(ctx, handle); 957 union buflib_data *data = handle_to_block(ctx, handle);
967 data[idx_PIN].pincount++; 958 data[BUFLIB_IDX_PIN].pincount++;
968} 959}
969 960
970void buflib_unpin(struct buflib_context *ctx, int handle) 961void buflib_unpin(struct buflib_context *ctx, int handle)
@@ -975,11 +966,11 @@ void buflib_unpin(struct buflib_context *ctx, int handle)
975 union buflib_data *data = handle_to_block(ctx, handle); 966 union buflib_data *data = handle_to_block(ctx, handle);
976 if (BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING) 967 if (BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING)
977 { 968 {
978 if (data[idx_PIN].pincount == 0) 969 if (data[BUFLIB_IDX_PIN].pincount == 0)
979 buflib_panic(ctx, "handle pin underflow: %d", handle); 970 buflib_panic(ctx, "handle pin underflow: %d", handle);
980 } 971 }
981 972
982 data[idx_PIN].pincount--; 973 data[BUFLIB_IDX_PIN].pincount--;
983} 974}
984 975
985unsigned buflib_pin_count(struct buflib_context *ctx, int handle) 976unsigned buflib_pin_count(struct buflib_context *ctx, int handle)
@@ -988,7 +979,7 @@ unsigned buflib_pin_count(struct buflib_context *ctx, int handle)
988 buflib_panic(ctx, "invalid handle: %d", handle); 979 buflib_panic(ctx, "invalid handle: %d", handle);
989 980
990 union buflib_data *data = handle_to_block(ctx, handle); 981 union buflib_data *data = handle_to_block(ctx, handle);
991 return data[idx_PIN].pincount; 982 return data[BUFLIB_IDX_PIN].pincount;
992} 983}
993 984
994#ifdef BUFLIB_DEBUG_GET_DATA 985#ifdef BUFLIB_DEBUG_GET_DATA
@@ -1062,13 +1053,13 @@ static void check_block_length(struct buflib_context *ctx,
1062{ 1053{
1063 if (BUFLIB_PARANOIA & PARANOIA_CHECK_LENGTH) 1054 if (BUFLIB_PARANOIA & PARANOIA_CHECK_LENGTH)
1064 { 1055 {
1065 intptr_t length = block[idx_LEN].val; 1056 intptr_t length = block[BUFLIB_IDX_LEN].val;
1066 1057
1067 /* Check the block length does not pass beyond the end */ 1058 /* Check the block length does not pass beyond the end */
1068 if (length == 0 || block > ctx->alloc_end - abs(length)) 1059 if (length == 0 || block > ctx->alloc_end - abs(length))
1069 { 1060 {
1070 buflib_panic(ctx, "block len wacky [%p]=%ld", 1061 buflib_panic(ctx, "block len wacky [%p]=%ld",
1071 (void*)&block[idx_LEN], (long)length); 1062 (void*)&block[BUFLIB_IDX_LEN], (long)length);
1072 } 1063 }
1073 } 1064 }
1074} 1065}
@@ -1078,8 +1069,8 @@ static void check_block_handle(struct buflib_context *ctx,
1078{ 1069{
1079 if (BUFLIB_PARANOIA & PARANOIA_CHECK_BLOCK_HANDLE) 1070 if (BUFLIB_PARANOIA & PARANOIA_CHECK_BLOCK_HANDLE)
1080 { 1071 {
1081 intptr_t length = block[idx_LEN].val; 1072 intptr_t length = block[BUFLIB_IDX_LEN].val;
1082 union buflib_data *h_entry = block[idx_HANDLE].handle; 1073 union buflib_data *h_entry = block[BUFLIB_IDX_HANDLE].handle;
1083 1074
1084 /* Check the handle pointer is properly aligned */ 1075 /* Check the handle pointer is properly aligned */
1085 /* TODO: Can we ensure the compiler doesn't optimize this out? 1076 /* TODO: Can we ensure the compiler doesn't optimize this out?
@@ -1088,14 +1079,14 @@ static void check_block_handle(struct buflib_context *ctx,
1088 if (!IS_ALIGNED((uintptr_t)h_entry, alignof(*h_entry))) 1079 if (!IS_ALIGNED((uintptr_t)h_entry, alignof(*h_entry)))
1089 { 1080 {
1090 buflib_panic(ctx, "handle unaligned [%p]=%p", 1081 buflib_panic(ctx, "handle unaligned [%p]=%p",
1091 &block[idx_HANDLE], h_entry); 1082 &block[BUFLIB_IDX_HANDLE], h_entry);
1092 } 1083 }
1093 1084
1094 /* Check the pointer is actually inside the handle table */ 1085 /* Check the pointer is actually inside the handle table */
1095 if (h_entry < ctx->last_handle || h_entry >= ctx->handle_table) 1086 if (h_entry < ctx->last_handle || h_entry >= ctx->handle_table)
1096 { 1087 {
1097 buflib_panic(ctx, "handle out of bounds [%p]=%p", 1088 buflib_panic(ctx, "handle out of bounds [%p]=%p",
1098 &block[idx_HANDLE], h_entry); 1089 &block[BUFLIB_IDX_HANDLE], h_entry);
1099 } 1090 }
1100 1091
1101 /* Now check the allocation is within the block. 1092 /* Now check the allocation is within the block.
diff --git a/firmware/include/buflib.h b/firmware/include/buflib.h
index c4865b6d9b..3fe8ac1430 100644
--- a/firmware/include/buflib.h
+++ b/firmware/include/buflib.h
@@ -317,6 +317,32 @@ static inline void *buflib_get_data(struct buflib_context *ctx, int handle);
317#endif 317#endif
318 318
319/** 319/**
320 * \brief Get a pinned pointer to a buflib allocation
321 * \param ctx Buflib context of the allocation
322 * \param handle Handle identifying the allocation
323 * \return Pointer to the allocation's memory.
324 *
325 * Functionally equivalent to buflib_pin() followed by buflib_get_data(),
326 * but this call is more efficient and should be preferred over separate
327 * calls.
328 *
329 * To unpin the data, call buflib_put_data_pinned() and pass the pointer
330 * returned by this function.
331 */
332static inline void *buflib_get_data_pinned(struct buflib_context *ctx, int handle);
333
334/**
335 * \brief Release a pinned pointer to a buflib allocation
336 * \param ctx Buflib context of the allocation
337 * \param data Pointer returned by buflib_get_data_pinned()
338 *
339 * Decrements the pin count, allowing the buffer to be moved once the
340 * pin count drops to zero. This is more efficient than buflib_unpin()
341 * and should be preferred when you have a pointer to the buflib data.
342 */
343static inline void buflib_put_data_pinned(struct buflib_context *ctx, void *data);
344
345/**
320 * \brief Shift allocations up to free space at the start of the pool 346 * \brief Shift allocations up to free space at the start of the pool
321 * \param ctx Context to operate on 347 * \param ctx Context to operate on
322 * \param size Indicates number of bytes to free up, or 0 to free 348 * \param size Indicates number of bytes to free up, or 0 to free
diff --git a/firmware/include/buflib_malloc.h b/firmware/include/buflib_malloc.h
index 32c837e7b7..a17c75c29a 100644
--- a/firmware/include/buflib_malloc.h
+++ b/firmware/include/buflib_malloc.h
@@ -50,4 +50,16 @@ static inline void *buflib_get_data(struct buflib_context *ctx, int handle)
50} 50}
51#endif 51#endif
52 52
/* Pin the allocation identified by 'handle' and return its data
 * pointer; one-call equivalent of buflib_pin() followed by
 * buflib_get_data(). Undo with buflib_put_data_pinned(). */
static inline void *buflib_get_data_pinned(struct buflib_context *ctx, int handle)
{
    buflib_pin(ctx, handle);

    void *data = buflib_get_data(ctx, handle);
    return data;
}
58
void _buflib_malloc_put_data_pinned(struct buflib_context *ctx, void *data);

/* Drop the pin taken by buflib_get_data_pinned(). Thin inline wrapper
 * so both buflib backends expose the same static-inline API; the real
 * work happens out of line in _buflib_malloc_put_data_pinned(). */
static inline void buflib_put_data_pinned(struct buflib_context *ctx, void *data)
{
    _buflib_malloc_put_data_pinned(ctx, data);
}
64
53#endif /* _BUFLIB_MALLOC_H_ */ 65#endif /* _BUFLIB_MALLOC_H_ */
diff --git a/firmware/include/buflib_mempool.h b/firmware/include/buflib_mempool.h
index 4b01b629c3..448e40963a 100644
--- a/firmware/include/buflib_mempool.h
+++ b/firmware/include/buflib_mempool.h
@@ -30,6 +30,17 @@
30# error "include buflib.h instead" 30# error "include buflib.h instead"
31#endif 31#endif
32 32
33#include "system.h"
34
/* Offsets of the per-allocation header fields, used to index a block
 * as block[BUFLIB_IDX_XXX]. */
enum {
    BUFLIB_IDX_LEN,    /* block length — must stay the first field */
    BUFLIB_IDX_HANDLE, /* back-pointer into the handle table */
    BUFLIB_IDX_OPS,    /* pointer to the allocation's ops struct */
    BUFLIB_IDX_PIN,    /* number of outstanding pins */
    BUFLIB_NUM_FIELDS, /* total header field count */
};
43
33union buflib_data 44union buflib_data
34{ 45{
35 intptr_t val; /* length of the block in n*sizeof(union buflib_data). 46 intptr_t val; /* length of the block in n*sizeof(union buflib_data).
@@ -52,7 +63,7 @@ struct buflib_context
52 bool compact; 63 bool compact;
53}; 64};
54 65
55#define BUFLIB_ALLOC_OVERHEAD (4 * sizeof(union buflib_data)) 66#define BUFLIB_ALLOC_OVERHEAD (BUFLIB_NUM_FIELDS * sizeof(union buflib_data))
56 67
57#ifndef BUFLIB_DEBUG_GET_DATA 68#ifndef BUFLIB_DEBUG_GET_DATA
58static inline void *buflib_get_data(struct buflib_context *ctx, int handle) 69static inline void *buflib_get_data(struct buflib_context *ctx, int handle)
@@ -61,4 +72,26 @@ static inline void *buflib_get_data(struct buflib_context *ctx, int handle)
61} 72}
62#endif 73#endif
63 74
75static inline union buflib_data *_buflib_get_block_header(void *data)
76{
77 union buflib_data *bd = ALIGN_DOWN(data, sizeof(*bd));
78 return bd - BUFLIB_NUM_FIELDS;
79}
80
81static inline void *buflib_get_data_pinned(struct buflib_context *ctx, int handle)
82{
83 void *data = buflib_get_data(ctx, handle);
84 union buflib_data *bd = _buflib_get_block_header(data);
85
86 bd[BUFLIB_IDX_PIN].pincount++;
87 return data;
88}
89
90static inline void buflib_put_data_pinned(struct buflib_context *ctx, void *data)
91{
92 (void)ctx;
93 union buflib_data *bd = _buflib_get_block_header(data);
94 bd[BUFLIB_IDX_PIN].pincount--;
95}
96
64#endif /* _BUFLIB_MEMPOOL_H_ */ 97#endif /* _BUFLIB_MEMPOOL_H_ */