diff options
author | Aidan MacDonald <amachronic@protonmail.com> | 2023-01-14 18:20:59 +0000 |
---|---|---|
committer | Aidan MacDonald <amachronic@protonmail.com> | 2023-01-15 11:06:27 +0000 |
commit | 800bc000a08b37e22d2b36d32fd448624712a881 (patch) | |
tree | 87149cae9acb83db4a98a873b81515a99200d790 /firmware/buflib_mempool.c | |
parent | 92565e9246f3a47b90fea4a436ecfd8e7a1198b8 (diff) | |
download | rockbox-800bc000a08b37e22d2b36d32fd448624712a881.tar.gz rockbox-800bc000a08b37e22d2b36d32fd448624712a881.zip |
buflib: Add pinned get/put data functions
These are more efficient than separate pin/unpin calls because
pin count increment and decrement can be done cheaply when the
data pointer is known.
Secondly, pinned access can be made safe against preemption by
hardware interrupts or other CPU cores; buflib_get_data() can't.
This makes it more useful under different threading models and
for SMP targets; both of which are not particularly relevant to
Rockbox now, but might be in the future.
Change-Id: I09284251b83bbbc59ef88a494c8fda26a7f7ef26
Diffstat (limited to 'firmware/buflib_mempool.c')
-rw-r--r-- | firmware/buflib_mempool.c | 59 |
1 file changed, 25 insertions, 34 deletions
diff --git a/firmware/buflib_mempool.c b/firmware/buflib_mempool.c index cb35290c03..9d1c055bb9 100644 --- a/firmware/buflib_mempool.c +++ b/firmware/buflib_mempool.c | |||
@@ -107,22 +107,14 @@ | |||
107 | (PARANOIA_CHECK_LENGTH | \ | 107 | (PARANOIA_CHECK_LENGTH | \ |
108 | PARANOIA_CHECK_BLOCK_HANDLE | PARANOIA_CHECK_PINNING) | 108 | PARANOIA_CHECK_BLOCK_HANDLE | PARANOIA_CHECK_PINNING) |
109 | 109 | ||
110 | /* Indices used to access block fields as block[idx_XXX] */ | ||
111 | enum { | ||
112 | idx_LEN, /* length of the block, must come first */ | ||
113 | idx_HANDLE, /* pointer to entry in the handle table */ | ||
114 | idx_OPS, /* pointer to an ops struct */ | ||
115 | idx_PIN, /* pin count */ | ||
116 | BUFLIB_NUM_FIELDS, | ||
117 | }; | ||
118 | |||
119 | struct buflib_callbacks buflib_ops_locked = { | 110 | struct buflib_callbacks buflib_ops_locked = { |
120 | .move_callback = NULL, | 111 | .move_callback = NULL, |
121 | .shrink_callback = NULL, | 112 | .shrink_callback = NULL, |
122 | .sync_callback = NULL, | 113 | .sync_callback = NULL, |
123 | }; | 114 | }; |
124 | 115 | ||
125 | #define IS_MOVABLE(a) (!a[idx_OPS].ops || a[idx_OPS].ops->move_callback) | 116 | #define IS_MOVABLE(a) \ |
117 | (!a[BUFLIB_IDX_OPS].ops || a[BUFLIB_IDX_OPS].ops->move_callback) | ||
126 | 118 | ||
127 | static union buflib_data* find_first_free(struct buflib_context *ctx); | 119 | static union buflib_data* find_first_free(struct buflib_context *ctx); |
128 | static union buflib_data* find_block_before(struct buflib_context *ctx, | 120 | static union buflib_data* find_block_before(struct buflib_context *ctx, |
@@ -281,8 +273,7 @@ union buflib_data* handle_to_block(struct buflib_context* ctx, int handle) | |||
281 | if (!ptr) | 273 | if (!ptr) |
282 | return NULL; | 274 | return NULL; |
283 | 275 | ||
284 | union buflib_data *data = ALIGN_DOWN(ptr, sizeof(*data)); | 276 | return _buflib_get_block_header(ptr); |
285 | return data - BUFLIB_NUM_FIELDS; | ||
286 | } | 277 | } |
287 | 278 | ||
288 | /* Shrink the handle table, returning true if its size was reduced, false if | 279 | /* Shrink the handle table, returning true if its size was reduced, false if |
@@ -318,9 +309,9 @@ move_block(struct buflib_context* ctx, union buflib_data* block, int shift) | |||
318 | union buflib_data *new_block; | 309 | union buflib_data *new_block; |
319 | 310 | ||
320 | check_block_handle(ctx, block); | 311 | check_block_handle(ctx, block); |
321 | union buflib_data *h_entry = block[idx_HANDLE].handle; | 312 | union buflib_data *h_entry = block[BUFLIB_IDX_HANDLE].handle; |
322 | 313 | ||
323 | if (!IS_MOVABLE(block) || block[idx_PIN].pincount > 0) | 314 | if (!IS_MOVABLE(block) || block[BUFLIB_IDX_PIN].pincount > 0) |
324 | return false; | 315 | return false; |
325 | 316 | ||
326 | int handle = ctx->handle_table - h_entry; | 317 | int handle = ctx->handle_table - h_entry; |
@@ -329,7 +320,7 @@ move_block(struct buflib_context* ctx, union buflib_data* block, int shift) | |||
329 | new_block = block + shift; | 320 | new_block = block + shift; |
330 | new_start = h_entry->alloc + shift*sizeof(union buflib_data); | 321 | new_start = h_entry->alloc + shift*sizeof(union buflib_data); |
331 | 322 | ||
332 | struct buflib_callbacks *ops = block[idx_OPS].ops; | 323 | struct buflib_callbacks *ops = block[BUFLIB_IDX_OPS].ops; |
333 | 324 | ||
334 | /* If move must be synchronized with use, user should have specified a | 325 | /* If move must be synchronized with use, user should have specified a |
335 | callback that handles this */ | 326 | callback that handles this */ |
@@ -459,12 +450,12 @@ buflib_compact_and_shrink(struct buflib_context *ctx, unsigned shrink_hints) | |||
459 | if (this->val < 0) | 450 | if (this->val < 0) |
460 | continue; | 451 | continue; |
461 | 452 | ||
462 | struct buflib_callbacks *ops = this[idx_OPS].ops; | 453 | struct buflib_callbacks *ops = this[BUFLIB_IDX_OPS].ops; |
463 | if (!ops || !ops->shrink_callback) | 454 | if (!ops || !ops->shrink_callback) |
464 | continue; | 455 | continue; |
465 | 456 | ||
466 | check_block_handle(ctx, this); | 457 | check_block_handle(ctx, this); |
467 | union buflib_data* h_entry = this[idx_HANDLE].handle; | 458 | union buflib_data* h_entry = this[BUFLIB_IDX_HANDLE].handle; |
468 | int handle = ctx->handle_table - h_entry; | 459 | int handle = ctx->handle_table - h_entry; |
469 | 460 | ||
470 | unsigned pos_hints = shrink_hints & BUFLIB_SHRINK_POS_MASK; | 461 | unsigned pos_hints = shrink_hints & BUFLIB_SHRINK_POS_MASK; |
@@ -596,7 +587,7 @@ handle_alloc: | |||
596 | */ | 587 | */ |
597 | union buflib_data* last_block = find_block_before(ctx, | 588 | union buflib_data* last_block = find_block_before(ctx, |
598 | ctx->alloc_end, false); | 589 | ctx->alloc_end, false); |
599 | struct buflib_callbacks* ops = last_block[idx_OPS].ops; | 590 | struct buflib_callbacks* ops = last_block[BUFLIB_IDX_OPS].ops; |
600 | unsigned hints = 0; | 591 | unsigned hints = 0; |
601 | if (!ops || !ops->shrink_callback) | 592 | if (!ops || !ops->shrink_callback) |
602 | { /* the last one isn't shrinkable | 593 | { /* the last one isn't shrinkable |
@@ -666,10 +657,10 @@ buffer_alloc: | |||
666 | /* Set up the allocated block, by marking the size allocated, and storing | 657 | /* Set up the allocated block, by marking the size allocated, and storing |
667 | * a pointer to the handle. | 658 | * a pointer to the handle. |
668 | */ | 659 | */ |
669 | block[idx_LEN].val = size; | 660 | block[BUFLIB_IDX_LEN].val = size; |
670 | block[idx_HANDLE].handle = handle; | 661 | block[BUFLIB_IDX_HANDLE].handle = handle; |
671 | block[idx_OPS].ops = ops; | 662 | block[BUFLIB_IDX_OPS].ops = ops; |
672 | block[idx_PIN].pincount = 0; | 663 | block[BUFLIB_IDX_PIN].pincount = 0; |
673 | 664 | ||
674 | handle->alloc = (char*)&block[BUFLIB_NUM_FIELDS]; | 665 | handle->alloc = (char*)&block[BUFLIB_NUM_FIELDS]; |
675 | 666 | ||
@@ -916,10 +907,10 @@ buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t ne | |||
916 | metadata_size.val = aligned_oldstart - block; | 907 | metadata_size.val = aligned_oldstart - block; |
917 | /* update val and the handle table entry */ | 908 | /* update val and the handle table entry */ |
918 | new_block = aligned_newstart - metadata_size.val; | 909 | new_block = aligned_newstart - metadata_size.val; |
919 | block[idx_LEN].val = new_next_block - new_block; | 910 | block[BUFLIB_IDX_LEN].val = new_next_block - new_block; |
920 | 911 | ||
921 | check_block_handle(ctx, block); | 912 | check_block_handle(ctx, block); |
922 | block[idx_HANDLE].handle->alloc = newstart; | 913 | block[BUFLIB_IDX_HANDLE].handle->alloc = newstart; |
923 | if (block != new_block) | 914 | if (block != new_block) |
924 | { | 915 | { |
925 | /* move metadata over, i.e. pointer to handle table entry and name | 916 | /* move metadata over, i.e. pointer to handle table entry and name |
@@ -964,7 +955,7 @@ void buflib_pin(struct buflib_context *ctx, int handle) | |||
964 | buflib_panic(ctx, "invalid handle pin: %d", handle); | 955 | buflib_panic(ctx, "invalid handle pin: %d", handle); |
965 | 956 | ||
966 | union buflib_data *data = handle_to_block(ctx, handle); | 957 | union buflib_data *data = handle_to_block(ctx, handle); |
967 | data[idx_PIN].pincount++; | 958 | data[BUFLIB_IDX_PIN].pincount++; |
968 | } | 959 | } |
969 | 960 | ||
970 | void buflib_unpin(struct buflib_context *ctx, int handle) | 961 | void buflib_unpin(struct buflib_context *ctx, int handle) |
@@ -975,11 +966,11 @@ void buflib_unpin(struct buflib_context *ctx, int handle) | |||
975 | union buflib_data *data = handle_to_block(ctx, handle); | 966 | union buflib_data *data = handle_to_block(ctx, handle); |
976 | if (BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING) | 967 | if (BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING) |
977 | { | 968 | { |
978 | if (data[idx_PIN].pincount == 0) | 969 | if (data[BUFLIB_IDX_PIN].pincount == 0) |
979 | buflib_panic(ctx, "handle pin underflow: %d", handle); | 970 | buflib_panic(ctx, "handle pin underflow: %d", handle); |
980 | } | 971 | } |
981 | 972 | ||
982 | data[idx_PIN].pincount--; | 973 | data[BUFLIB_IDX_PIN].pincount--; |
983 | } | 974 | } |
984 | 975 | ||
985 | unsigned buflib_pin_count(struct buflib_context *ctx, int handle) | 976 | unsigned buflib_pin_count(struct buflib_context *ctx, int handle) |
@@ -988,7 +979,7 @@ unsigned buflib_pin_count(struct buflib_context *ctx, int handle) | |||
988 | buflib_panic(ctx, "invalid handle: %d", handle); | 979 | buflib_panic(ctx, "invalid handle: %d", handle); |
989 | 980 | ||
990 | union buflib_data *data = handle_to_block(ctx, handle); | 981 | union buflib_data *data = handle_to_block(ctx, handle); |
991 | return data[idx_PIN].pincount; | 982 | return data[BUFLIB_IDX_PIN].pincount; |
992 | } | 983 | } |
993 | 984 | ||
994 | #ifdef BUFLIB_DEBUG_GET_DATA | 985 | #ifdef BUFLIB_DEBUG_GET_DATA |
@@ -1062,13 +1053,13 @@ static void check_block_length(struct buflib_context *ctx, | |||
1062 | { | 1053 | { |
1063 | if (BUFLIB_PARANOIA & PARANOIA_CHECK_LENGTH) | 1054 | if (BUFLIB_PARANOIA & PARANOIA_CHECK_LENGTH) |
1064 | { | 1055 | { |
1065 | intptr_t length = block[idx_LEN].val; | 1056 | intptr_t length = block[BUFLIB_IDX_LEN].val; |
1066 | 1057 | ||
1067 | /* Check the block length does not pass beyond the end */ | 1058 | /* Check the block length does not pass beyond the end */ |
1068 | if (length == 0 || block > ctx->alloc_end - abs(length)) | 1059 | if (length == 0 || block > ctx->alloc_end - abs(length)) |
1069 | { | 1060 | { |
1070 | buflib_panic(ctx, "block len wacky [%p]=%ld", | 1061 | buflib_panic(ctx, "block len wacky [%p]=%ld", |
1071 | (void*)&block[idx_LEN], (long)length); | 1062 | (void*)&block[BUFLIB_IDX_LEN], (long)length); |
1072 | } | 1063 | } |
1073 | } | 1064 | } |
1074 | } | 1065 | } |
@@ -1078,8 +1069,8 @@ static void check_block_handle(struct buflib_context *ctx, | |||
1078 | { | 1069 | { |
1079 | if (BUFLIB_PARANOIA & PARANOIA_CHECK_BLOCK_HANDLE) | 1070 | if (BUFLIB_PARANOIA & PARANOIA_CHECK_BLOCK_HANDLE) |
1080 | { | 1071 | { |
1081 | intptr_t length = block[idx_LEN].val; | 1072 | intptr_t length = block[BUFLIB_IDX_LEN].val; |
1082 | union buflib_data *h_entry = block[idx_HANDLE].handle; | 1073 | union buflib_data *h_entry = block[BUFLIB_IDX_HANDLE].handle; |
1083 | 1074 | ||
1084 | /* Check the handle pointer is properly aligned */ | 1075 | /* Check the handle pointer is properly aligned */ |
1085 | /* TODO: Can we ensure the compiler doesn't optimize this out? | 1076 | /* TODO: Can we ensure the compiler doesn't optimize this out? |
@@ -1088,14 +1079,14 @@ static void check_block_handle(struct buflib_context *ctx, | |||
1088 | if (!IS_ALIGNED((uintptr_t)h_entry, alignof(*h_entry))) | 1079 | if (!IS_ALIGNED((uintptr_t)h_entry, alignof(*h_entry))) |
1089 | { | 1080 | { |
1090 | buflib_panic(ctx, "handle unaligned [%p]=%p", | 1081 | buflib_panic(ctx, "handle unaligned [%p]=%p", |
1091 | &block[idx_HANDLE], h_entry); | 1082 | &block[BUFLIB_IDX_HANDLE], h_entry); |
1092 | } | 1083 | } |
1093 | 1084 | ||
1094 | /* Check the pointer is actually inside the handle table */ | 1085 | /* Check the pointer is actually inside the handle table */ |
1095 | if (h_entry < ctx->last_handle || h_entry >= ctx->handle_table) | 1086 | if (h_entry < ctx->last_handle || h_entry >= ctx->handle_table) |
1096 | { | 1087 | { |
1097 | buflib_panic(ctx, "handle out of bounds [%p]=%p", | 1088 | buflib_panic(ctx, "handle out of bounds [%p]=%p", |
1098 | &block[idx_HANDLE], h_entry); | 1089 | &block[BUFLIB_IDX_HANDLE], h_entry); |
1099 | } | 1090 | } |
1100 | 1091 | ||
1101 | /* Now check the allocation is within the block. | 1092 | /* Now check the allocation is within the block. |