 firmware/buflib.c | 123 ++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 105 insertions(+), 18 deletions(-)
diff --git a/firmware/buflib.c b/firmware/buflib.c
index 6ffe9335a7..6734f21036 100644
--- a/firmware/buflib.c
+++ b/firmware/buflib.c
@@ -98,7 +98,9 @@
 #define BPANICF panicf
 
 /* Available paranoia checks */
 #define PARANOIA_CHECK_LENGTH (1 << 0)
+#define PARANOIA_CHECK_HANDLE (1 << 1)
+#define PARANOIA_CHECK_BLOCK_HANDLE (1 << 2)
 /* Bitmask of enabled paranoia checks */
 #define BUFLIB_PARANOIA 0
 
@@ -143,6 +145,26 @@ static union buflib_data* find_block_before(struct buflib_context *ctx,
 static void check_block_length(struct buflib_context *ctx,
                                union buflib_data *block);
 
+/* Check a handle table entry to ensure the user pointer is within the
+ * bounds of the allocated area and there is enough room for a minimum
+ * size block header.
+ *
+ * This verifies that it is safe to convert the entry's pointer to a
+ * block end pointer and dereference fields at the block end.
+ */
+static void check_handle(struct buflib_context *ctx,
+                         union buflib_data *h_entry);
+
+/* Check a block's handle pointer to ensure it is within the handle
+ * table, and that the user pointer is pointing within the block.
+ *
+ * This verifies that it is safe to dereference the entry, in addition
+ * to all checks performed by check_handle(). It also ensures that the
+ * pointer in the handle table points within the block, as determined
+ * by the length field at the start of the block.
+ */
+static void check_block_handle(struct buflib_context *ctx,
+                               union buflib_data *block);
 
 /* Initialize buffer manager */
 void
@@ -271,15 +293,28 @@ void handle_free(struct buflib_context *ctx, union buflib_data *handle)
 static inline
 union buflib_data* handle_to_block(struct buflib_context* ctx, int handle)
 {
-    union buflib_data *data = ALIGN_DOWN(buflib_get_data(ctx, handle), sizeof (*data));
-    /* this is a valid case, e.g. during buflib_alloc_ex() when the handle
-     * has already been allocated but not the data */
-    if (!data)
+    void *ptr = buflib_get_data(ctx, handle);
+
+    /* this is a valid case for shrinking if handle
+     * was freed by the shrink callback */
+    if (!ptr)
         return NULL;
 
+    union buflib_data *data = ALIGN_DOWN(ptr, sizeof(*data));
     return data - data[-bidx_BSIZE].val;
 }
 
+/* Get the block end pointer from a handle table entry */
+static union buflib_data*
+h_entry_to_block_end(struct buflib_context *ctx, union buflib_data *h_entry)
+{
+    check_handle(ctx, h_entry);
+
+    void *alloc = h_entry->alloc;
+    union buflib_data *data = ALIGN_DOWN(alloc, sizeof(*data));
+    return data;
+}
+
 /* Shrink the handle table, returning true if its size was reduced, false if
  * not
  */
@@ -299,13 +334,6 @@ static inline bool handle_table_shrink(struct buflib_context *ctx)
     return handle != old_last;
 }
 
-static inline
-union buflib_data* userpointer_to_block_end(void *userpointer)
-{
-    union buflib_data *data = ALIGN_DOWN(userpointer, sizeof(*data));
-    return data;
-}
-
 static uint32_t calc_block_crc(union buflib_data *block,
                                union buflib_data *block_end)
 {
@@ -327,11 +355,9 @@ move_block(struct buflib_context* ctx, union buflib_data* block, int shift)
     char* new_start;
     union buflib_data *new_block;
 
-    if (block < ctx->buf_start || block > ctx->alloc_end)
-        buflib_panic(ctx, "buflib data corrupted %p", block);
-
+    check_block_handle(ctx, block);
     union buflib_data *h_entry = block[fidx_HANDLE].handle;
-    union buflib_data *block_end = userpointer_to_block_end(h_entry->alloc);
+    union buflib_data *block_end = h_entry_to_block_end(ctx, h_entry);
 
     uint32_t crc = calc_block_crc(block, block_end);
     if (crc != block_end[-bidx_CRC].crc)
@@ -481,6 +507,7 @@ buflib_compact_and_shrink(struct buflib_context *ctx, unsigned shrink_hints)
         if (!ops || !ops->shrink_callback)
             continue;
 
+        check_block_handle(ctx, this);
         union buflib_data* h_entry = this[fidx_HANDLE].handle;
         int handle = ctx->handle_table - h_entry;
 
@@ -949,6 +976,7 @@ buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t ne
         new_block = aligned_newstart - metadata_size.val;
         block[fidx_LEN].val = new_next_block - new_block;
 
+        check_block_handle(ctx, block);
         block[fidx_HANDLE].handle->alloc = newstart;
         if (block != new_block)
         {
@@ -971,7 +999,7 @@ buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t ne
 
     /* update crc of the metadata */
     union buflib_data *new_h_entry = new_block[fidx_HANDLE].handle;
-    union buflib_data *new_block_end = userpointer_to_block_end(new_h_entry->alloc);
+    union buflib_data *new_block_end = h_entry_to_block_end(ctx, new_h_entry);
     new_block_end[-bidx_CRC].crc = calc_block_crc(new_block, new_block_end);
 
     /* Now deal with size changes that create free blocks after the allocation */
@@ -1024,8 +1052,9 @@ void buflib_check_valid(struct buflib_context *ctx)
         if (block->val < 0)
             continue;
 
+        check_block_handle(ctx, block);
         union buflib_data *h_entry = block[fidx_HANDLE].handle;
-        union buflib_data *block_end = userpointer_to_block_end(h_entry->alloc);
+        union buflib_data *block_end = h_entry_to_block_end(ctx, h_entry);
         uint32_t crc = calc_block_crc(block, block_end);
         if (crc != block_end[-bidx_CRC].crc)
         {
@@ -1130,3 +1159,61 @@ static void check_block_length(struct buflib_context *ctx,
         }
     }
 }
+
+static void check_handle(struct buflib_context *ctx,
+                         union buflib_data *h_entry)
+{
+    if (BUFLIB_PARANOIA & PARANOIA_CHECK_HANDLE)
+    {
+        /* For the pointer to be valid there needs to be room for a minimum
+         * size block header, so we add BUFLIB_NUM_FIELDS to ctx->buf_start. */
+        void *alloc = h_entry->alloc;
+        void *alloc_begin = ctx->buf_start + BUFLIB_NUM_FIELDS;
+        void *alloc_end = ctx->alloc_end;
+        /* buflib allows zero length allocations, so alloc_end is inclusive */
+        if (alloc < alloc_begin || alloc > alloc_end)
+        {
+            buflib_panic(ctx, "alloc outside buf [%p]=%p, %p-%p",
+                         h_entry, alloc, alloc_begin, alloc_end);
+        }
+    }
+}
+
+static void check_block_handle(struct buflib_context *ctx,
+                               union buflib_data *block)
+{
+    if (BUFLIB_PARANOIA & PARANOIA_CHECK_BLOCK_HANDLE)
+    {
+        intptr_t length = block[fidx_LEN].val;
+        union buflib_data *h_entry = block[fidx_HANDLE].handle;
+
+        /* Check the handle pointer is properly aligned */
+        /* TODO: Can we ensure the compiler doesn't optimize this out?
+         * I dunno, maybe the compiler can assume the pointer is always
+         * properly aligned due to some C standard voodoo?? */
+        if (!IS_ALIGNED((uintptr_t)h_entry, alignof(*h_entry)))
+        {
+            buflib_panic(ctx, "handle unaligned [%p]=%p",
+                         &block[fidx_HANDLE], h_entry);
+        }
+
+        /* Check the pointer is actually inside the handle table */
+        if (h_entry < ctx->last_handle || h_entry >= ctx->handle_table)
+        {
+            buflib_panic(ctx, "handle out of bounds [%p]=%p",
+                         &block[fidx_HANDLE], h_entry);
+        }
+
+        /* Now check the allocation is within the block.
+         * This is stricter than check_handle(). */
+        void *alloc = h_entry->alloc;
+        void *alloc_begin = block;
+        void *alloc_end = block + length;
+        /* buflib allows zero length allocations, so alloc_end is inclusive */
+        if (alloc < alloc_begin || alloc > alloc_end)
+        {
+            buflib_panic(ctx, "alloc outside block [%p]=%p, %p-%p",
+                         h_entry, alloc, alloc_begin, alloc_end);
+        }
+    }
+}
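
A note on enabling the new checks: the patch leaves BUFLIB_PARANOIA at 0, so check_handle() and check_block_handle() compile down to empty bodies and cost nothing in release builds. A minimal sketch of a local debugging change, assuming the intended usage is simply OR-ing the desired bits into the mask near the top of firmware/buflib.c (this redefinition is illustrative, not part of the patch):

/* Hypothetical local edit for a debug build: enable all paranoia checks.
 * The PARANOIA_CHECK_* bits are the ones defined by the patch above. */
#define BUFLIB_PARANOIA \
    (PARANOIA_CHECK_LENGTH | PARANOIA_CHECK_HANDLE | PARANOIA_CHECK_BLOCK_HANDLE)

With a non-zero mask, move_block(), buflib_compact_and_shrink(), buflib_shrink() and buflib_check_valid() call buflib_panic() as soon as a handle table entry or a block's handle pointer is found out of bounds, rather than continuing with a corrupted pointer.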