From 7b4eb44395bced7073e37d0b8b0d83fb2f518482 Mon Sep 17 00:00:00 2001
From: Michael Sevakis
Date: Mon, 27 Dec 2010 10:05:09 +0000
Subject: Certain data accesses in the kernel should have volatile semantics to
 be correct and not rely on the whims of the compiler. Change queue clearing
 to simply catch read up to write rather than reset both to 0 to ensure sane
 results for queue_count and queue_empty with concurrency. Binsize may or may
 not increase a bit depending upon whether the output was as intended in all
 places; wrong stuff was already unlikely to cause any issue.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@28909 a1c6a512-1295-4272-9138-f99709370657
---
 firmware/export/kernel.h | 10 +++++-----
 firmware/export/thread.h |  2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)

(limited to 'firmware/export')

diff --git a/firmware/export/kernel.h b/firmware/export/kernel.h
index c7fcd93284..4656d87fb2 100644
--- a/firmware/export/kernel.h
+++ b/firmware/export/kernel.h
@@ -106,7 +106,7 @@ struct queue_sender_list
     struct thread_entry *senders[QUEUE_LENGTH]; /* message->thread map */
     struct thread_entry *list;                  /* list of senders in map */
     /* Send info for last message dequeued or NULL if replied or not sent */
-    struct thread_entry *curr_sender;
+    struct thread_entry * volatile curr_sender;
 #ifdef HAVE_PRIORITY_SCHEDULING
     struct blocker blocker;
 #endif
@@ -126,10 +126,10 @@ struct event_queue
 {
     struct thread_entry *queue;              /* waiter list */
     struct queue_event events[QUEUE_LENGTH]; /* list of events */
-    unsigned int read;                       /* head of queue */
-    unsigned int write;                      /* tail of queue */
+    unsigned int volatile read;              /* head of queue */
+    unsigned int volatile write;             /* tail of queue */
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
-    struct queue_sender_list *send;          /* list of threads waiting for
+    struct queue_sender_list * volatile send; /* list of threads waiting for
                                                 reply to an event */
 #ifdef HAVE_PRIORITY_SCHEDULING
     struct blocker *blocker_p;               /* priority inheritance info
@@ -171,7 +171,7 @@ struct semaphore
 struct wakeup
 {
     struct thread_entry *queue;              /* waiter list */
-    bool signalled;                          /* signalled status */
+    bool volatile signalled;                 /* signalled status */
     IF_COP( struct corelock cl; )            /* multiprocessor sync */
 };
 #endif
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index 87c2d2d709..ba777dc3d1 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -269,7 +269,7 @@ struct thread_entry
                                   /* Only enabled when using queue_send for now */
 #endif
 #if defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || NUM_CORES > 1
-    intptr_t retval;              /* Return value from a blocked operation/
+    volatile intptr_t retval;     /* Return value from a blocked operation/
                                      misc. use */
 #endif
 #ifdef HAVE_PRIORITY_SCHEDULING
--
cgit v1.2.3
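
The queue-clearing change described in the commit message (catching read up to write rather than resetting both indices to 0) lives in code outside the hunks shown above, which are limited to firmware/export. The sketch below is a minimal illustration of the idea only, not the actual Rockbox implementation: it assumes queue_count() is derived from (write - read) and queue_empty() from (read == write), as the indices in struct event_queue suggest, and it omits the corelock/IRQ protection the real kernel code uses.

/* Minimal sketch only -- not the actual Rockbox kernel code.
 * Assumed: queue_count() == (write - read), queue_empty() == (read == write).
 * Locking is intentionally omitted. */

struct example_queue
{
    unsigned int volatile read;   /* head of queue */
    unsigned int volatile write;  /* tail of queue */
};

/* Old approach: reset both indices.  A concurrent reader computing
 * (write - read) between the two stores can pair a stale index with a
 * fresh one and see a bogus count (e.g. a huge unsigned value). */
static void queue_clear_reset(struct example_queue *q)
{
    q->read  = 0;
    q->write = 0;
}

/* New approach: catch read up to write with a single store.  Any reader
 * then sees either the old count or zero, so queue_count() and
 * queue_empty() stay sane under concurrency. */
static void queue_clear_catch_up(struct example_queue *q)
{
    q->read = q->write;
}

Note also the qualifier placement in the hunks above: declarations such as struct thread_entry * volatile curr_sender and struct queue_sender_list * volatile send make the pointer itself volatile (it must be reloaded on each access), not the object it points to.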