diff options
author | Thomas Martitz <kugel@rockbox.org> | 2011-03-05 17:48:06 +0000 |
---|---|---|
committer | Thomas Martitz <kugel@rockbox.org> | 2011-03-05 17:48:06 +0000 |
commit | cc889e9d608e6b07b78541849b7e63b6fb3f6058 (patch) | |
tree | ea01a47602b68561d294f705e8ab7669fb00ae9a /firmware/kernel.c | |
parent | 0b0f99b18ebe6305c9cab12bf8b36d154fc9c87f (diff) | |
download | rockbox-cc889e9d608e6b07b78541849b7e63b6fb3f6058.tar.gz rockbox-cc889e9d608e6b07b78541849b7e63b6fb3f6058.zip |
Change the thread API a bit.
* Remove the THREAD_ID_CURRENT macro in favor of a thread_self() function; this allows thread functions to be simpler.
* Add a thread_self_entry() shortcut for kernel.c.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@29521 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'firmware/kernel.c')
-rw-r--r-- | firmware/kernel.c | 18 |
1 file changed, 9 insertions, 9 deletions
diff --git a/firmware/kernel.c b/firmware/kernel.c index a8718ebf34..e0879a3c59 100644 --- a/firmware/kernel.c +++ b/firmware/kernel.c | |||
@@ -509,7 +509,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev) | |||
509 | 509 | ||
510 | #ifdef HAVE_PRIORITY_SCHEDULING | 510 | #ifdef HAVE_PRIORITY_SCHEDULING |
511 | KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL || | 511 | KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL || |
512 | QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT), | 512 | QUEUE_GET_THREAD(q) == thread_self_entry(), |
513 | "queue_wait->wrong thread\n"); | 513 | "queue_wait->wrong thread\n"); |
514 | #endif | 514 | #endif |
515 | 515 | ||
@@ -527,7 +527,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev) | |||
527 | if (rd != q->write) /* A waking message could disappear */ | 527 | if (rd != q->write) /* A waking message could disappear */ |
528 | break; | 528 | break; |
529 | 529 | ||
530 | current = thread_id_entry(THREAD_ID_CURRENT); | 530 | current = thread_self_entry(); |
531 | 531 | ||
532 | IF_COP( current->obj_cl = &q->cl; ) | 532 | IF_COP( current->obj_cl = &q->cl; ) |
533 | current->bqp = &q->queue; | 533 | current->bqp = &q->queue; |
@@ -559,7 +559,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks) | |||
559 | 559 | ||
560 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 560 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
561 | KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL || | 561 | KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL || |
562 | QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT), | 562 | QUEUE_GET_THREAD(q) == thread_self_entry(), |
563 | "queue_wait_w_tmo->wrong thread\n"); | 563 | "queue_wait_w_tmo->wrong thread\n"); |
564 | #endif | 564 | #endif |
565 | 565 | ||
@@ -573,7 +573,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks) | |||
573 | wr = q->write; | 573 | wr = q->write; |
574 | if (rd == wr && ticks > 0) | 574 | if (rd == wr && ticks > 0) |
575 | { | 575 | { |
576 | struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT); | 576 | struct thread_entry *current = thread_self_entry(); |
577 | 577 | ||
578 | IF_COP( current->obj_cl = &q->cl; ) | 578 | IF_COP( current->obj_cl = &q->cl; ) |
579 | current->bqp = &q->queue; | 579 | current->bqp = &q->queue; |
@@ -658,7 +658,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data) | |||
658 | { | 658 | { |
659 | struct queue_sender_list *send = q->send; | 659 | struct queue_sender_list *send = q->send; |
660 | struct thread_entry **spp = &send->senders[wr]; | 660 | struct thread_entry **spp = &send->senders[wr]; |
661 | struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT); | 661 | struct thread_entry *current = thread_self_entry(); |
662 | 662 | ||
663 | if(UNLIKELY(*spp)) | 663 | if(UNLIKELY(*spp)) |
664 | { | 664 | { |
@@ -893,7 +893,7 @@ void mutex_init(struct mutex *m) | |||
893 | /* Gain ownership of a mutex object or block until it becomes free */ | 893 | /* Gain ownership of a mutex object or block until it becomes free */ |
894 | void mutex_lock(struct mutex *m) | 894 | void mutex_lock(struct mutex *m) |
895 | { | 895 | { |
896 | struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT); | 896 | struct thread_entry *current = thread_self_entry(); |
897 | 897 | ||
898 | if(current == mutex_get_thread(m)) | 898 | if(current == mutex_get_thread(m)) |
899 | { | 899 | { |
@@ -932,10 +932,10 @@ void mutex_lock(struct mutex *m) | |||
932 | void mutex_unlock(struct mutex *m) | 932 | void mutex_unlock(struct mutex *m) |
933 | { | 933 | { |
934 | /* unlocker not being the owner is an unlocking violation */ | 934 | /* unlocker not being the owner is an unlocking violation */ |
935 | KERNEL_ASSERT(mutex_get_thread(m) == thread_id_entry(THREAD_ID_CURRENT), | 935 | KERNEL_ASSERT(mutex_get_thread(m) == thread_self_entry(), |
936 | "mutex_unlock->wrong thread (%s != %s)\n", | 936 | "mutex_unlock->wrong thread (%s != %s)\n", |
937 | mutex_get_thread(m)->name, | 937 | mutex_get_thread(m)->name, |
938 | thread_id_entry(THREAD_ID_CURRENT)->name); | 938 | thread_self_entry()->name); |
939 | 939 | ||
940 | if(m->recursion > 0) | 940 | if(m->recursion > 0) |
941 | { | 941 | { |
@@ -1019,7 +1019,7 @@ int semaphore_wait(struct semaphore *s, int timeout) | |||
1019 | else | 1019 | else |
1020 | { | 1020 | { |
1021 | /* too many waits - block until count is upped... */ | 1021 | /* too many waits - block until count is upped... */ |
1022 | struct thread_entry * current = thread_id_entry(THREAD_ID_CURRENT); | 1022 | struct thread_entry * current = thread_self_entry(); |
1023 | IF_COP( current->obj_cl = &s->cl; ) | 1023 | IF_COP( current->obj_cl = &s->cl; ) |
1024 | current->bqp = &s->queue; | 1024 | current->bqp = &s->queue; |
1025 | /* return value will be OBJ_WAIT_SUCCEEDED after wait if wake was | 1025 | /* return value will be OBJ_WAIT_SUCCEEDED after wait if wake was |