author    Michael Sevakis <jethead71@rockbox.org>  2014-04-24 04:09:18 -0400
committer Michael Sevakis <jethead71@rockbox.org>  2014-08-06 02:47:47 +0200
commit    533d396761b630e372166f6f0522ba1c2d128d70 (patch)
tree      823a5f800049f62d4ea9f573b4cdeb3e7ff9b3e1 /firmware/kernel/thread.c
parent    6536f1db3eedf0a12d16c5504cba94725eb6500d (diff)
download  rockbox-533d396761b630e372166f6f0522ba1c2d128d70.tar.gz
          rockbox-533d396761b630e372166f6f0522ba1c2d128d70.zip
Add multi-reader, single-writer locks to kernel.
Any number of readers may be in the critical section at a time and writers
are mutually exclusive to all other threads. They are a better choice when
data is rarely modified but often read and multiple threads can safely
access it for reading.

Priority inheritance is fully implemented along with other changes to the
kernel to fully support it on multiowner objects.

This also cleans up priority code in the kernel and updates some associated
structures in existing objects to the cleaner form. Currently doesn't add
the mrsw_lock.[ch] files since they're not yet needed by anything but the
supporting improvements are still useful. This includes a typed bitarray
API (bitarray.h) which is pretty basic for now.

Change-Id: Idbe43dcd9170358e06d48d00f1c69728ff45b0e3
Reviewed-on: http://gerrit.rockbox.org/801
Reviewed-by: Michael Sevakis <jethead71@rockbox.org>
Tested: Michael Sevakis <jethead71@rockbox.org>
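For orientation, the usage pattern the new lock type targets looks roughly
like the following sketch. mrsw_lock.[ch] is explicitly not added by this
commit, so every mrsw_* name below is illustrative only, not the committed
API:

    struct mrsw_lock shared_lock;          /* hypothetical lock object */
    static int shared_value;

    static void reader(void)
    {
        mrsw_read_acquire(&shared_lock);   /* any number of readers at once */
        int v = shared_value;              /* read-only access is safe here */
        mrsw_read_release(&shared_lock);
        (void)v;
    }

    static void writer(int v)
    {
        mrsw_write_acquire(&shared_lock);  /* excludes readers and writers */
        shared_value = v;
        mrsw_write_release(&shared_lock);
    }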
Diffstat (limited to 'firmware/kernel/thread.c')
 -rw-r--r--  firmware/kernel/thread.c | 961
 1 file changed, 502 insertions(+), 459 deletions(-)
diff --git a/firmware/kernel/thread.c b/firmware/kernel/thread.c
index 43ff584a68..0a47f97e93 100644
--- a/firmware/kernel/thread.c
+++ b/firmware/kernel/thread.c
@@ -246,13 +246,13 @@ static void thread_stkov(struct thread_entry *thread)
         cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
 #else
 #define LOCK_THREAD(thread) \
-    ({ })
+    ({ (void)(thread); })
 #define TRY_LOCK_THREAD(thread) \
-    ({ })
+    ({ (void)(thread); })
 #define UNLOCK_THREAD(thread) \
-    ({ })
+    ({ (void)(thread); })
 #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
-    ({ })
+    ({ (void)(thread); })
 #endif
 
 /* RTR list */
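The ({ (void)(thread); }) form matters on builds where locking compiles
away: the GCC statement expression still "uses" the macro argument, so
unused-variable warnings don't fire. A standalone illustration (not from
the commit):

    #define LOCK_THREAD(thread) \
        ({ (void)(thread); })

    static void example(void *thread)
    {
        LOCK_THREAD(thread); /* no-op, but 'thread' counts as used */
    }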
@@ -279,6 +279,100 @@ static void thread_stkov(struct thread_entry *thread)
 #define rtr_move_entry_inl(core, from, to)
 #endif
 
+static inline void thread_store_context(struct thread_entry *thread)
+{
+#if (CONFIG_PLATFORM & PLATFORM_HOSTED)
+    thread->__errno = errno;
+#endif
+    store_context(&thread->context);
+}
+
+static inline void thread_load_context(struct thread_entry *thread)
+{
+    load_context(&thread->context);
+#if (CONFIG_PLATFORM & PLATFORM_HOSTED)
+    errno = thread->__errno;
+#endif
+}
+
+static inline unsigned int should_switch_tasks(void)
+{
+    unsigned int result = THREAD_OK;
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+    struct thread_entry *current = cores[CURRENT_CORE].running;
+    if (current &&
+        priobit_ffs(&cores[IF_COP_CORE(current->core)].rtr.mask)
+            < current->priority)
+    {
+        /* There is a thread ready to run of higher priority on the same
+         * core as the current one; recommend a task switch. */
+        result |= THREAD_SWITCH;
+    }
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
+    return result;
+}
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+/*---------------------------------------------------------------------------
+ * Locks the thread registered as the owner of the block and makes sure it
+ * didn't change in the meantime
+ *---------------------------------------------------------------------------
+ */
+#if NUM_CORES == 1
+static inline struct thread_entry * lock_blocker_thread(struct blocker *bl)
+{
+    return bl->thread;
+}
+#else /* NUM_CORES > 1 */
+static struct thread_entry * lock_blocker_thread(struct blocker *bl)
+{
+    /* The blocker thread may change during the process of trying to
+       capture it */
+    while (1)
+    {
+        struct thread_entry *t = bl->thread;
+
+        /* TRY, or else deadlocks are possible */
+        if (!t)
+        {
+            struct blocker_splay *blsplay = (struct blocker_splay *)bl;
+            if (corelock_try_lock(&blsplay->cl))
+            {
+                if (!bl->thread)
+                    return NULL; /* Still multi */
+
+                corelock_unlock(&blsplay->cl);
+            }
+        }
+        else
+        {
+            if (TRY_LOCK_THREAD(t))
+            {
+                if (bl->thread == t)
+                    return t;
+
+                UNLOCK_THREAD(t);
+            }
+        }
+    }
+}
+#endif /* NUM_CORES */
+
+static inline void unlock_blocker_thread(struct blocker *bl)
+{
+#if NUM_CORES > 1
+    struct thread_entry *blt = bl->thread;
+    if (blt)
+        UNLOCK_THREAD(blt);
+    else
+        corelock_unlock(&((struct blocker_splay *)bl)->cl);
+#endif /* NUM_CORES > 1*/
+    (void)bl;
+}
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
 /*---------------------------------------------------------------------------
  * Thread list structure - circular:
  *   +------------------------------+
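The __errno save/restore exists because hosted builds run all Rockbox
threads inside one host thread while the C library's errno is per host
thread; without it, a context switch could clobber a value another Rockbox
thread was about to read. A simplified stand-alone illustration (names
hypothetical):

    #include <errno.h>

    struct saved_ctx
    {
        int saved_errno;
        /* ... machine context ... */
    };

    static void ctx_store(struct saved_ctx *c) { c->saved_errno = errno; }
    static void ctx_load(const struct saved_ctx *c) { errno = c->saved_errno; }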
@@ -420,7 +514,6 @@ static void remove_from_list_tmo(struct thread_entry *thread)
     }
 }
 
-
 #ifdef HAVE_PRIORITY_SCHEDULING
 /*---------------------------------------------------------------------------
  * Priority distribution structure (one category for each possible priority):
@@ -476,19 +569,9 @@ static void remove_from_list_tmo(struct thread_entry *thread)
 static inline unsigned int prio_add_entry(
     struct priority_distribution *pd, int priority)
 {
-    unsigned int count;
-    /* Enough size/instruction count difference for ARM makes it worth it to
-     * use different code (192 bytes for ARM). Only thing better is ASM. */
-#ifdef CPU_ARM
-    count = pd->hist[priority];
-    if (++count == 1)
-        pd->mask |= 1 << priority;
-    pd->hist[priority] = count;
-#else /* This one's better for Coldfire */
-    if ((count = ++pd->hist[priority]) == 1)
-        pd->mask |= 1 << priority;
-#endif
-
+    unsigned int count = ++pd->hist[priority];
+    if (count == 1)
+        priobit_set_bit(&pd->mask, priority);
     return count;
 }
 
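The pattern prio_add_entry() now uses is a count histogram paired with an
occupancy bitmask, so "highest priority present" becomes a find-first-set
over the mask. A self-contained sketch of the idea (simplified types; the
real code goes through the priobit_* helpers):

    #include <stdint.h>

    #define NUM_PRIORITIES 32  /* assumed level count; lower = more urgent */

    struct prio_dist
    {
        uint32_t mask;                 /* bit n set iff hist[n] != 0 */
        uint8_t  hist[NUM_PRIORITIES]; /* entries outstanding per level */
    };

    static unsigned int add_entry(struct prio_dist *pd, int priority)
    {
        unsigned int count = ++pd->hist[priority];
        if (count == 1)
            pd->mask |= 1u << priority;  /* level just became occupied */
        return count;
    }

    static int highest(const struct prio_dist *pd)
    {
        /* The mask is never 0 in the kernel, since a thread always holds
         * its own base priority; ctz finds the lowest set bit, which is
         * the most urgent occupied level. */
        return __builtin_ctz(pd->mask);
    }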
@@ -499,18 +582,9 @@ static inline unsigned int prio_add_entry(
 static inline unsigned int prio_subtract_entry(
     struct priority_distribution *pd, int priority)
 {
-    unsigned int count;
-
-#ifdef CPU_ARM
-    count = pd->hist[priority];
-    if (--count == 0)
-        pd->mask &= ~(1 << priority);
-    pd->hist[priority] = count;
-#else
-    if ((count = --pd->hist[priority]) == 0)
-        pd->mask &= ~(1 << priority);
-#endif
-
+    unsigned int count = --pd->hist[priority];
+    if (count == 0)
+        priobit_clear_bit(&pd->mask, priority);
     return count;
 }
 
@@ -521,31 +595,38 @@ static inline unsigned int prio_subtract_entry(
 static inline void prio_move_entry(
     struct priority_distribution *pd, int from, int to)
 {
-    uint32_t mask = pd->mask;
-
-#ifdef CPU_ARM
-    unsigned int count;
-
-    count = pd->hist[from];
-    if (--count == 0)
-        mask &= ~(1 << from);
-    pd->hist[from] = count;
-
-    count = pd->hist[to];
-    if (++count == 1)
-        mask |= 1 << to;
-    pd->hist[to] = count;
-#else
-    if (--pd->hist[from] == 0)
-        mask &= ~(1 << from);
-
-    if (++pd->hist[to] == 1)
-        mask |= 1 << to;
-#endif
-
-    pd->mask = mask;
+    if (--pd->hist[from] == 0)
+        priobit_clear_bit(&pd->mask, from);
+
+    if (++pd->hist[to] == 1)
+        priobit_set_bit(&pd->mask, to);
+}
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
+/*---------------------------------------------------------------------------
+ * Move a thread back to a running state on its core.
+ *---------------------------------------------------------------------------
+ */
+static void core_schedule_wakeup(struct thread_entry *thread)
+{
+    const unsigned int core = IF_COP_CORE(thread->core);
+
+    RTR_LOCK(core);
+
+    thread->state = STATE_RUNNING;
+
+    add_to_list_l(&cores[core].running, thread);
+    rtr_add_entry(core, thread->priority);
+
+    RTR_UNLOCK(core);
+
+#if NUM_CORES > 1
+    if (core != CURRENT_CORE)
+        core_wake(core);
+#endif
 }
 
+#ifdef HAVE_PRIORITY_SCHEDULING
 /*---------------------------------------------------------------------------
  * Change the priority and rtr entry for a running thread
  *---------------------------------------------------------------------------
@@ -605,191 +686,211 @@ static int find_highest_priority_in_list_l(
  * those are prevented, right? :-)
  *---------------------------------------------------------------------------
  */
-static struct thread_entry *
-    blocker_inherit_priority(struct thread_entry *current)
-{
-    const int priority = current->priority;
-    struct blocker *bl = current->blocker;
-    struct thread_entry * const tstart = current;
-    struct thread_entry *bl_t = bl->thread;
-
-    /* Blocker cannot change since the object protection is held */
-    LOCK_THREAD(bl_t);
-
-    for (;;)
-    {
-        struct thread_entry *next;
-        int bl_pr = bl->priority;
-
-        if (priority >= bl_pr)
-            break; /* Object priority already high enough */
-
-        bl->priority = priority;
-
-        /* Add this one */
-        prio_add_entry(&bl_t->pdist, priority);
-
-        if (bl_pr < PRIORITY_IDLE)
-        {
-            /* Not first waiter - subtract old one */
-            prio_subtract_entry(&bl_t->pdist, bl_pr);
-        }
-
-        if (priority >= bl_t->priority)
-            break; /* Thread priority high enough */
-
-        if (bl_t->state == STATE_RUNNING)
-        {
-            /* Blocking thread is a running thread therefore there are no
-             * further blockers. Change the "run queue" on which it
-             * resides. */
-            set_running_thread_priority(bl_t, priority);
-            break;
-        }
-
-        bl_t->priority = priority;
-
-        /* If blocking thread has a blocker, apply transitive inheritance */
-        bl = bl_t->blocker;
-
-        if (bl == NULL)
-            break; /* End of chain or object doesn't support inheritance */
-
-        next = bl->thread;
-
-        if (UNLIKELY(next == tstart))
-            break; /* Full-circle - deadlock! */
-
-        UNLOCK_THREAD(current);
-
-#if NUM_CORES > 1
-        for (;;)
-        {
-            LOCK_THREAD(next);
-
-            /* Blocker could change - retest condition */
-            if (LIKELY(bl->thread == next))
-                break;
-
-            UNLOCK_THREAD(next);
-            next = bl->thread;
-        }
-#endif
-        current = bl_t;
-        bl_t = next;
-    }
-
-    UNLOCK_THREAD(bl_t);
-
-    return current;
-}
-
-/*---------------------------------------------------------------------------
- * Readjust priorities when waking a thread blocked waiting for another
- * in essence "releasing" the thread's effect on the object owner. Can be
- * performed from any context.
- *---------------------------------------------------------------------------
- */
-struct thread_entry *
-    wakeup_priority_protocol_release(struct thread_entry *thread)
-{
-    const int priority = thread->priority;
-    struct blocker *bl = thread->blocker;
-    struct thread_entry * const tstart = thread;
-    struct thread_entry *bl_t = bl->thread;
-
-    /* Blocker cannot change since object will be locked */
-    LOCK_THREAD(bl_t);
-
-    thread->blocker = NULL; /* Thread not blocked */
-
-    for (;;)
-    {
-        struct thread_entry *next;
-        int bl_pr = bl->priority;
-
-        if (priority > bl_pr)
-            break; /* Object priority higher */
-
-        next = *thread->bqp;
-
-        if (next == NULL)
-        {
-            /* No more threads in queue */
-            prio_subtract_entry(&bl_t->pdist, bl_pr);
-            bl->priority = PRIORITY_IDLE;
-        }
-        else
-        {
-            /* Check list for highest remaining priority */
-            int queue_pr = find_highest_priority_in_list_l(next);
-
-            if (queue_pr == bl_pr)
-                break; /* Object priority not changing */
-
-            /* Change queue priority */
-            prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
-            bl->priority = queue_pr;
-        }
-
-        if (bl_pr > bl_t->priority)
-            break; /* thread priority is higher */
-
-        bl_pr = find_first_set_bit(bl_t->pdist.mask);
-
-        if (bl_pr == bl_t->priority)
-            break; /* Thread priority not changing */
-
-        if (bl_t->state == STATE_RUNNING)
-        {
-            /* No further blockers */
-            set_running_thread_priority(bl_t, bl_pr);
-            break;
-        }
-
-        bl_t->priority = bl_pr;
-
-        /* If blocking thread has a blocker, apply transitive inheritance */
-        bl = bl_t->blocker;
-
-        if (bl == NULL)
-            break; /* End of chain or object doesn't support inheritance */
-
-        next = bl->thread;
-
-        if (UNLIKELY(next == tstart))
-            break; /* Full-circle - deadlock! */
-
-        UNLOCK_THREAD(thread);
-
-#if NUM_CORES > 1
-        for (;;)
-        {
-            LOCK_THREAD(next);
-
-            /* Blocker could change - retest condition */
-            if (LIKELY(bl->thread == next))
-                break;
-
-            UNLOCK_THREAD(next);
-            next = bl->thread;
-        }
-#endif
-        thread = bl_t;
-        bl_t = next;
-    }
-
-    UNLOCK_THREAD(bl_t);
-
-#if NUM_CORES > 1
-    if (UNLIKELY(thread != tstart))
-    {
-        /* Relock original if it changed */
-        LOCK_THREAD(tstart);
-    }
-#endif
-
-    return cores[CURRENT_CORE].running;
-}
+static void inherit_priority(
+    struct blocker * const blocker0, struct blocker *bl,
+    struct thread_entry *blt, int newblpr)
+{
+    int oldblpr = bl->priority;
+
+    while (1)
+    {
+        if (blt == NULL)
+        {
+            /* Multiple owners */
+            struct blocker_splay *blsplay = (struct blocker_splay *)bl;
+
+            /* Recurse down the all the branches of this; it's the only way.
+               We might meet the same queue several times if more than one of
+               these threads is waiting the same queue. That isn't a problem
+               for us since we early-terminate, just notable. */
+            FOR_EACH_BITARRAY_SET_BIT(&blsplay->mask, slotnum)
+            {
+                bl->priority = oldblpr; /* To see the change each time */
+                blt = &threads[slotnum];
+                LOCK_THREAD(blt);
+                inherit_priority(blocker0, bl, blt, newblpr);
+            }
+
+            corelock_unlock(&blsplay->cl);
+            return;
+        }
+
+        bl->priority = newblpr;
+
+        /* Update blocker thread inheritance record */
+        if (newblpr < PRIORITY_IDLE)
+            prio_add_entry(&blt->pdist, newblpr);
+
+        if (oldblpr < PRIORITY_IDLE)
+            prio_subtract_entry(&blt->pdist, oldblpr);
+
+        int oldpr = blt->priority;
+        int newpr = priobit_ffs(&blt->pdist.mask);
+        if (newpr == oldpr)
+            break; /* No blocker thread priority change */
+
+        if (blt->state == STATE_RUNNING)
+        {
+            set_running_thread_priority(blt, newpr);
+            break; /* Running: last in chain */
+        }
+
+        /* Blocker is blocked */
+        blt->priority = newpr;
+
+        bl = blt->blocker;
+        if (LIKELY(bl == NULL))
+            break; /* Block doesn't support PIP */
+
+        if (UNLIKELY(bl == blocker0))
+            break; /* Full circle - deadlock! */
+
+        /* Blocker becomes current thread and the process repeats */
+        struct thread_entry **bqp = blt->bqp;
+        struct thread_entry *t = blt;
+        blt = lock_blocker_thread(bl);
+
+        UNLOCK_THREAD(t);
+
+        /* Adjust this wait queue */
+        oldblpr = bl->priority;
+        if (newpr <= oldblpr)
+            newblpr = newpr;
+        else if (oldpr <= oldblpr)
+            newblpr = find_highest_priority_in_list_l(*bqp);
+
+        if (newblpr == oldblpr)
+            break; /* Queue priority not changing */
+    }
+
+    UNLOCK_THREAD(blt);
+}
+
+/*---------------------------------------------------------------------------
+ * Quick-disinherit of priority elevation. 'thread' must be a running thread.
+ *---------------------------------------------------------------------------
+ */
+static void priority_disinherit_internal(struct thread_entry *thread,
+                                         int blpr)
+{
+    if (blpr < PRIORITY_IDLE &&
+        prio_subtract_entry(&thread->pdist, blpr) == 0 &&
+        blpr <= thread->priority)
+    {
+        int priority = priobit_ffs(&thread->pdist.mask);
+        if (priority != thread->priority)
+            set_running_thread_priority(thread, priority);
+    }
+}
+
+void priority_disinherit(struct thread_entry *thread, struct blocker *bl)
+{
+    LOCK_THREAD(thread);
+    priority_disinherit_internal(thread, bl->priority);
+    UNLOCK_THREAD(thread);
+}
+
+/*---------------------------------------------------------------------------
+ * Transfer ownership from a single owner to a multi-owner splay from a wait
+ * queue
+ *---------------------------------------------------------------------------
+ */
+static void wakeup_thread_queue_multi_transfer(struct thread_entry *thread)
+{
+    /* All threads will have the same blocker and queue; only we are changing
+       it now */
+    struct thread_entry **bqp = thread->bqp;
+    struct blocker_splay *blsplay = (struct blocker_splay *)thread->blocker;
+    struct thread_entry *blt = blsplay->blocker.thread;
+
+    /* The first thread is already locked and is assumed tagged "multi" */
+    int count = 1;
+    struct thread_entry *temp_queue = NULL;
+
+    /* 'thread' is locked on entry */
+    while (1)
+    {
+        LOCK_THREAD(blt);
+
+        remove_from_list_l(bqp, thread);
+        thread->blocker = NULL;
+
+        struct thread_entry *tnext = *bqp;
+        if (tnext == NULL || tnext->retval == 0)
+            break;
+
+        add_to_list_l(&temp_queue, thread);
+
+        UNLOCK_THREAD(thread);
+        UNLOCK_THREAD(blt);
+
+        count++;
+        thread = tnext;
+
+        LOCK_THREAD(thread);
+    }
+
+    int blpr = blsplay->blocker.priority;
+    priority_disinherit_internal(blt, blpr);
+
+    /* Locking order reverses here since the threads are no longer on the
+       queue side */
+    if (count > 1)
+    {
+        add_to_list_l(&temp_queue, thread);
+        UNLOCK_THREAD(thread);
+        corelock_lock(&blsplay->cl);
+
+        blpr = find_highest_priority_in_list_l(*bqp);
+        blsplay->blocker.thread = NULL;
+
+        thread = temp_queue;
+        LOCK_THREAD(thread);
+    }
+    else
+    {
+        /* Becomes a simple, direct transfer */
+        if (thread->priority <= blpr)
+            blpr = find_highest_priority_in_list_l(*bqp);
+        blsplay->blocker.thread = thread;
+    }
+
+    blsplay->blocker.priority = blpr;
+
+    while (1)
+    {
+        unsigned int slotnum = THREAD_ID_SLOT(thread->id);
+        threadbit_set_bit(&blsplay->mask, slotnum);
+
+        if (blpr < PRIORITY_IDLE)
+        {
+            prio_add_entry(&thread->pdist, blpr);
+            if (blpr < thread->priority)
+                thread->priority = blpr;
+        }
+
+        if (count > 1)
+            remove_from_list_l(&temp_queue, thread);
+
+        core_schedule_wakeup(thread);
+
+        UNLOCK_THREAD(thread);
+
+        thread = temp_queue;
+        if (thread == NULL)
+            break;
+
+        LOCK_THREAD(thread);
+    }
+
+    UNLOCK_THREAD(blt);
+
+    if (count > 1)
+    {
+        corelock_unlock(&blsplay->cl);
+    }
+
+    blt->retval = count;
+}
 
 /*---------------------------------------------------------------------------
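Stripped of locking and the multi-owner splay recursion, the transitive
walk inherit_priority() performs reduces to the following sketch
(illustrative toy types, not the kernel's):

    struct toy_thread;
    struct toy_blocker { struct toy_thread *thread; int priority; };
    struct toy_thread  { int priority; struct toy_blocker *blocker; };

    static void toy_inherit(struct toy_blocker *start, int newblpr)
    {
        struct toy_blocker *bl = start;
        while (newblpr < bl->priority)
        {
            bl->priority = newblpr;          /* queue's best waiter changed */
            struct toy_thread *owner = bl->thread;
            if (newblpr >= owner->priority)
                break;                       /* owner already high enough */
            owner->priority = newblpr;       /* boost the owner ... */
            bl = owner->blocker;             /* ... then follow its blocker */
            if (bl == NULL || bl == start)
                break;                       /* chain end, or deadlock loop */
        }
    }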
@@ -801,67 +902,95 @@ struct thread_entry *
  * it is the running thread is made.
  *---------------------------------------------------------------------------
  */
-struct thread_entry *
-    wakeup_priority_protocol_transfer(struct thread_entry *thread)
-{
-    /* Waking thread inherits priority boost from object owner */
-    struct blocker *bl = thread->blocker;
-    struct thread_entry *bl_t = bl->thread;
-    struct thread_entry *next;
-    int bl_pr;
-
-    THREAD_ASSERT(cores[CURRENT_CORE].running == bl_t,
-                  "UPPT->wrong thread", cores[CURRENT_CORE].running);
-
-    LOCK_THREAD(bl_t);
-
-    bl_pr = bl->priority;
-
-    /* Remove the object's boost from the owning thread */
-    if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 &&
-        bl_pr <= bl_t->priority)
-    {
-        /* No more threads at this priority are waiting and the old level is
-         * at least the thread level */
-        int priority = find_first_set_bit(bl_t->pdist.mask);
-
-        if (priority != bl_t->priority)
-        {
-            /* Adjust this thread's priority */
-            set_running_thread_priority(bl_t, priority);
-        }
-    }
-
-    next = *thread->bqp;
-
-    if (LIKELY(next == NULL))
-    {
-        /* Expected shortcut - no more waiters */
-        bl_pr = PRIORITY_IDLE;
-    }
-    else
-    {
-        if (thread->priority <= bl_pr)
-        {
-            /* Need to scan threads remaining in queue */
-            bl_pr = find_highest_priority_in_list_l(next);
-        }
-
-        if (prio_add_entry(&thread->pdist, bl_pr) == 1 &&
-            bl_pr < thread->priority)
-        {
-            /* Thread priority must be raised */
-            thread->priority = bl_pr;
-        }
-    }
-
-    bl->thread = thread; /* This thread pwns */
-    bl->priority = bl_pr; /* Save highest blocked priority */
-    thread->blocker = NULL; /* Thread not blocked */
-
-    UNLOCK_THREAD(bl_t);
-
-    return bl_t;
-}
+static void wakeup_thread_transfer(struct thread_entry *thread)
+{
+    /* Waking thread inherits priority boost from object owner (blt) */
+    struct blocker *bl = thread->blocker;
+    struct thread_entry *blt = bl->thread;
+
+    THREAD_ASSERT(cores[CURRENT_CORE].running == blt,
+                  "UPPT->wrong thread", cores[CURRENT_CORE].running);
+
+    LOCK_THREAD(blt);
+
+    struct thread_entry **bqp = thread->bqp;
+    remove_from_list_l(bqp, thread);
+    thread->blocker = NULL;
+
+    int blpr = bl->priority;
+
+    /* Remove the object's boost from the owning thread */
+    if (prio_subtract_entry(&blt->pdist, blpr) == 0 && blpr <= blt->priority)
+    {
+        /* No more threads at this priority are waiting and the old level is
+         * at least the thread level */
+        int priority = priobit_ffs(&blt->pdist.mask);
+        if (priority != blt->priority)
+            set_running_thread_priority(blt, priority);
+    }
+
+    struct thread_entry *tnext = *bqp;
+
+    if (LIKELY(tnext == NULL))
+    {
+        /* Expected shortcut - no more waiters */
+        blpr = PRIORITY_IDLE;
+    }
+    else
+    {
+        /* If lowering, we need to scan threads remaining in queue */
+        int priority = thread->priority;
+        if (priority <= blpr)
+            blpr = find_highest_priority_in_list_l(tnext);
+
+        if (prio_add_entry(&thread->pdist, blpr) == 1 && blpr < priority)
+            thread->priority = blpr; /* Raise new owner */
+    }
+
+    core_schedule_wakeup(thread);
+    UNLOCK_THREAD(thread);
+
+    bl->thread = thread;   /* This thread pwns */
+    bl->priority = blpr;   /* Save highest blocked priority */
+    UNLOCK_THREAD(blt);
+}
+
+/*---------------------------------------------------------------------------
+ * Readjust priorities when waking a thread blocked waiting for another
+ * in essence "releasing" the thread's effect on the object owner. Can be
+ * performed from any context.
+ *---------------------------------------------------------------------------
+ */
+static void wakeup_thread_release(struct thread_entry *thread)
+{
+    struct blocker *bl = thread->blocker;
+    struct thread_entry *blt = lock_blocker_thread(bl);
+    struct thread_entry **bqp = thread->bqp;
+    remove_from_list_l(bqp, thread);
+    thread->blocker = NULL;
+
+    /* Off to see the wizard... */
+    core_schedule_wakeup(thread);
+
+    if (thread->priority > bl->priority)
+    {
+        /* Queue priority won't change */
+        UNLOCK_THREAD(thread);
+        unlock_blocker_thread(bl);
+        return;
+    }
+
+    UNLOCK_THREAD(thread);
+
+    int newblpr = find_highest_priority_in_list_l(*bqp);
+    if (newblpr == bl->priority)
+    {
+        /* Blocker priority won't change */
+        unlock_blocker_thread(bl);
+        return;
+    }
+
+    inherit_priority(bl, bl, blt, newblpr);
+}
 
 /*---------------------------------------------------------------------------
@@ -877,9 +1006,8 @@ static void __attribute__((noinline)) check_for_obj_waiters(
 {
     /* Only one bit in the mask should be set with a frequency on 1 which
      * represents the thread's own base priority */
-    uint32_t mask = thread->pdist.mask;
-    if ((mask & (mask - 1)) != 0 ||
-        thread->pdist.hist[find_first_set_bit(mask)] > 1)
+    if (priobit_popcount(&thread->pdist.mask) != 1 ||
+        thread->pdist.hist[priobit_ffs(&thread->pdist.mask)] > 1)
     {
         unsigned char name[32];
         thread_get_name(name, 32, thread);
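priobit_popcount(...) != 1 replaces the old mask & (mask - 1) trick; both
test "exactly one bit set". An equivalent plain-C form of this
exiting-thread sanity check, as a stand-alone sketch:

    #include <stdint.h>
    #include <stdbool.h>

    static bool holds_only_base_priority(uint32_t mask, const uint8_t *hist)
    {
        return mask != 0
            && (mask & (mask - 1)) == 0         /* exactly one level occupied */
            && hist[__builtin_ctz(mask)] == 1;  /* and held exactly once */
    }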
@@ -889,26 +1017,72 @@ static void __attribute__((noinline)) check_for_obj_waiters(
 #endif /* HAVE_PRIORITY_SCHEDULING */
 
 /*---------------------------------------------------------------------------
- * Move a thread back to a running state on its core.
- *---------------------------------------------------------------------------
- */
-static void core_schedule_wakeup(struct thread_entry *thread)
-{
-    const unsigned int core = IF_COP_CORE(thread->core);
-
-    RTR_LOCK(core);
-
-    thread->state = STATE_RUNNING;
-
-    add_to_list_l(&cores[core].running, thread);
-    rtr_add_entry(core, thread->priority);
-
-    RTR_UNLOCK(core);
-
-#if NUM_CORES > 1
-    if (core != CURRENT_CORE)
-        core_wake(core);
-#endif
-}
+ * Explicitly wakeup a thread on a blocking queue. Only effects threads of
+ * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
+ *
+ * This code should be considered a critical section by the caller meaning
+ * that the object's corelock should be held.
+ *
+ * INTERNAL: Intended for use by kernel objects and not for programs.
+ *---------------------------------------------------------------------------
+ */
+unsigned int wakeup_thread_(struct thread_entry **list
+                            IF_PRIO(, enum wakeup_thread_protocol proto))
+{
+    struct thread_entry *thread = *list;
+
+    /* Check if there is a blocked thread at all. */
+    if (*list == NULL)
+        return THREAD_NONE;
+
+    LOCK_THREAD(thread);
+
+    /* Determine thread's current state. */
+    switch (thread->state)
+    {
+    case STATE_BLOCKED:
+    case STATE_BLOCKED_W_TMO:
+#ifdef HAVE_PRIORITY_SCHEDULING
+        /* Threads with PIP blockers cannot specify "WAKEUP_DEFAULT" */
+        if (thread->blocker != NULL)
+        {
+            static void (* const funcs[])(struct thread_entry *thread)
+                ICONST_ATTR =
+            {
+                [WAKEUP_DEFAULT]        = NULL,
+                [WAKEUP_TRANSFER]       = wakeup_thread_transfer,
+                [WAKEUP_RELEASE]        = wakeup_thread_release,
+                [WAKEUP_TRANSFER_MULTI] = wakeup_thread_queue_multi_transfer,
+            };
+
+            /* Call the specified unblocking PIP (does the rest) */
+            funcs[proto](thread);
+        }
+        else
+#endif /* HAVE_PRIORITY_SCHEDULING */
+        {
+            /* No PIP - just boost the thread by aging */
+#ifdef HAVE_PRIORITY_SCHEDULING
+            IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
+                thread->skip_count = thread->priority;
+#endif /* HAVE_PRIORITY_SCHEDULING */
+            remove_from_list_l(list, thread);
+            core_schedule_wakeup(thread);
+            UNLOCK_THREAD(thread);
+        }
+
+        return should_switch_tasks();
+
+    /* Nothing to do. State is not blocked. */
+    default:
+#if THREAD_EXTRA_CHECKS
+        THREAD_PANICF("wakeup_thread->block invalid", thread);
+    case STATE_RUNNING:
+    case STATE_KILLED:
+#endif
+        UNLOCK_THREAD(thread);
+        return THREAD_NONE;
+    }
+}
 
 /*---------------------------------------------------------------------------
@@ -990,8 +1164,6 @@ void check_tmo_threads(void)
             }
 #endif /* NUM_CORES */
 
-            remove_from_list_l(curr->bqp, curr);
-
 #ifdef HAVE_WAKEUP_EXT_CB
             if (curr->wakeup_ext_cb != NULL)
                 curr->wakeup_ext_cb(curr);
@@ -999,8 +1171,11 @@ void check_tmo_threads(void)
 
 #ifdef HAVE_PRIORITY_SCHEDULING
             if (curr->blocker != NULL)
-                wakeup_priority_protocol_release(curr);
+                wakeup_thread_release(curr);
+            else
 #endif
+                remove_from_list_l(curr->bqp, curr);
+
             corelock_unlock(ocl);
         }
         /* else state == STATE_SLEEPING */
@@ -1161,8 +1336,7 @@ void switch_thread(void)
     /* Begin task switching by saving our current context so that we can
      * restore the state of the current thread later to the point prior
      * to this call. */
-    store_context(&thread->context);
-
+    thread_store_context(thread);
 #ifdef DEBUG
     /* Check core_ctx buflib integrity */
     core_check_valid();
@@ -1212,8 +1386,7 @@ void switch_thread(void)
         /* Select the new task based on priorities and the last time a
          * process got CPU time relative to the highest priority runnable
          * task. */
-        struct priority_distribution *pd = &cores[core].rtr;
-        int max = find_first_set_bit(pd->mask);
+        int max = priobit_ffs(&cores[core].rtr.mask);
 
         if (block == NULL)
         {
@@ -1269,7 +1442,7 @@ void switch_thread(void)
         }
 
         /* And finally give control to the next thread. */
-        load_context(&thread->context);
+        thread_load_context(thread);
 
 #ifdef RB_PROFILE
         profile_thread_started(thread->id & THREAD_ID_SLOT_MASK);
@@ -1291,140 +1464,59 @@ void sleep_thread(int ticks)
     LOCK_THREAD(current);
 
     /* Set our timeout, remove from run list and join timeout list. */
-    current->tmo_tick = current_tick + ticks + 1;
+    current->tmo_tick = current_tick + MAX(ticks, 0) + 1;
     block_thread_on_l(current, STATE_SLEEPING);
 
     UNLOCK_THREAD(current);
 }
 
 /*---------------------------------------------------------------------------
- * Indefinitely block a thread on a blocking queue for explicit wakeup.
+ * Block a thread on a blocking queue for explicit wakeup. If timeout is
+ * negative, the block is infinite.
  *
  * INTERNAL: Intended for use by kernel objects and not for programs.
  *---------------------------------------------------------------------------
  */
-void block_thread(struct thread_entry *current)
-{
-    /* Set the state to blocked and take us off of the run queue until we
-     * are explicitly woken */
-    LOCK_THREAD(current);
-
-    /* Set the list for explicit wakeup */
-    block_thread_on_l(current, STATE_BLOCKED);
-
-#ifdef HAVE_PRIORITY_SCHEDULING
-    if (current->blocker != NULL)
-    {
-        /* Object supports PIP */
-        current = blocker_inherit_priority(current);
-    }
-#endif
-
-    UNLOCK_THREAD(current);
-}
-
-/*---------------------------------------------------------------------------
- * Block a thread on a blocking queue for a specified time interval or until
- * explicitly woken - whichever happens first.
- *
- * INTERNAL: Intended for use by kernel objects and not for programs.
- *---------------------------------------------------------------------------
- */
-void block_thread_w_tmo(struct thread_entry *current, int timeout)
-{
-    /* Get the entry for the current running thread. */
-    LOCK_THREAD(current);
-
-    /* Set the state to blocked with the specified timeout */
-    current->tmo_tick = current_tick + timeout;
-
-    /* Set the list for explicit wakeup */
-    block_thread_on_l(current, STATE_BLOCKED_W_TMO);
-
-#ifdef HAVE_PRIORITY_SCHEDULING
-    if (current->blocker != NULL)
-    {
-        /* Object supports PIP */
-        current = blocker_inherit_priority(current);
-    }
-#endif
-
-    UNLOCK_THREAD(current);
-}
-
-/*---------------------------------------------------------------------------
- * Explicitly wakeup a thread on a blocking queue. Only effects threads of
- * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
- *
- * This code should be considered a critical section by the caller meaning
- * that the object's corelock should be held.
- *
- * INTERNAL: Intended for use by kernel objects and not for programs.
- *---------------------------------------------------------------------------
- */
-unsigned int wakeup_thread(struct thread_entry **list)
-{
-    struct thread_entry *thread = *list;
-    unsigned int result = THREAD_NONE;
-
-    /* Check if there is a blocked thread at all. */
-    if (thread == NULL)
-        return result;
-
-    LOCK_THREAD(thread);
-
-    /* Determine thread's current state. */
-    switch (thread->state)
-    {
-    case STATE_BLOCKED:
-    case STATE_BLOCKED_W_TMO:
-        remove_from_list_l(list, thread);
-
-        result = THREAD_OK;
-
-#ifdef HAVE_PRIORITY_SCHEDULING
-        struct thread_entry *current;
-        struct blocker *bl = thread->blocker;
-
-        if (bl == NULL)
-        {
-            /* No inheritance - just boost the thread by aging */
-            IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
-                thread->skip_count = thread->priority;
-            current = cores[CURRENT_CORE].running;
-        }
-        else
-        {
-            /* Call the specified unblocking PIP */
-            current = bl->wakeup_protocol(thread);
-        }
-
-        if (current != NULL &&
-            find_first_set_bit(cores[IF_COP_CORE(current->core)].rtr.mask)
-            < current->priority)
-        {
-            /* There is a thread ready to run of higher or same priority on
-             * the same core as the current one; recommend a task switch.
-             * Knowing if this is an interrupt call would be helpful here. */
-            result |= THREAD_SWITCH;
-        }
-#endif /* HAVE_PRIORITY_SCHEDULING */
-
-        core_schedule_wakeup(thread);
-        break;
-
-    /* Nothing to do. State is not blocked. */
-#if THREAD_EXTRA_CHECKS
-    default:
-        THREAD_PANICF("wakeup_thread->block invalid", thread);
-    case STATE_RUNNING:
-    case STATE_KILLED:
-        break;
-#endif
-    }
-
-    UNLOCK_THREAD(thread);
-    return result;
-}
+void block_thread(struct thread_entry *current, int timeout)
+{
+    LOCK_THREAD(current);
+
+    struct blocker *bl = NULL;
+#ifdef HAVE_PRIORITY_SCHEDULING
+    bl = current->blocker;
+    struct thread_entry *blt = bl ? lock_blocker_thread(bl) : NULL;
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
+    if (LIKELY(timeout < 0))
+    {
+        /* Block until explicitly woken */
+        block_thread_on_l(current, STATE_BLOCKED);
+    }
+    else
+    {
+        /* Set the state to blocked with the specified timeout */
+        current->tmo_tick = current_tick + timeout;
+        block_thread_on_l(current, STATE_BLOCKED_W_TMO);
+    }
+
+    if (bl == NULL)
+    {
+        UNLOCK_THREAD(current);
+        return;
+    }
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+    int newblpr = current->priority;
+    UNLOCK_THREAD(current);
+
+    if (newblpr >= bl->priority)
+    {
+        unlock_blocker_thread(bl);
+        return; /* Queue priority won't change */
+    }
+
+    inherit_priority(bl, bl, blt, newblpr);
+#endif /* HAVE_PRIORITY_SCHEDULING */
+}
 
 /*---------------------------------------------------------------------------
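With block_thread() and block_thread_w_tmo() merged, callers now choose the
behaviour via the timeout argument. A hedged caller sketch (the wait-queue
setup is simplified; TIMEOUT_BLOCK is assumed to be the negative "block
forever" constant this diff passes elsewhere):

    static struct thread_entry *example_queue = NULL; /* hypothetical queue */

    static void wait_for_signal(struct thread_entry *current, int timeout)
    {
        disable_irq();
        current->bqp = &example_queue;    /* queue we expect to be woken from */
        block_thread(current, timeout);   /* < 0: infinite, else tick count */
        switch_thread();                  /* yield until woken or timed out */
    }

    /* wait_for_signal(current, TIMEOUT_BLOCK);  -- explicit wakeup only  */
    /* wait_for_signal(current, HZ/2);           -- or half a second, max */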
@@ -1435,25 +1527,31 @@ unsigned int wakeup_thread(struct thread_entry **list)
  * INTERNAL: Intended for use by kernel objects and not for programs.
  *---------------------------------------------------------------------------
  */
-unsigned int thread_queue_wake(struct thread_entry **list)
+unsigned int thread_queue_wake(struct thread_entry **list,
+                               volatile int *count)
 {
+    int num = 0;
     unsigned result = THREAD_NONE;
 
     for (;;)
     {
-        unsigned int rc = wakeup_thread(list);
+        unsigned int rc = wakeup_thread(list, WAKEUP_DEFAULT);
 
         if (rc == THREAD_NONE)
            break; /* No more threads */
 
        result |= rc;
+        num++;
    }
 
+    if (count)
+        *count = num;
+
     return result;
 }
 
 /*---------------------------------------------------------------------------
- * Assign the thread slot a new ID. Version is 1-255.
+ * Assign the thread slot a new ID. Version is 0x00000100..0xffffff00.
  *---------------------------------------------------------------------------
  */
 static void new_thread_id(unsigned int slot_num,
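The new volatile int *count out-parameter lets callers learn how many
threads a broadcast woke; passing NULL keeps the old behaviour (as
thread_final_exit does below). A possible caller, sketched:

    static void broadcast(struct thread_entry **list)
    {
        int woken;
        unsigned int result = thread_queue_wake(list, &woken);

        if (result & THREAD_SWITCH)
            switch_thread();   /* a higher-priority thread became ready */

        (void)woken;           /* e.g. could adjust an object's counter */
    }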
@@ -1693,7 +1791,7 @@ void thread_wait(unsigned int thread_id)
     current->bqp = &thread->queue;
 
     disable_irq();
-    block_thread(current);
+    block_thread(current, TIMEOUT_BLOCK);
 
     corelock_unlock(&thread->waiter_cl);
 
@@ -1723,7 +1821,7 @@ static inline void thread_final_exit(struct thread_entry *current)
      * execution except the slot itself. */
 
     /* Signal this thread */
-    thread_queue_wake(&current->queue);
+    thread_queue_wake(&current->queue, NULL);
     corelock_unlock(&current->waiter_cl);
     switch_thread();
     /* This should never and must never be reached - if it is, the
@@ -1912,20 +2010,18 @@ IF_COP( retry_state: )
         }
     }
 #endif
-    remove_from_list_l(thread->bqp, thread);
-
 #ifdef HAVE_WAKEUP_EXT_CB
     if (thread->wakeup_ext_cb != NULL)
         thread->wakeup_ext_cb(thread);
 #endif
 
 #ifdef HAVE_PRIORITY_SCHEDULING
+    /* Remove thread's priority influence from its chain if needed */
     if (thread->blocker != NULL)
-    {
-        /* Remove thread's priority influence from its chain */
         wakeup_priority_protocol_release(thread);
-    }
+    else
 #endif
+        remove_from_list_l(thread->bqp, thread);
 
 #if NUM_CORES > 1
     if (ocl != NULL)
@@ -1970,130 +2066,77 @@ thread_killed: /* Thread was already killed */
  */
 int thread_set_priority(unsigned int thread_id, int priority)
 {
-    int old_base_priority = -1;
-    struct thread_entry *thread = thread_id_entry(thread_id);
-
-    /* A little safety measure */
-    if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
-        return -1;
-
-    /* Thread could be on any list and therefore on an interrupt accessible
-       one - disable interrupts */
-    int oldlevel = disable_irq_save();
-
-    LOCK_THREAD(thread);
-
-    /* Make sure it's not killed */
-    if (thread->id == thread_id && thread->state != STATE_KILLED)
-    {
-        int old_priority = thread->priority;
-
-        old_base_priority = thread->base_priority;
-        thread->base_priority = priority;
-
-        prio_move_entry(&thread->pdist, old_base_priority, priority);
-        priority = find_first_set_bit(thread->pdist.mask);
-
-        if (old_priority == priority)
-        {
-            /* No priority change - do nothing */
-        }
-        else if (thread->state == STATE_RUNNING)
-        {
-            /* This thread is running - change location on the run
-             * queue. No transitive inheritance needed. */
-            set_running_thread_priority(thread, priority);
-        }
-        else
-        {
-            thread->priority = priority;
-
-            if (thread->blocker != NULL)
-            {
-                /* Bubble new priority down the chain */
-                struct blocker *bl = thread->blocker;   /* Blocker struct */
-                struct thread_entry *bl_t = bl->thread; /* Blocking thread */
-                struct thread_entry * const tstart = thread; /* Initial thread */
-                const int highest = MIN(priority, old_priority); /* Higher of new or old */
-
-                for (;;)
-                {
-                    struct thread_entry *next; /* Next thread to check */
-                    int bl_pr;    /* Highest blocked thread */
-                    int queue_pr; /* New highest blocked thread */
-#if NUM_CORES > 1
-                    /* Owner can change but thread cannot be dislodged - thread
-                     * may not be the first in the queue which allows other
-                     * threads ahead in the list to be given ownership during the
-                     * operation. If thread is next then the waker will have to
-                     * wait for us and the owner of the object will remain fixed.
-                     * If we successfully grab the owner -- which at some point
-                     * is guaranteed -- then the queue remains fixed until we
-                     * pass by. */
-                    for (;;)
-                    {
-                        LOCK_THREAD(bl_t);
-
-                        /* Double-check the owner - retry if it changed */
-                        if (LIKELY(bl->thread == bl_t))
-                            break;
-
-                        UNLOCK_THREAD(bl_t);
-                        bl_t = bl->thread;
-                    }
-#endif
-                    bl_pr = bl->priority;
-
-                    if (highest > bl_pr)
-                        break; /* Object priority won't change */
-
-                    /* This will include the thread being set */
-                    queue_pr = find_highest_priority_in_list_l(*thread->bqp);
-
-                    if (queue_pr == bl_pr)
-                        break; /* Object priority not changing */
-
-                    /* Update thread boost for this object */
-                    bl->priority = queue_pr;
-                    prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
-                    bl_pr = find_first_set_bit(bl_t->pdist.mask);
-
-                    if (bl_t->priority == bl_pr)
-                        break; /* Blocking thread priority not changing */
-
-                    if (bl_t->state == STATE_RUNNING)
-                    {
-                        /* Thread not blocked - we're done */
-                        set_running_thread_priority(bl_t, bl_pr);
-                        break;
-                    }
-
-                    bl_t->priority = bl_pr;
-                    bl = bl_t->blocker; /* Blocking thread has a blocker? */
-
-                    if (bl == NULL)
-                        break; /* End of chain */
-
-                    next = bl->thread;
-
-                    if (UNLIKELY(next == tstart))
-                        break; /* Full-circle */
-
-                    UNLOCK_THREAD(thread);
-
-                    thread = bl_t;
-                    bl_t = next;
-                } /* for (;;) */
-
-                UNLOCK_THREAD(bl_t);
-            }
-        }
-    }
-
-    UNLOCK_THREAD(thread);
-
-    restore_irq(oldlevel);
-
-    return old_base_priority;
-}
+    if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
+        return -1; /* Invalid priority argument */
+
+    int old_base_priority = -1;
+    struct thread_entry *thread = thread_id_entry(thread_id);
+
+    /* Thread could be on any list and therefore on an interrupt accessible
+       one - disable interrupts */
+    const int oldlevel = disable_irq_save();
+    LOCK_THREAD(thread);
+
+    if (thread->id != thread_id || thread->state == STATE_KILLED)
+        goto done; /* Invalid thread */
+
+    old_base_priority = thread->base_priority;
+    if (priority == old_base_priority)
+        goto done; /* No base priority change */
+
+    thread->base_priority = priority;
+
+    /* Adjust the thread's priority influence on itself */
+    prio_move_entry(&thread->pdist, old_base_priority, priority);
+
+    int old_priority = thread->priority;
+    int new_priority = priobit_ffs(&thread->pdist.mask);
+
+    if (old_priority == new_priority)
+        goto done; /* No running priority change */
+
+    if (thread->state == STATE_RUNNING)
+    {
+        /* This thread is running - just change location on the run queue.
+           Also sets thread->priority. */
+        set_running_thread_priority(thread, new_priority);
+        goto done;
+    }
+
+    /* Thread is blocked */
+    struct blocker *bl = thread->blocker;
+    if (bl == NULL)
+    {
+        thread->priority = new_priority;
+        goto done; /* End of transitive blocks */
+    }
+
+    struct thread_entry *blt = lock_blocker_thread(bl);
+    struct thread_entry **bqp = thread->bqp;
+
+    thread->priority = new_priority;
+
+    UNLOCK_THREAD(thread);
+    thread = NULL;
+
+    int oldblpr = bl->priority;
+    int newblpr = oldblpr;
+    if (new_priority < oldblpr)
+        newblpr = new_priority;
+    else if (old_priority <= oldblpr)
+        newblpr = find_highest_priority_in_list_l(*bqp);
+
+    if (newblpr == oldblpr)
+    {
+        unlock_blocker_thread(bl);
+        goto done;
+    }
+
+    inherit_priority(bl, bl, blt, newblpr);
+done:
+    if (thread)
+        UNLOCK_THREAD(thread);
+    restore_irq(oldlevel);
+    return old_base_priority;
+}
 
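The caller-visible contract of thread_set_priority() is unchanged by the
rewrite: it returns the previous base priority, or -1 for an out-of-range
argument. A typical boost/restore pattern, sketched:

    static void run_time_critical(unsigned int thread_id)
    {
        int old = thread_set_priority(thread_id, HIGHEST_PRIORITY);
        if (old >= 0)
        {
            /* ... time-critical work ... */
            thread_set_priority(thread_id, old);  /* restore base priority */
        }
    }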