diff options
author | Michael Sevakis <jethead71@rockbox.org> | 2008-12-10 08:57:10 +0000 |
---|---|---|
committer | Michael Sevakis <jethead71@rockbox.org> | 2008-12-10 08:57:10 +0000 |
commit | 8cfbd3604fac14f629244e521ad24ffa9938c790 (patch) | |
tree | 16dc096519b8b537bb7d4b73e0c97f5f33ee752b /firmware/thread.c | |
parent | 40ff47c7eea41ac893d7af5c5b97ace52a5ffade (diff) | |
download | rockbox-8cfbd3604fac14f629244e521ad24ffa9938c790.tar.gz rockbox-8cfbd3604fac14f629244e521ad24ffa9938c790.zip |
Use cookies for thread identification instead of pointers directly which gives a buffer against wrongly identifying a thread when the slot is recycled (which has been nagging me for awhile). A slot gets 255 uses before it repeats. Everything gets incompatible so a full update is required.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@19377 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'firmware/thread.c')
-rw-r--r-- | firmware/thread.c | 140 |
1 file changed, 97 insertions, 43 deletions
diff --git a/firmware/thread.c b/firmware/thread.c index c500fc4818..377c3355b4 100644 --- a/firmware/thread.c +++ b/firmware/thread.c | |||
@@ -1691,8 +1691,8 @@ struct thread_entry * | |||
1691 | struct thread_entry *next; | 1691 | struct thread_entry *next; |
1692 | int bl_pr; | 1692 | int bl_pr; |
1693 | 1693 | ||
1694 | THREAD_ASSERT(thread_get_current() == bl_t, | 1694 | THREAD_ASSERT(cores[CURRENT_CORE].running == bl_t, |
1695 | "UPPT->wrong thread", thread_get_current()); | 1695 | "UPPT->wrong thread", cores[CURRENT_CORE].running); |
1696 | 1696 | ||
1697 | LOCK_THREAD(bl_t); | 1697 | LOCK_THREAD(bl_t); |
1698 | 1698 | ||
@@ -2031,7 +2031,7 @@ void switch_thread(void) | |||
2031 | } | 2031 | } |
2032 | 2032 | ||
2033 | #ifdef RB_PROFILE | 2033 | #ifdef RB_PROFILE |
2034 | profile_thread_stopped(thread - threads); | 2034 | profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK); |
2035 | #endif | 2035 | #endif |
2036 | 2036 | ||
2037 | /* Begin task switching by saving our current context so that we can | 2037 | /* Begin task switching by saving our current context so that we can |
@@ -2136,7 +2136,7 @@ void switch_thread(void) | |||
2136 | load_context(&thread->context); | 2136 | load_context(&thread->context); |
2137 | 2137 | ||
2138 | #ifdef RB_PROFILE | 2138 | #ifdef RB_PROFILE |
2139 | profile_thread_started(thread - threads); | 2139 | profile_thread_started(thread->id & THREAD_ID_SLOT_MASK); |
2140 | #endif | 2140 | #endif |
2141 | 2141 | ||
2142 | } | 2142 | } |
@@ -2316,6 +2316,24 @@ unsigned int thread_queue_wake(struct thread_entry **list) | |||
2316 | } | 2316 | } |
2317 | 2317 | ||
2318 | /*--------------------------------------------------------------------------- | 2318 | /*--------------------------------------------------------------------------- |
2319 | * Assign the thread slot a new ID. Version is 1-255. | ||
2320 | *--------------------------------------------------------------------------- | ||
2321 | */ | ||
2322 | static void new_thread_id(unsigned int slot_num, | ||
2323 | struct thread_entry *thread) | ||
2324 | { | ||
2325 | unsigned int version = | ||
2326 | (thread->id + (1u << THREAD_ID_VERSION_SHIFT)) | ||
2327 | & THREAD_ID_VERSION_MASK; | ||
2328 | |||
2329 | /* If wrapped to 0, make it 1 */ | ||
2330 | if (version == 0) | ||
2331 | version = 1u << THREAD_ID_VERSION_SHIFT; | ||
2332 | |||
2333 | thread->id = version | (slot_num & THREAD_ID_SLOT_MASK); | ||
2334 | } | ||
2335 | |||
2336 | /*--------------------------------------------------------------------------- | ||
2319 | * Find an empty thread slot or MAXTHREADS if none found. The slot returned | 2337 | * Find an empty thread slot or MAXTHREADS if none found. The slot returned |
2320 | * will be locked on multicore. | 2338 | * will be locked on multicore. |
2321 | *--------------------------------------------------------------------------- | 2339 | *--------------------------------------------------------------------------- |
@@ -2349,6 +2367,17 @@ static struct thread_entry * find_empty_thread_slot(void) | |||
2349 | return thread; | 2367 | return thread; |
2350 | } | 2368 | } |
2351 | 2369 | ||
2370 | /*--------------------------------------------------------------------------- | ||
2371 | * Return the thread_entry pointer for a thread_id. Return the current | ||
2372 | * thread if the ID is 0 (alias for current). | ||
2373 | *--------------------------------------------------------------------------- | ||
2374 | */ | ||
2375 | struct thread_entry * thread_id_entry(unsigned int thread_id) | ||
2376 | { | ||
2377 | return (thread_id == THREAD_ID_CURRENT) ? | ||
2378 | cores[CURRENT_CORE].running : | ||
2379 | &threads[thread_id & THREAD_ID_SLOT_MASK]; | ||
2380 | } | ||
2352 | 2381 | ||
2353 | /*--------------------------------------------------------------------------- | 2382 | /*--------------------------------------------------------------------------- |
2354 | * Place the current core in idle mode - woken up on interrupt or wake | 2383 | * Place the current core in idle mode - woken up on interrupt or wake |
@@ -2369,11 +2398,11 @@ void core_idle(void) | |||
2369 | * Return ID if context area could be allocated, else NULL. | 2398 | * Return ID if context area could be allocated, else NULL. |
2370 | *--------------------------------------------------------------------------- | 2399 | *--------------------------------------------------------------------------- |
2371 | */ | 2400 | */ |
2372 | struct thread_entry* | 2401 | unsigned int create_thread(void (*function)(void), |
2373 | create_thread(void (*function)(void), void* stack, size_t stack_size, | 2402 | void* stack, size_t stack_size, |
2374 | unsigned flags, const char *name | 2403 | unsigned flags, const char *name |
2375 | IF_PRIO(, int priority) | 2404 | IF_PRIO(, int priority) |
2376 | IF_COP(, unsigned int core)) | 2405 | IF_COP(, unsigned int core)) |
2377 | { | 2406 | { |
2378 | unsigned int i; | 2407 | unsigned int i; |
2379 | unsigned int stack_words; | 2408 | unsigned int stack_words; |
@@ -2385,7 +2414,7 @@ struct thread_entry* | |||
2385 | thread = find_empty_thread_slot(); | 2414 | thread = find_empty_thread_slot(); |
2386 | if (thread == NULL) | 2415 | if (thread == NULL) |
2387 | { | 2416 | { |
2388 | return NULL; | 2417 | return 0; |
2389 | } | 2418 | } |
2390 | 2419 | ||
2391 | oldlevel = disable_irq_save(); | 2420 | oldlevel = disable_irq_save(); |
@@ -2443,15 +2472,15 @@ struct thread_entry* | |||
2443 | THREAD_STARTUP_INIT(core, thread, function); | 2472 | THREAD_STARTUP_INIT(core, thread, function); |
2444 | 2473 | ||
2445 | thread->state = state; | 2474 | thread->state = state; |
2475 | i = thread->id; /* Snapshot while locked */ | ||
2446 | 2476 | ||
2447 | if (state == STATE_RUNNING) | 2477 | if (state == STATE_RUNNING) |
2448 | core_schedule_wakeup(thread); | 2478 | core_schedule_wakeup(thread); |
2449 | 2479 | ||
2450 | UNLOCK_THREAD(thread); | 2480 | UNLOCK_THREAD(thread); |
2451 | |||
2452 | restore_irq(oldlevel); | 2481 | restore_irq(oldlevel); |
2453 | 2482 | ||
2454 | return thread; | 2483 | return i; |
2455 | } | 2484 | } |
2456 | 2485 | ||
2457 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | 2486 | #ifdef HAVE_SCHEDULER_BOOSTCTRL |
@@ -2489,18 +2518,17 @@ void cancel_cpu_boost(void) | |||
2489 | * Parameter is the ID as returned from create_thread(). | 2518 | * Parameter is the ID as returned from create_thread(). |
2490 | *--------------------------------------------------------------------------- | 2519 | *--------------------------------------------------------------------------- |
2491 | */ | 2520 | */ |
2492 | void thread_wait(struct thread_entry *thread) | 2521 | void thread_wait(unsigned int thread_id) |
2493 | { | 2522 | { |
2494 | struct thread_entry *current = cores[CURRENT_CORE].running; | 2523 | struct thread_entry *current = cores[CURRENT_CORE].running; |
2495 | 2524 | struct thread_entry *thread = thread_id_entry(thread_id); | |
2496 | if (thread == NULL) | ||
2497 | thread = current; | ||
2498 | 2525 | ||
2499 | /* Lock thread-as-waitable-object lock */ | 2526 | /* Lock thread-as-waitable-object lock */ |
2500 | corelock_lock(&thread->waiter_cl); | 2527 | corelock_lock(&thread->waiter_cl); |
2501 | 2528 | ||
2502 | /* Be sure it hasn't been killed yet */ | 2529 | /* Be sure it hasn't been killed yet */ |
2503 | if (thread->state != STATE_KILLED) | 2530 | if (thread_id == THREAD_ID_CURRENT || |
2531 | (thread->id == thread_id && thread->state != STATE_KILLED)) | ||
2504 | { | 2532 | { |
2505 | IF_COP( current->obj_cl = &thread->waiter_cl; ) | 2533 | IF_COP( current->obj_cl = &thread->waiter_cl; ) |
2506 | current->bqp = &thread->queue; | 2534 | current->bqp = &thread->queue; |
@@ -2538,9 +2566,10 @@ void thread_exit(void) | |||
2538 | if (current->name == THREAD_DESTRUCT) | 2566 | if (current->name == THREAD_DESTRUCT) |
2539 | { | 2567 | { |
2540 | /* Thread being killed - become a waiter */ | 2568 | /* Thread being killed - become a waiter */ |
2569 | unsigned int id = current->id; | ||
2541 | UNLOCK_THREAD(current); | 2570 | UNLOCK_THREAD(current); |
2542 | corelock_unlock(¤t->waiter_cl); | 2571 | corelock_unlock(¤t->waiter_cl); |
2543 | thread_wait(current); | 2572 | thread_wait(id); |
2544 | THREAD_PANICF("thread_exit->WK:*R", current); | 2573 | THREAD_PANICF("thread_exit->WK:*R", current); |
2545 | } | 2574 | } |
2546 | #endif | 2575 | #endif |
@@ -2568,7 +2597,13 @@ void thread_exit(void) | |||
2568 | } | 2597 | } |
2569 | 2598 | ||
2570 | flush_icache(); | 2599 | flush_icache(); |
2600 | |||
2601 | /* At this point, this thread isn't using resources allocated for | ||
2602 | * execution except the slot itself. */ | ||
2571 | #endif | 2603 | #endif |
2604 | |||
2605 | /* Update ID for this slot */ | ||
2606 | new_thread_id(current->id, current); | ||
2572 | current->name = NULL; | 2607 | current->name = NULL; |
2573 | 2608 | ||
2574 | /* Signal this thread */ | 2609 | /* Signal this thread */ |
@@ -2593,7 +2628,7 @@ void thread_exit(void) | |||
2593 | * leave various objects in an undefined state. | 2628 | * leave various objects in an undefined state. |
2594 | *--------------------------------------------------------------------------- | 2629 | *--------------------------------------------------------------------------- |
2595 | */ | 2630 | */ |
2596 | void remove_thread(struct thread_entry *thread) | 2631 | void remove_thread(unsigned int thread_id) |
2597 | { | 2632 | { |
2598 | #if NUM_CORES > 1 | 2633 | #if NUM_CORES > 1 |
2599 | /* core is not constant here because of core switching */ | 2634 | /* core is not constant here because of core switching */ |
@@ -2604,13 +2639,11 @@ void remove_thread(struct thread_entry *thread) | |||
2604 | const unsigned int core = CURRENT_CORE; | 2639 | const unsigned int core = CURRENT_CORE; |
2605 | #endif | 2640 | #endif |
2606 | struct thread_entry *current = cores[core].running; | 2641 | struct thread_entry *current = cores[core].running; |
2642 | struct thread_entry *thread = thread_id_entry(thread_id); | ||
2607 | 2643 | ||
2608 | unsigned state; | 2644 | unsigned state; |
2609 | int oldlevel; | 2645 | int oldlevel; |
2610 | 2646 | ||
2611 | if (thread == NULL) | ||
2612 | thread = current; | ||
2613 | |||
2614 | if (thread == current) | 2647 | if (thread == current) |
2615 | thread_exit(); /* Current thread - do normal exit */ | 2648 | thread_exit(); /* Current thread - do normal exit */ |
2616 | 2649 | ||
@@ -2621,10 +2654,8 @@ void remove_thread(struct thread_entry *thread) | |||
2621 | 2654 | ||
2622 | state = thread->state; | 2655 | state = thread->state; |
2623 | 2656 | ||
2624 | if (state == STATE_KILLED) | 2657 | if (thread->id != thread_id || state == STATE_KILLED) |
2625 | { | ||
2626 | goto thread_killed; | 2658 | goto thread_killed; |
2627 | } | ||
2628 | 2659 | ||
2629 | #if NUM_CORES > 1 | 2660 | #if NUM_CORES > 1 |
2630 | if (thread->name == THREAD_DESTRUCT) | 2661 | if (thread->name == THREAD_DESTRUCT) |
@@ -2633,7 +2664,7 @@ void remove_thread(struct thread_entry *thread) | |||
2633 | UNLOCK_THREAD(thread); | 2664 | UNLOCK_THREAD(thread); |
2634 | corelock_unlock(&thread->waiter_cl); | 2665 | corelock_unlock(&thread->waiter_cl); |
2635 | restore_irq(oldlevel); | 2666 | restore_irq(oldlevel); |
2636 | thread_wait(thread); | 2667 | thread_wait(thread_id); |
2637 | return; | 2668 | return; |
2638 | } | 2669 | } |
2639 | 2670 | ||
@@ -2741,6 +2772,7 @@ IF_COP( retry_state: ) | |||
2741 | /* Otherwise thread is frozen and hasn't run yet */ | 2772 | /* Otherwise thread is frozen and hasn't run yet */ |
2742 | } | 2773 | } |
2743 | 2774 | ||
2775 | new_thread_id(thread_id, thread); | ||
2744 | thread->state = STATE_KILLED; | 2776 | thread->state = STATE_KILLED; |
2745 | 2777 | ||
2746 | /* If thread was waiting on itself, it will have been removed above. | 2778 | /* If thread was waiting on itself, it will have been removed above. |
@@ -2773,17 +2805,15 @@ thread_killed: /* Thread was already killed */ | |||
2773 | * needed inheritance changes also may happen. | 2805 | * needed inheritance changes also may happen. |
2774 | *--------------------------------------------------------------------------- | 2806 | *--------------------------------------------------------------------------- |
2775 | */ | 2807 | */ |
2776 | int thread_set_priority(struct thread_entry *thread, int priority) | 2808 | int thread_set_priority(unsigned int thread_id, int priority) |
2777 | { | 2809 | { |
2778 | int old_base_priority = -1; | 2810 | int old_base_priority = -1; |
2811 | struct thread_entry *thread = thread_id_entry(thread_id); | ||
2779 | 2812 | ||
2780 | /* A little safety measure */ | 2813 | /* A little safety measure */ |
2781 | if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY) | 2814 | if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY) |
2782 | return -1; | 2815 | return -1; |
2783 | 2816 | ||
2784 | if (thread == NULL) | ||
2785 | thread = cores[CURRENT_CORE].running; | ||
2786 | |||
2787 | /* Thread could be on any list and therefore on an interrupt accessible | 2817 | /* Thread could be on any list and therefore on an interrupt accessible |
2788 | one - disable interrupts */ | 2818 | one - disable interrupts */ |
2789 | int oldlevel = disable_irq_save(); | 2819 | int oldlevel = disable_irq_save(); |
@@ -2791,7 +2821,8 @@ int thread_set_priority(struct thread_entry *thread, int priority) | |||
2791 | LOCK_THREAD(thread); | 2821 | LOCK_THREAD(thread); |
2792 | 2822 | ||
2793 | /* Make sure it's not killed */ | 2823 | /* Make sure it's not killed */ |
2794 | if (thread->state != STATE_KILLED) | 2824 | if (thread_id == THREAD_ID_CURRENT || |
2825 | (thread->id == thread_id && thread->state != STATE_KILLED)) | ||
2795 | { | 2826 | { |
2796 | int old_priority = thread->priority; | 2827 | int old_priority = thread->priority; |
2797 | 2828 | ||
@@ -2908,13 +2939,19 @@ int thread_set_priority(struct thread_entry *thread, int priority) | |||
2908 | * Returns the current base priority for a thread. | 2939 | * Returns the current base priority for a thread. |
2909 | *--------------------------------------------------------------------------- | 2940 | *--------------------------------------------------------------------------- |
2910 | */ | 2941 | */ |
2911 | int thread_get_priority(struct thread_entry *thread) | 2942 | int thread_get_priority(unsigned int thread_id) |
2912 | { | 2943 | { |
2913 | /* Simple, quick probe. */ | 2944 | struct thread_entry *thread = thread_id_entry(thread_id); |
2914 | if (thread == NULL) | 2945 | int base_priority = thread->base_priority; |
2915 | thread = cores[CURRENT_CORE].running; | ||
2916 | 2946 | ||
2917 | return thread->base_priority; | 2947 | /* Simply check without locking slot. It may or may not be valid by the |
2948 | * time the function returns anyway. If all tests pass, it is the | ||
2949 | * correct value for when it was valid. */ | ||
2950 | if (thread_id != THREAD_ID_CURRENT && | ||
2951 | (thread->id != thread_id || thread->state == STATE_KILLED)) | ||
2952 | base_priority = -1; | ||
2953 | |||
2954 | return base_priority; | ||
2918 | } | 2955 | } |
2919 | #endif /* HAVE_PRIORITY_SCHEDULING */ | 2956 | #endif /* HAVE_PRIORITY_SCHEDULING */ |
2920 | 2957 | ||
@@ -2924,12 +2961,16 @@ int thread_get_priority(struct thread_entry *thread) | |||
2924 | * virtue of the slot having a state of STATE_FROZEN. | 2961 | * virtue of the slot having a state of STATE_FROZEN. |
2925 | *--------------------------------------------------------------------------- | 2962 | *--------------------------------------------------------------------------- |
2926 | */ | 2963 | */ |
2927 | void thread_thaw(struct thread_entry *thread) | 2964 | void thread_thaw(unsigned int thread_id) |
2928 | { | 2965 | { |
2966 | struct thread_entry *thread = thread_id_entry(thread_id); | ||
2929 | int oldlevel = disable_irq_save(); | 2967 | int oldlevel = disable_irq_save(); |
2968 | |||
2930 | LOCK_THREAD(thread); | 2969 | LOCK_THREAD(thread); |
2931 | 2970 | ||
2932 | if (thread->state == STATE_FROZEN) | 2971 | /* If thread is the current one, it cannot be frozen, therefore |
2972 | * there is no need to check that. */ | ||
2973 | if (thread->id == thread_id && thread->state == STATE_FROZEN) | ||
2933 | core_schedule_wakeup(thread); | 2974 | core_schedule_wakeup(thread); |
2934 | 2975 | ||
2935 | UNLOCK_THREAD(thread); | 2976 | UNLOCK_THREAD(thread); |
@@ -2940,9 +2981,9 @@ void thread_thaw(struct thread_entry *thread) | |||
2940 | * Return the ID of the currently executing thread. | 2981 | * Return the ID of the currently executing thread. |
2941 | *--------------------------------------------------------------------------- | 2982 | *--------------------------------------------------------------------------- |
2942 | */ | 2983 | */ |
2943 | struct thread_entry * thread_get_current(void) | 2984 | unsigned int thread_get_current(void) |
2944 | { | 2985 | { |
2945 | return cores[CURRENT_CORE].running; | 2986 | return cores[CURRENT_CORE].running->id; |
2946 | } | 2987 | } |
2947 | 2988 | ||
2948 | #if NUM_CORES > 1 | 2989 | #if NUM_CORES > 1 |
@@ -2967,9 +3008,10 @@ unsigned int switch_core(unsigned int new_core) | |||
2967 | if (current->name == THREAD_DESTRUCT) | 3008 | if (current->name == THREAD_DESTRUCT) |
2968 | { | 3009 | { |
2969 | /* Thread being killed - deactivate and let process complete */ | 3010 | /* Thread being killed - deactivate and let process complete */ |
3011 | unsigned int id = current->id; | ||
2970 | UNLOCK_THREAD(current); | 3012 | UNLOCK_THREAD(current); |
2971 | restore_irq(oldlevel); | 3013 | restore_irq(oldlevel); |
2972 | thread_wait(current); | 3014 | thread_wait(id); |
2973 | /* Should never be reached */ | 3015 | /* Should never be reached */ |
2974 | THREAD_PANICF("switch_core->D:*R", current); | 3016 | THREAD_PANICF("switch_core->D:*R", current); |
2975 | } | 3017 | } |
@@ -3034,6 +3076,19 @@ void init_threads(void) | |||
3034 | const unsigned int core = CURRENT_CORE; | 3076 | const unsigned int core = CURRENT_CORE; |
3035 | struct thread_entry *thread; | 3077 | struct thread_entry *thread; |
3036 | 3078 | ||
3079 | if (core == CPU) | ||
3080 | { | ||
3081 | /* Initialize core locks and IDs in all slots */ | ||
3082 | int n; | ||
3083 | for (n = 0; n < MAXTHREADS; n++) | ||
3084 | { | ||
3085 | thread = &threads[n]; | ||
3086 | corelock_init(&thread->waiter_cl); | ||
3087 | corelock_init(&thread->slot_cl); | ||
3088 | thread->id = THREAD_ID_INIT(n); | ||
3089 | } | ||
3090 | } | ||
3091 | |||
3037 | /* CPU will initialize first and then sleep */ | 3092 | /* CPU will initialize first and then sleep */ |
3038 | thread = find_empty_thread_slot(); | 3093 | thread = find_empty_thread_slot(); |
3039 | 3094 | ||
@@ -3060,8 +3115,6 @@ void init_threads(void) | |||
3060 | thread->priority = PRIORITY_USER_INTERFACE; | 3115 | thread->priority = PRIORITY_USER_INTERFACE; |
3061 | rtr_add_entry(core, PRIORITY_USER_INTERFACE); | 3116 | rtr_add_entry(core, PRIORITY_USER_INTERFACE); |
3062 | #endif | 3117 | #endif |
3063 | corelock_init(&thread->waiter_cl); | ||
3064 | corelock_init(&thread->slot_cl); | ||
3065 | 3118 | ||
3066 | add_to_list_l(&cores[core].running, thread); | 3119 | add_to_list_l(&cores[core].running, thread); |
3067 | 3120 | ||
@@ -3070,6 +3123,7 @@ void init_threads(void) | |||
3070 | thread->stack = stackbegin; | 3123 | thread->stack = stackbegin; |
3071 | thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin; | 3124 | thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin; |
3072 | #if NUM_CORES > 1 /* This code path will not be run on single core targets */ | 3125 | #if NUM_CORES > 1 /* This code path will not be run on single core targets */ |
3126 | /* Initialize all locking for the slots */ | ||
3073 | /* Wait for other processors to finish their inits since create_thread | 3127 | /* Wait for other processors to finish their inits since create_thread |
3074 | * isn't safe to call until the kernel inits are done. The first | 3128 | * isn't safe to call until the kernel inits are done. The first |
3075 | * threads created in the system must of course be created by CPU. */ | 3129 | * threads created in the system must of course be created by CPU. */ |