diff options
author | Michael Sevakis <jethead71@rockbox.org> | 2008-12-10 08:57:10 +0000 |
---|---|---|
committer | Michael Sevakis <jethead71@rockbox.org> | 2008-12-10 08:57:10 +0000 |
commit | 8cfbd3604fac14f629244e521ad24ffa9938c790 (patch) | |
tree | 16dc096519b8b537bb7d4b73e0c97f5f33ee752b /firmware | |
parent | 40ff47c7eea41ac893d7af5c5b97ace52a5ffade (diff) | |
download | rockbox-8cfbd3604fac14f629244e521ad24ffa9938c790.tar.gz rockbox-8cfbd3604fac14f629244e521ad24ffa9938c790.zip |
Use cookies for thread identification instead of pointers directly which gives a buffer against wrongly identifying a thread when the slot is recycled (which has been nagging me for awhile). A slot gets 255 uses before it repeats. Everything gets incompatible so a full update is required.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@19377 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'firmware')
-rw-r--r-- | firmware/backlight.c | 10 | ||||
-rw-r--r-- | firmware/drivers/ata.c | 15 | ||||
-rw-r--r-- | firmware/export/kernel.h | 2 | ||||
-rw-r--r-- | firmware/export/thread.h | 38 | ||||
-rw-r--r-- | firmware/kernel.c | 23 | ||||
-rw-r--r-- | firmware/target/arm/imx31/gigabeat-s/mc13783-imx31.c | 12 | ||||
-rw-r--r-- | firmware/thread.c | 140 | ||||
-rw-r--r-- | firmware/usb.c | 8 |
8 files changed, 158 insertions, 90 deletions
diff --git a/firmware/backlight.c b/firmware/backlight.c index 07cc9532be..66cc6df569 100644 --- a/firmware/backlight.c +++ b/firmware/backlight.c | |||
@@ -130,7 +130,7 @@ static long backlight_stack[DEFAULT_STACK_SIZE/sizeof(long)]; | |||
130 | static const char backlight_thread_name[] = "backlight"; | 130 | static const char backlight_thread_name[] = "backlight"; |
131 | static struct event_queue backlight_queue; | 131 | static struct event_queue backlight_queue; |
132 | #ifdef BACKLIGHT_DRIVER_CLOSE | 132 | #ifdef BACKLIGHT_DRIVER_CLOSE |
133 | static struct thread_entry *backlight_thread_p = NULL; | 133 | static unsigned int backlight_thread_id = 0; |
134 | #endif | 134 | #endif |
135 | 135 | ||
136 | static int backlight_timer SHAREDBSS_ATTR; | 136 | static int backlight_timer SHAREDBSS_ATTR; |
@@ -744,7 +744,7 @@ void backlight_init(void) | |||
744 | * call the appropriate backlight_set_*() functions, only changing light | 744 | * call the appropriate backlight_set_*() functions, only changing light |
745 | * status if necessary. */ | 745 | * status if necessary. */ |
746 | #ifdef BACKLIGHT_DRIVER_CLOSE | 746 | #ifdef BACKLIGHT_DRIVER_CLOSE |
747 | backlight_thread_p = | 747 | backlight_thread_id = |
748 | #endif | 748 | #endif |
749 | create_thread(backlight_thread, backlight_stack, | 749 | create_thread(backlight_thread, backlight_stack, |
750 | sizeof(backlight_stack), 0, backlight_thread_name | 750 | sizeof(backlight_stack), 0, backlight_thread_name |
@@ -756,13 +756,13 @@ void backlight_init(void) | |||
756 | #ifdef BACKLIGHT_DRIVER_CLOSE | 756 | #ifdef BACKLIGHT_DRIVER_CLOSE |
757 | void backlight_close(void) | 757 | void backlight_close(void) |
758 | { | 758 | { |
759 | struct thread_entry *thread = backlight_thread_p; | 759 | unsigned int thread = backlight_thread_id; |
760 | 760 | ||
761 | /* Wait for thread to exit */ | 761 | /* Wait for thread to exit */ |
762 | if (thread == NULL) | 762 | if (thread == 0) |
763 | return; | 763 | return; |
764 | 764 | ||
765 | backlight_thread_p = NULL; | 765 | backlight_thread_id = 0; |
766 | 766 | ||
767 | queue_post(&backlight_queue, BACKLIGHT_QUIT, 0); | 767 | queue_post(&backlight_queue, BACKLIGHT_QUIT, 0); |
768 | thread_wait(thread); | 768 | thread_wait(thread); |
diff --git a/firmware/drivers/ata.c b/firmware/drivers/ata.c index 00a7c3e19a..e3fa3e8958 100644 --- a/firmware/drivers/ata.c +++ b/firmware/drivers/ata.c | |||
@@ -71,7 +71,7 @@ | |||
71 | #endif | 71 | #endif |
72 | 72 | ||
73 | #ifdef ATA_DRIVER_CLOSE | 73 | #ifdef ATA_DRIVER_CLOSE |
74 | static struct thread_entry *ata_thread_p = NULL; | 74 | static unsigned int ata_thread_id = 0; |
75 | #endif | 75 | #endif |
76 | 76 | ||
77 | #if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64 | 77 | #if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64 |
@@ -94,7 +94,8 @@ static void ata_lock_init(struct ata_lock *l) | |||
94 | 94 | ||
95 | static void ata_lock_lock(struct ata_lock *l) | 95 | static void ata_lock_lock(struct ata_lock *l) |
96 | { | 96 | { |
97 | struct thread_entry * const current = thread_get_current(); | 97 | struct thread_entry * const current = |
98 | thread_id_entry(THREAD_ID_CURRENT); | ||
98 | 99 | ||
99 | if (current == l->thread) | 100 | if (current == l->thread) |
100 | { | 101 | { |
@@ -1350,7 +1351,7 @@ int ata_init(void) | |||
1350 | 1351 | ||
1351 | last_disk_activity = current_tick; | 1352 | last_disk_activity = current_tick; |
1352 | #ifdef ATA_DRIVER_CLOSE | 1353 | #ifdef ATA_DRIVER_CLOSE |
1353 | ata_thread_p = | 1354 | ata_thread_id = |
1354 | #endif | 1355 | #endif |
1355 | create_thread(ata_thread, ata_stack, | 1356 | create_thread(ata_thread, ata_stack, |
1356 | sizeof(ata_stack), 0, ata_thread_name | 1357 | sizeof(ata_stack), 0, ata_thread_name |
@@ -1370,15 +1371,15 @@ int ata_init(void) | |||
1370 | #ifdef ATA_DRIVER_CLOSE | 1371 | #ifdef ATA_DRIVER_CLOSE |
1371 | void ata_close(void) | 1372 | void ata_close(void) |
1372 | { | 1373 | { |
1373 | struct thread_entry *thread = ata_thread_p; | 1374 | unsigned int thread_id = ata_thread_id; |
1374 | 1375 | ||
1375 | if (thread == NULL) | 1376 | if (thread_id == 0) |
1376 | return; | 1377 | return; |
1377 | 1378 | ||
1378 | ata_thread_p = NULL; | 1379 | ata_thread_id = 0; |
1379 | 1380 | ||
1380 | queue_post(&ata_queue, Q_CLOSE, 0); | 1381 | queue_post(&ata_queue, Q_CLOSE, 0); |
1381 | thread_wait(thread); | 1382 | thread_wait(thread_id); |
1382 | } | 1383 | } |
1383 | #endif /* ATA_DRIVER_CLOSE */ | 1384 | #endif /* ATA_DRIVER_CLOSE */ |
1384 | 1385 | ||
diff --git a/firmware/export/kernel.h b/firmware/export/kernel.h index beba58eb21..ef65463e5d 100644 --- a/firmware/export/kernel.h +++ b/firmware/export/kernel.h | |||
@@ -261,7 +261,7 @@ extern void queue_post(struct event_queue *q, long id, intptr_t data); | |||
261 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 261 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
262 | extern void queue_enable_queue_send(struct event_queue *q, | 262 | extern void queue_enable_queue_send(struct event_queue *q, |
263 | struct queue_sender_list *send, | 263 | struct queue_sender_list *send, |
264 | struct thread_entry *owner); | 264 | unsigned int owner_id); |
265 | extern intptr_t queue_send(struct event_queue *q, long id, intptr_t data); | 265 | extern intptr_t queue_send(struct event_queue *q, long id, intptr_t data); |
266 | extern void queue_reply(struct event_queue *q, intptr_t retval); | 266 | extern void queue_reply(struct event_queue *q, intptr_t retval); |
267 | extern bool queue_in_queue_send(struct event_queue *q); | 267 | extern bool queue_in_queue_send(struct event_queue *q); |
diff --git a/firmware/export/thread.h b/firmware/export/thread.h index c4dfbf4ed3..4c1e952347 100644 --- a/firmware/export/thread.h +++ b/firmware/export/thread.h | |||
@@ -58,9 +58,6 @@ | |||
58 | #define NUM_PRIORITIES 32 | 58 | #define NUM_PRIORITIES 32 |
59 | #define PRIORITY_IDLE 32 /* Priority representative of no tasks */ | 59 | #define PRIORITY_IDLE 32 /* Priority representative of no tasks */ |
60 | 60 | ||
61 | /* TODO: Only a minor tweak to create_thread would be needed to let | ||
62 | * thread slots be caller allocated - no essential threading functionality | ||
63 | * depends upon an array */ | ||
64 | #if CONFIG_CODEC == SWCODEC | 61 | #if CONFIG_CODEC == SWCODEC |
65 | 62 | ||
66 | #ifdef HAVE_RECORDING | 63 | #ifdef HAVE_RECORDING |
@@ -280,6 +277,7 @@ struct thread_entry | |||
280 | int skip_count; /* Number of times skipped if higher priority | 277 | int skip_count; /* Number of times skipped if higher priority |
281 | thread was running */ | 278 | thread was running */ |
282 | #endif | 279 | #endif |
280 | uint16_t id; /* Current slot id */ | ||
283 | unsigned short stack_size; /* Size of stack in bytes */ | 281 | unsigned short stack_size; /* Size of stack in bytes */ |
284 | #ifdef HAVE_PRIORITY_SCHEDULING | 282 | #ifdef HAVE_PRIORITY_SCHEDULING |
285 | unsigned char base_priority; /* Base priority (set explicitly during | 283 | unsigned char base_priority; /* Base priority (set explicitly during |
@@ -298,6 +296,16 @@ struct thread_entry | |||
298 | #endif | 296 | #endif |
299 | }; | 297 | }; |
300 | 298 | ||
299 | /*** Macros for internal use ***/ | ||
300 | /* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */ | ||
301 | #define THREAD_ID_VERSION_SHIFT 8 | ||
302 | #define THREAD_ID_VERSION_MASK 0xff00 | ||
303 | #define THREAD_ID_SLOT_MASK 0x00ff | ||
304 | #define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n)) | ||
305 | |||
306 | /* Specify current thread in a function taking an ID. */ | ||
307 | #define THREAD_ID_CURRENT ((unsigned int)-1) | ||
308 | |||
301 | #if NUM_CORES > 1 | 309 | #if NUM_CORES > 1 |
302 | /* Operations to be performed just before stopping a thread and starting | 310 | /* Operations to be performed just before stopping a thread and starting |
303 | a new one if specified before calling switch_thread */ | 311 | a new one if specified before calling switch_thread */ |
@@ -475,11 +483,11 @@ void init_threads(void); | |||
475 | 483 | ||
476 | /* Allocate a thread in the scheduler */ | 484 | /* Allocate a thread in the scheduler */ |
477 | #define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */ | 485 | #define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */ |
478 | struct thread_entry* | 486 | unsigned int create_thread(void (*function)(void), |
479 | create_thread(void (*function)(void), void* stack, size_t stack_size, | 487 | void* stack, size_t stack_size, |
480 | unsigned flags, const char *name | 488 | unsigned flags, const char *name |
481 | IF_PRIO(, int priority) | 489 | IF_PRIO(, int priority) |
482 | IF_COP(, unsigned int core)); | 490 | IF_COP(, unsigned int core)); |
483 | 491 | ||
484 | /* Set and clear the CPU frequency boost flag for the calling thread */ | 492 | /* Set and clear the CPU frequency boost flag for the calling thread */ |
485 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | 493 | #ifdef HAVE_SCHEDULER_BOOSTCTRL |
@@ -489,17 +497,19 @@ void cancel_cpu_boost(void); | |||
489 | #define trigger_cpu_boost() | 497 | #define trigger_cpu_boost() |
490 | #define cancel_cpu_boost() | 498 | #define cancel_cpu_boost() |
491 | #endif | 499 | #endif |
500 | /* Return thread entry from id */ | ||
501 | struct thread_entry *thread_id_entry(unsigned int thread_id); | ||
492 | /* Make a frozed thread runnable (when started with CREATE_THREAD_FROZEN). | 502 | /* Make a frozed thread runnable (when started with CREATE_THREAD_FROZEN). |
493 | * Has no effect on a thread not frozen. */ | 503 | * Has no effect on a thread not frozen. */ |
494 | void thread_thaw(struct thread_entry *thread); | 504 | void thread_thaw(unsigned int thread_id); |
495 | /* Wait for a thread to exit */ | 505 | /* Wait for a thread to exit */ |
496 | void thread_wait(struct thread_entry *thread); | 506 | void thread_wait(unsigned int thread_id); |
497 | /* Exit the current thread */ | 507 | /* Exit the current thread */ |
498 | void thread_exit(void); | 508 | void thread_exit(void); |
499 | #if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF) | 509 | #if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF) |
500 | #define ALLOW_REMOVE_THREAD | 510 | #define ALLOW_REMOVE_THREAD |
501 | /* Remove a thread from the scheduler */ | 511 | /* Remove a thread from the scheduler */ |
502 | void remove_thread(struct thread_entry *thread); | 512 | void remove_thread(unsigned int thread_id); |
503 | #endif | 513 | #endif |
504 | 514 | ||
505 | /* Switch to next runnable thread */ | 515 | /* Switch to next runnable thread */ |
@@ -526,13 +536,13 @@ unsigned int thread_queue_wake(struct thread_entry **list); | |||
526 | unsigned int wakeup_thread(struct thread_entry **list); | 536 | unsigned int wakeup_thread(struct thread_entry **list); |
527 | 537 | ||
528 | #ifdef HAVE_PRIORITY_SCHEDULING | 538 | #ifdef HAVE_PRIORITY_SCHEDULING |
529 | int thread_set_priority(struct thread_entry *thread, int priority); | 539 | int thread_set_priority(unsigned int thread_id, int priority); |
530 | int thread_get_priority(struct thread_entry *thread); | 540 | int thread_get_priority(unsigned int thread_id); |
531 | #endif /* HAVE_PRIORITY_SCHEDULING */ | 541 | #endif /* HAVE_PRIORITY_SCHEDULING */ |
532 | #if NUM_CORES > 1 | 542 | #if NUM_CORES > 1 |
533 | unsigned int switch_core(unsigned int new_core); | 543 | unsigned int switch_core(unsigned int new_core); |
534 | #endif | 544 | #endif |
535 | struct thread_entry * thread_get_current(void); | 545 | unsigned int thread_get_current(void); |
536 | 546 | ||
537 | /* Debugging info - only! */ | 547 | /* Debugging info - only! */ |
538 | int thread_stack_usage(const struct thread_entry *thread); | 548 | int thread_stack_usage(const struct thread_entry *thread); |
diff --git a/firmware/kernel.c b/firmware/kernel.c index 920893818a..553f6721a1 100644 --- a/firmware/kernel.c +++ b/firmware/kernel.c | |||
@@ -352,11 +352,12 @@ static void queue_remove_sender_thread_cb(struct thread_entry *thread) | |||
352 | * specified for priority inheritance to operate. | 352 | * specified for priority inheritance to operate. |
353 | * | 353 | * |
354 | * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous | 354 | * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous |
355 | * messages results in an undefined order of message replies. | 355 | * messages results in an undefined order of message replies or possible default |
356 | * replies if two or more waits happen before a reply is done. | ||
356 | */ | 357 | */ |
357 | void queue_enable_queue_send(struct event_queue *q, | 358 | void queue_enable_queue_send(struct event_queue *q, |
358 | struct queue_sender_list *send, | 359 | struct queue_sender_list *send, |
359 | struct thread_entry *owner) | 360 | unsigned int owner_id) |
360 | { | 361 | { |
361 | int oldlevel = disable_irq_save(); | 362 | int oldlevel = disable_irq_save(); |
362 | corelock_lock(&q->cl); | 363 | corelock_lock(&q->cl); |
@@ -367,9 +368,11 @@ void queue_enable_queue_send(struct event_queue *q, | |||
367 | #ifdef HAVE_PRIORITY_SCHEDULING | 368 | #ifdef HAVE_PRIORITY_SCHEDULING |
368 | send->blocker.wakeup_protocol = wakeup_priority_protocol_release; | 369 | send->blocker.wakeup_protocol = wakeup_priority_protocol_release; |
369 | send->blocker.priority = PRIORITY_IDLE; | 370 | send->blocker.priority = PRIORITY_IDLE; |
370 | send->blocker.thread = owner; | 371 | if(owner_id != 0) |
371 | if(owner != NULL) | 372 | { |
373 | send->blocker.thread = thread_id_entry(owner_id); | ||
372 | q->blocker_p = &send->blocker; | 374 | q->blocker_p = &send->blocker; |
375 | } | ||
373 | #endif | 376 | #endif |
374 | q->send = send; | 377 | q->send = send; |
375 | } | 378 | } |
@@ -377,7 +380,7 @@ void queue_enable_queue_send(struct event_queue *q, | |||
377 | corelock_unlock(&q->cl); | 380 | corelock_unlock(&q->cl); |
378 | restore_irq(oldlevel); | 381 | restore_irq(oldlevel); |
379 | 382 | ||
380 | (void)owner; | 383 | (void)owner_id; |
381 | } | 384 | } |
382 | 385 | ||
383 | /* Unblock a blocked thread at a given event index */ | 386 | /* Unblock a blocked thread at a given event index */ |
@@ -532,7 +535,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev) | |||
532 | 535 | ||
533 | #ifdef HAVE_PRIORITY_SCHEDULING | 536 | #ifdef HAVE_PRIORITY_SCHEDULING |
534 | KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL || | 537 | KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL || |
535 | QUEUE_GET_THREAD(q) == thread_get_current(), | 538 | QUEUE_GET_THREAD(q) == cores[CURRENT_CORE].running, |
536 | "queue_wait->wrong thread\n"); | 539 | "queue_wait->wrong thread\n"); |
537 | #endif | 540 | #endif |
538 | 541 | ||
@@ -579,7 +582,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks) | |||
579 | 582 | ||
580 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 583 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
581 | KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL || | 584 | KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL || |
582 | QUEUE_GET_THREAD(q) == thread_get_current(), | 585 | QUEUE_GET_THREAD(q) == cores[CURRENT_CORE].running, |
583 | "queue_wait_w_tmo->wrong thread\n"); | 586 | "queue_wait_w_tmo->wrong thread\n"); |
584 | #endif | 587 | #endif |
585 | 588 | ||
@@ -914,10 +917,10 @@ void mutex_lock(struct mutex *m) | |||
914 | void mutex_unlock(struct mutex *m) | 917 | void mutex_unlock(struct mutex *m) |
915 | { | 918 | { |
916 | /* unlocker not being the owner is an unlocking violation */ | 919 | /* unlocker not being the owner is an unlocking violation */ |
917 | KERNEL_ASSERT(MUTEX_GET_THREAD(m) == thread_get_current(), | 920 | KERNEL_ASSERT(MUTEX_GET_THREAD(m) == cores[CURRENT_CORE].running, |
918 | "mutex_unlock->wrong thread (%s != %s)\n", | 921 | "mutex_unlock->wrong thread (%s != %s)\n", |
919 | MUTEX_GET_THREAD(m)->name, | 922 | MUTEX_GET_THREAD(m)->name, |
920 | thread_get_current()->name); | 923 | cores[CURRENT_CORE].running->name); |
921 | 924 | ||
922 | if(m->count > 0) | 925 | if(m->count > 0) |
923 | { | 926 | { |
@@ -990,7 +993,7 @@ void spinlock_lock(struct spinlock *l) | |||
990 | void spinlock_unlock(struct spinlock *l) | 993 | void spinlock_unlock(struct spinlock *l) |
991 | { | 994 | { |
992 | /* unlocker not being the owner is an unlocking violation */ | 995 | /* unlocker not being the owner is an unlocking violation */ |
993 | KERNEL_ASSERT(l->thread == thread_get_current(), | 996 | KERNEL_ASSERT(l->thread == cores[CURRENT_CORE].running, |
994 | "spinlock_unlock->wrong thread\n"); | 997 | "spinlock_unlock->wrong thread\n"); |
995 | 998 | ||
996 | if(l->count > 0) | 999 | if(l->count > 0) |
diff --git a/firmware/target/arm/imx31/gigabeat-s/mc13783-imx31.c b/firmware/target/arm/imx31/gigabeat-s/mc13783-imx31.c index c185994bfc..81849d0852 100644 --- a/firmware/target/arm/imx31/gigabeat-s/mc13783-imx31.c +++ b/firmware/target/arm/imx31/gigabeat-s/mc13783-imx31.c | |||
@@ -69,7 +69,7 @@ static const unsigned char pmic_ints_regs[2] = | |||
69 | 69 | ||
70 | #ifdef PMIC_DRIVER_CLOSE | 70 | #ifdef PMIC_DRIVER_CLOSE |
71 | static bool pmic_close = false; | 71 | static bool pmic_close = false; |
72 | static struct thread_entry *mc13783_thread_p = NULL; | 72 | static unsigned int mc13783_thread_id = 0; |
73 | #endif | 73 | #endif |
74 | 74 | ||
75 | static void mc13783_interrupt_thread(void) | 75 | static void mc13783_interrupt_thread(void) |
@@ -149,7 +149,7 @@ void mc13783_init(void) | |||
149 | MC13783_GPIO_ISR = (1ul << MC13783_GPIO_LINE); | 149 | MC13783_GPIO_ISR = (1ul << MC13783_GPIO_LINE); |
150 | 150 | ||
151 | #ifdef PMIC_DRIVER_CLOSE | 151 | #ifdef PMIC_DRIVER_CLOSE |
152 | mc13783_thread_p = | 152 | mc13783_thread_id = |
153 | #endif | 153 | #endif |
154 | create_thread(mc13783_interrupt_thread, | 154 | create_thread(mc13783_interrupt_thread, |
155 | mc13783_thread_stack, sizeof(mc13783_thread_stack), 0, | 155 | mc13783_thread_stack, sizeof(mc13783_thread_stack), 0, |
@@ -159,16 +159,16 @@ void mc13783_init(void) | |||
159 | #ifdef PMIC_DRIVER_CLOSE | 159 | #ifdef PMIC_DRIVER_CLOSE |
160 | void mc13783_close(void) | 160 | void mc13783_close(void) |
161 | { | 161 | { |
162 | struct thread_entry *thread = mc13783_thread_p; | 162 | unsigned int thread_id = mc13783_thread_id; |
163 | 163 | ||
164 | if (thread == NULL) | 164 | if (thread_id == 0) |
165 | return; | 165 | return; |
166 | 166 | ||
167 | mc13783_thread_p = NULL; | 167 | mc13783_thread_id = 0; |
168 | 168 | ||
169 | pmic_close = true; | 169 | pmic_close = true; |
170 | wakeup_signal(&mc13783_wake); | 170 | wakeup_signal(&mc13783_wake); |
171 | thread_wait(thread); | 171 | thread_wait(thread_id); |
172 | } | 172 | } |
173 | #endif /* PMIC_DRIVER_CLOSE */ | 173 | #endif /* PMIC_DRIVER_CLOSE */ |
174 | 174 | ||
diff --git a/firmware/thread.c b/firmware/thread.c index c500fc4818..377c3355b4 100644 --- a/firmware/thread.c +++ b/firmware/thread.c | |||
@@ -1691,8 +1691,8 @@ struct thread_entry * | |||
1691 | struct thread_entry *next; | 1691 | struct thread_entry *next; |
1692 | int bl_pr; | 1692 | int bl_pr; |
1693 | 1693 | ||
1694 | THREAD_ASSERT(thread_get_current() == bl_t, | 1694 | THREAD_ASSERT(cores[CURRENT_CORE].running == bl_t, |
1695 | "UPPT->wrong thread", thread_get_current()); | 1695 | "UPPT->wrong thread", cores[CURRENT_CORE].running); |
1696 | 1696 | ||
1697 | LOCK_THREAD(bl_t); | 1697 | LOCK_THREAD(bl_t); |
1698 | 1698 | ||
@@ -2031,7 +2031,7 @@ void switch_thread(void) | |||
2031 | } | 2031 | } |
2032 | 2032 | ||
2033 | #ifdef RB_PROFILE | 2033 | #ifdef RB_PROFILE |
2034 | profile_thread_stopped(thread - threads); | 2034 | profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK); |
2035 | #endif | 2035 | #endif |
2036 | 2036 | ||
2037 | /* Begin task switching by saving our current context so that we can | 2037 | /* Begin task switching by saving our current context so that we can |
@@ -2136,7 +2136,7 @@ void switch_thread(void) | |||
2136 | load_context(&thread->context); | 2136 | load_context(&thread->context); |
2137 | 2137 | ||
2138 | #ifdef RB_PROFILE | 2138 | #ifdef RB_PROFILE |
2139 | profile_thread_started(thread - threads); | 2139 | profile_thread_started(thread->id & THREAD_ID_SLOT_MASK); |
2140 | #endif | 2140 | #endif |
2141 | 2141 | ||
2142 | } | 2142 | } |
@@ -2316,6 +2316,24 @@ unsigned int thread_queue_wake(struct thread_entry **list) | |||
2316 | } | 2316 | } |
2317 | 2317 | ||
2318 | /*--------------------------------------------------------------------------- | 2318 | /*--------------------------------------------------------------------------- |
2319 | * Assign the thread slot a new ID. Version is 1-255. | ||
2320 | *--------------------------------------------------------------------------- | ||
2321 | */ | ||
2322 | static void new_thread_id(unsigned int slot_num, | ||
2323 | struct thread_entry *thread) | ||
2324 | { | ||
2325 | unsigned int version = | ||
2326 | (thread->id + (1u << THREAD_ID_VERSION_SHIFT)) | ||
2327 | & THREAD_ID_VERSION_MASK; | ||
2328 | |||
2329 | /* If wrapped to 0, make it 1 */ | ||
2330 | if (version == 0) | ||
2331 | version = 1u << THREAD_ID_VERSION_SHIFT; | ||
2332 | |||
2333 | thread->id = version | (slot_num & THREAD_ID_SLOT_MASK); | ||
2334 | } | ||
2335 | |||
2336 | /*--------------------------------------------------------------------------- | ||
2319 | * Find an empty thread slot or MAXTHREADS if none found. The slot returned | 2337 | * Find an empty thread slot or MAXTHREADS if none found. The slot returned |
2320 | * will be locked on multicore. | 2338 | * will be locked on multicore. |
2321 | *--------------------------------------------------------------------------- | 2339 | *--------------------------------------------------------------------------- |
@@ -2349,6 +2367,17 @@ static struct thread_entry * find_empty_thread_slot(void) | |||
2349 | return thread; | 2367 | return thread; |
2350 | } | 2368 | } |
2351 | 2369 | ||
2370 | /*--------------------------------------------------------------------------- | ||
2371 | * Return the thread_entry pointer for a thread_id. Return the current | ||
2372 | * thread if the ID is 0 (alias for current). | ||
2373 | *--------------------------------------------------------------------------- | ||
2374 | */ | ||
2375 | struct thread_entry * thread_id_entry(unsigned int thread_id) | ||
2376 | { | ||
2377 | return (thread_id == THREAD_ID_CURRENT) ? | ||
2378 | cores[CURRENT_CORE].running : | ||
2379 | &threads[thread_id & THREAD_ID_SLOT_MASK]; | ||
2380 | } | ||
2352 | 2381 | ||
2353 | /*--------------------------------------------------------------------------- | 2382 | /*--------------------------------------------------------------------------- |
2354 | * Place the current core in idle mode - woken up on interrupt or wake | 2383 | * Place the current core in idle mode - woken up on interrupt or wake |
@@ -2369,11 +2398,11 @@ void core_idle(void) | |||
2369 | * Return ID if context area could be allocated, else NULL. | 2398 | * Return ID if context area could be allocated, else NULL. |
2370 | *--------------------------------------------------------------------------- | 2399 | *--------------------------------------------------------------------------- |
2371 | */ | 2400 | */ |
2372 | struct thread_entry* | 2401 | unsigned int create_thread(void (*function)(void), |
2373 | create_thread(void (*function)(void), void* stack, size_t stack_size, | 2402 | void* stack, size_t stack_size, |
2374 | unsigned flags, const char *name | 2403 | unsigned flags, const char *name |
2375 | IF_PRIO(, int priority) | 2404 | IF_PRIO(, int priority) |
2376 | IF_COP(, unsigned int core)) | 2405 | IF_COP(, unsigned int core)) |
2377 | { | 2406 | { |
2378 | unsigned int i; | 2407 | unsigned int i; |
2379 | unsigned int stack_words; | 2408 | unsigned int stack_words; |
@@ -2385,7 +2414,7 @@ struct thread_entry* | |||
2385 | thread = find_empty_thread_slot(); | 2414 | thread = find_empty_thread_slot(); |
2386 | if (thread == NULL) | 2415 | if (thread == NULL) |
2387 | { | 2416 | { |
2388 | return NULL; | 2417 | return 0; |
2389 | } | 2418 | } |
2390 | 2419 | ||
2391 | oldlevel = disable_irq_save(); | 2420 | oldlevel = disable_irq_save(); |
@@ -2443,15 +2472,15 @@ struct thread_entry* | |||
2443 | THREAD_STARTUP_INIT(core, thread, function); | 2472 | THREAD_STARTUP_INIT(core, thread, function); |
2444 | 2473 | ||
2445 | thread->state = state; | 2474 | thread->state = state; |
2475 | i = thread->id; /* Snapshot while locked */ | ||
2446 | 2476 | ||
2447 | if (state == STATE_RUNNING) | 2477 | if (state == STATE_RUNNING) |
2448 | core_schedule_wakeup(thread); | 2478 | core_schedule_wakeup(thread); |
2449 | 2479 | ||
2450 | UNLOCK_THREAD(thread); | 2480 | UNLOCK_THREAD(thread); |
2451 | |||
2452 | restore_irq(oldlevel); | 2481 | restore_irq(oldlevel); |
2453 | 2482 | ||
2454 | return thread; | 2483 | return i; |
2455 | } | 2484 | } |
2456 | 2485 | ||
2457 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | 2486 | #ifdef HAVE_SCHEDULER_BOOSTCTRL |
@@ -2489,18 +2518,17 @@ void cancel_cpu_boost(void) | |||
2489 | * Parameter is the ID as returned from create_thread(). | 2518 | * Parameter is the ID as returned from create_thread(). |
2490 | *--------------------------------------------------------------------------- | 2519 | *--------------------------------------------------------------------------- |
2491 | */ | 2520 | */ |
2492 | void thread_wait(struct thread_entry *thread) | 2521 | void thread_wait(unsigned int thread_id) |
2493 | { | 2522 | { |
2494 | struct thread_entry *current = cores[CURRENT_CORE].running; | 2523 | struct thread_entry *current = cores[CURRENT_CORE].running; |
2495 | 2524 | struct thread_entry *thread = thread_id_entry(thread_id); | |
2496 | if (thread == NULL) | ||
2497 | thread = current; | ||
2498 | 2525 | ||
2499 | /* Lock thread-as-waitable-object lock */ | 2526 | /* Lock thread-as-waitable-object lock */ |
2500 | corelock_lock(&thread->waiter_cl); | 2527 | corelock_lock(&thread->waiter_cl); |
2501 | 2528 | ||
2502 | /* Be sure it hasn't been killed yet */ | 2529 | /* Be sure it hasn't been killed yet */ |
2503 | if (thread->state != STATE_KILLED) | 2530 | if (thread_id == THREAD_ID_CURRENT || |
2531 | (thread->id == thread_id && thread->state != STATE_KILLED)) | ||
2504 | { | 2532 | { |
2505 | IF_COP( current->obj_cl = &thread->waiter_cl; ) | 2533 | IF_COP( current->obj_cl = &thread->waiter_cl; ) |
2506 | current->bqp = &thread->queue; | 2534 | current->bqp = &thread->queue; |
@@ -2538,9 +2566,10 @@ void thread_exit(void) | |||
2538 | if (current->name == THREAD_DESTRUCT) | 2566 | if (current->name == THREAD_DESTRUCT) |
2539 | { | 2567 | { |
2540 | /* Thread being killed - become a waiter */ | 2568 | /* Thread being killed - become a waiter */ |
2569 | unsigned int id = current->id; | ||
2541 | UNLOCK_THREAD(current); | 2570 | UNLOCK_THREAD(current); |
2542 | corelock_unlock(¤t->waiter_cl); | 2571 | corelock_unlock(¤t->waiter_cl); |
2543 | thread_wait(current); | 2572 | thread_wait(id); |
2544 | THREAD_PANICF("thread_exit->WK:*R", current); | 2573 | THREAD_PANICF("thread_exit->WK:*R", current); |
2545 | } | 2574 | } |
2546 | #endif | 2575 | #endif |
@@ -2568,7 +2597,13 @@ void thread_exit(void) | |||
2568 | } | 2597 | } |
2569 | 2598 | ||
2570 | flush_icache(); | 2599 | flush_icache(); |
2600 | |||
2601 | /* At this point, this thread isn't using resources allocated for | ||
2602 | * execution except the slot itself. */ | ||
2571 | #endif | 2603 | #endif |
2604 | |||
2605 | /* Update ID for this slot */ | ||
2606 | new_thread_id(current->id, current); | ||
2572 | current->name = NULL; | 2607 | current->name = NULL; |
2573 | 2608 | ||
2574 | /* Signal this thread */ | 2609 | /* Signal this thread */ |
@@ -2593,7 +2628,7 @@ void thread_exit(void) | |||
2593 | * leave various objects in an undefined state. | 2628 | * leave various objects in an undefined state. |
2594 | *--------------------------------------------------------------------------- | 2629 | *--------------------------------------------------------------------------- |
2595 | */ | 2630 | */ |
2596 | void remove_thread(struct thread_entry *thread) | 2631 | void remove_thread(unsigned int thread_id) |
2597 | { | 2632 | { |
2598 | #if NUM_CORES > 1 | 2633 | #if NUM_CORES > 1 |
2599 | /* core is not constant here because of core switching */ | 2634 | /* core is not constant here because of core switching */ |
@@ -2604,13 +2639,11 @@ void remove_thread(struct thread_entry *thread) | |||
2604 | const unsigned int core = CURRENT_CORE; | 2639 | const unsigned int core = CURRENT_CORE; |
2605 | #endif | 2640 | #endif |
2606 | struct thread_entry *current = cores[core].running; | 2641 | struct thread_entry *current = cores[core].running; |
2642 | struct thread_entry *thread = thread_id_entry(thread_id); | ||
2607 | 2643 | ||
2608 | unsigned state; | 2644 | unsigned state; |
2609 | int oldlevel; | 2645 | int oldlevel; |
2610 | 2646 | ||
2611 | if (thread == NULL) | ||
2612 | thread = current; | ||
2613 | |||
2614 | if (thread == current) | 2647 | if (thread == current) |
2615 | thread_exit(); /* Current thread - do normal exit */ | 2648 | thread_exit(); /* Current thread - do normal exit */ |
2616 | 2649 | ||
@@ -2621,10 +2654,8 @@ void remove_thread(struct thread_entry *thread) | |||
2621 | 2654 | ||
2622 | state = thread->state; | 2655 | state = thread->state; |
2623 | 2656 | ||
2624 | if (state == STATE_KILLED) | 2657 | if (thread->id != thread_id || state == STATE_KILLED) |
2625 | { | ||
2626 | goto thread_killed; | 2658 | goto thread_killed; |
2627 | } | ||
2628 | 2659 | ||
2629 | #if NUM_CORES > 1 | 2660 | #if NUM_CORES > 1 |
2630 | if (thread->name == THREAD_DESTRUCT) | 2661 | if (thread->name == THREAD_DESTRUCT) |
@@ -2633,7 +2664,7 @@ void remove_thread(struct thread_entry *thread) | |||
2633 | UNLOCK_THREAD(thread); | 2664 | UNLOCK_THREAD(thread); |
2634 | corelock_unlock(&thread->waiter_cl); | 2665 | corelock_unlock(&thread->waiter_cl); |
2635 | restore_irq(oldlevel); | 2666 | restore_irq(oldlevel); |
2636 | thread_wait(thread); | 2667 | thread_wait(thread_id); |
2637 | return; | 2668 | return; |
2638 | } | 2669 | } |
2639 | 2670 | ||
@@ -2741,6 +2772,7 @@ IF_COP( retry_state: ) | |||
2741 | /* Otherwise thread is frozen and hasn't run yet */ | 2772 | /* Otherwise thread is frozen and hasn't run yet */ |
2742 | } | 2773 | } |
2743 | 2774 | ||
2775 | new_thread_id(thread_id, thread); | ||
2744 | thread->state = STATE_KILLED; | 2776 | thread->state = STATE_KILLED; |
2745 | 2777 | ||
2746 | /* If thread was waiting on itself, it will have been removed above. | 2778 | /* If thread was waiting on itself, it will have been removed above. |
@@ -2773,17 +2805,15 @@ thread_killed: /* Thread was already killed */ | |||
2773 | * needed inheritance changes also may happen. | 2805 | * needed inheritance changes also may happen. |
2774 | *--------------------------------------------------------------------------- | 2806 | *--------------------------------------------------------------------------- |
2775 | */ | 2807 | */ |
2776 | int thread_set_priority(struct thread_entry *thread, int priority) | 2808 | int thread_set_priority(unsigned int thread_id, int priority) |
2777 | { | 2809 | { |
2778 | int old_base_priority = -1; | 2810 | int old_base_priority = -1; |
2811 | struct thread_entry *thread = thread_id_entry(thread_id); | ||
2779 | 2812 | ||
2780 | /* A little safety measure */ | 2813 | /* A little safety measure */ |
2781 | if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY) | 2814 | if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY) |
2782 | return -1; | 2815 | return -1; |
2783 | 2816 | ||
2784 | if (thread == NULL) | ||
2785 | thread = cores[CURRENT_CORE].running; | ||
2786 | |||
2787 | /* Thread could be on any list and therefore on an interrupt accessible | 2817 | /* Thread could be on any list and therefore on an interrupt accessible |
2788 | one - disable interrupts */ | 2818 | one - disable interrupts */ |
2789 | int oldlevel = disable_irq_save(); | 2819 | int oldlevel = disable_irq_save(); |
@@ -2791,7 +2821,8 @@ int thread_set_priority(struct thread_entry *thread, int priority) | |||
2791 | LOCK_THREAD(thread); | 2821 | LOCK_THREAD(thread); |
2792 | 2822 | ||
2793 | /* Make sure it's not killed */ | 2823 | /* Make sure it's not killed */ |
2794 | if (thread->state != STATE_KILLED) | 2824 | if (thread_id == THREAD_ID_CURRENT || |
2825 | (thread->id == thread_id && thread->state != STATE_KILLED)) | ||
2795 | { | 2826 | { |
2796 | int old_priority = thread->priority; | 2827 | int old_priority = thread->priority; |
2797 | 2828 | ||
@@ -2908,13 +2939,19 @@ int thread_set_priority(struct thread_entry *thread, int priority) | |||
2908 | * Returns the current base priority for a thread. | 2939 | * Returns the current base priority for a thread. |
2909 | *--------------------------------------------------------------------------- | 2940 | *--------------------------------------------------------------------------- |
2910 | */ | 2941 | */ |
2911 | int thread_get_priority(struct thread_entry *thread) | 2942 | int thread_get_priority(unsigned int thread_id) |
2912 | { | 2943 | { |
2913 | /* Simple, quick probe. */ | 2944 | struct thread_entry *thread = thread_id_entry(thread_id); |
2914 | if (thread == NULL) | 2945 | int base_priority = thread->base_priority; |
2915 | thread = cores[CURRENT_CORE].running; | ||
2916 | 2946 | ||
2917 | return thread->base_priority; | 2947 | /* Simply check without locking slot. It may or may not be valid by the |
2948 | * time the function returns anyway. If all tests pass, it is the | ||
2949 | * correct value for when it was valid. */ | ||
2950 | if (thread_id != THREAD_ID_CURRENT && | ||
2951 | (thread->id != thread_id || thread->state == STATE_KILLED)) | ||
2952 | base_priority = -1; | ||
2953 | |||
2954 | return base_priority; | ||
2918 | } | 2955 | } |
2919 | #endif /* HAVE_PRIORITY_SCHEDULING */ | 2956 | #endif /* HAVE_PRIORITY_SCHEDULING */ |
2920 | 2957 | ||
@@ -2924,12 +2961,16 @@ int thread_get_priority(struct thread_entry *thread) | |||
2924 | * virtue of the slot having a state of STATE_FROZEN. | 2961 | * virtue of the slot having a state of STATE_FROZEN. |
2925 | *--------------------------------------------------------------------------- | 2962 | *--------------------------------------------------------------------------- |
2926 | */ | 2963 | */ |
2927 | void thread_thaw(struct thread_entry *thread) | 2964 | void thread_thaw(unsigned int thread_id) |
2928 | { | 2965 | { |
2966 | struct thread_entry *thread = thread_id_entry(thread_id); | ||
2929 | int oldlevel = disable_irq_save(); | 2967 | int oldlevel = disable_irq_save(); |
2968 | |||
2930 | LOCK_THREAD(thread); | 2969 | LOCK_THREAD(thread); |
2931 | 2970 | ||
2932 | if (thread->state == STATE_FROZEN) | 2971 | /* If thread is the current one, it cannot be frozen, therefore |
2972 | * there is no need to check that. */ | ||
2973 | if (thread->id == thread_id && thread->state == STATE_FROZEN) | ||
2933 | core_schedule_wakeup(thread); | 2974 | core_schedule_wakeup(thread); |
2934 | 2975 | ||
2935 | UNLOCK_THREAD(thread); | 2976 | UNLOCK_THREAD(thread); |
@@ -2940,9 +2981,9 @@ void thread_thaw(struct thread_entry *thread) | |||
2940 | * Return the ID of the currently executing thread. | 2981 | * Return the ID of the currently executing thread. |
2941 | *--------------------------------------------------------------------------- | 2982 | *--------------------------------------------------------------------------- |
2942 | */ | 2983 | */ |
2943 | struct thread_entry * thread_get_current(void) | 2984 | unsigned int thread_get_current(void) |
2944 | { | 2985 | { |
2945 | return cores[CURRENT_CORE].running; | 2986 | return cores[CURRENT_CORE].running->id; |
2946 | } | 2987 | } |
2947 | 2988 | ||
2948 | #if NUM_CORES > 1 | 2989 | #if NUM_CORES > 1 |
@@ -2967,9 +3008,10 @@ unsigned int switch_core(unsigned int new_core) | |||
2967 | if (current->name == THREAD_DESTRUCT) | 3008 | if (current->name == THREAD_DESTRUCT) |
2968 | { | 3009 | { |
2969 | /* Thread being killed - deactivate and let process complete */ | 3010 | /* Thread being killed - deactivate and let process complete */ |
3011 | unsigned int id = current->id; | ||
2970 | UNLOCK_THREAD(current); | 3012 | UNLOCK_THREAD(current); |
2971 | restore_irq(oldlevel); | 3013 | restore_irq(oldlevel); |
2972 | thread_wait(current); | 3014 | thread_wait(id); |
2973 | /* Should never be reached */ | 3015 | /* Should never be reached */ |
2974 | THREAD_PANICF("switch_core->D:*R", current); | 3016 | THREAD_PANICF("switch_core->D:*R", current); |
2975 | } | 3017 | } |
@@ -3034,6 +3076,19 @@ void init_threads(void) | |||
3034 | const unsigned int core = CURRENT_CORE; | 3076 | const unsigned int core = CURRENT_CORE; |
3035 | struct thread_entry *thread; | 3077 | struct thread_entry *thread; |
3036 | 3078 | ||
3079 | if (core == CPU) | ||
3080 | { | ||
3081 | /* Initialize core locks and IDs in all slots */ | ||
3082 | int n; | ||
3083 | for (n = 0; n < MAXTHREADS; n++) | ||
3084 | { | ||
3085 | thread = &threads[n]; | ||
3086 | corelock_init(&thread->waiter_cl); | ||
3087 | corelock_init(&thread->slot_cl); | ||
3088 | thread->id = THREAD_ID_INIT(n); | ||
3089 | } | ||
3090 | } | ||
3091 | |||
3037 | /* CPU will initialize first and then sleep */ | 3092 | /* CPU will initialize first and then sleep */ |
3038 | thread = find_empty_thread_slot(); | 3093 | thread = find_empty_thread_slot(); |
3039 | 3094 | ||
@@ -3060,8 +3115,6 @@ void init_threads(void) | |||
3060 | thread->priority = PRIORITY_USER_INTERFACE; | 3115 | thread->priority = PRIORITY_USER_INTERFACE; |
3061 | rtr_add_entry(core, PRIORITY_USER_INTERFACE); | 3116 | rtr_add_entry(core, PRIORITY_USER_INTERFACE); |
3062 | #endif | 3117 | #endif |
3063 | corelock_init(&thread->waiter_cl); | ||
3064 | corelock_init(&thread->slot_cl); | ||
3065 | 3118 | ||
3066 | add_to_list_l(&cores[core].running, thread); | 3119 | add_to_list_l(&cores[core].running, thread); |
3067 | 3120 | ||
@@ -3070,6 +3123,7 @@ void init_threads(void) | |||
3070 | thread->stack = stackbegin; | 3123 | thread->stack = stackbegin; |
3071 | thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin; | 3124 | thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin; |
3072 | #if NUM_CORES > 1 /* This code path will not be run on single core targets */ | 3125 | #if NUM_CORES > 1 /* This code path will not be run on single core targets */ |
3126 | /* Initialize all locking for the slots */ | ||
3073 | /* Wait for other processors to finish their inits since create_thread | 3127 | /* Wait for other processors to finish their inits since create_thread |
3074 | * isn't safe to call until the kernel inits are done. The first | 3128 | * isn't safe to call until the kernel inits are done. The first |
3075 | * threads created in the system must of course be created by CPU. */ | 3129 | * threads created in the system must of course be created by CPU. */ |
diff --git a/firmware/usb.c b/firmware/usb.c index f9bfbc4dbf..2bff53e5d6 100644 --- a/firmware/usb.c +++ b/firmware/usb.c | |||
@@ -78,7 +78,7 @@ static int usb_mmc_countdown = 0; | |||
78 | #ifdef USB_FULL_INIT | 78 | #ifdef USB_FULL_INIT |
79 | static long usb_stack[(DEFAULT_STACK_SIZE + 0x800)/sizeof(long)]; | 79 | static long usb_stack[(DEFAULT_STACK_SIZE + 0x800)/sizeof(long)]; |
80 | static const char usb_thread_name[] = "usb"; | 80 | static const char usb_thread_name[] = "usb"; |
81 | static struct thread_entry *usb_thread_entry; | 81 | static unsigned int usb_thread_entry = 0; |
82 | #endif | 82 | #endif |
83 | static struct event_queue usb_queue; | 83 | static struct event_queue usb_queue; |
84 | static int last_usb_status; | 84 | static int last_usb_status; |
@@ -539,10 +539,10 @@ void usb_start_monitoring(void) | |||
539 | #ifdef USB_DRIVER_CLOSE | 539 | #ifdef USB_DRIVER_CLOSE |
540 | void usb_close(void) | 540 | void usb_close(void) |
541 | { | 541 | { |
542 | struct thread_entry *thread = usb_thread_entry; | 542 | uintptr_t thread = usb_thread_entry; |
543 | usb_thread_entry = NULL; | 543 | usb_thread_entry = 0; |
544 | 544 | ||
545 | if (thread == NULL) | 545 | if (thread == 0) |
546 | return; | 546 | return; |
547 | 547 | ||
548 | tick_remove_task(usb_tick); | 548 | tick_remove_task(usb_tick); |