summaryrefslogtreecommitdiff
path: root/firmware/export
diff options
context:
space:
mode:
authorThomas Martitz <kugel@rockbox.org>2013-12-04 17:06:17 +0100
committerThomas Martitz <kugel@rockbox.org>2014-03-03 18:11:57 +0100
commit382d1861af12741af4ff235b9d18f179c0adc4c5 (patch)
tree26166c130d2889bb1ae1082e8f7aba103534f49e /firmware/export
parent8bae5f2644b5d5759499fbf1066b9c35c6f859ad (diff)
downloadrockbox-382d1861af12741af4ff235b9d18f179c0adc4c5.tar.gz
rockbox-382d1861af12741af4ff235b9d18f179c0adc4c5.zip
kernel: Break out kernel primitives into separate files and move to separate dir.
No code changed, just shuffling stuff around. This should make it easier to build only select parts of the kernel and use different implementations. Change-Id: Ie1f00f93008833ce38419d760afd70062c5e22b5
Diffstat (limited to 'firmware/export')
-rw-r--r--firmware/export/kernel.h285
-rw-r--r--firmware/export/system.h5
-rw-r--r--firmware/export/thread.h408
3 files changed, 4 insertions, 694 deletions
diff --git a/firmware/export/kernel.h b/firmware/export/kernel.h
deleted file mode 100644
index 3cadefdf68..0000000000
--- a/firmware/export/kernel.h
+++ /dev/null
@@ -1,285 +0,0 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Björn Stenberg
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21#ifndef _KERNEL_H_
22#define _KERNEL_H_
23
24#include <stdbool.h>
25#include <inttypes.h>
26#include "config.h"
27
28#include "thread.h"
29
30/* wrap-safe macros for tick comparison */
31#define TIME_AFTER(a,b) ((long)(b) - (long)(a) < 0)
32#define TIME_BEFORE(a,b) TIME_AFTER(b,a)
33
34#define HZ 100 /* number of ticks per second */
35
36#define MAX_NUM_TICK_TASKS 8
37
38#define MAX_NUM_QUEUES 32
39#define QUEUE_LENGTH 16 /* MUST be a power of 2 */
40#define QUEUE_LENGTH_MASK (QUEUE_LENGTH - 1)
41
42/* System defined message ID's - |sign bit = 1|class|id| */
43/* Event class list */
44#define SYS_EVENT_CLS_QUEUE 0
45#define SYS_EVENT_CLS_USB 1
46#define SYS_EVENT_CLS_POWER 2
47#define SYS_EVENT_CLS_FILESYS 3
48#define SYS_EVENT_CLS_PLUG 4
49#define SYS_EVENT_CLS_MISC 5
50#define SYS_EVENT_CLS_PRIVATE 7 /* For use inside plugins */
51/* make sure SYS_EVENT_CLS_BITS has enough range */
52
53/* Bit 31->|S|c...c|i...i| */
54#define SYS_EVENT ((long)(int)(1 << 31))
55#define SYS_EVENT_CLS_BITS (3)
56#define SYS_EVENT_CLS_SHIFT (31-SYS_EVENT_CLS_BITS)
57#define SYS_EVENT_CLS_MASK (((1l << SYS_EVENT_CLS_BITS)-1) << SYS_EVENT_SHIFT)
58#define MAKE_SYS_EVENT(cls, id) (SYS_EVENT | ((long)(cls) << SYS_EVENT_CLS_SHIFT) | (long)(id))
59/* Macros for extracting codes */
60#define SYS_EVENT_CLS(e) (((e) & SYS_EVENT_CLS_MASK) >> SYS_EVENT_SHIFT)
61#define SYS_EVENT_ID(e) ((e) & ~(SYS_EVENT|SYS_EVENT_CLS_MASK))
62
63#define SYS_TIMEOUT MAKE_SYS_EVENT(SYS_EVENT_CLS_QUEUE, 0)
64#define SYS_USB_CONNECTED MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 0)
65#define SYS_USB_CONNECTED_ACK MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 1)
66#define SYS_USB_DISCONNECTED MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 2)
67#define SYS_USB_LUN_LOCKED MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 4)
68#define SYS_USB_READ_DATA MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 5)
69#define SYS_USB_WRITE_DATA MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 6)
70#define SYS_POWEROFF MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 0)
71#define SYS_CHARGER_CONNECTED MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 1)
72#define SYS_CHARGER_DISCONNECTED MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 2)
73#define SYS_BATTERY_UPDATE MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 3)
74#define SYS_FS_CHANGED MAKE_SYS_EVENT(SYS_EVENT_CLS_FILESYS, 0)
75#define SYS_HOTSWAP_INSERTED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 0)
76#define SYS_HOTSWAP_EXTRACTED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 1)
77#define SYS_PHONE_PLUGGED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 2)
78#define SYS_PHONE_UNPLUGGED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 3)
79#define SYS_REMOTE_PLUGGED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 4)
80#define SYS_REMOTE_UNPLUGGED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 5)
81#define SYS_CAR_ADAPTER_RESUME MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 0)
82#define SYS_CALL_INCOMING MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 3)
83#define SYS_CALL_HUNG_UP MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 4)
84#define SYS_VOLUME_CHANGED MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 5)
85
86#define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT)
87
88#ifndef TIMEOUT_BLOCK
89#define TIMEOUT_BLOCK -1
90#define TIMEOUT_NOBLOCK 0
91#endif
92
93struct queue_event
94{
95 long id;
96 intptr_t data;
97};
98
99#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
100struct queue_sender_list
101{
102 /* If non-NULL, there is a thread waiting for the corresponding event */
103 /* Must be statically allocated to put in non-cached ram. */
104 struct thread_entry *senders[QUEUE_LENGTH]; /* message->thread map */
105 struct thread_entry *list; /* list of senders in map */
106 /* Send info for last message dequeued or NULL if replied or not sent */
107 struct thread_entry * volatile curr_sender;
108#ifdef HAVE_PRIORITY_SCHEDULING
109 struct blocker blocker;
110#endif
111};
112#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
113
114#ifdef HAVE_PRIORITY_SCHEDULING
115#define QUEUE_GET_THREAD(q) \
116 (((q)->send == NULL) ? NULL : (q)->send->blocker.thread)
117#else
118/* Queue without priority enabled have no owner provision _at this time_ */
119#define QUEUE_GET_THREAD(q) \
120 (NULL)
121#endif
122
123struct event_queue
124{
125 struct thread_entry *queue; /* waiter list */
126 struct queue_event events[QUEUE_LENGTH]; /* list of events */
127 unsigned int volatile read; /* head of queue */
128 unsigned int volatile write; /* tail of queue */
129#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
130 struct queue_sender_list * volatile send; /* list of threads waiting for
131 reply to an event */
132#ifdef HAVE_PRIORITY_SCHEDULING
133 struct blocker *blocker_p; /* priority inheritance info
134 for sync message senders */
135#endif
136#endif
137 IF_COP( struct corelock cl; ) /* multiprocessor sync */
138};
139
140struct mutex
141{
142 struct thread_entry *queue; /* waiter list */
143 int recursion; /* lock owner recursion count */
144#ifdef HAVE_PRIORITY_SCHEDULING
145 struct blocker blocker; /* priority inheritance info
146 for waiters */
147 bool no_preempt; /* don't allow higher-priority thread
148 to be scheduled even if woken */
149#else
150 struct thread_entry *thread; /* Indicates owner thread - an owner
151 implies a locked state - same goes
152 for priority scheduling
153 (in blocker struct for that) */
154#endif
155 IF_COP( struct corelock cl; ) /* multiprocessor sync */
156};
157
158#ifdef HAVE_SEMAPHORE_OBJECTS
159struct semaphore
160{
161 struct thread_entry *queue; /* Waiter list */
162 int volatile count; /* # of waits remaining before unsignaled */
163 int max; /* maximum # of waits to remain signaled */
164 IF_COP( struct corelock cl; ) /* multiprocessor sync */
165};
166#endif
167
168/* global tick variable */
169#if defined(CPU_PP) && defined(BOOTLOADER) && \
170 !defined(HAVE_BOOTLOADER_USB_MODE)
171/* We don't enable interrupts in the PP bootloader unless USB mode is
172 enabled for it, so we need to fake the current_tick variable */
173#define current_tick (signed)(USEC_TIMER/10000)
174
175static inline void call_tick_tasks(void)
176{
177}
178#else
179extern volatile long current_tick;
180
181/* inline helper for implementing target interrupt handler */
182static inline void call_tick_tasks(void)
183{
184 extern void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
185 void (**p)(void) = tick_funcs;
186 void (*fn)(void);
187
188 current_tick++;
189
190 for(fn = *p; fn != NULL; fn = *(++p))
191 {
192 fn();
193 }
194}
195#endif
196
197/* kernel functions */
198extern void kernel_init(void) INIT_ATTR;
199extern void yield(void);
200extern unsigned sleep(unsigned ticks);
201int tick_add_task(void (*f)(void));
202int tick_remove_task(void (*f)(void));
203extern void tick_start(unsigned int interval_in_ms) INIT_ATTR;
204
205#ifdef INCLUDE_TIMEOUT_API
206struct timeout;
207
208/* timeout callback type
209 * tmo - pointer to struct timeout associated with event
210 * return next interval or <= 0 to stop event
211 */
212#define MAX_NUM_TIMEOUTS 8
213typedef int (* timeout_cb_type)(struct timeout *tmo);
214
215struct timeout
216{
217 timeout_cb_type callback;/* callback - returning false cancels */
218 intptr_t data; /* data passed to callback */
219 long expires; /* expiration tick */
220};
221
222void timeout_register(struct timeout *tmo, timeout_cb_type callback,
223 int ticks, intptr_t data);
224void timeout_cancel(struct timeout *tmo);
225#endif /* INCLUDE_TIMEOUT_API */
226
227#define STATE_NONSIGNALED 0
228#define STATE_SIGNALED 1
229
230#define OBJ_WAIT_TIMEDOUT (-1)
231#define OBJ_WAIT_FAILED 0
232#define OBJ_WAIT_SUCCEEDED 1
233
234extern void queue_init(struct event_queue *q, bool register_queue);
235extern void queue_delete(struct event_queue *q);
236extern void queue_wait(struct event_queue *q, struct queue_event *ev);
237extern void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev,
238 int ticks);
239extern void queue_post(struct event_queue *q, long id, intptr_t data);
240#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
241extern void queue_enable_queue_send(struct event_queue *q,
242 struct queue_sender_list *send,
243 unsigned int owner_id);
244extern intptr_t queue_send(struct event_queue *q, long id, intptr_t data);
245extern void queue_reply(struct event_queue *q, intptr_t retval);
246extern bool queue_in_queue_send(struct event_queue *q);
247#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
248extern bool queue_empty(const struct event_queue* q);
249extern bool queue_peek(struct event_queue *q, struct queue_event *ev);
250
251#define QPEEK_FILTER_COUNT_MASK (0xffu) /* 0x00=1 filter, 0xff=256 filters */
252#define QPEEK_FILTER_HEAD_ONLY (1u << 8) /* Ignored if no filters */
253#define QPEEK_REMOVE_EVENTS (1u << 9) /* Remove or discard events */
254extern bool queue_peek_ex(struct event_queue *q,
255 struct queue_event *ev,
256 unsigned int flags,
257 const long (*filters)[2]);
258
259extern void queue_clear(struct event_queue* q);
260extern void queue_remove_from_head(struct event_queue *q, long id);
261extern int queue_count(const struct event_queue *q);
262extern int queue_broadcast(long id, intptr_t data);
263
264extern void mutex_init(struct mutex *m);
265extern void mutex_lock(struct mutex *m);
266extern void mutex_unlock(struct mutex *m);
267#ifdef HAVE_PRIORITY_SCHEDULING
268/* Deprecated temporary function to disable mutex preempting a thread on
269 * unlock - firmware/drivers/fat.c and a couple places in apps/buffering.c -
270 * reliance on it is a bug! */
271static inline void mutex_set_preempt(struct mutex *m, bool preempt)
272 { m->no_preempt = !preempt; }
273#else
274/* Deprecated but needed for now - firmware/drivers/ata_mmc.c */
275static inline bool mutex_test(const struct mutex *m)
276 { return m->thread != NULL; }
277#endif /* HAVE_PRIORITY_SCHEDULING */
278
279#ifdef HAVE_SEMAPHORE_OBJECTS
280extern void semaphore_init(struct semaphore *s, int max, int start);
281extern int semaphore_wait(struct semaphore *s, int timeout);
282extern void semaphore_release(struct semaphore *s);
283#endif /* HAVE_SEMAPHORE_OBJECTS */
284
285#endif /* _KERNEL_H_ */
diff --git a/firmware/export/system.h b/firmware/export/system.h
index 25f9287618..1dab352071 100644
--- a/firmware/export/system.h
+++ b/firmware/export/system.h
@@ -24,7 +24,6 @@
24 24
25#include <stdbool.h> 25#include <stdbool.h>
26#include <stdint.h> 26#include <stdint.h>
27
28#include "cpu.h" 27#include "cpu.h"
29#include "gcc_extensions.h" /* for LIKELY/UNLIKELY */ 28#include "gcc_extensions.h" /* for LIKELY/UNLIKELY */
30 29
@@ -86,6 +85,10 @@ int get_cpu_boost_counter(void);
86 85
87#define BAUDRATE 9600 86#define BAUDRATE 9600
88 87
88/* wrap-safe macros for tick comparison */
89#define TIME_AFTER(a,b) ((long)(b) - (long)(a) < 0)
90#define TIME_BEFORE(a,b) TIME_AFTER(b,a)
91
89#ifndef NULL 92#ifndef NULL
90#define NULL ((void*)0) 93#define NULL ((void*)0)
91#endif 94#endif
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
deleted file mode 100644
index da395b8ffa..0000000000
--- a/firmware/export/thread.h
+++ /dev/null
@@ -1,408 +0,0 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Ulf Ralberg
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22#ifndef THREAD_H
23#define THREAD_H
24
25#include "config.h"
26#include <inttypes.h>
27#include <stddef.h>
28#include <stdbool.h>
29#include "gcc_extensions.h"
30
31/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
32 * by giving high priority threads more CPU time than lower priority threads
33 * when they need it. Priority is differential such that the priority
34 * difference between a lower priority runnable thread and the highest priority
35 * runnable thread determines the amount of aging necessary for the lower
36 * priority thread to be scheduled in order to prevent starvation.
37 *
38 * If software playback codec pcm buffer is going down to critical, codec
39 * can gradually raise its own priority to override user interface and
40 * prevent playback skipping.
41 */
42#define PRIORITY_RESERVED_HIGH 0 /* Reserved */
43#define PRIORITY_RESERVED_LOW 32 /* Reserved */
44#define HIGHEST_PRIORITY 1 /* The highest possible thread priority */
45#define LOWEST_PRIORITY 31 /* The lowest possible thread priority */
46/* Realtime range reserved for threads that will not allow threads of lower
47 * priority to age and run (future expansion) */
48#define PRIORITY_REALTIME_1 1
49#define PRIORITY_REALTIME_2 2
50#define PRIORITY_REALTIME_3 3
51#define PRIORITY_REALTIME_4 4
52#define PRIORITY_REALTIME 4 /* Lowest realtime range */
53#define PRIORITY_BUFFERING 15 /* Codec buffering thread */
54#define PRIORITY_USER_INTERFACE 16 /* The main thread */
55#define PRIORITY_RECORDING 16 /* Recording thread */
56#define PRIORITY_PLAYBACK 16 /* Variable between this and MAX */
57#define PRIORITY_PLAYBACK_MAX 5 /* Maximum allowable playback priority */
58#define PRIORITY_SYSTEM 18 /* All other firmware threads */
59#define PRIORITY_BACKGROUND 20 /* Normal application threads */
60#define NUM_PRIORITIES 32
61#define PRIORITY_IDLE 32 /* Priority representative of no tasks */
62
63#define IO_PRIORITY_IMMEDIATE 0
64#define IO_PRIORITY_BACKGROUND 32
65
66#if CONFIG_CODEC == SWCODEC
67# ifdef HAVE_HARDWARE_CLICK
68# define BASETHREADS 17
69# else
70# define BASETHREADS 16
71# endif
72#else
73# define BASETHREADS 11
74#endif /* CONFIG_CODE == * */
75
76#ifndef TARGET_EXTRA_THREADS
77#define TARGET_EXTRA_THREADS 0
78#endif
79
80#define MAXTHREADS (BASETHREADS+TARGET_EXTRA_THREADS)
81
82/*
83 * We need more stack when we run under a host
84 * maybe more expensive C lib functions?
85 *
86 * simulator (possibly) doesn't simulate stack usage anyway but well ... */
87
88#if defined(HAVE_SDL_THREADS) || defined(__PCTOOL__)
89struct regs
90{
91 void *t; /* OS thread */
92 void *told; /* Last thread in slot (explained in thead-sdl.c) */
93 void *s; /* Semaphore for blocking and wakeup */
94 void (*start)(void); /* Start function */
95};
96
97#define DEFAULT_STACK_SIZE 0x100 /* tiny, ignored anyway */
98#else
99#include "asm/thread.h"
100#endif /* HAVE_SDL_THREADS */
101
102#ifdef CPU_PP
103#ifdef HAVE_CORELOCK_OBJECT
104/* No reliable atomic instruction available - use Peterson's algorithm */
105struct corelock
106{
107 volatile unsigned char myl[NUM_CORES];
108 volatile unsigned char turn;
109} __attribute__((packed));
110
111/* Too big to inline everywhere */
112void corelock_init(struct corelock *cl);
113void corelock_lock(struct corelock *cl);
114int corelock_try_lock(struct corelock *cl);
115void corelock_unlock(struct corelock *cl);
116#endif /* HAVE_CORELOCK_OBJECT */
117#endif /* CPU_PP */
118
119/* NOTE: The use of the word "queue" may also refer to a linked list of
120 threads being maintained that are normally dealt with in FIFO order
121 and not necessarily kernel event_queue */
122enum
123{
124 /* States without a timeout must be first */
125 STATE_KILLED = 0, /* Thread is killed (default) */
126 STATE_RUNNING, /* Thread is currently running */
127 STATE_BLOCKED, /* Thread is indefinitely blocked on a queue */
128 /* These states involve adding the thread to the tmo list */
129 STATE_SLEEPING, /* Thread is sleeping with a timeout */
130 STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
131 /* Miscellaneous states */
132 STATE_FROZEN, /* Thread is suspended and will not run until
133 thread_thaw is called with its ID */
134 THREAD_NUM_STATES,
135 TIMEOUT_STATE_FIRST = STATE_SLEEPING,
136};
137
138#if NUM_CORES > 1
139/* Pointer value for name field to indicate thread is being killed. Using
140 * an alternate STATE_* won't work since that would interfere with operation
141 * while the thread is still running. */
142#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
143#endif
144
145/* Link information for lists thread is in */
146struct thread_entry; /* forward */
147struct thread_list
148{
149 struct thread_entry *prev; /* Previous thread in a list */
150 struct thread_entry *next; /* Next thread in a list */
151};
152
153#ifndef HAVE_CORELOCK_OBJECT
154/* No atomic corelock op needed or just none defined */
155#define corelock_init(cl)
156#define corelock_lock(cl)
157#define corelock_try_lock(cl)
158#define corelock_unlock(cl)
159#endif /* HAVE_CORELOCK_OBJECT */
160
161#ifdef HAVE_PRIORITY_SCHEDULING
162struct blocker
163{
164 struct thread_entry * volatile thread; /* thread blocking other threads
165 (aka. object owner) */
166 int priority; /* highest priority waiter */
167 struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
168};
169
170/* Choices of wakeup protocol */
171
172/* For transfer of object ownership by one thread to another thread by
173 * the owning thread itself (mutexes) */
174struct thread_entry *
175 wakeup_priority_protocol_transfer(struct thread_entry *thread);
176
177/* For release by owner where ownership doesn't change - other threads,
178 * interrupts, timeouts, etc. (mutex timeout, queues) */
179struct thread_entry *
180 wakeup_priority_protocol_release(struct thread_entry *thread);
181
182
183struct priority_distribution
184{
185 uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
186 uint32_t mask; /* Bitmask of hist entries that are not zero */
187};
188
189#endif /* HAVE_PRIORITY_SCHEDULING */
190
191/* Information kept in each thread slot
192 * members are arranged according to size - largest first - in order
193 * to ensure both alignment and packing at the same time.
194 */
195struct thread_entry
196{
197 struct regs context; /* Register context at switch -
198 _must_ be first member */
199 uintptr_t *stack; /* Pointer to top of stack */
200 const char *name; /* Thread name */
201 long tmo_tick; /* Tick when thread should be woken from
202 timeout -
203 states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
204 struct thread_list l; /* Links for blocked/waking/running -
205 circular linkage in both directions */
206 struct thread_list tmo; /* Links for timeout list -
207 Circular in reverse direction, NULL-terminated in
208 forward direction -
209 states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
210 struct thread_entry **bqp; /* Pointer to list variable in kernel
211 object where thread is blocked - used
212 for implicit unblock and explicit wake
213 states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
214#ifdef HAVE_CORELOCK_OBJECT
215 struct corelock *obj_cl; /* Object corelock where thead is blocked -
216 states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
217 struct corelock waiter_cl; /* Corelock for thread_wait */
218 struct corelock slot_cl; /* Corelock to lock thread slot */
219 unsigned char core; /* The core to which thread belongs */
220#endif
221 struct thread_entry *queue; /* List of threads waiting for thread to be
222 removed */
223#ifdef HAVE_WAKEUP_EXT_CB
224 void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
225 performs special steps needed when being
226 forced off of an object's wait queue that
227 go beyond the standard wait queue removal
228 and priority disinheritance */
229 /* Only enabled when using queue_send for now */
230#endif
231#if defined(HAVE_SEMAPHORE_OBJECTS) || \
232 defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \
233 NUM_CORES > 1
234 volatile intptr_t retval; /* Return value from a blocked operation/
235 misc. use */
236#endif
237#ifdef HAVE_PRIORITY_SCHEDULING
238 /* Priority summary of owned objects that support inheritance */
239 struct blocker *blocker; /* Pointer to blocker when this thread is blocked
240 on an object that supports PIP -
241 states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
242 struct priority_distribution pdist; /* Priority summary of owned objects
243 that have blocked threads and thread's own
244 base priority */
245 int skip_count; /* Number of times skipped if higher priority
246 thread was running */
247 unsigned char base_priority; /* Base priority (set explicitly during
248 creation or thread_set_priority) */
249 unsigned char priority; /* Scheduled priority (higher of base or
250 all threads blocked by this one) */
251#endif
252 uint16_t id; /* Current slot id */
253 unsigned short stack_size; /* Size of stack in bytes */
254 unsigned char state; /* Thread slot state (STATE_*) */
255#ifdef HAVE_SCHEDULER_BOOSTCTRL
256 unsigned char cpu_boost; /* CPU frequency boost flag */
257#endif
258#ifdef HAVE_IO_PRIORITY
259 unsigned char io_priority;
260#endif
261};
262
263/*** Macros for internal use ***/
264/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */
265#define THREAD_ID_VERSION_SHIFT 8
266#define THREAD_ID_VERSION_MASK 0xff00
267#define THREAD_ID_SLOT_MASK 0x00ff
268#define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n))
269
270#ifdef HAVE_CORELOCK_OBJECT
271/* Operations to be performed just before stopping a thread and starting
272 a new one if specified before calling switch_thread */
273enum
274{
275 TBOP_CLEAR = 0, /* No operation to do */
276 TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
277 TBOP_SWITCH_CORE, /* Call the core switch preparation routine */
278};
279
280struct thread_blk_ops
281{
282 struct corelock *cl_p; /* pointer to corelock */
283 unsigned char flags; /* TBOP_* flags */
284};
285#endif /* NUM_CORES > 1 */
286
287/* Information kept for each core
288 * Members are arranged for the same reason as in thread_entry
289 */
290struct core_entry
291{
292 /* "Active" lists - core is constantly active on these and are never
293 locked and interrupts do not access them */
294 struct thread_entry *running; /* threads that are running (RTR) */
295 struct thread_entry *timeout; /* threads that are on a timeout before
296 running again */
297 struct thread_entry *block_task; /* Task going off running list */
298#ifdef HAVE_PRIORITY_SCHEDULING
299 struct priority_distribution rtr; /* Summary of running and ready-to-run
300 threads */
301#endif
302 long next_tmo_check; /* soonest time to check tmo threads */
303#ifdef HAVE_CORELOCK_OBJECT
304 struct thread_blk_ops blk_ops; /* operations to perform when
305 blocking a thread */
306 struct corelock rtr_cl; /* Lock for rtr list */
307#endif /* NUM_CORES */
308};
309
310#ifdef HAVE_PRIORITY_SCHEDULING
311#define IF_PRIO(...) __VA_ARGS__
312#define IFN_PRIO(...)
313#else
314#define IF_PRIO(...)
315#define IFN_PRIO(...) __VA_ARGS__
316#endif
317
318void core_idle(void);
319void core_wake(IF_COP_VOID(unsigned int core));
320
321/* Initialize the scheduler */
322void init_threads(void) INIT_ATTR;
323
324/* Allocate a thread in the scheduler */
325#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
326unsigned int create_thread(void (*function)(void),
327 void* stack, size_t stack_size,
328 unsigned flags, const char *name
329 IF_PRIO(, int priority)
330 IF_COP(, unsigned int core));
331
332/* Set and clear the CPU frequency boost flag for the calling thread */
333#ifdef HAVE_SCHEDULER_BOOSTCTRL
334void trigger_cpu_boost(void);
335void cancel_cpu_boost(void);
336#else
337#define trigger_cpu_boost() do { } while(0)
338#define cancel_cpu_boost() do { } while(0)
339#endif
340/* Return thread entry from id */
341struct thread_entry *thread_id_entry(unsigned int thread_id);
342/* Make a frozed thread runnable (when started with CREATE_THREAD_FROZEN).
343 * Has no effect on a thread not frozen. */
344void thread_thaw(unsigned int thread_id);
345/* Wait for a thread to exit */
346void thread_wait(unsigned int thread_id);
347/* Exit the current thread */
348void thread_exit(void) NORETURN_ATTR;
349#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
350#define ALLOW_REMOVE_THREAD
351/* Remove a thread from the scheduler */
352void remove_thread(unsigned int thread_id);
353#endif
354
355/* Switch to next runnable thread */
356void switch_thread(void);
357/* Blocks a thread for at least the specified number of ticks (0 = wait until
358 * next tick) */
359void sleep_thread(int ticks);
360/* Indefinitely blocks the current thread on a thread queue */
361void block_thread(struct thread_entry *current);
362/* Blocks the current thread on a thread queue until explicitely woken or
363 * the timeout is reached */
364void block_thread_w_tmo(struct thread_entry *current, int timeout);
365
366/* Return bit flags for thread wakeup */
367#define THREAD_NONE 0x0 /* No thread woken up (exclusive) */
368#define THREAD_OK 0x1 /* A thread was woken up */
369#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
370 higher priority than current were woken) */
371
372/* A convenience function for waking an entire queue of threads. */
373unsigned int thread_queue_wake(struct thread_entry **list);
374
375/* Wakeup a thread at the head of a list */
376unsigned int wakeup_thread(struct thread_entry **list);
377
378#ifdef HAVE_PRIORITY_SCHEDULING
379int thread_set_priority(unsigned int thread_id, int priority);
380int thread_get_priority(unsigned int thread_id);
381#endif /* HAVE_PRIORITY_SCHEDULING */
382#ifdef HAVE_IO_PRIORITY
383void thread_set_io_priority(unsigned int thread_id, int io_priority);
384int thread_get_io_priority(unsigned int thread_id);
385#endif /* HAVE_IO_PRIORITY */
386#if NUM_CORES > 1
387unsigned int switch_core(unsigned int new_core);
388#endif
389
390/* Return the id of the calling thread. */
391unsigned int thread_self(void);
392
393/* Return the thread_entry for the calling thread.
394 * INTERNAL: Intended for use by kernel and not for programs. */
395struct thread_entry* thread_self_entry(void);
396
397/* Debugging info - only! */
398int thread_stack_usage(const struct thread_entry *thread);
399#if NUM_CORES > 1
400int idle_stack_usage(unsigned int core);
401#endif
402void thread_get_name(char *buffer, int size,
403 struct thread_entry *thread);
404#ifdef RB_PROFILE
405void profile_thread(void);
406#endif
407
408#endif /* THREAD_H */