summaryrefslogtreecommitdiff
path: root/firmware/kernel/pthread/thread.c
diff options
context:
space:
mode:
Diffstat (limited to 'firmware/kernel/pthread/thread.c')
-rw-r--r--firmware/kernel/pthread/thread.c204
1 file changed, 0 insertions, 204 deletions
diff --git a/firmware/kernel/pthread/thread.c b/firmware/kernel/pthread/thread.c
deleted file mode 100644
index 71cbd1d136..0000000000
--- a/firmware/kernel/pthread/thread.c
+++ /dev/null
@@ -1,204 +0,0 @@
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <errno.h>
#include <time.h>
#include <pthread.h>
#include <semaphore.h>
#include "thread-internal.h"
#include "kernel.h"
8
#define NSEC_PER_SEC 1000000000L

/* Advance *a by ns nanoseconds, renormalizing so that tv_nsec stays
 * within [0, NSEC_PER_SEC). */
static inline void timespec_add_ns(struct timespec *a, uint64_t ns)
{
    long long total = a->tv_nsec + ns;

    a->tv_sec  += total / NSEC_PER_SEC;
    a->tv_nsec  = total % NSEC_PER_SEC;
}
16
/* set once by init_threads(); create_thread() aborts while it is still 0 */
static int threads_initialized;

/* One-shot handshake package passed from create_thread() to trampoline();
 * trampoline() frees it after consuming the contents. */
struct thread_init_data {
    void (*function)(void);     /* thread payload to run */
    bool start_frozen;          /* stay blocked until thread_thaw()? */
    sem_t init_sem;             /* posted once the new thread is live */
    struct thread_entry *entry; /* this thread's kernel-side state */
};

/* thread-local pointer to the calling thread's own entry */
__thread struct thread_entry *_current;
27
/* Return the calling thread's id: the pthread handle narrowed to unsigned. */
unsigned int thread_self(void)
{
    pthread_t self = pthread_self();

    return (unsigned) self;
}
32
/* Fixed-size map from pthread id to this layer's thread_entry.
 * Slot 0 is claimed for the main thread by init_threads(); create_thread()
 * claims later slots by finding one whose thread_id is still 0 (unused). */
static struct thread_entry_item {
    unsigned thread_id;         /* pthread_self() value; 0 = slot free */
    struct thread_entry *entry;
} entry_lookup[32];
40static struct thread_entry_item *__find_thread_entry(unsigned thread_id)
41{
42 int i;
43
44 for (i = 0; i < 32; i++)
45 {
46 if (entry_lookup[i].thread_id == thread_id)
47 return &entry_lookup[i];
48 }
49 return NULL;
50}
51
52static struct thread_entry *find_thread_entry(unsigned thread_id)
53{
54 return __find_thread_entry(thread_id)->entry;
55}
56
/* Entry point every created pthread starts in.  Publishes the thread_entry
 * in TLS, optionally freezes (blocks) until thread_thaw(), signals the
 * creator via init_sem, then runs the payload function. */
static void *trampoline(void *arg)
{
    struct thread_init_data *data = arg;

    void (*thread_fn)(void) = data->function;

    /* make this thread findable through the TLS current-thread pointer */
    _current = data->entry;

    if (data->start_frozen)
    {
        /* Block on a stack-local corelock until thread_thaw() wakes us.
         * The lock is published through _current->lock so the thawing
         * thread can take it before calling wakeup_thread(). */
        struct corelock thaw_lock;
        corelock_init(&thaw_lock);
        corelock_lock(&thaw_lock);

        _current->lock = &thaw_lock;
        sem_post(&data->init_sem);
        block_thread_switch(_current, _current->lock);
        _current->lock = NULL;

        corelock_unlock(&thaw_lock);
    }
    else
        sem_post(&data->init_sem);

    /* NOTE(review): init_sem lives inside *data and create_thread() does
     * sem_wait(&data->init_sem) after we post; in the non-frozen path the
     * free(data) below can race that sem_wait (use-after-free on the
     * semaphore) — confirm the intended ordering guarantees. */
    free(data);
    thread_fn();

    return NULL;
}
86
87void thread_thaw(unsigned int thread_id)
88{
89 struct thread_entry *e = find_thread_entry(thread_id);
90 if (e->lock)
91 {
92 corelock_lock(e->lock);
93 wakeup_thread(e);
94 corelock_unlock(e->lock);
95 }
96 /* else: no lock. must be running already */
97}
98
99void init_threads(void)
100{
101 struct thread_entry_item *item0 = &entry_lookup[0];
102 item0->entry = calloc(1, sizeof(struct thread_entry));
103 item0->thread_id = pthread_self();
104
105 _current = item0->entry;
106 pthread_cond_init(&item0->entry->cond, NULL);
107 threads_initialized = 1;
108}
109
110
111unsigned int create_thread(void (*function)(void),
112 void* stack, size_t stack_size,
113 unsigned flags, const char *name
114 //~ IF_PRIO(, int priority)
115 IF_COP(, unsigned int core))
116{
117 pthread_t retval;
118
119 struct thread_init_data *data = calloc(1, sizeof(struct thread_init_data));
120 struct thread_entry *entry = calloc(1, sizeof(struct thread_entry));
121 struct thread_entry_item *item;
122
123 if (!threads_initialized)
124 abort();
125
126 data->function = function;
127 data->start_frozen = flags & CREATE_THREAD_FROZEN;
128 data->entry = entry;
129 pthread_cond_init(&entry->cond, NULL);
130 entry->runnable = true;
131
132 sem_init(&data->init_sem, 0, 0);
133
134 if (pthread_create(&retval, NULL, trampoline, data) < 0)
135 return -1;
136
137 sem_wait(&data->init_sem);
138
139 item = __find_thread_entry(0);
140 item->thread_id = retval;
141 item->entry = entry;
142
143 pthread_setname_np(retval, name);
144
145
146 return retval;
147}
148
/* for block_thread(), _w_tmo() and wakeup_thread() t->lock must point
 * to a corelock instance, and this corelock must be held by the caller */

/* Mark t not runnable, register it on its wait queue (if any), and wait
 * on its condvar until wakeup_thread() flips runnable back on. */
void block_thread_switch(struct thread_entry *t, struct corelock *cl)
{
    t->runnable = false;
    /* NOTE(review): one-argument wait_queue_register(t) here vs. the
     * two-argument wait_queue_register(t->wqp, t) in
     * block_thread_switch_w_tmo() — confirm which prototype
     * thread-internal.h declares. */
    if (wait_queue_ptr(t))
        wait_queue_register(t);
    /* loop guards against spurious condvar wakeups */
    while(!t->runnable)
        pthread_cond_wait(&t->cond, &cl->mutex);
}
159
/* Like block_thread_switch(), but give up after `timeout` kernel ticks.
 * The caller must hold cl, which protects t (see comment above). */
void block_thread_switch_w_tmo(struct thread_entry *t, int timeout,
                               struct corelock *cl)
{
    int err = 0;
    struct timespec ts;

    /* absolute deadline; pthread_cond_timedwait() measures against
     * CLOCK_REALTIME by default, matching this clock_gettime() call */
    clock_gettime(CLOCK_REALTIME, &ts);
    /* NOTE(review): timeout * (NSEC_PER_SEC/HZ) is computed in the
     * promoted integer type before widening to uint64_t; large timeouts
     * could overflow on ILP32 targets — confirm tick ranges. */
    timespec_add_ns(&ts, timeout * (NSEC_PER_SEC/HZ));

    t->runnable = false;
    wait_queue_register(t->wqp, t);
    /* wait until woken or the deadline passes (spurious-wakeup safe) */
    while(!t->runnable && !err)
        err = pthread_cond_timedwait(&t->cond, &cl->mutex, &ts);

    if (err == ETIMEDOUT)
    { /* the thread timed out and was not explicitly woken up.
       * we need to do this now to mark it runnable again */
        t->runnable = true;
        /* NOTE: objects do their own removal upon timer expiration */
    }
}
181
182unsigned int wakeup_thread(struct thread_entry *t)
183{
184 if (t->wqp)
185 wait_queue_remove(t);
186 t->runnable = true;
187 pthread_cond_signal(&t->cond);
188 return THREAD_OK;
189}
190
191
/* Cooperative yield is a deliberate no-op here: the host's scheduler
 * preempts threads on its own. */
void yield(void)
{
}
193
194unsigned sleep(unsigned ticks)
195{
196 struct timespec ts;
197
198 ts.tv_sec = ticks/HZ;
199 ts.tv_nsec = (ticks % HZ) * (NSEC_PER_SEC/HZ);
200
201 nanosleep(&ts, NULL);
202
203 return 0;
204}