Subversion Repositories freemyipod

Rev

Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
14 theseven 1
//
2
//
3
//    Copyright 2010 TheSeven
4
//
5
//
6
//    This file is part of emBIOS.
7
//
8
//    emBIOS is free software: you can redistribute it and/or
9
//    modify it under the terms of the GNU General Public License as
10
//    published by the Free Software Foundation, either version 2 of the
11
//    License, or (at your option) any later version.
12
//
13
//    emBIOS is distributed in the hope that it will be useful,
14
//    but WITHOUT ANY WARRANTY; without even the implied warranty of
15
//    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
16
//    See the GNU General Public License for more details.
17
//
18
//    You should have received a copy of the GNU General Public License along
19
//    with emBIOS.  If not, see <http://www.gnu.org/licenses/>.
20
//
21
//
22
 
23
 
24
#include "global.h"
25
#include "thread.h"
26
#include "timer.h"
27
#include "panic.h"
28
#include "util.h"
29
 
30
 
31
// Static pool of all thread control blocks; slot 0 is the idle thread
// (set up by scheduler_init).
struct scheduler_thread scheduler_threads[MAX_THREADS] IDATA_ATTR;
// Thread currently executing (selected by scheduler_switch).
struct scheduler_thread* current_thread IDATA_ATTR;
// USEC_TIMER timestamp of the last scheduler accounting tick.
uint32_t last_tick IDATA_ATTR;
34
 
35
 
36
void mutex_init(struct mutex* obj)
37
{
38
    memset(obj, 0, sizeof(struct mutex));
39
}
40
 
41
void mutex_add_to_queue(struct mutex* obj, struct scheduler_thread* thread)
42
{
43
    struct scheduler_thread* t;
44
    if (!obj->waiters || obj->waiters->priority < thread->priority)
45
    {
46
        thread->queue_next = obj->waiters;
47
        obj->waiters = thread;
48
    }
49
    else
50
    {
51
        t = obj->waiters;
52
        while (t->queue_next && t->queue_next->priority > thread->priority)
53
            t = t->queue_next;
54
        thread->queue_next = t->queue_next;
55
        t->queue_next = thread;
56
    }
57
}
58
 
59
void mutex_remove_from_queue(struct mutex* obj, struct scheduler_thread* thread)
60
{
61
    struct scheduler_thread* t;
62
    if (!obj->waiters) return;
63
    if (obj->waiters == thread) obj->waiters = thread->queue_next;
64
    else
65
    {
66
        t = obj->waiters;
67
        while (t->queue_next)
68
        {
69
            if (t->queue_next == thread) t->queue_next = thread->queue_next;
70
            t = t->queue_next;
71
        }
72
    }
73
}
74
 
75
int mutex_lock(struct mutex* obj, int timeout)
76
{
77
    int ret = THREAD_OK;
78
    struct scheduler_thread* thread;
79
    uint32_t mode = enter_critical_section();
80
 
81
    if (!obj->count)
82
    {
83
        obj->count = 1;
84
        obj->owner = current_thread;
85
    }
86
    else if (obj->owner == current_thread) obj->count++;
87
    else
88
    {
89
        if (timeout)
90
        {
91
            current_thread->state = THREAD_BLOCKED;
92
            current_thread->block_type = THREAD_BLOCK_MUTEX;
93
            current_thread->blocked_by = obj;
94
            current_thread->timeout = timeout;
95
            current_thread->blocked_since = USEC_TIMER;
96
            mutex_add_to_queue(obj, current_thread);
97
            leave_critical_section(mode);
98
            context_switch();
99
            if (obj->owner != current_thread) return THREAD_TIMEOUT;
100
            return THREAD_OK;
101
        }
102
        else ret = THREAD_TIMEOUT;
103
    }
104
 
105
    leave_critical_section(mode);
106
    return ret;
107
}
108
 
109
/* Release one recursion level of a mutex held by the calling thread.
 *
 * Returns the remaining recursion count while the mutex is still held,
 * or THREAD_OK once fully released. Panics (PANIC_KILLTHREAD) if the
 * mutex is not locked, or is owned by a different thread.
 *
 * NOTE(review): the code after each panicf assumes panicf does not
 * return on the PANIC_KILLTHREAD path — confirm.
 */
int mutex_unlock(struct mutex* obj)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (!obj->count)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Trying to unlock non-owned mutex! (%08X)", obj);
    }

    if (obj->owner != current_thread)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Trying to unlock mutex owned by different thread! (%08X)", obj);
    }

    /* Still recursively held: report how many levels remain. */
    if (--(obj->count)) ret = obj->count;
    else if (obj->waiters)
    {
        /* Hand ownership directly to the head waiter (highest priority,
           see mutex_add_to_queue), make it runnable, and unlink it. */
        obj->count = 1;
        obj->owner = obj->waiters;
        obj->waiters->state = THREAD_READY;
        obj->waiters->block_type = THREAD_NOT_BLOCKED;
        obj->waiters->blocked_by = NULL;
        obj->waiters->timeout = 0;
        obj->waiters = obj->waiters->queue_next;
    }

    leave_critical_section(mode);
    return ret;
}
141
 
142
void wakeup_init(struct wakeup* obj)
143
{
144
    memset(obj, 0, sizeof(struct wakeup));
145
}
146
 
147
/* Wait for a wakeup object to be signalled.
 *
 * A pending signal is consumed immediately. Otherwise, timeout == 0 is a
 * poll (returns THREAD_TIMEOUT at once) and any other value blocks for up
 * to that many microseconds (timeout == -1 never expires, as
 * scheduler_switch skips expiry for timeout == -1). Only one thread may
 * wait on a given wakeup at a time; a second waiter is a panic.
 *
 * Returns THREAD_OK when signalled, THREAD_TIMEOUT on timeout/poll miss.
 *
 * NOTE(review): obj->waiter is not cleared on the wake-up paths here —
 * presumably reset elsewhere (or by the signalling side); confirm,
 * otherwise the next wakeup_wait would hit the multiple-waiters panic.
 */
int wakeup_wait(struct wakeup* obj, int timeout)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (obj->waiter)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Multiple threads waiting single wakeup! (%08X)", obj);
    }

    /* Consume an already-pending signal without blocking. */
    if (obj->signalled) obj->signalled = false;
    else
    {
        if (timeout)
        {
            /* Block this thread on the wakeup and switch away; we resume
               via wakeup_signal or via timeout expiry in scheduler_switch. */
            current_thread->state = THREAD_BLOCKED;
            current_thread->block_type = THREAD_BLOCK_WAKEUP;
            current_thread->blocked_by = obj;
            current_thread->timeout = timeout;
            current_thread->blocked_since = USEC_TIMER;
            obj->waiter = current_thread;
            leave_critical_section(mode);
            context_switch();
            /* Distinguish "signalled" from "timed out" by the flag. */
            if (!obj->signalled) return THREAD_TIMEOUT;
            obj->signalled = false;
            return THREAD_OK;
        }
        else ret = THREAD_TIMEOUT;
    }

    leave_critical_section(mode);
    return ret;
}
181
 
182
int wakeup_signal(struct wakeup* obj)
183
{
184
    int ret = THREAD_OK;
185
    uint32_t mode = enter_critical_section();
186
 
187
    obj->signalled = true;
188
    if (obj->waiter)
189
    {
190
        obj->waiter->state = THREAD_READY;
191
        obj->waiter->block_type = THREAD_NOT_BLOCKED;
192
        obj->waiter->blocked_by = NULL;
193
        obj->waiter->timeout = 0;
194
        ret = THREAD_FOUND;
195
    }
196
 
197
    leave_critical_section(mode);
198
    return ret;
199
}
200
 
201
/* Block the calling thread for (at least) usecs microseconds, then let
 * scheduler_switch make it runnable again when the timeout expires.
 * NOTE(review): usecs == -1 presumably sleeps forever, since
 * scheduler_switch never expires a timeout of -1 — confirm intent. */
void sleep(int usecs)
{
    uint32_t mode = enter_critical_section();
    current_thread->state = THREAD_BLOCKED;
    current_thread->block_type = THREAD_BLOCK_SLEEP;
    current_thread->timeout = usecs;
    current_thread->blocked_since = USEC_TIMER;
    leave_critical_section(mode);
    context_switch();
}
211
 
212
void scheduler_init(void)
213
{
214
    memset(scheduler_threads, 0, sizeof(scheduler_threads));
215
    last_tick = USEC_TIMER;
216
    current_thread = scheduler_threads;
217
    current_thread->state = THREAD_RUNNING;
218
    current_thread->startusec = last_tick;
219
    current_thread->name = "idle thread";
220
    current_thread->stack = (uint32_t*)-1;
221
    setup_tick();
222
}
223
 
224
void scheduler_switch(int thread)
225
{
226
    int i;
227
    uint32_t score, best;
228
    uint32_t usec = USEC_TIMER;
229
    if (current_thread->state == THREAD_RUNNING) current_thread->state = THREAD_READY;
230
    current_thread->cputime_total += usec - current_thread->startusec;
231
    current_thread->cputime_current += usec - current_thread->startusec;
232
    if ((int)current_thread->stack != -1 && *current_thread->stack != 0xaffebeaf)
233
        panicf(PANIC_KILLPROCESS, "Stack overflow (%s)", current_thread->name);
234
 
235
    if (usec - last_tick > SCHEDULER_TICK)
236
    {
237
        for (i = 0; i < MAX_THREADS; i++)
238
        {
239
            scheduler_threads[i].cpuload = scheduler_threads[i].cputime_current / SCHEDULER_TICK;
240
            scheduler_threads[i].cputime_current = 0;
241
        }
242
    }
243
 
244
    for (i = 0; i < MAX_THREADS; i++)
245
        if (scheduler_threads[i].state == THREAD_BLOCKED
246
         && scheduler_threads[i].timeout != -1
247
         && TIME_AFTER(usec, scheduler_threads[i].blocked_since
248
                           + scheduler_threads[i].timeout))
249
        {
250
            if (scheduler_threads[i].block_type == THREAD_BLOCK_MUTEX)
251
                mutex_remove_from_queue((struct mutex*)scheduler_threads[i].blocked_by,
252
                                        &scheduler_threads[i]);
253
            scheduler_threads[i].state = THREAD_READY;
254
            scheduler_threads[i].block_type = THREAD_NOT_BLOCKED;
255
            scheduler_threads[i].blocked_by = NULL;
256
            scheduler_threads[i].timeout = 0;
257
        }
258
 
259
    if (thread >= 0 && thread < MAX_THREADS && scheduler_threads[thread].state == THREAD_READY)
260
        current_thread = &scheduler_threads[thread];
261
    else
262
    {
263
        thread = 0;
264
        best = 0xffffffff;
265
        for (i = 0; i < MAX_THREADS; i++)
266
            if (scheduler_threads[i].state == THREAD_READY && scheduler_threads[i].priority)
267
            {
268
                score = scheduler_threads[i].cputime_current / scheduler_threads[i].priority;
269
                if (score < best)
270
                {
271
                    score = best;
272
                    thread = i;
273
                }
274
            }
275
    }
276
 
277
    current_thread = &scheduler_threads[thread];
278
    current_thread->state = THREAD_RUNNING;
279
    current_thread->startusec = USEC_TIMER;
280
}
281
 
282
int thread_create(const char* name, const void* code, void* stack,
283
                  int stacksize, int priority, bool run)
284
{
285
    int ret = NO_MORE_THREADS;
286
    int i;
287
 
288
    for (i = 0; i < stacksize >> 2; i ++) ((uint32_t*)stack)[i] = 0xaffebeaf;
289
 
290
    uint32_t mode = enter_critical_section();
291
 
292
    for (i = 0; i < MAX_THREADS; i++)
293
        if (scheduler_threads[i].state == THREAD_FREE)
294
        {
295
            ret = i;
296
            memset(&scheduler_threads[i], 0, sizeof(struct scheduler_thread));
297
            scheduler_threads[i].state = run ? THREAD_READY : THREAD_SUSPENDED;
298
            scheduler_threads[i].name = name;
299
            scheduler_threads[i].priority = priority;
300
            scheduler_threads[i].cpsr = 0x13;
301
            scheduler_threads[i].regs[15] = (uint32_t)code;
302
            scheduler_threads[i].regs[14] = (uint32_t)thread_exit;
303
            scheduler_threads[i].regs[13] = (uint32_t)stack + stacksize;
304
            scheduler_threads[i].stack = stack;
305
            break;
306
        }
307
 
308
    leave_critical_section(mode);
309
    return ret;
310
}
311
 
312
int thread_suspend(int thread)
313
{
314
    int ret = THREAD_OK;
315
    struct scheduler_thread* t = &scheduler_threads[thread];
316
    bool needsswitch = false;
317
    uint32_t mode = enter_critical_section();
318
 
319
    if (thread == -1) t = current_thread;
320
    else if (thread < 0 || thread >= MAX_THREADS) ret = INVALID_THREAD;
321
    else if (t->state == THREAD_FREE) ret = INVALID_THREAD;
322
    else if (t->state == THREAD_SUSPENDED) ret = ALREADY_SUSPENDED;
323
    if (ret == THREAD_OK)
324
    {
325
        if (t->state == THREAD_RUNNING) needsswitch = true;
326
        else if (t->state == THREAD_BLOCKED)
327
        {
328
            if (t->block_type == THREAD_BLOCK_SLEEP)
329
                t->timeout -= USEC_TIMER - t->blocked_since;
330
            else if (t->block_type == THREAD_BLOCK_MUTEX)
331
            {
332
                mutex_remove_from_queue((struct mutex*)t->blocked_by, t);
333
                t->timeout -= USEC_TIMER - t->blocked_since;
334
            }
335
            else if (t->block_type == THREAD_BLOCK_WAKEUP)
336
                t->timeout -= USEC_TIMER - t->blocked_since;
337
        }
338
        t->state = THREAD_SUSPENDED;
339
    }
340
 
341
    leave_critical_section(mode);
342
 
343
    if (needsswitch) context_switch();
344
 
345
    return ret;
346
}
347
 
348
int thread_resume(int thread)
349
{
350
    int ret = THREAD_OK;
351
    struct scheduler_thread* t = &scheduler_threads[thread];
352
    bool needsswitch = false;
353
    uint32_t mode = enter_critical_section();
354
 
355
    if (thread == -1) t = current_thread;
356
    else if (thread < 0 || thread >= MAX_THREADS) ret = INVALID_THREAD;
357
    else if (t->state == THREAD_FREE) ret = INVALID_THREAD;
358
    else if (t->state != THREAD_SUSPENDED) ret = ALREADY_RESUMED;
359
    if (ret == THREAD_OK)
360
    {
361
        if (t->block_type == THREAD_BLOCK_SLEEP)
362
            t->blocked_since = USEC_TIMER;
363
        else if (t->block_type == THREAD_BLOCK_MUTEX)
364
        {
365
            mutex_add_to_queue((struct mutex*)t->blocked_by, t);
366
            t->blocked_since = USEC_TIMER;
367
            t->state = THREAD_BLOCKED;
368
        }
369
        else if (t->block_type == THREAD_BLOCK_WAKEUP)
370
        {
371
            t->blocked_since = USEC_TIMER;
372
            t->state = THREAD_BLOCKED;
373
        }
374
        else t->state = THREAD_READY;
375
    }
376
 
377
    leave_critical_section(mode);
378
    return ret;
379
}
380
 
381
int thread_terminate(int thread)
382
{
383
    int ret = THREAD_OK;
384
    struct scheduler_thread* t = &scheduler_threads[thread];
385
    bool needsswitch = false;
386
    uint32_t mode = enter_critical_section();
387
 
388
    if (thread == -1) t = current_thread;
389
    else if (thread < 0 || thread >= MAX_THREADS) ret = INVALID_THREAD;
390
    else if (t->state == THREAD_FREE) ret = INVALID_THREAD;
391
    if (ret == THREAD_OK)
392
    {
393
        if (t->state == THREAD_RUNNING) needsswitch = true;
394
        else if (t->state == THREAD_BLOCKED)
395
        {
396
            if (t->block_type == THREAD_BLOCK_MUTEX)
397
                mutex_remove_from_queue((struct mutex*)t->blocked_by, t);
398
            else if (t->block_type == THREAD_BLOCK_WAKEUP)
399
                ((struct wakeup*)t->blocked_by)->waiter = NULL;
400
        }
401
        t->state = THREAD_FREE;
402
    }
403
 
404
    leave_critical_section(mode);
405
 
406
    if (needsswitch) context_switch();
407
 
408
    return ret;
409
}
410
 
411
/* Terminate the calling thread. Installed as the return address (LR) of
 * every thread created by thread_create, so a thread function that
 * returns ends up here. Fix: declare as (void) — an empty parameter
 * list in C leaves the parameters unspecified. */
void thread_exit(void)
{
    thread_terminate(-1);
}