Subversion Repositories freemyipod

Rev

Go to most recent revision | Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
14 theseven 1
//
2
//
3
//    Copyright 2010 TheSeven
4
//
5
//
6
//    This file is part of emBIOS.
7
//
8
//    emBIOS is free software: you can redistribute it and/or
9
//    modify it under the terms of the GNU General Public License as
10
//    published by the Free Software Foundation, either version 2 of the
11
//    License, or (at your option) any later version.
12
//
13
//    emBIOS is distributed in the hope that it will be useful,
14
//    but WITHOUT ANY WARRANTY; without even the implied warranty of
15
//    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
16
//    See the GNU General Public License for more details.
17
//
18
//    You should have received a copy of the GNU General Public License along
19
//    with emBIOS.  If not, see <http://www.gnu.org/licenses/>.
20
//
21
//
22
 
23
 
24
#include "global.h"
25
#include "thread.h"
26
#include "timer.h"
27
#include "panic.h"
28
#include "util.h"
29
 
30
 
15 theseven 31
// Thread control blocks; slot 0 is the idle thread set up by scheduler_init.
struct scheduler_thread scheduler_threads[MAX_THREADS] IBSS_ATTR;
// The thread currently owning the CPU (set by scheduler_switch).
struct scheduler_thread* current_thread IBSS_ATTR;
// USEC_TIMER timestamp of the last CPU-load accounting tick (scheduler_switch).
uint32_t last_tick IBSS_ATTR;
// While true, scheduler_switch always picks thread 0 (see scheduler_freeze).
bool scheduler_frozen IBSS_ATTR;
// Wakeup used to notify the debugger thread on stack overflow; defined elsewhere.
extern struct wakeup dbgwakeup;
14 theseven 36
 
37
 
38
void mutex_init(struct mutex* obj)
39
{
40
    memset(obj, 0, sizeof(struct mutex));
41
}
42
 
43
void mutex_add_to_queue(struct mutex* obj, struct scheduler_thread* thread)
44
{
45
    struct scheduler_thread* t;
15 theseven 46
    if (!obj->waiters || obj->waiters->priority <= thread->priority)
14 theseven 47
    {
48
        thread->queue_next = obj->waiters;
49
        obj->waiters = thread;
50
    }
51
    else
52
    {
53
        t = obj->waiters;
54
        while (t->queue_next && t->queue_next->priority > thread->priority)
55
            t = t->queue_next;
56
        thread->queue_next = t->queue_next;
57
        t->queue_next = thread;
58
    }
59
}
60
 
61
void mutex_remove_from_queue(struct mutex* obj, struct scheduler_thread* thread)
62
{
63
    struct scheduler_thread* t;
64
    if (!obj->waiters) return;
65
    if (obj->waiters == thread) obj->waiters = thread->queue_next;
66
    else
67
    {
68
        t = obj->waiters;
69
        while (t->queue_next)
70
        {
71
            if (t->queue_next == thread) t->queue_next = thread->queue_next;
72
            t = t->queue_next;
73
        }
74
    }
75
}
76
 
77
// Acquire a mutex, blocking for at most "timeout" microseconds.
// timeout == 0 means "try once, do not block"; timeout == -1 appears to mean
// "wait forever" (scheduler_switch skips timeout expiry for -1 — TODO confirm).
// The mutex is recursive: the current owner may lock again, bumping count.
// Returns THREAD_OK on success, THREAD_TIMEOUT otherwise.
int mutex_lock(struct mutex* obj, int timeout)
{
    int ret = THREAD_OK;
    struct scheduler_thread* thread;  // NOTE(review): unused local
    uint32_t mode = enter_critical_section();

    if (!obj->count)
    {
        // Uncontended: take ownership immediately.
        obj->count = 1;
        obj->owner = current_thread;
    }
    else if (obj->owner == current_thread) obj->count++;  // recursive re-lock
    else
    {
        if (timeout)
        {
            // Contended: block this thread on the mutex and yield. Either
            // mutex_unlock hands us ownership, or scheduler_switch times us
            // out (and dequeues us from the waiter list).
            current_thread->state = THREAD_BLOCKED;
            current_thread->block_type = THREAD_BLOCK_MUTEX;
            current_thread->blocked_by = obj;
            current_thread->timeout = timeout;
            current_thread->blocked_since = USEC_TIMER;
            mutex_add_to_queue(obj, current_thread);
            leave_critical_section(mode);
            context_switch();
            // Ownership test distinguishes a successful handoff from a timeout.
            if (obj->owner != current_thread) return THREAD_TIMEOUT;
            return THREAD_OK;
        }
        else ret = THREAD_TIMEOUT;  // non-blocking attempt failed
    }

    leave_critical_section(mode);
    return ret;
}
110
 
111
int mutex_unlock(struct mutex* obj)
112
{
113
    int ret = THREAD_OK;
114
    uint32_t mode = enter_critical_section();
115
 
116
    if (!obj->count)
117
    {
118
        leave_critical_section(mode);
119
        panicf(PANIC_KILLTHREAD, "Trying to unlock non-owned mutex! (%08X)", obj);
120
    }
121
 
122
    if (obj->owner != current_thread)
123
    {
124
        leave_critical_section(mode);
125
        panicf(PANIC_KILLTHREAD, "Trying to unlock mutex owned by different thread! (%08X)", obj);
126
    }
127
 
128
    if (--(obj->count)) ret = obj->count;
129
    else if (obj->waiters)
130
    {
131
        obj->count = 1;
132
        obj->owner = obj->waiters;
133
        obj->waiters->state = THREAD_READY;
134
        obj->waiters->block_type = THREAD_NOT_BLOCKED;
135
        obj->waiters->blocked_by = NULL;
136
        obj->waiters->timeout = 0;
137
        obj->waiters = obj->waiters->queue_next;
138
    }
139
 
140
    leave_critical_section(mode);
141
    return ret;
142
}
143
 
144
void wakeup_init(struct wakeup* obj)
145
{
146
    memset(obj, 0, sizeof(struct wakeup));
147
}
148
 
149
// Wait for "obj" to be signalled, for at most "timeout" microseconds
// (0 = poll only; -1 presumably means forever, since scheduler_switch skips
// timeout expiry for -1 — TODO confirm). Only one thread may wait on a
// wakeup object at a time; a second concurrent waiter panics.
// A signal that arrived before the call is consumed immediately.
// Returns THREAD_OK when signalled, THREAD_TIMEOUT otherwise.
int wakeup_wait(struct wakeup* obj, int timeout)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (obj->waiter)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Multiple threads waiting single wakeup! (%08X)", obj);
    }

    if (obj->signalled) obj->signalled = false;  // consume a pending signal
    else
    {
        if (timeout)
        {
            // Register as the (single) waiter and block until wakeup_signal
            // or a scheduler timeout makes us runnable again.
            current_thread->state = THREAD_BLOCKED;
            current_thread->block_type = THREAD_BLOCK_WAKEUP;
            current_thread->blocked_by = obj;
            current_thread->timeout = timeout;
            current_thread->blocked_since = USEC_TIMER;
            obj->waiter = current_thread;
            leave_critical_section(mode);
            context_switch();
            obj->waiter = NULL;
            // The signalled flag tells a real signal apart from a timeout.
            if (!obj->signalled) return THREAD_TIMEOUT;
            obj->signalled = false;
            return THREAD_OK;
        }
        else ret = THREAD_TIMEOUT;  // poll failed
    }

    leave_critical_section(mode);
    return ret;
}
184
 
185
int wakeup_signal(struct wakeup* obj)
186
{
187
    int ret = THREAD_OK;
188
    uint32_t mode = enter_critical_section();
189
 
190
    obj->signalled = true;
191
    if (obj->waiter)
192
    {
193
        obj->waiter->state = THREAD_READY;
194
        obj->waiter->block_type = THREAD_NOT_BLOCKED;
195
        obj->waiter->blocked_by = NULL;
196
        obj->waiter->timeout = 0;
197
        ret = THREAD_FOUND;
198
    }
199
 
200
    leave_critical_section(mode);
201
    return ret;
202
}
203
 
204
void sleep(int usecs)
205
{
15 theseven 206
    if (usecs)
207
    {
208
        uint32_t mode = enter_critical_section();
209
        current_thread->state = THREAD_BLOCKED;
210
        current_thread->block_type = THREAD_BLOCK_SLEEP;
211
        current_thread->timeout = usecs;
212
        current_thread->blocked_since = USEC_TIMER;
213
        leave_critical_section(mode);
214
    }
14 theseven 215
    context_switch();
216
}
217
 
218
void scheduler_init(void)
219
{
220
    memset(scheduler_threads, 0, sizeof(scheduler_threads));
34 theseven 221
    scheduler_frozen = false;
14 theseven 222
    last_tick = USEC_TIMER;
223
    current_thread = scheduler_threads;
224
    current_thread->state = THREAD_RUNNING;
225
    current_thread->startusec = last_tick;
226
    current_thread->name = "idle thread";
227
    current_thread->stack = (uint32_t*)-1;
228
    setup_tick();
229
}
230
 
54 theseven 231
bool scheduler_freeze(bool value)
34 theseven 232
{
54 theseven 233
    bool old = scheduler_frozen;
34 theseven 234
    scheduler_frozen = value;
54 theseven 235
    return old;
34 theseven 236
}
237
 
14 theseven 238
// Core scheduling decision, called on each context switch with the outgoing
// thread's registers already saved. "thread" is a hint: if it names a READY
// thread it is honored, otherwise the READY thread with the lowest
// cputime_current / priority ratio wins (thread 0 is the fallback).
// Also performs CPU-time accounting, stack overflow detection, per-tick
// load calculation, and timeout expiry for blocked threads.
void scheduler_switch(int thread)
{
    int i;
    uint32_t score, best;
    uint32_t usec = USEC_TIMER;
    // Charge the elapsed time slice to the outgoing thread.
    if (current_thread->state == THREAD_RUNNING) current_thread->state = THREAD_READY;
    current_thread->cputime_total += usec - current_thread->startusec;
    current_thread->cputime_current += usec - current_thread->startusec;
    // Stack canary check: thread_create pre-fills stacks with 0xaffebeaf;
    // a clobbered lowest word means the outgoing thread overran its stack.
    // stack == (uint32_t*)-1 (idle thread) is exempt from the check.
    if ((int)current_thread->stack != -1 && *current_thread->stack != 0xaffebeaf)
    {
        // Freeze all user threads, mark the offender defunct, and wake the
        // debugger so it can report the overflow.
        for (i = 0; i < MAX_THREADS; i++)
            if (scheduler_threads[i].type == USER_THREAD)
                scheduler_threads[i].state = THREAD_SUSPENDED;
        current_thread->state = THREAD_DEFUNCT;
        current_thread->block_type = THREAD_DEFUNCT_STKOV;
        wakeup_signal(&dbgwakeup);
    }

    // Once per SCHEDULER_TICK: convert accumulated CPU time into a load
    // figure and restart the accounting window for every thread.
    if (usec - last_tick > SCHEDULER_TICK)
    {
        last_tick = usec;
        for (i = 0; i < MAX_THREADS; i++)
        {
            scheduler_threads[i].cpuload = scheduler_threads[i].cputime_current / SCHEDULER_TICK;
            scheduler_threads[i].cputime_current = 0;
        }
    }

    if (scheduler_frozen) thread = 0;  // frozen: always run thread 0
    else
    {
        // Wake blocked threads whose timeout has expired (-1 = no timeout).
        for (i = 0; i < MAX_THREADS; i++)
            if (scheduler_threads[i].state == THREAD_BLOCKED
             && scheduler_threads[i].timeout != -1
             && TIME_AFTER(usec, scheduler_threads[i].blocked_since
                               + scheduler_threads[i].timeout))
            {
                // Mutex waiters must also leave the waiter queue, so that
                // mutex_lock can detect the timeout via obj->owner.
                if (scheduler_threads[i].block_type == THREAD_BLOCK_MUTEX)
                    mutex_remove_from_queue((struct mutex*)scheduler_threads[i].blocked_by,
                                            &scheduler_threads[i]);
                scheduler_threads[i].state = THREAD_READY;
                scheduler_threads[i].block_type = THREAD_NOT_BLOCKED;
                scheduler_threads[i].blocked_by = NULL;
                scheduler_threads[i].timeout = 0;
            }

        // Honor the hint if it names a runnable thread.
        if (thread >= 0 && thread < MAX_THREADS && scheduler_threads[thread].state == THREAD_READY)
            current_thread = &scheduler_threads[thread];  // (also set again below)
        else
        {
            // Otherwise pick the READY thread that has used the least CPU
            // relative to its priority. Priority-0 threads are never chosen
            // here, so thread 0 (idle) remains the default.
            thread = 0;
            best = 0xffffffff;
            for (i = 0; i < MAX_THREADS; i++)
                if (scheduler_threads[i].state == THREAD_READY && scheduler_threads[i].priority)
                {
                    score = scheduler_threads[i].cputime_current / scheduler_threads[i].priority;
                    if (score < best)
                    {
                        best = score;
                        thread = i;
                    }
                }
        }
    }

    // Install the winner and start its new time slice.
    current_thread = &scheduler_threads[thread];
    current_thread->state = THREAD_RUNNING;
    current_thread->startusec = USEC_TIMER;
}
307
 
308
int thread_create(const char* name, const void* code, void* stack,
15 theseven 309
                  int stacksize, enum thread_type type, int priority, bool run)
14 theseven 310
{
311
    int ret = NO_MORE_THREADS;
312
    int i;
313
 
314
    for (i = 0; i < stacksize >> 2; i ++) ((uint32_t*)stack)[i] = 0xaffebeaf;
315
 
316
    uint32_t mode = enter_critical_section();
317
 
318
    for (i = 0; i < MAX_THREADS; i++)
319
        if (scheduler_threads[i].state == THREAD_FREE)
320
        {
321
            ret = i;
322
            memset(&scheduler_threads[i], 0, sizeof(struct scheduler_thread));
323
            scheduler_threads[i].state = run ? THREAD_READY : THREAD_SUSPENDED;
15 theseven 324
            scheduler_threads[i].type = type;
14 theseven 325
            scheduler_threads[i].name = name;
326
            scheduler_threads[i].priority = priority;
43 theseven 327
            scheduler_threads[i].cpsr = 0x1f;
14 theseven 328
            scheduler_threads[i].regs[15] = (uint32_t)code;
329
            scheduler_threads[i].regs[14] = (uint32_t)thread_exit;
330
            scheduler_threads[i].regs[13] = (uint32_t)stack + stacksize;
331
            scheduler_threads[i].stack = stack;
332
            break;
333
        }
334
 
335
    leave_critical_section(mode);
336
    return ret;
337
}
338
 
339
int thread_suspend(int thread)
340
{
341
    int ret = THREAD_OK;
342
    struct scheduler_thread* t = &scheduler_threads[thread];
343
    bool needsswitch = false;
344
    uint32_t mode = enter_critical_section();
345
 
346
    if (thread == -1) t = current_thread;
347
    else if (thread < 0 || thread >= MAX_THREADS) ret = INVALID_THREAD;
348
    else if (t->state == THREAD_FREE) ret = INVALID_THREAD;
349
    else if (t->state == THREAD_SUSPENDED) ret = ALREADY_SUSPENDED;
350
    if (ret == THREAD_OK)
351
    {
352
        if (t->state == THREAD_RUNNING) needsswitch = true;
353
        else if (t->state == THREAD_BLOCKED)
354
        {
355
            if (t->block_type == THREAD_BLOCK_SLEEP)
15 theseven 356
            {
357
                if (t->timeout != -1) t->timeout -= USEC_TIMER - t->blocked_since;
358
            }
14 theseven 359
            else if (t->block_type == THREAD_BLOCK_MUTEX)
360
            {
361
                mutex_remove_from_queue((struct mutex*)t->blocked_by, t);
15 theseven 362
                if (t->timeout != -1) t->timeout -= USEC_TIMER - t->blocked_since;
14 theseven 363
            }
364
            else if (t->block_type == THREAD_BLOCK_WAKEUP)
15 theseven 365
            {
366
                if (t->timeout != -1) t->timeout -= USEC_TIMER - t->blocked_since;
367
            }
14 theseven 368
        }
369
        t->state = THREAD_SUSPENDED;
370
    }
371
 
372
    leave_critical_section(mode);
373
 
374
    if (needsswitch) context_switch();
375
 
376
    return ret;
377
}
378
 
379
int thread_resume(int thread)
380
{
381
    int ret = THREAD_OK;
382
    struct scheduler_thread* t = &scheduler_threads[thread];
383
    bool needsswitch = false;
384
    uint32_t mode = enter_critical_section();
385
 
386
    if (thread == -1) t = current_thread;
387
    else if (thread < 0 || thread >= MAX_THREADS) ret = INVALID_THREAD;
388
    else if (t->state == THREAD_FREE) ret = INVALID_THREAD;
389
    else if (t->state != THREAD_SUSPENDED) ret = ALREADY_RESUMED;
390
    if (ret == THREAD_OK)
391
    {
392
        if (t->block_type == THREAD_BLOCK_SLEEP)
393
            t->blocked_since = USEC_TIMER;
394
        else if (t->block_type == THREAD_BLOCK_MUTEX)
395
        {
396
            mutex_add_to_queue((struct mutex*)t->blocked_by, t);
397
            t->blocked_since = USEC_TIMER;
398
            t->state = THREAD_BLOCKED;
399
        }
400
        else if (t->block_type == THREAD_BLOCK_WAKEUP)
401
        {
402
            t->blocked_since = USEC_TIMER;
403
            t->state = THREAD_BLOCKED;
404
        }
405
        else t->state = THREAD_READY;
406
    }
407
 
408
    leave_critical_section(mode);
409
    return ret;
410
}
411
 
412
int thread_terminate(int thread)
413
{
414
    int ret = THREAD_OK;
415
    struct scheduler_thread* t = &scheduler_threads[thread];
416
    bool needsswitch = false;
417
    uint32_t mode = enter_critical_section();
418
 
419
    if (thread == -1) t = current_thread;
420
    else if (thread < 0 || thread >= MAX_THREADS) ret = INVALID_THREAD;
421
    else if (t->state == THREAD_FREE) ret = INVALID_THREAD;
422
    if (ret == THREAD_OK)
423
    {
424
        if (t->state == THREAD_RUNNING) needsswitch = true;
425
        else if (t->state == THREAD_BLOCKED)
426
        {
427
            if (t->block_type == THREAD_BLOCK_MUTEX)
428
                mutex_remove_from_queue((struct mutex*)t->blocked_by, t);
429
            else if (t->block_type == THREAD_BLOCK_WAKEUP)
430
                ((struct wakeup*)t->blocked_by)->waiter = NULL;
431
        }
432
        t->state = THREAD_FREE;
433
    }
434
 
435
    leave_critical_section(mode);
436
 
437
    if (needsswitch) context_switch();
438
 
439
    return ret;
440
}
441
 
442
// Terminate the calling thread. Installed as the return address (LR) of
// every thread by thread_create, so simply returning from a thread's entry
// function ends it cleanly.
// Fixed: "(void)" instead of the original "()" — empty parentheses in a C
// definition declare a function with unspecified parameters.
void thread_exit(void)
{
    thread_terminate(-1);
}