Subversion Repositories freemyipod

Rev

Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
14 theseven 1
//
2
//
3
//    Copyright 2010 TheSeven
4
//
5
//
6
//    This file is part of emBIOS.
7
//
8
//    emBIOS is free software: you can redistribute it and/or
9
//    modify it under the terms of the GNU General Public License as
10
//    published by the Free Software Foundation, either version 2 of the
11
//    License, or (at your option) any later version.
12
//
13
//    emBIOS is distributed in the hope that it will be useful,
14
//    but WITHOUT ANY WARRANTY; without even the implied warranty of
15
//    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
16
//    See the GNU General Public License for more details.
17
//
18
//    You should have received a copy of the GNU General Public License along
19
//    with emBIOS.  If not, see <http://www.gnu.org/licenses/>.
20
//
21
//
22
 
23
 
24
#include "global.h"
25
#include "thread.h"
26
#include "timer.h"
27
#include "panic.h"
28
#include "util.h"
58 theseven 29
#include "dir.h"
30
#include "file.h"
14 theseven 31
 
32
 
15 theseven 33
// Global scheduler state. IBSS_ATTR presumably places these in an
// uninitialized fast-RAM section (defined in global.h) — they are all
// set up explicitly by scheduler_init() before first use.
struct scheduler_thread scheduler_threads[MAX_THREADS] IBSS_ATTR;  // static thread slot table
struct scheduler_thread* current_thread IBSS_ATTR;  // thread currently owning the CPU
uint32_t last_tick IBSS_ATTR;  // USEC_TIMER value at the last CPU-load accounting window
bool scheduler_frozen IBSS_ATTR;  // while true, scheduler_switch() always selects thread 0
extern struct wakeup dbgwakeup;  // signalled on stack overflow to wake the debugger (see scheduler_switch)
14 theseven 38
 
39
 
40
void mutex_init(struct mutex* obj)
41
{
42
    memset(obj, 0, sizeof(struct mutex));
43
}
44
 
45
void mutex_add_to_queue(struct mutex* obj, struct scheduler_thread* thread)
46
{
47
    struct scheduler_thread* t;
15 theseven 48
    if (!obj->waiters || obj->waiters->priority <= thread->priority)
14 theseven 49
    {
50
        thread->queue_next = obj->waiters;
51
        obj->waiters = thread;
52
    }
53
    else
54
    {
55
        t = obj->waiters;
56
        while (t->queue_next && t->queue_next->priority > thread->priority)
57
            t = t->queue_next;
58
        thread->queue_next = t->queue_next;
59
        t->queue_next = thread;
60
    }
61
}
62
 
63
void mutex_remove_from_queue(struct mutex* obj, struct scheduler_thread* thread)
64
{
65
    struct scheduler_thread* t;
66
    if (!obj->waiters) return;
67
    if (obj->waiters == thread) obj->waiters = thread->queue_next;
68
    else
69
    {
70
        t = obj->waiters;
71
        while (t->queue_next)
72
        {
73
            if (t->queue_next == thread) t->queue_next = thread->queue_next;
74
            t = t->queue_next;
75
        }
76
    }
77
}
78
 
79
int mutex_lock(struct mutex* obj, int timeout)
80
{
81
    int ret = THREAD_OK;
82
    struct scheduler_thread* thread;
83
    uint32_t mode = enter_critical_section();
84
 
85
    if (!obj->count)
86
    {
87
        obj->count = 1;
88
        obj->owner = current_thread;
89
    }
90
    else if (obj->owner == current_thread) obj->count++;
91
    else
92
    {
93
        if (timeout)
94
        {
95
            current_thread->state = THREAD_BLOCKED;
96
            current_thread->block_type = THREAD_BLOCK_MUTEX;
97
            current_thread->blocked_by = obj;
98
            current_thread->timeout = timeout;
99
            current_thread->blocked_since = USEC_TIMER;
100
            mutex_add_to_queue(obj, current_thread);
101
            leave_critical_section(mode);
102
            context_switch();
103
            if (obj->owner != current_thread) return THREAD_TIMEOUT;
104
            return THREAD_OK;
105
        }
106
        else ret = THREAD_TIMEOUT;
107
    }
108
 
109
    leave_critical_section(mode);
110
    return ret;
111
}
112
 
113
// Release a mutex owned by the current thread.
// Panics (killing the thread) if the mutex is not locked at all, or is
// owned by a different thread. For a recursive lock the nesting count is
// decremented and the remaining count is returned; on full release the
// mutex is handed directly to the highest-priority waiter, which is made
// READY — the mutex is never observed unlocked during a handoff.
// Returns THREAD_OK (0) when fully released, otherwise the nesting count.
int mutex_unlock(struct mutex* obj)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (!obj->count)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Trying to unlock non-owned mutex! (%08X)", obj);
    }

    if (obj->owner != current_thread)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Trying to unlock mutex owned by different thread! (%08X)", obj);
    }

    if (--(obj->count)) ret = obj->count;  // still held recursively
    else if (obj->waiters)
    {
        // Ownership handoff: head of the waiter list (highest priority,
        // see mutex_add_to_queue) becomes the new owner and is unblocked.
        obj->count = 1;
        obj->owner = obj->waiters;
        obj->waiters->state = THREAD_READY;
        obj->waiters->block_type = THREAD_NOT_BLOCKED;
        obj->waiters->blocked_by = NULL;
        obj->waiters->timeout = 0;
        obj->waiters = obj->waiters->queue_next;
    }

    leave_critical_section(mode);
    return ret;
}
145
 
146
void wakeup_init(struct wakeup* obj)
147
{
148
    memset(obj, 0, sizeof(struct wakeup));
149
}
150
 
151
// Wait for a wakeup object to be signalled, for at most <timeout>
// microseconds (0 = poll only, -1 = wait forever). Only one thread may
// wait on a given wakeup object; a second concurrent waiter panics.
// A pending (latched) signal is consumed without blocking.
// Returns THREAD_OK when signalled, THREAD_TIMEOUT otherwise.
int wakeup_wait(struct wakeup* obj, int timeout)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (obj->waiter)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Multiple threads waiting single wakeup! (%08X)", obj);
    }

    if (obj->signalled) obj->signalled = false;  // consume a pending signal immediately
    else
    {
        if (timeout)
        {
            // Register as the waiter and yield; wakeup_signal() or a
            // scheduler timeout makes us READY again.
            current_thread->state = THREAD_BLOCKED;
            current_thread->block_type = THREAD_BLOCK_WAKEUP;
            current_thread->blocked_by = obj;
            current_thread->timeout = timeout;
            current_thread->blocked_since = USEC_TIMER;
            obj->waiter = current_thread;
            leave_critical_section(mode);
            context_switch();
            // NOTE(review): this deregistration and the signalled-flag check
            // run outside the critical section; a signal landing exactly here
            // could race — confirm this is acceptable for the kernel's
            // single-core cooperative model.
            obj->waiter = NULL;
            if (!obj->signalled) return THREAD_TIMEOUT;
            obj->signalled = false;
            return THREAD_OK;
        }
        else ret = THREAD_TIMEOUT;  // poll failed: not signalled
    }

    leave_critical_section(mode);
    return ret;
}
186
 
187
int wakeup_signal(struct wakeup* obj)
188
{
189
    int ret = THREAD_OK;
190
    uint32_t mode = enter_critical_section();
191
 
192
    obj->signalled = true;
193
    if (obj->waiter)
194
    {
195
        obj->waiter->state = THREAD_READY;
196
        obj->waiter->block_type = THREAD_NOT_BLOCKED;
197
        obj->waiter->blocked_by = NULL;
198
        obj->waiter->timeout = 0;
199
        ret = THREAD_FOUND;
200
    }
201
 
202
    leave_critical_section(mode);
203
    return ret;
204
}
205
 
206
void sleep(int usecs)
207
{
15 theseven 208
    if (usecs)
209
    {
210
        uint32_t mode = enter_critical_section();
211
        current_thread->state = THREAD_BLOCKED;
212
        current_thread->block_type = THREAD_BLOCK_SLEEP;
213
        current_thread->timeout = usecs;
214
        current_thread->blocked_since = USEC_TIMER;
215
        leave_critical_section(mode);
216
    }
14 theseven 217
    context_switch();
218
}
219
 
220
void scheduler_init(void)
221
{
222
    memset(scheduler_threads, 0, sizeof(scheduler_threads));
34 theseven 223
    scheduler_frozen = false;
14 theseven 224
    last_tick = USEC_TIMER;
225
    current_thread = scheduler_threads;
226
    current_thread->state = THREAD_RUNNING;
227
    current_thread->startusec = last_tick;
228
    current_thread->name = "idle thread";
229
    current_thread->stack = (uint32_t*)-1;
230
    setup_tick();
231
}
232
 
54 theseven 233
bool scheduler_freeze(bool value)
34 theseven 234
{
54 theseven 235
    bool old = scheduler_frozen;
34 theseven 236
    scheduler_frozen = value;
54 theseven 237
    return old;
34 theseven 238
}
239
 
14 theseven 240
// Core scheduler, called on every context switch: accounts CPU time for
// the outgoing thread, checks its stack watermark, expires blocked-thread
// timeouts, and selects the next thread to run. <thread> is a hint: if
// that slot is READY it runs next; otherwise the READY thread with the
// lowest cputime_current/priority ratio wins (priority 0 threads are
// never auto-selected; slot 0, the idle thread, is the fallback).
void scheduler_switch(int thread)
{
    int i;
    uint32_t score, best;
    uint32_t usec = USEC_TIMER;
    if (current_thread->state == THREAD_RUNNING) current_thread->state = THREAD_READY;
    // Charge the elapsed slice to the outgoing thread (lifetime total and
    // the current accounting window).
    current_thread->cputime_total += usec - current_thread->startusec;
    current_thread->cputime_current += usec - current_thread->startusec;
    // Stack overflow check: thread_create() pre-fills stacks with the
    // 0xaffebeaf watermark; a clobbered lowest word means the stack
    // overflowed. stack == -1 marks threads without a checkable stack
    // (the idle thread, see scheduler_init).
    if ((int)current_thread->stack != -1 && *current_thread->stack != 0xaffebeaf)
    {
        // Suspend all user threads, mark the offender defunct, and wake
        // the debugger.
        for (i = 0; i < MAX_THREADS; i++)
            if (scheduler_threads[i].type == USER_THREAD)
                scheduler_threads[i].state = THREAD_SUSPENDED;
        current_thread->state = THREAD_DEFUNCT;
        current_thread->block_type = THREAD_DEFUNCT_STKOV;
        wakeup_signal(&dbgwakeup);
    }

    // Once per SCHEDULER_TICK, turn the accumulated window time into a
    // load figure and restart the accounting window.
    if (usec - last_tick > SCHEDULER_TICK)
    {
        last_tick = usec;
        for (i = 0; i < MAX_THREADS; i++)
        {
            scheduler_threads[i].cpuload = scheduler_threads[i].cputime_current / SCHEDULER_TICK;
            scheduler_threads[i].cputime_current = 0;
        }
    }

    if (scheduler_frozen) thread = 0;  // frozen: always run thread 0
    else
    {
        // Wake blocked threads whose timeout expired (timeout == -1 means
        // wait forever). Mutex waiters must also leave the waiter queue.
        for (i = 0; i < MAX_THREADS; i++)
            if (scheduler_threads[i].state == THREAD_BLOCKED
             && scheduler_threads[i].timeout != -1
             && TIME_AFTER(usec, scheduler_threads[i].blocked_since
                               + scheduler_threads[i].timeout))
            {
                if (scheduler_threads[i].block_type == THREAD_BLOCK_MUTEX)
                    mutex_remove_from_queue((struct mutex*)scheduler_threads[i].blocked_by,
                                            &scheduler_threads[i]);
                scheduler_threads[i].state = THREAD_READY;
                scheduler_threads[i].block_type = THREAD_NOT_BLOCKED;
                scheduler_threads[i].blocked_by = NULL;
                scheduler_threads[i].timeout = 0;
            }

        if (thread >= 0 && thread < MAX_THREADS && scheduler_threads[thread].state == THREAD_READY)
            current_thread = &scheduler_threads[thread];
        else
        {
            // Load-based selection: pick the READY thread that has used
            // the least CPU relative to its priority.
            thread = 0;
            best = 0xffffffff;
            for (i = 0; i < MAX_THREADS; i++)
                if (scheduler_threads[i].state == THREAD_READY && scheduler_threads[i].priority)
                {
                    score = scheduler_threads[i].cputime_current / scheduler_threads[i].priority;
                    if (score < best)
                    {
                        best = score;
                        thread = i;
                    }
                }
        }
    }

    current_thread = &scheduler_threads[thread];
    current_thread->state = THREAD_RUNNING;
    current_thread->startusec = USEC_TIMER;
}
309
 
310
// Create a new thread in the first free scheduler slot.
//   name:      thread name (the pointer is stored, not copied)
//   code:      entry point the thread starts executing at
//   stack:     base of the thread's stack area
//   stacksize: stack size in bytes
//   type:      thread type; USER_THREAD threads are collectively suspended
//              on a stack overflow (see scheduler_switch)
//   priority:  scheduling weight — higher gets more CPU; 0 means the
//              load-based scheduler never auto-selects this thread
//   run:       start READY (true) or SUSPENDED (false)
// Returns the new thread's slot index, or NO_MORE_THREADS if the table
// is full.
int thread_create(const char* name, const void* code, void* stack,
                  int stacksize, enum thread_type type, int priority, bool run)
{
    int ret = NO_MORE_THREADS;
    int i;

    // Pre-fill the stack with the watermark pattern; scheduler_switch()
    // checks the lowest word to detect overflows.
    for (i = 0; i < stacksize >> 2; i ++) ((uint32_t*)stack)[i] = 0xaffebeaf;

    uint32_t mode = enter_critical_section();

    for (i = 0; i < MAX_THREADS; i++)
        if (scheduler_threads[i].state == THREAD_FREE)
        {
            ret = i;
            memset(&scheduler_threads[i], 0, sizeof(struct scheduler_thread));
            scheduler_threads[i].state = run ? THREAD_READY : THREAD_SUSPENDED;
            scheduler_threads[i].type = type;
            scheduler_threads[i].name = name;
            scheduler_threads[i].priority = priority;
            scheduler_threads[i].cpsr = 0x1f;  // initial CPSR — presumably ARM System mode; confirm
            scheduler_threads[i].regs[15] = (uint32_t)code;         // r15/PC: entry point
            scheduler_threads[i].regs[14] = (uint32_t)thread_exit;  // r14/LR: returning terminates the thread
            scheduler_threads[i].regs[13] = (uint32_t)stack + stacksize;  // r13/SP: top of stack (full-descending)
            scheduler_threads[i].stack = stack;
            break;
        }

    leave_critical_section(mode);
    return ret;
}
340
 
341
int thread_suspend(int thread)
342
{
343
    int ret = THREAD_OK;
344
    struct scheduler_thread* t = &scheduler_threads[thread];
345
    bool needsswitch = false;
346
    uint32_t mode = enter_critical_section();
347
 
348
    if (thread == -1) t = current_thread;
349
    else if (thread < 0 || thread >= MAX_THREADS) ret = INVALID_THREAD;
350
    else if (t->state == THREAD_FREE) ret = INVALID_THREAD;
351
    else if (t->state == THREAD_SUSPENDED) ret = ALREADY_SUSPENDED;
352
    if (ret == THREAD_OK)
353
    {
354
        if (t->state == THREAD_RUNNING) needsswitch = true;
355
        else if (t->state == THREAD_BLOCKED)
356
        {
357
            if (t->block_type == THREAD_BLOCK_SLEEP)
15 theseven 358
            {
359
                if (t->timeout != -1) t->timeout -= USEC_TIMER - t->blocked_since;
360
            }
14 theseven 361
            else if (t->block_type == THREAD_BLOCK_MUTEX)
362
            {
363
                mutex_remove_from_queue((struct mutex*)t->blocked_by, t);
15 theseven 364
                if (t->timeout != -1) t->timeout -= USEC_TIMER - t->blocked_since;
14 theseven 365
            }
366
            else if (t->block_type == THREAD_BLOCK_WAKEUP)
15 theseven 367
            {
368
                if (t->timeout != -1) t->timeout -= USEC_TIMER - t->blocked_since;
369
            }
14 theseven 370
        }
371
        t->state = THREAD_SUSPENDED;
372
    }
373
 
374
    leave_critical_section(mode);
375
 
376
    if (needsswitch) context_switch();
377
 
378
    return ret;
379
}
380
 
381
int thread_resume(int thread)
382
{
383
    int ret = THREAD_OK;
384
    struct scheduler_thread* t = &scheduler_threads[thread];
385
    bool needsswitch = false;
386
    uint32_t mode = enter_critical_section();
387
 
388
    if (thread == -1) t = current_thread;
389
    else if (thread < 0 || thread >= MAX_THREADS) ret = INVALID_THREAD;
390
    else if (t->state == THREAD_FREE) ret = INVALID_THREAD;
391
    else if (t->state != THREAD_SUSPENDED) ret = ALREADY_RESUMED;
392
    if (ret == THREAD_OK)
393
    {
394
        if (t->block_type == THREAD_BLOCK_SLEEP)
395
            t->blocked_since = USEC_TIMER;
396
        else if (t->block_type == THREAD_BLOCK_MUTEX)
397
        {
398
            mutex_add_to_queue((struct mutex*)t->blocked_by, t);
399
            t->blocked_since = USEC_TIMER;
400
            t->state = THREAD_BLOCKED;
401
        }
402
        else if (t->block_type == THREAD_BLOCK_WAKEUP)
403
        {
404
            t->blocked_since = USEC_TIMER;
405
            t->state = THREAD_BLOCKED;
406
        }
407
        else t->state = THREAD_READY;
408
    }
409
 
410
    leave_critical_section(mode);
411
    return ret;
412
}
413
 
414
int thread_terminate(int thread)
415
{
416
    int ret = THREAD_OK;
417
    struct scheduler_thread* t = &scheduler_threads[thread];
418
    bool needsswitch = false;
419
    uint32_t mode = enter_critical_section();
420
 
421
    if (thread == -1) t = current_thread;
422
    else if (thread < 0 || thread >= MAX_THREADS) ret = INVALID_THREAD;
423
    else if (t->state == THREAD_FREE) ret = INVALID_THREAD;
424
    if (ret == THREAD_OK)
425
    {
426
        if (t->state == THREAD_RUNNING) needsswitch = true;
427
        else if (t->state == THREAD_BLOCKED)
428
        {
429
            if (t->block_type == THREAD_BLOCK_MUTEX)
430
                mutex_remove_from_queue((struct mutex*)t->blocked_by, t);
431
            else if (t->block_type == THREAD_BLOCK_WAKEUP)
432
                ((struct wakeup*)t->blocked_by)->waiter = NULL;
433
        }
434
        t->state = THREAD_FREE;
58 theseven 435
        close_all_of_process(t);
436
        closedir_all_of_process(t);
14 theseven 437
    }
438
 
439
    leave_critical_section(mode);
440
 
441
    if (needsswitch) context_switch();
442
 
443
    return ret;
444
}
445
 
446
// Terminate the calling thread. Also installed as the return address (LR)
// of every thread created by thread_create(), so a thread function that
// simply returns is cleaned up automatically.
void thread_exit()
{
    thread_terminate(-1);
}
71 theseven 450
 
451
// Per-thread errno location: each thread keeps its own err_no in its
// scheduler slot. Presumably overrides the C library's (newlib-style)
// __errno() so the errno macro resolves per-thread — confirm against the
// toolchain's libc.
int* __errno()
{
    return &current_thread->err_no;
}