Subversion Repositories freemyipod

Rev

Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
14 theseven 1
//
2
//
3
//    Copyright 2010 TheSeven
4
//
5
//
6
//    This file is part of emBIOS.
7
//
8
//    emBIOS is free software: you can redistribute it and/or
9
//    modify it under the terms of the GNU General Public License as
10
//    published by the Free Software Foundation, either version 2 of the
11
//    License, or (at your option) any later version.
12
//
13
//    emBIOS is distributed in the hope that it will be useful,
14
//    but WITHOUT ANY WARRANTY; without even the implied warranty of
15
//    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
16
//    See the GNU General Public License for more details.
17
//
18
//    You should have received a copy of the GNU General Public License along
19
//    with emBIOS.  If not, see <http://www.gnu.org/licenses/>.
20
//
21
//
22
 
23
 
24
#include "global.h"
25
#include "thread.h"
26
#include "timer.h"
27
#include "panic.h"
28
#include "util.h"
85 theseven 29
#ifdef HAVE_STORAGE
58 theseven 30
#include "dir.h"
31
#include "file.h"
85 theseven 32
#endif
14 theseven 33
 
34
 
15 theseven 35
struct scheduler_thread scheduler_threads[MAX_THREADS] IBSS_ATTR;
36
struct scheduler_thread* current_thread IBSS_ATTR;
37
uint32_t last_tick IBSS_ATTR;
34 theseven 38
bool scheduler_frozen IBSS_ATTR;
15 theseven 39
extern struct wakeup dbgwakeup;
14 theseven 40
 
41
 
42
void mutex_init(struct mutex* obj)
43
{
44
    memset(obj, 0, sizeof(struct mutex));
45
}
46
 
47
void mutex_add_to_queue(struct mutex* obj, struct scheduler_thread* thread)
48
{
49
    struct scheduler_thread* t;
15 theseven 50
    if (!obj->waiters || obj->waiters->priority <= thread->priority)
14 theseven 51
    {
52
        thread->queue_next = obj->waiters;
53
        obj->waiters = thread;
54
    }
55
    else
56
    {
57
        t = obj->waiters;
58
        while (t->queue_next && t->queue_next->priority > thread->priority)
59
            t = t->queue_next;
60
        thread->queue_next = t->queue_next;
61
        t->queue_next = thread;
62
    }
63
}
64
 
65
void mutex_remove_from_queue(struct mutex* obj, struct scheduler_thread* thread)
66
{
67
    struct scheduler_thread* t;
68
    if (!obj->waiters) return;
69
    if (obj->waiters == thread) obj->waiters = thread->queue_next;
70
    else
71
    {
72
        t = obj->waiters;
73
        while (t->queue_next)
74
        {
75
            if (t->queue_next == thread) t->queue_next = thread->queue_next;
76
            t = t->queue_next;
77
        }
78
    }
79
}
80
 
81
int mutex_lock(struct mutex* obj, int timeout)
82
{
83
    int ret = THREAD_OK;
84
    struct scheduler_thread* thread;
85
    uint32_t mode = enter_critical_section();
86
 
87
    if (!obj->count)
88
    {
89
        obj->count = 1;
90
        obj->owner = current_thread;
91
    }
92
    else if (obj->owner == current_thread) obj->count++;
93
    else
94
    {
95
        if (timeout)
96
        {
97
            current_thread->state = THREAD_BLOCKED;
98
            current_thread->block_type = THREAD_BLOCK_MUTEX;
99
            current_thread->blocked_by = obj;
100
            current_thread->timeout = timeout;
101
            current_thread->blocked_since = USEC_TIMER;
102
            mutex_add_to_queue(obj, current_thread);
103
            leave_critical_section(mode);
104
            context_switch();
105
            if (obj->owner != current_thread) return THREAD_TIMEOUT;
106
            return THREAD_OK;
107
        }
108
        else ret = THREAD_TIMEOUT;
109
    }
110
 
111
    leave_critical_section(mode);
112
    return ret;
113
}
114
 
115
// Release one recursion level of a mutex held by the calling thread.
// Returns THREAD_OK when the mutex is fully released (or handed off to a
// waiter), otherwise the remaining recursion count.
// Panics (killing the thread) on unbalanced or foreign unlocks.
int mutex_unlock(struct mutex* obj)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (!obj->count)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Trying to unlock non-owned mutex! (%08X)", obj);
    }

    if (obj->owner != current_thread)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Trying to unlock mutex owned by different thread! (%08X)", obj);
    }

    // Drop one recursion level.  If that was the last one and somebody is
    // waiting, hand ownership directly to the head of the waiter queue
    // (the highest-priority waiter) and make that thread runnable again.
    if (--(obj->count)) ret = obj->count;
    else if (obj->waiters)
    {
        obj->count = 1;
        obj->owner = obj->waiters;
        obj->waiters->state = THREAD_READY;
        obj->waiters->block_type = THREAD_NOT_BLOCKED;
        obj->waiters->blocked_by = NULL;
        obj->waiters->timeout = 0;
        obj->waiters = obj->waiters->queue_next;
    }

    leave_critical_section(mode);
    return ret;
}
147
 
148
void wakeup_init(struct wakeup* obj)
149
{
150
    memset(obj, 0, sizeof(struct wakeup));
151
}
152
 
153
// Block the calling thread until wakeup_signal() is called on obj, or the
// timeout expires.  Only one thread may wait on a given wakeup at a time;
// a second waiter triggers a panic.
// timeout: 0 = poll (consume a pending signal or fail), -1 = wait forever,
// otherwise a timeout in microseconds.
// Returns THREAD_OK if signalled, THREAD_TIMEOUT otherwise.
int wakeup_wait(struct wakeup* obj, int timeout)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (obj->waiter)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Multiple threads waiting single wakeup! (%08X)", obj);
    }

    if (obj->signalled) obj->signalled = false;  // consume a pending signal
    else
    {
        if (timeout)
        {
            // Register as the (single) waiter and yield the CPU.
            current_thread->state = THREAD_BLOCKED;
            current_thread->block_type = THREAD_BLOCK_WAKEUP;
            current_thread->blocked_by = obj;
            current_thread->timeout = timeout;
            current_thread->blocked_since = USEC_TIMER;
            obj->waiter = current_thread;
            leave_critical_section(mode);
            context_switch();
            // Back from the switch: either we were signalled or we timed out.
            // NOTE(review): these accesses happen outside the critical
            // section - presumably safe because only this thread and
            // wakeup_signal() touch obj here; confirm against callers.
            obj->waiter = NULL;
            if (!obj->signalled) return THREAD_TIMEOUT;
            obj->signalled = false;
            return THREAD_OK;
        }
        else ret = THREAD_TIMEOUT;  // poll: nothing pending
    }

    leave_critical_section(mode);
    return ret;
}
188
 
189
int wakeup_signal(struct wakeup* obj)
190
{
191
    int ret = THREAD_OK;
192
    uint32_t mode = enter_critical_section();
193
 
194
    obj->signalled = true;
195
    if (obj->waiter)
196
    {
197
        obj->waiter->state = THREAD_READY;
198
        obj->waiter->block_type = THREAD_NOT_BLOCKED;
199
        obj->waiter->blocked_by = NULL;
200
        obj->waiter->timeout = 0;
201
        ret = THREAD_FOUND;
202
    }
203
 
204
    leave_critical_section(mode);
205
    return ret;
206
}
207
 
208
void sleep(int usecs)
209
{
15 theseven 210
    if (usecs)
211
    {
212
        uint32_t mode = enter_critical_section();
213
        current_thread->state = THREAD_BLOCKED;
214
        current_thread->block_type = THREAD_BLOCK_SLEEP;
215
        current_thread->timeout = usecs;
216
        current_thread->blocked_since = USEC_TIMER;
217
        leave_critical_section(mode);
218
    }
14 theseven 219
    context_switch();
220
}
221
 
222
void scheduler_init(void)
223
{
224
    memset(scheduler_threads, 0, sizeof(scheduler_threads));
34 theseven 225
    scheduler_frozen = false;
14 theseven 226
    last_tick = USEC_TIMER;
227
    current_thread = scheduler_threads;
228
    current_thread->state = THREAD_RUNNING;
229
    current_thread->startusec = last_tick;
230
    current_thread->name = "idle thread";
231
    current_thread->stack = (uint32_t*)-1;
232
    setup_tick();
233
}
234
 
54 theseven 235
bool scheduler_freeze(bool value)
34 theseven 236
{
54 theseven 237
    bool old = scheduler_frozen;
34 theseven 238
    scheduler_frozen = value;
54 theseven 239
    return old;
34 theseven 240
}
241
 
14 theseven 242
// Core scheduler, called from the context switch path: accounts CPU time,
// detects stack overflows, wakes timed-out blocked threads, and picks the
// next thread to run (leaving it in current_thread, state THREAD_RUNNING).
// thread: preferred thread index, or any invalid value to let the scheduler
// choose the READY thread with the lowest cputime/priority score.
void scheduler_switch(int thread)
{
    int i;
    uint32_t score, best;
    uint32_t usec = USEC_TIMER;
    if (current_thread->state == THREAD_RUNNING) current_thread->state = THREAD_READY;
    current_thread->cputime_total += usec - current_thread->startusec;
    current_thread->cputime_current += usec - current_thread->startusec;
    // Stack overflow check: thread_create() fills stacks with the 0xaffebeaf
    // canary; stack == (uint32_t*)-1 marks the idle thread (never checked).
    // On overflow: suspend all user threads, mark the offender defunct, and
    // notify the debugger thread.
    if ((int)current_thread->stack != -1 && *current_thread->stack != 0xaffebeaf)
    {
        for (i = 0; i < MAX_THREADS; i++)
            if (scheduler_threads[i].type == USER_THREAD)
                scheduler_threads[i].state = THREAD_SUSPENDED;
        current_thread->state = THREAD_DEFUNCT;
        current_thread->block_type = THREAD_DEFUNCT_STKOV;
        wakeup_signal(&dbgwakeup);
    }

    // Once per SCHEDULER_TICK, fold accumulated CPU time into a load figure.
    if (usec - last_tick > SCHEDULER_TICK)
    {
        last_tick = usec;
        for (i = 0; i < MAX_THREADS; i++)
        {
            scheduler_threads[i].cpuload = scheduler_threads[i].cputime_current / SCHEDULER_TICK;
            scheduler_threads[i].cputime_current = 0;
        }
    }

    if (scheduler_frozen) thread = 0;  // frozen: always run the idle thread
    else
    {
        // Wake every blocked thread whose timeout has expired
        // (timeout == -1 means "wait forever").
        for (i = 0; i < MAX_THREADS; i++)
            if (scheduler_threads[i].state == THREAD_BLOCKED
             && scheduler_threads[i].timeout != -1
             && TIME_AFTER(usec, scheduler_threads[i].blocked_since
                               + scheduler_threads[i].timeout))
            {
                // Mutex waiters must also be unlinked from the waiter queue.
                if (scheduler_threads[i].block_type == THREAD_BLOCK_MUTEX)
                    mutex_remove_from_queue((struct mutex*)scheduler_threads[i].blocked_by,
                                            &scheduler_threads[i]);
                scheduler_threads[i].state = THREAD_READY;
                scheduler_threads[i].block_type = THREAD_NOT_BLOCKED;
                scheduler_threads[i].blocked_by = NULL;
                scheduler_threads[i].timeout = 0;
            }

        // Honor the caller's preferred thread if it is runnable; otherwise
        // pick the READY thread with the best (lowest) cputime/priority
        // ratio, falling back to the idle thread (0) if none qualifies.
        if (thread >= 0 && thread < MAX_THREADS && scheduler_threads[thread].state == THREAD_READY)
            current_thread = &scheduler_threads[thread];
        else
        {
            thread = 0;
            best = 0xffffffff;
            for (i = 0; i < MAX_THREADS; i++)
                if (scheduler_threads[i].state == THREAD_READY && scheduler_threads[i].priority)
                {
                    score = scheduler_threads[i].cputime_current / scheduler_threads[i].priority;
                    if (score < best)
                    {
                        best = score;
                        thread = i;
                    }
                }
        }
    }

    current_thread = &scheduler_threads[thread];
    current_thread->state = THREAD_RUNNING;
    current_thread->startusec = USEC_TIMER;
}
311
 
312
int thread_create(const char* name, const void* code, void* stack,
15 theseven 313
                  int stacksize, enum thread_type type, int priority, bool run)
14 theseven 314
{
315
    int ret = NO_MORE_THREADS;
316
    int i;
317
 
318
    for (i = 0; i < stacksize >> 2; i ++) ((uint32_t*)stack)[i] = 0xaffebeaf;
319
 
320
    uint32_t mode = enter_critical_section();
321
 
322
    for (i = 0; i < MAX_THREADS; i++)
323
        if (scheduler_threads[i].state == THREAD_FREE)
324
        {
325
            ret = i;
326
            memset(&scheduler_threads[i], 0, sizeof(struct scheduler_thread));
327
            scheduler_threads[i].state = run ? THREAD_READY : THREAD_SUSPENDED;
15 theseven 328
            scheduler_threads[i].type = type;
14 theseven 329
            scheduler_threads[i].name = name;
330
            scheduler_threads[i].priority = priority;
43 theseven 331
            scheduler_threads[i].cpsr = 0x1f;
14 theseven 332
            scheduler_threads[i].regs[15] = (uint32_t)code;
333
            scheduler_threads[i].regs[14] = (uint32_t)thread_exit;
334
            scheduler_threads[i].regs[13] = (uint32_t)stack + stacksize;
335
            scheduler_threads[i].stack = stack;
336
            break;
337
        }
338
 
339
    leave_critical_section(mode);
340
    return ret;
341
}
342
 
343
int thread_suspend(int thread)
344
{
345
    int ret = THREAD_OK;
346
    struct scheduler_thread* t = &scheduler_threads[thread];
347
    bool needsswitch = false;
348
    uint32_t mode = enter_critical_section();
349
 
350
    if (thread == -1) t = current_thread;
351
    else if (thread < 0 || thread >= MAX_THREADS) ret = INVALID_THREAD;
352
    else if (t->state == THREAD_FREE) ret = INVALID_THREAD;
353
    else if (t->state == THREAD_SUSPENDED) ret = ALREADY_SUSPENDED;
354
    if (ret == THREAD_OK)
355
    {
356
        if (t->state == THREAD_RUNNING) needsswitch = true;
357
        else if (t->state == THREAD_BLOCKED)
358
        {
359
            if (t->block_type == THREAD_BLOCK_SLEEP)
15 theseven 360
            {
361
                if (t->timeout != -1) t->timeout -= USEC_TIMER - t->blocked_since;
362
            }
14 theseven 363
            else if (t->block_type == THREAD_BLOCK_MUTEX)
364
            {
365
                mutex_remove_from_queue((struct mutex*)t->blocked_by, t);
15 theseven 366
                if (t->timeout != -1) t->timeout -= USEC_TIMER - t->blocked_since;
14 theseven 367
            }
368
            else if (t->block_type == THREAD_BLOCK_WAKEUP)
15 theseven 369
            {
370
                if (t->timeout != -1) t->timeout -= USEC_TIMER - t->blocked_since;
371
            }
14 theseven 372
        }
373
        t->state = THREAD_SUSPENDED;
374
    }
375
 
376
    leave_critical_section(mode);
377
 
378
    if (needsswitch) context_switch();
379
 
380
    return ret;
381
}
382
 
383
int thread_resume(int thread)
384
{
385
    int ret = THREAD_OK;
386
    struct scheduler_thread* t = &scheduler_threads[thread];
387
    bool needsswitch = false;
388
    uint32_t mode = enter_critical_section();
389
 
390
    if (thread == -1) t = current_thread;
391
    else if (thread < 0 || thread >= MAX_THREADS) ret = INVALID_THREAD;
392
    else if (t->state == THREAD_FREE) ret = INVALID_THREAD;
393
    else if (t->state != THREAD_SUSPENDED) ret = ALREADY_RESUMED;
394
    if (ret == THREAD_OK)
395
    {
396
        if (t->block_type == THREAD_BLOCK_SLEEP)
397
            t->blocked_since = USEC_TIMER;
398
        else if (t->block_type == THREAD_BLOCK_MUTEX)
399
        {
400
            mutex_add_to_queue((struct mutex*)t->blocked_by, t);
401
            t->blocked_since = USEC_TIMER;
402
            t->state = THREAD_BLOCKED;
403
        }
404
        else if (t->block_type == THREAD_BLOCK_WAKEUP)
405
        {
406
            t->blocked_since = USEC_TIMER;
407
            t->state = THREAD_BLOCKED;
408
        }
409
        else t->state = THREAD_READY;
410
    }
411
 
412
    leave_critical_section(mode);
413
    return ret;
414
}
415
 
416
int thread_terminate(int thread)
417
{
418
    int ret = THREAD_OK;
419
    struct scheduler_thread* t = &scheduler_threads[thread];
420
    bool needsswitch = false;
421
    uint32_t mode = enter_critical_section();
422
 
423
    if (thread == -1) t = current_thread;
424
    else if (thread < 0 || thread >= MAX_THREADS) ret = INVALID_THREAD;
425
    else if (t->state == THREAD_FREE) ret = INVALID_THREAD;
426
    if (ret == THREAD_OK)
427
    {
428
        if (t->state == THREAD_RUNNING) needsswitch = true;
429
        else if (t->state == THREAD_BLOCKED)
430
        {
431
            if (t->block_type == THREAD_BLOCK_MUTEX)
432
                mutex_remove_from_queue((struct mutex*)t->blocked_by, t);
433
            else if (t->block_type == THREAD_BLOCK_WAKEUP)
434
                ((struct wakeup*)t->blocked_by)->waiter = NULL;
435
        }
436
        t->state = THREAD_FREE;
85 theseven 437
#ifdef HAVE_STORAGE
58 theseven 438
        close_all_of_process(t);
439
        closedir_all_of_process(t);
85 theseven 440
#endif
14 theseven 441
    }
442
 
443
    leave_critical_section(mode);
444
 
445
    if (needsswitch) context_switch();
446
 
447
    return ret;
448
}
449
 
450
// Voluntarily terminate the calling thread; also used as the implicit
// return address of every thread created by thread_create().
void thread_exit()
{
    thread_terminate(-1);
}
71 theseven 454
 
455
int* __errno()
456
{
457
    return &current_thread->err_no;
458
}