Subversion Repositories freemyipod

Rev

Go to most recent revision | Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
//
//
//    Copyright 2010 TheSeven
//
//
//    This file is part of emCORE.
//
//    emCORE is free software: you can redistribute it and/or
//    modify it under the terms of the GNU General Public License as
//    published by the Free Software Foundation, either version 2 of the
//    License, or (at your option) any later version.
//
//    emCORE is distributed in the hope that it will be useful,
//    but WITHOUT ANY WARRANTY; without even the implied warranty of
//    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//    See the GNU General Public License for more details.
//
//    You should have received a copy of the GNU General Public License along
//    with emCORE.  If not, see <http://www.gnu.org/licenses/>.
//
//
22
 
23
 
24
#include "global.h"
25
#include "thread.h"
26
#include "timer.h"
27
#include "panic.h"
28
#include "util.h"
429 theseven 29
#include "malloc.h"
465 theseven 30
#include "library.h"
85 theseven 31
#ifdef HAVE_STORAGE
58 theseven 32
#include "dir.h"
33
#include "file.h"
85 theseven 34
#endif
130 theseven 35
#ifdef HAVE_BUTTON
36
#include "button.h"
37
#endif
14 theseven 38
 
39
 
429 theseven 40
// Head of the singly-linked list of all threads (the idle thread after init).
struct scheduler_thread* head_thread IBSS_ATTR;
// The thread currently owning the CPU; updated by scheduler_switch().
struct scheduler_thread* current_thread IBSS_ATTR;
// Statically allocated thread that runs when nothing else is READY.
struct scheduler_thread idle_thread IBSS_ATTR;
// Timestamp (USEC_TIMER) of the last CPU-load accounting tick.
uint32_t last_tick IBSS_ATTR;
// While true, scheduler_switch() always selects the idle thread.
bool scheduler_frozen IBSS_ATTR;
// Signalled to wake the debugger thread, e.g. on stack overflow detection.
extern struct wakeup dbgwakeup;
14 theseven 46
 
47
 
48
void mutex_init(struct mutex* obj)
49
{
50
    memset(obj, 0, sizeof(struct mutex));
51
}
52
 
53
void mutex_add_to_queue(struct mutex* obj, struct scheduler_thread* thread)
54
{
55
    struct scheduler_thread* t;
15 theseven 56
    if (!obj->waiters || obj->waiters->priority <= thread->priority)
14 theseven 57
    {
58
        thread->queue_next = obj->waiters;
59
        obj->waiters = thread;
60
    }
61
    else
62
    {
63
        t = obj->waiters;
64
        while (t->queue_next && t->queue_next->priority > thread->priority)
65
            t = t->queue_next;
66
        thread->queue_next = t->queue_next;
67
        t->queue_next = thread;
68
    }
69
}
70
 
71
// Unlink a thread from a mutex's waiter queue. Called when a timed lock
// attempt expires or the waiting thread is suspended/terminated.
// No-op if the queue is empty or the thread is not queued.
void mutex_remove_from_queue(struct mutex* obj, struct scheduler_thread* thread)
{
    struct scheduler_thread* t;
    if (!obj->waiters) return;
    // Thread at the head: just advance the head pointer.
    if (obj->waiters == thread) obj->waiters = thread->queue_next;
    else
    {
        // Interior node: scan the whole list, unlinking the thread where found.
        t = obj->waiters;
        while (t->queue_next)
        {
            if (t->queue_next == thread) t->queue_next = thread->queue_next;
            t = t->queue_next;
        }
    }
}
86
 
87
// Acquire a mutex, blocking for at most <timeout> microseconds.
// timeout == 0 means "try once, don't block"; timeout == -1 appears to mean
// "wait forever" (the scheduler never expires a timeout of -1 — see
// scheduler_switch). Recursive locking by the current owner increments the
// lock count. Returns THREAD_OK on success, THREAD_TIMEOUT otherwise.
int mutex_lock(struct mutex* obj, int timeout)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (!obj->count)
    {
        // Uncontended: take ownership immediately.
        obj->count = 1;
        obj->owner = current_thread;
    }
    else if (obj->owner == current_thread) obj->count++;  // recursive acquire
    else
    {
        if (timeout)
        {
            // Contended: block on the mutex and yield the CPU. Either
            // mutex_unlock() hands us ownership, or scheduler_switch()
            // wakes us when the timeout expires.
            current_thread->state = THREAD_BLOCKED;
            current_thread->block_type = THREAD_BLOCK_MUTEX;
            current_thread->blocked_by = obj;
            current_thread->timeout = timeout;
            current_thread->blocked_since = USEC_TIMER;
            mutex_add_to_queue(obj, current_thread);
            leave_critical_section(mode);
            context_switch();
            // Woken up: if we are not the owner, the wait timed out
            // before mutex_unlock() could hand the lock to us.
            if (obj->owner != current_thread) return THREAD_TIMEOUT;
            return THREAD_OK;
        }
        else ret = THREAD_TIMEOUT;  // non-blocking attempt on a held mutex
    }

    leave_critical_section(mode);
    return ret;
}
119
 
120
// Release a mutex held by the calling thread. Panics (killing the thread)
// if the mutex is not locked, or is owned by a different thread.
// If the recursion count drops to zero and there are waiters, ownership is
// handed directly to the head of the waiter queue (the highest-priority
// waiter) and that thread is made READY.
// Returns the remaining recursion count (THREAD_OK == 0 when fully released).
int mutex_unlock(struct mutex* obj)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (!obj->count)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Trying to unlock non-owned mutex! (%08X)", obj);
    }

    if (obj->owner != current_thread)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Trying to unlock mutex owned by different thread! (%08X)", obj);
    }

    // Still held recursively: report the remaining depth.
    if (--(obj->count)) ret = obj->count;
    else if (obj->waiters)
    {
        // Direct handoff: the new owner will see itself as owner when its
        // context_switch() returns inside mutex_lock().
        obj->count = 1;
        obj->owner = obj->waiters;
        obj->waiters->state = THREAD_READY;
        obj->waiters->block_type = THREAD_NOT_BLOCKED;
        obj->waiters->blocked_by = NULL;
        obj->waiters->timeout = 0;
        obj->waiters = obj->waiters->queue_next;
    }

    leave_critical_section(mode);
    return ret;
}
152
 
153
void wakeup_init(struct wakeup* obj)
154
{
155
    memset(obj, 0, sizeof(struct wakeup));
156
}
157
 
158
// Wait for a wakeup object to be signalled, for at most <timeout>
// microseconds (0 = just poll; -1 appears to mean "forever", since the
// scheduler never expires a -1 timeout). Only one thread may wait on a
// wakeup at a time; a second concurrent waiter is a fatal usage error.
// A signal that arrived earlier consumes the pending flag and returns
// immediately. Returns THREAD_OK if signalled, THREAD_TIMEOUT otherwise.
int wakeup_wait(struct wakeup* obj, int timeout)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (obj->waiter)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Multiple threads waiting single wakeup! (%08X)", obj);
    }

    // Consume a signal that was posted before we started waiting.
    if (obj->signalled) obj->signalled = false;
    else
    {
        if (timeout)
        {
            // Block on the wakeup and yield; wakeup_signal() or a timeout
            // in scheduler_switch() makes us runnable again.
            current_thread->state = THREAD_BLOCKED;
            current_thread->block_type = THREAD_BLOCK_WAKEUP;
            current_thread->blocked_by = obj;
            current_thread->timeout = timeout;
            current_thread->blocked_since = USEC_TIMER;
            obj->waiter = current_thread;
            leave_critical_section(mode);
            context_switch();
            // Back after wakeup: detach and check whether we were
            // signalled or simply timed out.
            obj->waiter = NULL;
            if (!obj->signalled) return THREAD_TIMEOUT;
            obj->signalled = false;
            return THREAD_OK;
        }
        else ret = THREAD_TIMEOUT;  // poll-only and no pending signal
    }

    leave_critical_section(mode);
    return ret;
}
193
 
194
int wakeup_signal(struct wakeup* obj)
195
{
196
    int ret = THREAD_OK;
197
    uint32_t mode = enter_critical_section();
198
 
199
    obj->signalled = true;
200
    if (obj->waiter)
201
    {
202
        obj->waiter->state = THREAD_READY;
203
        obj->waiter->block_type = THREAD_NOT_BLOCKED;
204
        obj->waiter->blocked_by = NULL;
205
        obj->waiter->timeout = 0;
206
        ret = THREAD_FOUND;
207
    }
208
 
209
    leave_critical_section(mode);
210
    return ret;
211
}
212
 
213
void sleep(int usecs)
214
{
15 theseven 215
    if (usecs)
216
    {
217
        uint32_t mode = enter_critical_section();
218
        current_thread->state = THREAD_BLOCKED;
219
        current_thread->block_type = THREAD_BLOCK_SLEEP;
220
        current_thread->timeout = usecs;
221
        current_thread->blocked_since = USEC_TIMER;
222
        leave_critical_section(mode);
223
    }
14 theseven 224
    context_switch();
225
}
226
 
227
void scheduler_init(void)
228
{
429 theseven 229
    last_tick = USEC_TIMER;
34 theseven 230
    scheduler_frozen = false;
429 theseven 231
    head_thread = &idle_thread;
232
    current_thread = &idle_thread;
233
    memset(&idle_thread, 0, sizeof(idle_thread));
234
    idle_thread.state = THREAD_RUNNING;
235
    idle_thread.startusec = last_tick;
542 theseven 236
    idle_thread.type = CORE_THREAD;
429 theseven 237
    idle_thread.name = "idle thread";
238
    idle_thread.stack = (uint32_t*)-1;
14 theseven 239
    setup_tick();
240
}
241
 
54 theseven 242
bool scheduler_freeze(bool value)
34 theseven 243
{
54 theseven 244
    bool old = scheduler_frozen;
34 theseven 245
    scheduler_frozen = value;
54 theseven 246
    return old;
34 theseven 247
}
248
 
389 theseven 249
void scheduler_pause_accounting()
250
{
251
    uint32_t usec = USEC_TIMER;
252
    current_thread->cputime_total += usec - current_thread->startusec;
253
    current_thread->cputime_current += usec - current_thread->startusec;
254
}
255
 
256
void scheduler_resume_accounting()
257
{
258
    current_thread->startusec = USEC_TIMER;
259
}
260
 
429 theseven 261
// Core scheduling decision, invoked from the context-switch/tick path.
// Selects the next thread and stores it in current_thread. <thread> is a
// hint: if it is READY it is scheduled directly; otherwise the READY
// thread with the lowest cputime_current/priority score wins, defaulting
// to the idle thread.
void scheduler_switch(struct scheduler_thread* thread)
{
    struct scheduler_thread* t;
    uint32_t score, best;
    uint32_t usec = USEC_TIMER;
    // Demote the outgoing thread so it competes with the others again.
    if (current_thread->state == THREAD_RUNNING) current_thread->state = THREAD_READY;
    // Stack overflow check: thread_create() fills each stack with the
    // 0xaffebeaf canary; if the bottom word was overwritten, the thread
    // overflowed. (uint32_t*)-1 is the idle thread's sentinel, skipped.
    if ((int)current_thread->stack != -1 && *current_thread->stack != 0xaffebeaf)
    {
        // Suspend all user threads, mark the offender defunct and wake
        // the debugger thread so it can report the condition.
        for (t = head_thread; t; t = t->thread_next)
            if (t->type == USER_THREAD)
                t->state = THREAD_SUSPENDED;
        current_thread->state = THREAD_DEFUNCT;
        current_thread->block_type = THREAD_DEFUNCT_STKOV;
        wakeup_signal(&dbgwakeup);
    }

    // Once per SCHEDULER_TICK window: convert each thread's accumulated
    // CPU time into a 0..255 load figure and restart the window.
    if (usec - last_tick > SCHEDULER_TICK)
    {
        uint32_t diff = usec - last_tick;
        last_tick = usec;
        for (t = head_thread; t; t = t->thread_next)
        {
            t->cpuload = 255 * t->cputime_current / diff;
            t->cputime_current = 0;
        }
    }

    // While frozen, only the idle thread may run.
    if (scheduler_frozen) thread = &idle_thread;
    else
    {
        // Expire timed blocks (timeout == -1 never expires). Threads that
        // timed out waiting on a mutex must also leave its waiter queue.
        for (t = head_thread; t; t = t->thread_next)
            if (t->state == THREAD_BLOCKED && t->timeout != -1
             && TIME_AFTER(usec, t->blocked_since + t->timeout))
            {
                if (t->block_type == THREAD_BLOCK_MUTEX)
                    mutex_remove_from_queue((struct mutex*)t->blocked_by, t);
                t->state = THREAD_READY;
                t->block_type = THREAD_NOT_BLOCKED;
                t->blocked_by = NULL;
                t->timeout = 0;
            }

        // Honour the hint if it is runnable; otherwise pick the READY
        // thread with the lowest cputime_current/priority score.
        // (priority == 0 threads are never selected here.)
        if (thread && thread->state == THREAD_READY) current_thread = thread;
        else
        {
            thread = &idle_thread;
            best = 0xffffffff;
            for (t = head_thread; t; t = t->thread_next)
                if (t->state == THREAD_READY && t->priority)
                {
                    score = t->cputime_current / t->priority;
                    if (score < best)
                    {
                        best = score;
                        thread = t;
                    }
                }
        }
    }

    current_thread = thread;
    current_thread->state = THREAD_RUNNING;
}
324
 
429 theseven 325
struct scheduler_thread* thread_create(struct scheduler_thread* thread, const char* name,
326
                                       const void* code, void* stack, int stacksize,
327
                                       enum thread_type type, int priority, bool run)
14 theseven 328
{
429 theseven 329
    bool stack_alloced = false;
437 theseven 330
    bool thread_alloced = false;
429 theseven 331
    if (!stack)
332
    {
333
        stack = malloc(stacksize);
334
        stack_alloced = true;
335
    }
336
    if (!stack) return NULL;
337
    if (!thread)
338
    {
437 theseven 339
        thread = (struct scheduler_thread*)malloc(sizeof(struct scheduler_thread));
340
        thread_alloced = true;
341
    }
342
    if (!thread)
343
    {
429 theseven 344
        if (stack_alloced) free(stack);
345
        return NULL;
346
    }
437 theseven 347
    if (thread_alloced) reownalloc(thread, thread);
348
    if (stack_alloced) reownalloc(stack, thread);
429 theseven 349
 
14 theseven 350
    int i;
351
    for (i = 0; i < stacksize >> 2; i ++) ((uint32_t*)stack)[i] = 0xaffebeaf;
352
 
429 theseven 353
    memset(thread, 0, sizeof(struct scheduler_thread));
354
    thread->state = run ? THREAD_READY : THREAD_SUSPENDED;
355
    thread->type = type;
356
    thread->name = name;
357
    thread->priority = priority;
358
    thread->cpsr = 0x1f;
359
    thread->regs[15] = (uint32_t)code;
360
    thread->regs[14] = (uint32_t)thread_exit;
361
    thread->regs[13] = (uint32_t)stack + stacksize;
362
    thread->stack = stack;
363
 
14 theseven 364
    uint32_t mode = enter_critical_section();
489 theseven 365
    thread->thread_next = head_thread->thread_next;
366
    head_thread->thread_next = thread;
429 theseven 367
    leave_critical_section(mode);
14 theseven 368
 
429 theseven 369
    return thread;
14 theseven 370
}
371
 
429 theseven 372
// Suspend a thread (NULL means the calling thread). If the thread was
// blocked on a timed wait, the remaining timeout is saved by subtracting
// the time already waited, so thread_resume() can restart the wait; a
// thread blocked on a mutex also leaves its waiter queue.
// Returns THREAD_OK, or ALREADY_SUSPENDED.
int thread_suspend(struct scheduler_thread* thread)
{
    int ret = THREAD_OK;
    bool needsswitch = false;
    uint32_t mode = enter_critical_section();

    if (!thread) thread = current_thread;
    if (thread->state == THREAD_SUSPENDED) ret = ALREADY_SUSPENDED;
    if (ret == THREAD_OK)
    {
        // Suspending the running thread requires giving up the CPU below.
        if (thread->state == THREAD_RUNNING) needsswitch = true;
        else if (thread->state == THREAD_BLOCKED)
        {
            if (thread->block_type == THREAD_BLOCK_SLEEP)
            {
                // -1 means "forever" and is left untouched.
                if (thread->timeout != -1) thread->timeout -= USEC_TIMER - thread->blocked_since;
            }
            else if (thread->block_type == THREAD_BLOCK_MUTEX)
            {
                // Leave the mutex's waiter queue while suspended.
                mutex_remove_from_queue((struct mutex*)thread->blocked_by, thread);
                if (thread->timeout != -1) thread->timeout -= USEC_TIMER - thread->blocked_since;
            }
            else if (thread->block_type == THREAD_BLOCK_WAKEUP)
            {
                if (thread->timeout != -1) thread->timeout -= USEC_TIMER - thread->blocked_since;
            }
        }
        thread->state = THREAD_SUSPENDED;
    }

    leave_critical_section(mode);

    if (needsswitch) context_switch();

    return ret;
}
408
 
429 theseven 409
int thread_resume(struct scheduler_thread* thread)
14 theseven 410
{
411
    int ret = THREAD_OK;
412
    bool needsswitch = false;
413
    uint32_t mode = enter_critical_section();
414
 
429 theseven 415
    if (!thread) thread = current_thread;
416
    if (thread->state != THREAD_SUSPENDED) ret = ALREADY_RESUMED;
14 theseven 417
    if (ret == THREAD_OK)
418
    {
429 theseven 419
        if (thread->block_type == THREAD_BLOCK_SLEEP)
420
            thread->blocked_since = USEC_TIMER;
421
        else if (thread->block_type == THREAD_BLOCK_MUTEX)
14 theseven 422
        {
429 theseven 423
            mutex_add_to_queue((struct mutex*)thread->blocked_by, thread);
424
            thread->blocked_since = USEC_TIMER;
425
            thread->state = THREAD_BLOCKED;
14 theseven 426
        }
429 theseven 427
        else if (thread->block_type == THREAD_BLOCK_WAKEUP)
14 theseven 428
        {
429 theseven 429
            thread->blocked_since = USEC_TIMER;
430
            thread->state = THREAD_BLOCKED;
14 theseven 431
        }
429 theseven 432
        else thread->state = THREAD_READY;
14 theseven 433
    }
434
 
435
    leave_critical_section(mode);
436
    return ret;
437
}
438
 
453 theseven 439
void thread_set_name(struct scheduler_thread* thread, char* name)
440
{
441
    uint32_t mode = enter_critical_section();
442
    if (!thread) thread = current_thread;
443
    thread->name = name;
444
    leave_critical_section(mode);
445
}
446
 
447
void thread_set_priority(struct scheduler_thread* thread, int priority)
448
{
449
    uint32_t mode = enter_critical_section();
450
    if (!thread) thread = current_thread;
451
    thread->priority = priority;
452
    leave_critical_section(mode);
453
}
454
 
541 theseven 455
int thread_terminate_internal(struct scheduler_thread* thread, uint32_t mode)
14 theseven 456
{
429 theseven 457
    struct scheduler_thread* t;
14 theseven 458
    bool needsswitch = false;
459
 
429 theseven 460
    if (!thread) thread = current_thread;
461
    if (thread->state == THREAD_RUNNING) needsswitch = true;
462
    else if (thread->state == THREAD_BLOCKED)
14 theseven 463
    {
429 theseven 464
        if (thread->block_type == THREAD_BLOCK_MUTEX)
465
            mutex_remove_from_queue((struct mutex*)t->blocked_by, thread);
466
        else if (thread->block_type == THREAD_BLOCK_WAKEUP)
467
            ((struct wakeup*)thread->blocked_by)->waiter = NULL;
468
    }
469
    for (t = head_thread; t && t->thread_next != thread; t = t->thread_next);
470
    if (t) t->thread_next = thread->thread_next;
471
 
472
    leave_critical_section(mode);
473
 
465 theseven 474
    library_release_all_of_thread(thread);
85 theseven 475
#ifdef HAVE_STORAGE
429 theseven 476
    close_all_of_process(thread);
477
    closedir_all_of_process(thread);
85 theseven 478
#endif
130 theseven 479
#ifdef HAVE_BUTTON
429 theseven 480
    button_unregister_all_of_thread(thread);
130 theseven 481
#endif
429 theseven 482
    free_all_of_thread(thread);
14 theseven 483
 
484
    if (needsswitch) context_switch();
485
 
429 theseven 486
    return THREAD_OK;
14 theseven 487
}
488
 
541 theseven 489
// Public wrapper around thread_terminate_internal(): disables interrupts
// and passes the saved mode so the internal routine can restore it.
int thread_terminate(struct scheduler_thread* thread)
{
    return thread_terminate_internal(thread, enter_critical_section());
}
494
 
423 theseven 495
int thread_killlevel(enum thread_type type, bool killself)
496
{
429 theseven 497
    struct scheduler_thread* t;
423 theseven 498
    int count = 0;
541 theseven 499
    while (true)
500
    {
501
        bool found = false;
502
        uint32_t mode = enter_critical_section();
503
        for (t = head_thread; t; t = t->thread_next)
504
            if (t->type <= type && (killself || current_thread != t))
505
            {
542 theseven 506
                panicf(PANIC_FATAL, "Killing thread %08X: %s (%d)", t, t->name, t->type);
541 theseven 507
                thread_terminate_internal(t, mode);
508
                found = true;
509
                count++;
510
                break;
511
            }
542 theseven 512
        panicf(PANIC_FATAL, "Reached end of list");
541 theseven 513
        if (found) continue;
514
        leave_critical_section(mode);
515
        break;
516
    }
423 theseven 517
    return count;
518
}
519
 
429 theseven 520
enum thread_state thread_get_state(struct scheduler_thread* thread)
249 theseven 521
{
429 theseven 522
    return thread->state;
249 theseven 523
}
524
 
14 theseven 525
void thread_exit()
526
{
429 theseven 527
    thread_terminate(NULL);
14 theseven 528
}
71 theseven 529
 
530
int* __errno()
531
{
532
    return &current_thread->err_no;
533
}