Subversion Repositories freemyipod

Rev

Go to most recent revision | Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
14 theseven 1
//
2
//
3
//    Copyright 2010 TheSeven
4
//
5
//
427 farthen 6
//    This file is part of emCORE.
14 theseven 7
//
427 farthen 8
//    emCORE is free software: you can redistribute it and/or
14 theseven 9
//    modify it under the terms of the GNU General Public License as
10
//    published by the Free Software Foundation, either version 2 of the
11
//    License, or (at your option) any later version.
12
//
427 farthen 13
//    emCORE is distributed in the hope that it will be useful,
14 theseven 14
//    but WITHOUT ANY WARRANTY; without even the implied warranty of
15
//    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
16
//    See the GNU General Public License for more details.
17
//
18
//    You should have received a copy of the GNU General Public License along
427 farthen 19
//    with emCORE.  If not, see <http://www.gnu.org/licenses/>.
14 theseven 20
//
21
//
22
 
23
 
24
#include "global.h"
25
#include "thread.h"
26
#include "timer.h"
27
#include "panic.h"
28
#include "util.h"
429 theseven 29
#include "malloc.h"
465 theseven 30
#include "library.h"
85 theseven 31
#ifdef HAVE_STORAGE
58 theseven 32
#include "dir.h"
33
#include "file.h"
85 theseven 34
#endif
130 theseven 35
#ifdef HAVE_BUTTON
36
#include "button.h"
37
#endif
14 theseven 38
 
39
 
429 theseven 40
// Head of the linked list of all threads; scheduler_init() makes the idle
// thread the list head, and thread_create() inserts new threads behind it.
struct scheduler_thread* head_thread IBSS_ATTR;
// The thread currently holding the CPU.
struct scheduler_thread* current_thread IBSS_ATTR;
// Fallback thread that runs whenever no other thread is ready.
struct scheduler_thread idle_thread IBSS_ATTR;
// USEC_TIMER value at the last CPU-load accounting tick (see scheduler_switch).
uint32_t last_tick IBSS_ATTR;
// While true, scheduler_switch() always selects the idle thread.
bool scheduler_frozen IBSS_ATTR;
// Signalled when a thread is killed by stack overflow; presumably monitored
// by a debugger thread elsewhere — defined in another translation unit.
extern struct wakeup dbgwakeup;
14 theseven 46
 
47
 
48
void mutex_init(struct mutex* obj)
49
{
50
    memset(obj, 0, sizeof(struct mutex));
51
}
52
 
53
void mutex_add_to_queue(struct mutex* obj, struct scheduler_thread* thread)
54
{
55
    struct scheduler_thread* t;
15 theseven 56
    if (!obj->waiters || obj->waiters->priority <= thread->priority)
14 theseven 57
    {
58
        thread->queue_next = obj->waiters;
59
        obj->waiters = thread;
60
    }
61
    else
62
    {
63
        t = obj->waiters;
64
        while (t->queue_next && t->queue_next->priority > thread->priority)
65
            t = t->queue_next;
66
        thread->queue_next = t->queue_next;
67
        t->queue_next = thread;
68
    }
69
}
70
 
71
void mutex_remove_from_queue(struct mutex* obj, struct scheduler_thread* thread)
72
{
73
    struct scheduler_thread* t;
74
    if (!obj->waiters) return;
75
    if (obj->waiters == thread) obj->waiters = thread->queue_next;
76
    else
77
    {
78
        t = obj->waiters;
79
        while (t->queue_next)
80
        {
81
            if (t->queue_next == thread) t->queue_next = thread->queue_next;
82
            t = t->queue_next;
83
        }
84
    }
85
}
86
 
87
// Acquire a mutex, waiting for at most <timeout> microseconds.
// timeout == 0 is a pure try-lock; timeout == -1 presumably means wait
// forever (scheduler_switch never expires a -1 timeout) — TODO confirm.
// Recursive locking by the current owner is allowed; obj->count tracks depth.
// Returns THREAD_OK on success, THREAD_TIMEOUT otherwise.
int mutex_lock(struct mutex* obj, int timeout)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (!obj->count)
    {
        // Uncontended: take ownership immediately.
        obj->count = 1;
        obj->owner = current_thread;
    }
    else if (obj->owner == current_thread) obj->count++;  // recursive lock
    else
    {
        if (timeout)
        {
            // Contended: block on the mutex. mutex_unlock() hands ownership
            // directly to the first waiter, so after yield() we can tell a
            // successful acquisition from a timeout by checking obj->owner.
            current_thread->state = THREAD_BLOCKED;
            current_thread->block_type = THREAD_BLOCK_MUTEX;
            current_thread->blocked_by = obj;
            current_thread->timeout = timeout;
            current_thread->blocked_since = USEC_TIMER;
            mutex_add_to_queue(obj, current_thread);
            leave_critical_section(mode);
            yield();
            if (obj->owner != current_thread) return THREAD_TIMEOUT;
            return THREAD_OK;
        }
        else ret = THREAD_TIMEOUT;  // try-lock on a held mutex fails
    }

    leave_critical_section(mode);
    return ret;
}
119
 
120
// Release a mutex held by the calling thread. For recursively locked mutexes
// the count is decremented and the remaining depth returned; on the final
// unlock, ownership is handed directly to the highest-priority waiter, if any.
// Panics (killing the thread) when unlocking an unlocked mutex or one owned
// by another thread.
int mutex_unlock(struct mutex* obj)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (!obj->count)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Trying to unlock non-owned mutex! (%08X)", obj);
    }

    if (obj->owner != current_thread)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Trying to unlock mutex owned by different thread! (%08X)", obj);
    }

    // Still held recursively: report the remaining lock depth.
    if (--(obj->count)) ret = obj->count;
    else if (obj->waiters)
    {
        // Hand the mutex straight to the first (highest-priority) waiter and
        // make it runnable; it never observes the mutex as unlocked.
        obj->count = 1;
        obj->owner = obj->waiters;
        obj->waiters->state = THREAD_READY;
        obj->waiters->block_type = THREAD_NOT_BLOCKED;
        obj->waiters->blocked_by = NULL;
        obj->waiters->timeout = 0;
        obj->waiters = obj->waiters->queue_next;
    }

    leave_critical_section(mode);
    return ret;
}
152
 
153
void wakeup_init(struct wakeup* obj)
154
{
155
    memset(obj, 0, sizeof(struct wakeup));
156
}
157
 
158
// Wait for <obj> to be signalled, for at most <timeout> microseconds.
// timeout == 0 is a pure poll that only consumes an already-pending signal;
// timeout == -1 presumably means wait forever (scheduler_switch never expires
// a -1 timeout) — TODO confirm. Only one thread may wait on a wakeup at a
// time; a second concurrent waiter panics.
// Returns THREAD_OK if signalled, THREAD_TIMEOUT otherwise.
int wakeup_wait(struct wakeup* obj, int timeout)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (obj->waiter)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Multiple threads waiting single wakeup! (%08X)", obj);
    }

    // Fast path: consume a signal that arrived before we started waiting.
    if (obj->signalled) obj->signalled = false;
    else
    {
        if (timeout)
        {
            // Block until wakeup_signal() readies us or the timeout expires.
            current_thread->state = THREAD_BLOCKED;
            current_thread->block_type = THREAD_BLOCK_WAKEUP;
            current_thread->blocked_by = obj;
            current_thread->timeout = timeout;
            current_thread->blocked_since = USEC_TIMER;
            obj->waiter = current_thread;
            leave_critical_section(mode);
            yield();
            obj->waiter = NULL;
            // A timeout wakeup leaves signalled == false; a real signal sets it.
            if (!obj->signalled) return THREAD_TIMEOUT;
            obj->signalled = false;
            return THREAD_OK;
        }
        else ret = THREAD_TIMEOUT;  // poll found no pending signal
    }

    leave_critical_section(mode);
    return ret;
}
193
 
194
// Signal a wakeup object. If a thread is waiting it is made ready and
// THREAD_FOUND is returned; otherwise the signal is latched for the next
// wakeup_wait() call and THREAD_OK is returned.
int wakeup_signal(struct wakeup* obj)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    // Latch the signal even when a waiter is present: wakeup_wait uses
    // obj->signalled after yield() to distinguish signal from timeout.
    obj->signalled = true;
    if (obj->waiter)
    {
        obj->waiter->state = THREAD_READY;
        obj->waiter->block_type = THREAD_NOT_BLOCKED;
        obj->waiter->blocked_by = NULL;
        obj->waiter->timeout = 0;
        ret = THREAD_FOUND;
        // If the CPU is idle, hand it to the woken thread immediately.
        if (current_thread == &idle_thread)
            scheduler_switch(obj->waiter, NULL);
    }

    leave_critical_section(mode);
    return ret;
}
214
 
215
void sleep(int usecs)
216
{
15 theseven 217
    if (usecs)
218
    {
219
        uint32_t mode = enter_critical_section();
220
        current_thread->state = THREAD_BLOCKED;
221
        current_thread->block_type = THREAD_BLOCK_SLEEP;
222
        current_thread->timeout = usecs;
223
        current_thread->blocked_since = USEC_TIMER;
224
        leave_critical_section(mode);
225
    }
593 theseven 226
    yield();
14 theseven 227
}
228
 
229
void scheduler_init(void)
230
{
429 theseven 231
    last_tick = USEC_TIMER;
34 theseven 232
    scheduler_frozen = false;
429 theseven 233
    head_thread = &idle_thread;
234
    current_thread = &idle_thread;
235
    memset(&idle_thread, 0, sizeof(idle_thread));
236
    idle_thread.state = THREAD_RUNNING;
237
    idle_thread.startusec = last_tick;
542 theseven 238
    idle_thread.type = CORE_THREAD;
429 theseven 239
    idle_thread.name = "idle thread";
240
    idle_thread.stack = (uint32_t*)-1;
14 theseven 241
    setup_tick();
242
}
243
 
54 theseven 244
bool scheduler_freeze(bool value)
34 theseven 245
{
54 theseven 246
    bool old = scheduler_frozen;
34 theseven 247
    scheduler_frozen = value;
54 theseven 248
    return old;
34 theseven 249
}
250
 
389 theseven 251
void scheduler_pause_accounting()
252
{
253
    uint32_t usec = USEC_TIMER;
254
    current_thread->cputime_total += usec - current_thread->startusec;
255
    current_thread->cputime_current += usec - current_thread->startusec;
256
}
257
 
258
void scheduler_resume_accounting()
259
{
260
    current_thread->startusec = USEC_TIMER;
261
}
262
 
595 theseven 263
// Core scheduling decision: pick the next thread to run.
// <thread> is a hint (may be NULL) and is honored only if it is READY;
// <block> is a thread to deprioritize this round (scored worst).
// NOTE(review): no critical section is taken here — presumably callers
// (yield / interrupt path) already run with interrupts disabled; confirm.
void scheduler_switch(struct scheduler_thread* thread, struct scheduler_thread* block)
{
    struct scheduler_thread* t;
    uint32_t score, best;
    uint32_t usec = USEC_TIMER;
    if (current_thread->state == THREAD_RUNNING) current_thread->state = THREAD_READY;
    // Stack overflow check: thread_create fills stacks with the 0xaffebeaf
    // canary; (uint32_t*)-1 marks the idle thread, which has no guarded stack.
    if ((int)current_thread->stack != -1 && *current_thread->stack != 0xaffebeaf)
    {
        // Overflow detected: suspend all user threads, mark the offender
        // defunct and alert the debugger wakeup.
        for (t = head_thread; t; t = t->thread_next)
            if (t->type == USER_THREAD)
                t->state = THREAD_SUSPENDED;
        current_thread->state = THREAD_DEFUNCT;
        current_thread->block_type = THREAD_DEFUNCT_STKOV;
        wakeup_signal(&dbgwakeup);
    }
    // Once per SCHEDULER_TICK, recompute each thread's CPU load on a 0..255
    // scale and reset the per-tick time counters.
    if (usec - last_tick > SCHEDULER_TICK)
    {
        uint32_t diff = usec - last_tick;
        last_tick = usec;
        for (t = head_thread; t; t = t->thread_next)
        {
            t->cpuload = 255 * t->cputime_current / diff;
            t->cputime_current = 0;
        }
    }

    if (scheduler_frozen) thread = &idle_thread;
    else
    {
        // Wake every blocked thread whose timeout expired (-1 == infinite);
        // mutex waiters must also leave their waiter queue.
        for (t = head_thread; t; t = t->thread_next)
            if (t->state == THREAD_BLOCKED && t->timeout != -1
             && TIME_AFTER(usec, t->blocked_since + t->timeout))
            {
                if (t->block_type == THREAD_BLOCK_MUTEX)
                    mutex_remove_from_queue((struct mutex*)t->blocked_by, t);
                t->state = THREAD_READY;
                t->block_type = THREAD_NOT_BLOCKED;
                t->blocked_by = NULL;
                t->timeout = 0;
            }

        // No usable hint: pick the READY thread with the lowest
        // cputime/priority score. Priority 0 threads are never scheduled;
        // the idle thread is the fallback when nothing is ready.
        if (!thread || thread->state != THREAD_READY)
        {
            thread = &idle_thread;
            best = 0xffffffff;
            for (t = head_thread; t; t = t->thread_next)
                if (t->state == THREAD_READY && t->priority)
                {
                    // The blocked-out thread gets the worst possible score so
                    // it only runs when it is the sole candidate.
                    if (t == block) score = 0xfffffffe;
                    else score = t->cputime_current / t->priority;
                    if (score < best)
                    {
                        best = score;
                        thread = t;
                    }
                }
        }
    }

    current_thread = thread;
    current_thread->state = THREAD_RUNNING;
}
325
 
429 theseven 326
// Create (and optionally start) a new thread.
// <thread> and/or <stack> may be NULL, in which case they are heap-allocated
// and ownership is transferred to the new thread via reownalloc(), so they
// are released automatically when the thread dies (free_all_of_thread).
// <stacksize> is in bytes; the stack is pre-filled with the 0xaffebeaf canary
// that scheduler_switch() checks for overflow.
// Returns the thread control block, or NULL on allocation failure.
struct scheduler_thread* thread_create(struct scheduler_thread* thread, const char* name,
                                       const void* code, void* stack, int stacksize,
                                       enum thread_type type, int priority, bool run)
{
    bool stack_alloced = false;
    bool thread_alloced = false;
    if (!stack)
    {
        stack = malloc(stacksize);
        stack_alloced = true;
    }
    if (!stack) return NULL;
    if (!thread)
    {
        thread = (struct scheduler_thread*)malloc(sizeof(struct scheduler_thread));
        thread_alloced = true;
    }
    if (!thread)
    {
        // Don't leak a stack we allocated ourselves.
        if (stack_alloced) free(stack);
        return NULL;
    }
    // Transfer ownership of our allocations to the new thread.
    if (thread_alloced) reownalloc(thread, thread);
    if (stack_alloced) reownalloc(stack, thread);

    // Paint the stack with the overflow canary pattern (word granularity).
    int i;
    for (i = 0; i < stacksize >> 2; i ++) ((uint32_t*)stack)[i] = 0xaffebeaf;

    memset(thread, 0, sizeof(struct scheduler_thread));
    thread->state = run ? THREAD_READY : THREAD_SUSPENDED;
    thread->type = type;
    thread->name = name;
    thread->priority = priority;
    thread->cpsr = 0x1f;  // 0b11111: presumably ARM system mode — TODO confirm target
    thread->regs[15] = (uint32_t)code;         // PC = entry point
    thread->regs[14] = (uint32_t)thread_exit;  // LR: returning from entry kills the thread
    thread->regs[13] = (uint32_t)stack + stacksize;  // SP = top of stack
    thread->stack = stack;

    uint32_t mode = enter_critical_section();
    // Link in right behind the list head (the idle thread).
    thread->thread_next = head_thread->thread_next;
    head_thread->thread_next = thread;
    leave_critical_section(mode);

    return thread;
}
372
 
429 theseven 373
int thread_suspend(struct scheduler_thread* thread)
14 theseven 374
{
375
    int ret = THREAD_OK;
376
    bool needsswitch = false;
377
    uint32_t mode = enter_critical_section();
378
 
429 theseven 379
    if (!thread) thread = current_thread;
380
    if (thread->state == THREAD_SUSPENDED) ret = ALREADY_SUSPENDED;
14 theseven 381
    if (ret == THREAD_OK)
382
    {
429 theseven 383
        if (thread->state == THREAD_RUNNING) needsswitch = true;
384
        else if (thread->state == THREAD_BLOCKED)
14 theseven 385
        {
429 theseven 386
            if (thread->block_type == THREAD_BLOCK_SLEEP)
15 theseven 387
            {
429 theseven 388
                if (thread->timeout != -1) thread->timeout -= USEC_TIMER - thread->blocked_since;
15 theseven 389
            }
429 theseven 390
            else if (thread->block_type == THREAD_BLOCK_MUTEX)
14 theseven 391
            {
429 theseven 392
                mutex_remove_from_queue((struct mutex*)thread->blocked_by, thread);
393
                if (thread->timeout != -1) thread->timeout -= USEC_TIMER - thread->blocked_since;
14 theseven 394
            }
429 theseven 395
            else if (thread->block_type == THREAD_BLOCK_WAKEUP)
15 theseven 396
            {
429 theseven 397
                if (thread->timeout != -1) thread->timeout -= USEC_TIMER - thread->blocked_since;
15 theseven 398
            }
14 theseven 399
        }
429 theseven 400
        thread->state = THREAD_SUSPENDED;
14 theseven 401
    }
402
 
403
    leave_critical_section(mode);
404
 
593 theseven 405
    if (needsswitch) yield();
14 theseven 406
 
407
    return ret;
408
}
409
 
429 theseven 410
int thread_resume(struct scheduler_thread* thread)
14 theseven 411
{
412
    int ret = THREAD_OK;
413
    bool needsswitch = false;
414
    uint32_t mode = enter_critical_section();
415
 
429 theseven 416
    if (!thread) thread = current_thread;
417
    if (thread->state != THREAD_SUSPENDED) ret = ALREADY_RESUMED;
14 theseven 418
    if (ret == THREAD_OK)
419
    {
429 theseven 420
        if (thread->block_type == THREAD_BLOCK_SLEEP)
421
            thread->blocked_since = USEC_TIMER;
422
        else if (thread->block_type == THREAD_BLOCK_MUTEX)
14 theseven 423
        {
429 theseven 424
            mutex_add_to_queue((struct mutex*)thread->blocked_by, thread);
425
            thread->blocked_since = USEC_TIMER;
426
            thread->state = THREAD_BLOCKED;
14 theseven 427
        }
429 theseven 428
        else if (thread->block_type == THREAD_BLOCK_WAKEUP)
14 theseven 429
        {
429 theseven 430
            thread->blocked_since = USEC_TIMER;
431
            thread->state = THREAD_BLOCKED;
14 theseven 432
        }
429 theseven 433
        else thread->state = THREAD_READY;
14 theseven 434
    }
435
 
436
    leave_critical_section(mode);
437
    return ret;
438
}
439
 
453 theseven 440
void thread_set_name(struct scheduler_thread* thread, char* name)
441
{
442
    uint32_t mode = enter_critical_section();
443
    if (!thread) thread = current_thread;
444
    thread->name = name;
445
    leave_critical_section(mode);
446
}
447
 
448
void thread_set_priority(struct scheduler_thread* thread, int priority)
449
{
450
    uint32_t mode = enter_critical_section();
451
    if (!thread) thread = current_thread;
452
    thread->priority = priority;
453
    leave_critical_section(mode);
454
}
455
 
541 theseven 456
int thread_terminate_internal(struct scheduler_thread* thread, uint32_t mode)
14 theseven 457
{
429 theseven 458
    struct scheduler_thread* t;
14 theseven 459
    bool needsswitch = false;
460
 
429 theseven 461
    if (!thread) thread = current_thread;
462
    if (thread->state == THREAD_RUNNING) needsswitch = true;
463
    else if (thread->state == THREAD_BLOCKED)
14 theseven 464
    {
429 theseven 465
        if (thread->block_type == THREAD_BLOCK_MUTEX)
466
            mutex_remove_from_queue((struct mutex*)t->blocked_by, thread);
467
        else if (thread->block_type == THREAD_BLOCK_WAKEUP)
468
            ((struct wakeup*)thread->blocked_by)->waiter = NULL;
469
    }
470
    for (t = head_thread; t && t->thread_next != thread; t = t->thread_next);
471
    if (t) t->thread_next = thread->thread_next;
472
 
473
    leave_critical_section(mode);
474
 
465 theseven 475
    library_release_all_of_thread(thread);
85 theseven 476
#ifdef HAVE_STORAGE
429 theseven 477
    close_all_of_process(thread);
478
    closedir_all_of_process(thread);
85 theseven 479
#endif
130 theseven 480
#ifdef HAVE_BUTTON
429 theseven 481
    button_unregister_all_of_thread(thread);
130 theseven 482
#endif
429 theseven 483
    free_all_of_thread(thread);
14 theseven 484
 
593 theseven 485
    if (needsswitch) yield();
14 theseven 486
 
429 theseven 487
    return THREAD_OK;
14 theseven 488
}
489
 
541 theseven 490
// Terminate a thread (NULL == the calling thread). Enters a critical section
// and delegates to thread_terminate_internal(), which leaves it again.
int thread_terminate(struct scheduler_thread* thread)
{
    return thread_terminate_internal(thread, enter_critical_section());
}
495
 
423 theseven 496
// Terminate every thread whose type ranks at or below <type> (ordering given
// by enum thread_type). The calling thread is spared unless <killself> is set.
// Returns the number of threads killed.
// The list is re-scanned from the head after every kill because
// thread_terminate_internal() leaves the critical section and frees list
// nodes, invalidating any iterator held across the call.
int thread_killlevel(enum thread_type type, bool killself)
{
    struct scheduler_thread* t;
    int count = 0;
    while (true)
    {
        bool found = false;
        uint32_t mode = enter_critical_section();
        for (t = head_thread; t; t = t->thread_next)
            if (t->type <= type && (killself || current_thread != t))
            {
                // Leaves the critical section internally.
                thread_terminate_internal(t, mode);
                found = true;
                count++;
                break;
            }
        if (found) continue;
        leave_critical_section(mode);
        break;
    }
    return count;
}
518
 
429 theseven 519
enum thread_state thread_get_state(struct scheduler_thread* thread)
249 theseven 520
{
429 theseven 521
    return thread->state;
249 theseven 522
}
523
 
14 theseven 524
void thread_exit()
525
{
429 theseven 526
    thread_terminate(NULL);
14 theseven 527
}
71 theseven 528
 
529
int* __errno()
530
{
531
    return &current_thread->err_no;
532
}