Subversion Repositories freemyipod

Rev

Go to most recent revision | Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
14 theseven 1
//
2
//
3
//    Copyright 2010 TheSeven
4
//
5
//
427 farthen 6
//    This file is part of emCORE.
14 theseven 7
//
427 farthen 8
//    emCORE is free software: you can redistribute it and/or
14 theseven 9
//    modify it under the terms of the GNU General Public License as
10
//    published by the Free Software Foundation, either version 2 of the
11
//    License, or (at your option) any later version.
12
//
427 farthen 13
//    emCORE is distributed in the hope that it will be useful,
14 theseven 14
//    but WITHOUT ANY WARRANTY; without even the implied warranty of
15
//    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
16
//    See the GNU General Public License for more details.
17
//
18
//    You should have received a copy of the GNU General Public License along
427 farthen 19
//    with emCORE.  If not, see <http://www.gnu.org/licenses/>.
14 theseven 20
//
21
//
22
 
23
 
24
#include "global.h"
25
#include "thread.h"
26
#include "timer.h"
27
#include "panic.h"
28
#include "util.h"
429 theseven 29
#include "malloc.h"
465 theseven 30
#include "library.h"
85 theseven 31
#ifdef HAVE_STORAGE
58 theseven 32
#include "dir.h"
33
#include "file.h"
85 theseven 34
#endif
130 theseven 35
#ifdef HAVE_BUTTON
36
#include "button.h"
37
#endif
14 theseven 38
 
39
 
429 theseven 40
// Global scheduler state. IBSS_ATTR presumably places these in a special
// uninitialized-data section (IRAM bss?) -- confirm against linker script.
// All of them are set up by scheduler_init().
struct scheduler_thread* head_thread IBSS_ATTR;     // head of the linked list of all threads
struct scheduler_thread* current_thread IBSS_ATTR;  // thread currently executing
struct scheduler_thread idle_thread IBSS_ATTR;      // fallback thread when nothing is ready
uint32_t last_tick IBSS_ATTR;                       // USEC_TIMER value of the last CPU-load accounting tick
bool scheduler_frozen IBSS_ATTR;                    // true: scheduler_switch() forces the idle thread
extern struct wakeup dbgwakeup;                     // signalled by scheduler_switch() on stack overflow
14 theseven 46
 
47
 
48
void mutex_init(struct mutex* obj)
49
{
50
    memset(obj, 0, sizeof(struct mutex));
51
}
52
 
53
void mutex_add_to_queue(struct mutex* obj, struct scheduler_thread* thread)
54
{
55
    struct scheduler_thread* t;
15 theseven 56
    if (!obj->waiters || obj->waiters->priority <= thread->priority)
14 theseven 57
    {
58
        thread->queue_next = obj->waiters;
59
        obj->waiters = thread;
60
    }
61
    else
62
    {
63
        t = obj->waiters;
64
        while (t->queue_next && t->queue_next->priority > thread->priority)
65
            t = t->queue_next;
66
        thread->queue_next = t->queue_next;
67
        t->queue_next = thread;
68
    }
69
}
70
 
71
void mutex_remove_from_queue(struct mutex* obj, struct scheduler_thread* thread)
72
{
73
    struct scheduler_thread* t;
74
    if (!obj->waiters) return;
75
    if (obj->waiters == thread) obj->waiters = thread->queue_next;
76
    else
77
    {
78
        t = obj->waiters;
79
        while (t->queue_next)
80
        {
81
            if (t->queue_next == thread) t->queue_next = thread->queue_next;
82
            t = t->queue_next;
83
        }
84
    }
85
}
86
 
87
// Acquire a mutex (recursive: the owner may lock it again).
// timeout: 0 = try-lock (fail immediately); -1 = wait forever (the
// scheduler skips timeout expiry for -1); otherwise a delay measured on
// the USEC_TIMER clock.
// Returns THREAD_OK on success, THREAD_TIMEOUT otherwise.
int mutex_lock(struct mutex* obj, int timeout)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (!obj->count)
    {
        // Uncontended: take ownership and link the mutex into the calling
        // thread's owned-mutex list (used for cleanup on termination).
        obj->count = 1;
        obj->owner = current_thread;
        obj->owned_next = current_thread->owned_mutexes;
        current_thread->owned_mutexes = obj;
    }
    else if (obj->owner == current_thread) obj->count++;  // recursive lock
    else
    {
        if (timeout)
        {
            // Contended: block on the mutex's priority-sorted waiter queue.
            current_thread->state = THREAD_BLOCKED;
            current_thread->block_type = THREAD_BLOCK_MUTEX;
            current_thread->blocked_by = obj;
            current_thread->timeout = timeout;
            current_thread->blocked_since = USEC_TIMER;
            mutex_add_to_queue(obj, current_thread);
            leave_critical_section(mode);
            yield();
            // We wake either because mutex_unlock_internal() handed us the
            // mutex, or because the scheduler expired our timeout.
            if (obj->owner != current_thread) return THREAD_TIMEOUT;
            return THREAD_OK;
        }
        else ret = THREAD_TIMEOUT;  // try-lock failed
    }

    leave_critical_section(mode);
    return ret;
}
121
 
691 theseven 122
// Fully release a mutex on behalf of its current owner, without sanity
// checks. Unlinks it from the owner's owned-mutex list, then either hands
// ownership directly to the first (highest-priority) waiter -- waking it --
// or marks the mutex free. Caller must hold a critical section.
void mutex_unlock_internal(struct mutex* obj)
{
    struct mutex* o;
    if (!obj->owner->owned_mutexes) return;  // not tracked: nothing to unlink
    // Remove obj from the owner's singly-linked owned_mutexes list.
    if (obj->owner->owned_mutexes == obj) obj->owner->owned_mutexes = obj->owned_next;
    else
    {
        o = obj->owner->owned_mutexes;
        while (o->owned_next)
        {
            if (o->owned_next == obj) o->owned_next = obj->owned_next;
            o = o->owned_next;
        }
    }
    if (obj->waiters)
    {
        // Ownership handoff: the head waiter becomes the new owner with a
        // lock count of 1 and is made runnable; the mutex is pushed onto
        // the new owner's owned-mutex list.
        obj->count = 1;
        obj->owner = obj->waiters;
        obj->waiters->state = THREAD_READY;
        obj->waiters->block_type = THREAD_NOT_BLOCKED;
        obj->waiters->blocked_by = NULL;
        obj->waiters->timeout = 0;
        obj->waiters = obj->waiters->queue_next;
        obj->owned_next = obj->owner->owned_mutexes;
        obj->owner->owned_mutexes = obj;
    }
    else obj->count = 0;  // no waiters: mutex becomes free
}
150
 
14 theseven 151
// Release one lock level of a mutex held by the calling thread.
// Panics (killing the thread) if the mutex is unlocked or owned by a
// different thread. Returns the remaining recursion count, i.e. THREAD_OK
// (0) once the mutex has actually been released.
int mutex_unlock(struct mutex* obj)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (!obj->count)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Trying to unlock non-owned mutex! (%08X)", obj);
    }

    if (obj->owner != current_thread)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Trying to unlock mutex owned by different thread! (%08X)", obj);
    }
    // Only the outermost unlock of a recursive lock actually releases it.
    if (--(obj->count)) ret = obj->count;
    else mutex_unlock_internal(obj);

    leave_critical_section(mode);
    return ret;
}
173
 
174
void wakeup_init(struct wakeup* obj)
175
{
176
    memset(obj, 0, sizeof(struct wakeup));
177
}
178
 
179
// Wait for a wakeup object to be signalled. At most one thread may wait on
// a given wakeup at a time (panics otherwise). A signal that arrived before
// the wait is consumed immediately. timeout: 0 = poll, -1 = wait forever,
// otherwise measured on the USEC_TIMER clock.
// Returns THREAD_OK if signalled, THREAD_TIMEOUT otherwise.
int wakeup_wait(struct wakeup* obj, int timeout)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (obj->waiter)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Multiple threads waiting for single wakeup! (%08X)", obj);
    }

    if (obj->signalled) obj->signalled = false;  // consume pending signal
    else
    {
        if (timeout)
        {
            // Block until wakeup_signal() makes us ready or the scheduler
            // expires the timeout.
            current_thread->state = THREAD_BLOCKED;
            current_thread->block_type = THREAD_BLOCK_WAKEUP;
            current_thread->blocked_by = obj;
            current_thread->timeout = timeout;
            current_thread->blocked_since = USEC_TIMER;
            obj->waiter = current_thread;
            leave_critical_section(mode);
            yield();
            // NOTE(review): waiter/signalled are accessed here outside the
            // critical section -- presumably safe against concurrent
            // wakeup_signal() calls, but worth confirming.
            obj->waiter = NULL;
            if (!obj->signalled) return THREAD_TIMEOUT;
            obj->signalled = false;
            return THREAD_OK;
        }
        else ret = THREAD_TIMEOUT;  // poll: nothing pending
    }

    leave_critical_section(mode);
    return ret;
}
214
 
215
// Signal a wakeup object. The signal is latched (obj->signalled) so it is
// not lost if nobody is waiting yet. If a waiter exists it is made ready;
// when called from the idle thread's context, control is handed to the
// woken thread immediately via scheduler_switch().
// Returns THREAD_FOUND if a waiter was woken, THREAD_OK otherwise.
int wakeup_signal(struct wakeup* obj)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    obj->signalled = true;
    if (obj->waiter)
    {
        obj->waiter->state = THREAD_READY;
        obj->waiter->block_type = THREAD_NOT_BLOCKED;
        obj->waiter->blocked_by = NULL;
        obj->waiter->timeout = 0;
        ret = THREAD_FOUND;
        if (current_thread == &idle_thread)
            scheduler_switch(obj->waiter, NULL);
    }

    leave_critical_section(mode);
    return ret;
}
235
 
236
void sleep(int usecs)
237
{
15 theseven 238
    if (usecs)
239
    {
240
        uint32_t mode = enter_critical_section();
241
        current_thread->state = THREAD_BLOCKED;
242
        current_thread->block_type = THREAD_BLOCK_SLEEP;
243
        current_thread->timeout = usecs;
244
        current_thread->blocked_since = USEC_TIMER;
245
        leave_critical_section(mode);
246
    }
593 theseven 247
    yield();
14 theseven 248
}
249
 
250
void scheduler_init(void)
251
{
429 theseven 252
    last_tick = USEC_TIMER;
34 theseven 253
    scheduler_frozen = false;
429 theseven 254
    head_thread = &idle_thread;
255
    current_thread = &idle_thread;
256
    memset(&idle_thread, 0, sizeof(idle_thread));
257
    idle_thread.state = THREAD_RUNNING;
258
    idle_thread.startusec = last_tick;
542 theseven 259
    idle_thread.type = CORE_THREAD;
429 theseven 260
    idle_thread.name = "idle thread";
261
    idle_thread.stack = (uint32_t*)-1;
14 theseven 262
}
263
 
54 theseven 264
// Freeze (true) or thaw (false) the scheduler; while frozen only the idle
// thread is scheduled. Returns the previous freeze state so callers can
// restore it.
bool scheduler_freeze(bool value)
{
    bool previous = scheduler_frozen;
    scheduler_frozen = value;
    return previous;
}
270
 
389 theseven 271
void scheduler_pause_accounting()
272
{
273
    uint32_t usec = USEC_TIMER;
274
    current_thread->cputime_total += usec - current_thread->startusec;
275
    current_thread->cputime_current += usec - current_thread->startusec;
276
}
277
 
278
void scheduler_resume_accounting()
279
{
280
    current_thread->startusec = USEC_TIMER;
281
}
282
 
595 theseven 283
// Select and install the next thread to run.
// 'thread' is a hint: if it is ready (and the scheduler is not frozen) it
// is dispatched directly. 'block' is a thread to deprioritize -- it only
// runs if no other ready thread exists.
void scheduler_switch(struct scheduler_thread* thread, struct scheduler_thread* block)
{
    struct scheduler_thread* t;
    uint32_t score, best;
    uint32_t usec = USEC_TIMER;
    if (current_thread->state == THREAD_RUNNING) current_thread->state = THREAD_READY;
    // Stack overflow check: thread_create() fills the stack with the
    // 0xaffebeaf canary; a clobbered bottom word means overflow. The idle
    // thread's stack sentinel (uint32_t*)-1 is exempt.
    if ((int)current_thread->stack != -1 && *current_thread->stack != 0xaffebeaf)
    {
        // Freeze all user threads, mark the offender defunct, and notify
        // the debugger thread via dbgwakeup.
        for (t = head_thread; t; t = t->thread_next)
            if (t->type == USER_THREAD)
                t->state = THREAD_SUSPENDED;
        current_thread->state = THREAD_DEFUNCT;
        current_thread->block_type = THREAD_DEFUNCT_STKOV;
        wakeup_signal(&dbgwakeup);
    }

    timer_kill_wakeup();

    // Periodic CPU-load accounting: every SCHEDULER_TICK, convert each
    // thread's accumulated cputime into a 0..255 load figure and reset.
    if (usec - last_tick > SCHEDULER_TICK)
    {
        uint32_t diff = usec - last_tick;
        last_tick = usec;
        for (t = head_thread; t; t = t->thread_next)
        {
            t->cpuload = 255 * t->cputime_current / diff;
            t->cputime_current = 0;
        }
    }

    uint32_t next_unblock = 0xffffffff;
    if (scheduler_frozen) thread = &idle_thread;
    else
    {
        // Expire timed-out blocked threads (timeout -1 = infinite) and
        // track how long until the earliest remaining timeout fires.
        for (t = head_thread; t; t = t->thread_next)
        {
            if (t->state == THREAD_BLOCKED && t->timeout != -1
             && TIME_AFTER(usec, t->blocked_since + t->timeout))
            {
                if (t->block_type == THREAD_BLOCK_MUTEX)
                    mutex_remove_from_queue((struct mutex*)t->blocked_by, t);
                t->state = THREAD_READY;
                t->block_type = THREAD_NOT_BLOCKED;
                t->blocked_by = NULL;
                t->timeout = 0;
            }
            else if (t->state == THREAD_BLOCKED && t->timeout != -1)
            {
                uint32_t left = t->blocked_since + t->timeout - usec;
                if (left < next_unblock) next_unblock = left;
            }
        }

        // No usable hint: pick the ready thread with the lowest
        // cputime/priority score (priority 0 threads never run).
        if (!thread || thread->state != THREAD_READY)
        {
            thread = &idle_thread;
            best = 0xffffffff;
            for (t = head_thread; t; t = t->thread_next)
                if (t->state == THREAD_READY && t->priority)
                {
                    if (t == block) score = 0xfffffffe;  // last resort
                    else score = t->cputime_current / t->priority;
                    if (score < best)
                    {
                        best = score;
                        thread = t;
                    }
                }
        }

        // Idle: wake exactly when the next timeout expires; otherwise run
        // the chosen thread for one system tick.
        if (thread == &idle_thread) timer_schedule_wakeup(next_unblock);
        else timer_schedule_wakeup(SYSTEM_TICK);
    }

    current_thread = thread;
    current_thread->state = THREAD_RUNNING;
}
359
 
429 theseven 360
// Create a new thread.
// thread/stack may be NULL, in which case they are heap-allocated and
// tagged as owned by the new thread (so thread termination frees them).
// 'code' is the entry point, 'priority' the scheduling weight (0 = never
// scheduled), 'run' selects READY vs SUSPENDED initial state.
// Returns the thread handle, or NULL on allocation failure.
struct scheduler_thread* thread_create(struct scheduler_thread* thread, const char* name,
                                       const void* code, void* stack, int stacksize,
                                       enum thread_type type, int priority, bool run)
{
    bool stack_alloced = false;
    bool thread_alloced = false;
    if (!stack)
    {
        stack = malloc(stacksize);
        stack_alloced = true;
    }
    if (!stack) return NULL;
    if (!thread)
    {
        thread = (struct scheduler_thread*)malloc(sizeof(struct scheduler_thread));
        thread_alloced = true;
    }
    if (!thread)
    {
        if (stack_alloced) free(stack);
        return NULL;
    }
    // Transfer ownership of our allocations to the new thread.
    if (thread_alloced) reownalloc(thread, OWNER_TYPE(OWNER_THREAD, thread));
    if (stack_alloced) reownalloc(stack, OWNER_TYPE(OWNER_THREAD, thread));

    // Fill the whole stack with the overflow canary pattern; the bottom
    // word is checked on every scheduler_switch().
    int i;
    for (i = 0; i < stacksize >> 2; i ++) ((uint32_t*)stack)[i] = 0xaffebeaf;

    memset(thread, 0, sizeof(struct scheduler_thread));
    thread->state = run ? THREAD_READY : THREAD_SUSPENDED;
    thread->type = type;
    thread->name = name;
    thread->priority = priority;
    // Initial register frame: regs[15]=PC (entry point), regs[14]=LR
    // (falls through into thread_exit), regs[13]=SP (top of stack).
    // cpsr 0x1f is presumably ARM system mode -- confirm against the
    // context-switch code.
    thread->cpsr = 0x1f;
    thread->regs[15] = (uint32_t)code;
    thread->regs[14] = (uint32_t)thread_exit;
    thread->regs[13] = (uint32_t)stack + stacksize;
    thread->stack = stack;

    // Link into the global thread list, right after the head (idle) thread.
    uint32_t mode = enter_critical_section();
    thread->thread_next = head_thread->thread_next;
    head_thread->thread_next = thread;
    leave_critical_section(mode);

    return thread;
}
406
 
429 theseven 407
// Suspend a thread (NULL = the calling thread).
// A blocked thread keeps its block_type and has its remaining timeout
// frozen (elapsed time subtracted) so thread_resume() can re-block it with
// the leftover time; a mutex waiter is also removed from the mutex queue.
// Returns ALREADY_SUSPENDED if the thread was already suspended.
int thread_suspend(struct scheduler_thread* thread)
{
    int ret = THREAD_OK;
    bool needsswitch = false;
    uint32_t mode = enter_critical_section();

    if (!thread) thread = current_thread;
    if (thread->state == THREAD_SUSPENDED) ret = ALREADY_SUSPENDED;
    if (ret == THREAD_OK)
    {
        // Suspending the running thread requires rescheduling afterwards.
        if (thread->state == THREAD_RUNNING) needsswitch = true;
        else if (thread->state == THREAD_BLOCKED)
        {
            if (thread->block_type == THREAD_BLOCK_SLEEP)
            {
                // -1 (infinite) timeouts are left untouched.
                if (thread->timeout != -1) thread->timeout -= USEC_TIMER - thread->blocked_since;
            }
            else if (thread->block_type == THREAD_BLOCK_MUTEX)
            {
                mutex_remove_from_queue((struct mutex*)thread->blocked_by, thread);
                if (thread->timeout != -1) thread->timeout -= USEC_TIMER - thread->blocked_since;
            }
            else if (thread->block_type == THREAD_BLOCK_WAKEUP)
            {
                if (thread->timeout != -1) thread->timeout -= USEC_TIMER - thread->blocked_since;
            }
        }
        thread->state = THREAD_SUSPENDED;
    }

    leave_critical_section(mode);

    if (needsswitch) yield();

    return ret;
}
443
 
429 theseven 444
// Resume a suspended thread (NULL = the calling thread), undoing
// thread_suspend(): a thread that was blocked when suspended is re-blocked
// with its frozen remaining timeout (blocked_since restarted now); a mutex
// waiter is re-queued on its mutex. Otherwise the thread becomes READY.
// Returns ALREADY_RESUMED if the thread was not suspended.
int thread_resume(struct scheduler_thread* thread)
{
    int ret = THREAD_OK;
    bool needsswitch = false;
    uint32_t mode = enter_critical_section();

    if (!thread) thread = current_thread;
    if (thread->state != THREAD_SUSPENDED) ret = ALREADY_RESUMED;
    if (ret == THREAD_OK)
    {
        if (thread->block_type == THREAD_BLOCK_SLEEP)
            thread->blocked_since = USEC_TIMER;
        else if (thread->block_type == THREAD_BLOCK_MUTEX)
        {
            mutex_add_to_queue((struct mutex*)thread->blocked_by, thread);
            thread->blocked_since = USEC_TIMER;
            thread->state = THREAD_BLOCKED;
        }
        else if (thread->block_type == THREAD_BLOCK_WAKEUP)
        {
            thread->blocked_since = USEC_TIMER;
            thread->state = THREAD_BLOCKED;
        }
        else thread->state = THREAD_READY;
    }

    leave_critical_section(mode);
    return ret;
}
473
 
453 theseven 474
void thread_set_name(struct scheduler_thread* thread, char* name)
475
{
476
    uint32_t mode = enter_critical_section();
477
    if (!thread) thread = current_thread;
478
    thread->name = name;
479
    leave_critical_section(mode);
480
}
481
 
482
void thread_set_priority(struct scheduler_thread* thread, int priority)
483
{
484
    uint32_t mode = enter_critical_section();
485
    if (!thread) thread = current_thread;
486
    thread->priority = priority;
487
    leave_critical_section(mode);
488
}
489
 
541 theseven 490
// Tear down a thread: detach it from whatever it is blocked on, release
// every mutex it owns, free its externally tracked resources, unlink it
// from the thread list and free its heap allocations.
// Must be called with a critical section held; 'mode' is the saved state
// from the caller's enter_critical_section() and is released (and
// re-acquired) internally. Does not return if the calling thread kills
// itself (leave_thread()).
int thread_terminate_internal(struct scheduler_thread* thread, uint32_t mode)
{
    struct scheduler_thread* t;
    bool needsswitch = false;

    if (!thread) thread = current_thread;  // NULL means "terminate myself"
    if (thread->state == THREAD_RUNNING) needsswitch = true;
    else
    {
        if (thread->state == THREAD_BLOCKED)
        {
            // Detach the victim from its blocker. BUGFIX: this previously
            // read the *uninitialized* local 't' (undefined behavior);
            // the blocked object hangs off 'thread'.
            if (thread->block_type == THREAD_BLOCK_MUTEX)
                mutex_remove_from_queue((struct mutex*)thread->blocked_by, thread);
            else if (thread->block_type == THREAD_BLOCK_WAKEUP)
                ((struct wakeup*)thread->blocked_by)->waiter = NULL;
        }
        thread->state = THREAD_SUSPENDED;
    }

    // Hand every mutex the dying thread still owns to its next waiter.
    // mutex_unlock_internal() unlinks the list head and may re-point the
    // mutex's owned_next into the *new* owner's list, so always take the
    // current head instead of walking owned_next across the call (the old
    // for-loop did, and could stray into another thread's mutex list).
    while (thread->owned_mutexes)
        mutex_unlock_internal(thread->owned_mutexes);

    leave_critical_section(mode);

    // Release externally tracked per-thread resources; these take their
    // own locks, so the critical section must be dropped first.
    library_release_all_of_thread(OWNER_TYPE(OWNER_THREAD, thread));
#ifdef HAVE_STORAGE
    close_all_of_process(OWNER_TYPE(OWNER_THREAD, thread));
    closedir_all_of_process(OWNER_TYPE(OWNER_THREAD, thread));
#endif
#ifdef HAVE_BUTTON
    button_unregister_all_of_thread(OWNER_TYPE(OWNER_THREAD, thread));
#endif

    // Unlink from the scheduler's thread list and free the thread's heap
    // allocations under the malloc lock.
    malloc_lock();
    mode = enter_critical_section();
    for (t = head_thread; t && t->thread_next != thread; t = t->thread_next);
    if (t) t->thread_next = thread->thread_next;
    free_all_of_thread(OWNER_TYPE(OWNER_THREAD, thread));
    malloc_unlock();
    if (needsswitch) leave_thread();  // does not return: we killed ourselves
    leave_critical_section(mode);

    return THREAD_OK;
}
535
 
541 theseven 536
int thread_terminate(struct scheduler_thread* thread)
537
{
538
    uint32_t mode = enter_critical_section();
539
    return thread_terminate_internal(thread, mode);
540
}
541
 
423 theseven 542
// Terminate every thread whose type compares <= 'type', excluding the
// calling thread. The list scan restarts from the head after each kill
// because thread_terminate_internal() drops the critical section and
// modifies the list. If killself is set, the calling thread exits at the
// end (and this function does not return). Returns the kill count.
int thread_killlevel(enum thread_type type, bool killself)
{
    struct scheduler_thread* t;
    int count = 0;
    while (true)
    {
        bool found = false;
        uint32_t mode = enter_critical_section();
        for (t = head_thread; t; t = t->thread_next)
            if (t->type <= type && current_thread != t)
            {
                // Hands 'mode' to terminate_internal, which releases it.
                thread_terminate_internal(t, mode);
                found = true;
                count++;
                break;
            }
        if (found) continue;  // list changed: rescan from the head
        leave_critical_section(mode);
        break;
    }
    if (killself) thread_exit();
    return count;
}
565
 
429 theseven 566
enum thread_state thread_get_state(struct scheduler_thread* thread)
249 theseven 567
{
429 theseven 568
    return thread->state;
249 theseven 569
}
570
 
14 theseven 571
void thread_exit()
572
{
429 theseven 573
    thread_terminate(NULL);
14 theseven 574
}
71 theseven 575
 
576
int* __errno()
577
{
578
    return &current_thread->err_no;
579
}