Subversion Repositories freemyipod

Rev

Go to most recent revision | Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
14 theseven 1
//
2
//
3
//    Copyright 2010 TheSeven
4
//
5
//
427 farthen 6
//    This file is part of emCORE.
14 theseven 7
//
427 farthen 8
//    emCORE is free software: you can redistribute it and/or
14 theseven 9
//    modify it under the terms of the GNU General Public License as
10
//    published by the Free Software Foundation, either version 2 of the
11
//    License, or (at your option) any later version.
12
//
427 farthen 13
//    emCORE is distributed in the hope that it will be useful,
14 theseven 14
//    but WITHOUT ANY WARRANTY; without even the implied warranty of
15
//    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
16
//    See the GNU General Public License for more details.
17
//
18
//    You should have received a copy of the GNU General Public License along
427 farthen 19
//    with emCORE.  If not, see <http://www.gnu.org/licenses/>.
14 theseven 20
//
21
//
22
 
23
 
24
#include "global.h"
25
#include "thread.h"
26
#include "timer.h"
27
#include "panic.h"
28
#include "util.h"
429 theseven 29
#include "malloc.h"
465 theseven 30
#include "library.h"
85 theseven 31
#ifdef HAVE_STORAGE
58 theseven 32
#include "dir.h"
33
#include "file.h"
85 theseven 34
#endif
130 theseven 35
#ifdef HAVE_BUTTON
36
#include "button.h"
37
#endif
14 theseven 38
 
39
 
429 theseven 40
// Scheduler global state; IBSS_ATTR places these in uninitialized RAM.
struct scheduler_thread* head_thread IBSS_ATTR;     // head of the linked list of all threads (the idle thread)
struct scheduler_thread* current_thread IBSS_ATTR;  // thread currently executing
struct scheduler_thread idle_thread IBSS_ATTR;      // fallback thread chosen when nothing else is ready
uint32_t last_tick IBSS_ATTR;                       // USEC_TIMER value at the last CPU-load accounting tick
bool scheduler_frozen IBSS_ATTR;                    // while true, scheduler_switch only runs the idle thread
extern struct wakeup dbgwakeup;                     // signalled on stack overflow -- presumably wakes a debugger thread (TODO confirm)
14 theseven 46
 
47
 
48
void mutex_init(struct mutex* obj)
49
{
50
    memset(obj, 0, sizeof(struct mutex));
51
}
52
 
53
void mutex_add_to_queue(struct mutex* obj, struct scheduler_thread* thread)
54
{
55
    struct scheduler_thread* t;
15 theseven 56
    if (!obj->waiters || obj->waiters->priority <= thread->priority)
14 theseven 57
    {
58
        thread->queue_next = obj->waiters;
59
        obj->waiters = thread;
60
    }
61
    else
62
    {
63
        t = obj->waiters;
64
        while (t->queue_next && t->queue_next->priority > thread->priority)
65
            t = t->queue_next;
66
        thread->queue_next = t->queue_next;
67
        t->queue_next = thread;
68
    }
69
}
70
 
71
void mutex_remove_from_queue(struct mutex* obj, struct scheduler_thread* thread)
72
{
73
    struct scheduler_thread* t;
74
    if (!obj->waiters) return;
75
    if (obj->waiters == thread) obj->waiters = thread->queue_next;
76
    else
77
    {
78
        t = obj->waiters;
79
        while (t->queue_next)
80
        {
81
            if (t->queue_next == thread) t->queue_next = thread->queue_next;
82
            t = t->queue_next;
83
        }
84
    }
85
}
86
 
87
// Acquire a recursive mutex.
// timeout == 0 is a try-lock (fails immediately if contended);
// timeout == -1 apparently means "wait forever" (see the timeout checks in
// scheduler_switch) -- TODO confirm.
// Returns THREAD_OK on success, THREAD_TIMEOUT otherwise.
int mutex_lock(struct mutex* obj, int timeout)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (!obj->count)
    {
        // Uncontended: take ownership.
        obj->count = 1;
        obj->owner = current_thread;
    }
    else if (obj->owner == current_thread) obj->count++;  // recursive re-acquisition
    else
    {
        if (timeout)
        {
            // Block on the mutex; mutex_unlock hands ownership over directly,
            // and scheduler_switch expires us if the timeout elapses first.
            current_thread->state = THREAD_BLOCKED;
            current_thread->block_type = THREAD_BLOCK_MUTEX;
            current_thread->blocked_by = obj;
            current_thread->timeout = timeout;
            current_thread->blocked_since = USEC_TIMER;
            mutex_add_to_queue(obj, current_thread);
            leave_critical_section(mode);
            yield();
            // NOTE(review): obj->owner is read outside the critical section;
            // mutex_unlock assigns it before making us READY, so this looks
            // safe on a single core -- confirm before relying on it for SMP.
            if (obj->owner != current_thread) return THREAD_TIMEOUT;
            return THREAD_OK;
        }
        else ret = THREAD_TIMEOUT;  // try-lock failed
    }

    leave_critical_section(mode);
    return ret;
}
119
 
120
// Release a recursive mutex.
// Returns the remaining recursion depth, or THREAD_OK (0) once fully
// released. Panics (PANIC_KILLTHREAD) if the mutex is not held, or held by
// a different thread.
int mutex_unlock(struct mutex* obj)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (!obj->count)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Trying to unlock non-owned mutex! (%08X)", obj);
    }

    if (obj->owner != current_thread)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Trying to unlock mutex owned by different thread! (%08X)", obj);
    }

    if (--(obj->count)) ret = obj->count;  // still held recursively
    else if (obj->waiters)
    {
        // Hand the mutex directly to the head waiter (highest priority) and
        // make it runnable; it is removed from the wait queue.
        obj->count = 1;
        obj->owner = obj->waiters;
        obj->waiters->state = THREAD_READY;
        obj->waiters->block_type = THREAD_NOT_BLOCKED;
        obj->waiters->blocked_by = NULL;
        obj->waiters->timeout = 0;
        obj->waiters = obj->waiters->queue_next;
    }

    leave_critical_section(mode);
    return ret;
}
152
 
153
void wakeup_init(struct wakeup* obj)
154
{
155
    memset(obj, 0, sizeof(struct wakeup));
156
}
157
 
158
// Block the calling thread until the wakeup object is signalled.
// Only one thread may wait on a wakeup at a time (panics otherwise).
// timeout == 0 just polls the signalled flag; timeout == -1 apparently
// means "wait forever" (see scheduler_switch) -- TODO confirm.
// Returns THREAD_OK when signalled, THREAD_TIMEOUT otherwise.
int wakeup_wait(struct wakeup* obj, int timeout)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (obj->waiter)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Multiple threads waiting single wakeup! (%08X)", obj);
    }

    if (obj->signalled) obj->signalled = false;  // consume a pending signal
    else
    {
        if (timeout)
        {
            current_thread->state = THREAD_BLOCKED;
            current_thread->block_type = THREAD_BLOCK_WAKEUP;
            current_thread->blocked_by = obj;
            current_thread->timeout = timeout;
            current_thread->blocked_since = USEC_TIMER;
            obj->waiter = current_thread;
            leave_critical_section(mode);
            yield();
            // NOTE(review): waiter/signalled are modified here outside the
            // critical section; looks safe on a single core since we only
            // run again after being woken or timed out -- confirm.
            obj->waiter = NULL;
            if (!obj->signalled) return THREAD_TIMEOUT;
            obj->signalled = false;
            return THREAD_OK;
        }
        else ret = THREAD_TIMEOUT;  // poll: not signalled
    }

    leave_critical_section(mode);
    return ret;
}
193
 
194
int wakeup_signal(struct wakeup* obj)
195
{
196
    int ret = THREAD_OK;
197
    uint32_t mode = enter_critical_section();
198
 
199
    obj->signalled = true;
200
    if (obj->waiter)
201
    {
202
        obj->waiter->state = THREAD_READY;
203
        obj->waiter->block_type = THREAD_NOT_BLOCKED;
204
        obj->waiter->blocked_by = NULL;
205
        obj->waiter->timeout = 0;
206
        ret = THREAD_FOUND;
207
    }
208
 
209
    leave_critical_section(mode);
210
    return ret;
211
}
212
 
213
void sleep(int usecs)
214
{
15 theseven 215
    if (usecs)
216
    {
217
        uint32_t mode = enter_critical_section();
218
        current_thread->state = THREAD_BLOCKED;
219
        current_thread->block_type = THREAD_BLOCK_SLEEP;
220
        current_thread->timeout = usecs;
221
        current_thread->blocked_since = USEC_TIMER;
222
        leave_critical_section(mode);
223
    }
593 theseven 224
    yield();
14 theseven 225
}
226
 
227
void scheduler_init(void)
228
{
429 theseven 229
    last_tick = USEC_TIMER;
34 theseven 230
    scheduler_frozen = false;
429 theseven 231
    head_thread = &idle_thread;
232
    current_thread = &idle_thread;
233
    memset(&idle_thread, 0, sizeof(idle_thread));
234
    idle_thread.state = THREAD_RUNNING;
235
    idle_thread.startusec = last_tick;
542 theseven 236
    idle_thread.type = CORE_THREAD;
429 theseven 237
    idle_thread.name = "idle thread";
238
    idle_thread.stack = (uint32_t*)-1;
14 theseven 239
    setup_tick();
240
}
241
 
54 theseven 242
bool scheduler_freeze(bool value)
34 theseven 243
{
54 theseven 244
    bool old = scheduler_frozen;
34 theseven 245
    scheduler_frozen = value;
54 theseven 246
    return old;
34 theseven 247
}
248
 
389 theseven 249
void scheduler_pause_accounting()
250
{
251
    uint32_t usec = USEC_TIMER;
252
    current_thread->cputime_total += usec - current_thread->startusec;
253
    current_thread->cputime_current += usec - current_thread->startusec;
254
}
255
 
256
void scheduler_resume_accounting()
257
{
258
    current_thread->startusec = USEC_TIMER;
259
}
260
 
595 theseven 261
// Core scheduling decision, called from the context-switch path.
// thread: preferred candidate to run next (may be NULL); block: a thread to
// avoid, chosen only if nothing else is ready.
void scheduler_switch(struct scheduler_thread* thread, struct scheduler_thread* block)
{
    struct scheduler_thread* t;
    uint32_t score, best;
    uint32_t usec = USEC_TIMER;
    if (current_thread->state == THREAD_RUNNING) current_thread->state = THREAD_READY;
    // Stack canary check: thread_create fills stacks with 0xaffebeaf and the
    // idle thread uses stack == (uint32_t*)-1 as a "no stack" sentinel. A
    // clobbered canary word means the outgoing thread overflowed its stack:
    // suspend all user threads, mark the offender defunct, wake the debugger.
    if ((int)current_thread->stack != -1 && *current_thread->stack != 0xaffebeaf)
    {
        for (t = head_thread; t; t = t->thread_next)
            if (t->type == USER_THREAD)
                t->state = THREAD_SUSPENDED;
        current_thread->state = THREAD_DEFUNCT;
        current_thread->block_type = THREAD_DEFUNCT_STKOV;
        wakeup_signal(&dbgwakeup);
    }
    // Once per SCHEDULER_TICK: convert each thread's accumulated CPU time
    // into a 0..255 load figure and reset the per-tick counters.
    if (usec - last_tick > SCHEDULER_TICK)
    {
        uint32_t diff = usec - last_tick;
        last_tick = usec;
        for (t = head_thread; t; t = t->thread_next)
        {
            t->cpuload = 255 * t->cputime_current / diff;
            t->cputime_current = 0;
        }
    }

    if (scheduler_frozen) thread = &idle_thread;  // frozen: idle thread only
    else
    {
        // Expire blocked threads whose timeout has elapsed (-1 = infinite);
        // mutex waiters are also unlinked from their wait queue.
        for (t = head_thread; t; t = t->thread_next)
            if (t->state == THREAD_BLOCKED && t->timeout != -1
             && TIME_AFTER(usec, t->blocked_since + t->timeout))
            {
                if (t->block_type == THREAD_BLOCK_MUTEX)
                    mutex_remove_from_queue((struct mutex*)t->blocked_by, t);
                t->state = THREAD_READY;
                t->block_type = THREAD_NOT_BLOCKED;
                t->blocked_by = NULL;
                t->timeout = 0;
            }

        // No runnable preferred thread: pick the READY thread with the
        // lowest cputime_current / priority score (priority 0 threads are
        // never auto-selected; "block" gets a near-maximal score so it is
        // chosen only as a last resort).
        if (!thread || thread->state != THREAD_READY)
        {
            thread = &idle_thread;
            best = 0xffffffff;
            for (t = head_thread; t; t = t->thread_next)
                if (t->state == THREAD_READY && t->priority)
                {
                    if (t == block) score = 0xfffffffe;
                    else score = t->cputime_current / t->priority;
                    if (score < best)
                    {
                        best = score;
                        thread = t;
                    }
                }
        }
    }

    current_thread = thread;
    current_thread->state = THREAD_RUNNING;
}
323
 
429 theseven 324
struct scheduler_thread* thread_create(struct scheduler_thread* thread, const char* name,
325
                                       const void* code, void* stack, int stacksize,
326
                                       enum thread_type type, int priority, bool run)
14 theseven 327
{
429 theseven 328
    bool stack_alloced = false;
437 theseven 329
    bool thread_alloced = false;
429 theseven 330
    if (!stack)
331
    {
332
        stack = malloc(stacksize);
333
        stack_alloced = true;
334
    }
335
    if (!stack) return NULL;
336
    if (!thread)
337
    {
437 theseven 338
        thread = (struct scheduler_thread*)malloc(sizeof(struct scheduler_thread));
339
        thread_alloced = true;
340
    }
341
    if (!thread)
342
    {
429 theseven 343
        if (stack_alloced) free(stack);
344
        return NULL;
345
    }
437 theseven 346
    if (thread_alloced) reownalloc(thread, thread);
347
    if (stack_alloced) reownalloc(stack, thread);
429 theseven 348
 
14 theseven 349
    int i;
350
    for (i = 0; i < stacksize >> 2; i ++) ((uint32_t*)stack)[i] = 0xaffebeaf;
351
 
429 theseven 352
    memset(thread, 0, sizeof(struct scheduler_thread));
353
    thread->state = run ? THREAD_READY : THREAD_SUSPENDED;
354
    thread->type = type;
355
    thread->name = name;
356
    thread->priority = priority;
357
    thread->cpsr = 0x1f;
358
    thread->regs[15] = (uint32_t)code;
359
    thread->regs[14] = (uint32_t)thread_exit;
360
    thread->regs[13] = (uint32_t)stack + stacksize;
361
    thread->stack = stack;
362
 
14 theseven 363
    uint32_t mode = enter_critical_section();
489 theseven 364
    thread->thread_next = head_thread->thread_next;
365
    head_thread->thread_next = thread;
429 theseven 366
    leave_critical_section(mode);
14 theseven 367
 
429 theseven 368
    return thread;
14 theseven 369
}
370
 
429 theseven 371
int thread_suspend(struct scheduler_thread* thread)
14 theseven 372
{
373
    int ret = THREAD_OK;
374
    bool needsswitch = false;
375
    uint32_t mode = enter_critical_section();
376
 
429 theseven 377
    if (!thread) thread = current_thread;
378
    if (thread->state == THREAD_SUSPENDED) ret = ALREADY_SUSPENDED;
14 theseven 379
    if (ret == THREAD_OK)
380
    {
429 theseven 381
        if (thread->state == THREAD_RUNNING) needsswitch = true;
382
        else if (thread->state == THREAD_BLOCKED)
14 theseven 383
        {
429 theseven 384
            if (thread->block_type == THREAD_BLOCK_SLEEP)
15 theseven 385
            {
429 theseven 386
                if (thread->timeout != -1) thread->timeout -= USEC_TIMER - thread->blocked_since;
15 theseven 387
            }
429 theseven 388
            else if (thread->block_type == THREAD_BLOCK_MUTEX)
14 theseven 389
            {
429 theseven 390
                mutex_remove_from_queue((struct mutex*)thread->blocked_by, thread);
391
                if (thread->timeout != -1) thread->timeout -= USEC_TIMER - thread->blocked_since;
14 theseven 392
            }
429 theseven 393
            else if (thread->block_type == THREAD_BLOCK_WAKEUP)
15 theseven 394
            {
429 theseven 395
                if (thread->timeout != -1) thread->timeout -= USEC_TIMER - thread->blocked_since;
15 theseven 396
            }
14 theseven 397
        }
429 theseven 398
        thread->state = THREAD_SUSPENDED;
14 theseven 399
    }
400
 
401
    leave_critical_section(mode);
402
 
593 theseven 403
    if (needsswitch) yield();
14 theseven 404
 
405
    return ret;
406
}
407
 
429 theseven 408
int thread_resume(struct scheduler_thread* thread)
14 theseven 409
{
410
    int ret = THREAD_OK;
411
    bool needsswitch = false;
412
    uint32_t mode = enter_critical_section();
413
 
429 theseven 414
    if (!thread) thread = current_thread;
415
    if (thread->state != THREAD_SUSPENDED) ret = ALREADY_RESUMED;
14 theseven 416
    if (ret == THREAD_OK)
417
    {
429 theseven 418
        if (thread->block_type == THREAD_BLOCK_SLEEP)
419
            thread->blocked_since = USEC_TIMER;
420
        else if (thread->block_type == THREAD_BLOCK_MUTEX)
14 theseven 421
        {
429 theseven 422
            mutex_add_to_queue((struct mutex*)thread->blocked_by, thread);
423
            thread->blocked_since = USEC_TIMER;
424
            thread->state = THREAD_BLOCKED;
14 theseven 425
        }
429 theseven 426
        else if (thread->block_type == THREAD_BLOCK_WAKEUP)
14 theseven 427
        {
429 theseven 428
            thread->blocked_since = USEC_TIMER;
429
            thread->state = THREAD_BLOCKED;
14 theseven 430
        }
429 theseven 431
        else thread->state = THREAD_READY;
14 theseven 432
    }
433
 
434
    leave_critical_section(mode);
435
    return ret;
436
}
437
 
453 theseven 438
void thread_set_name(struct scheduler_thread* thread, char* name)
439
{
440
    uint32_t mode = enter_critical_section();
441
    if (!thread) thread = current_thread;
442
    thread->name = name;
443
    leave_critical_section(mode);
444
}
445
 
446
void thread_set_priority(struct scheduler_thread* thread, int priority)
447
{
448
    uint32_t mode = enter_critical_section();
449
    if (!thread) thread = current_thread;
450
    thread->priority = priority;
451
    leave_critical_section(mode);
452
}
453
 
541 theseven 454
int thread_terminate_internal(struct scheduler_thread* thread, uint32_t mode)
14 theseven 455
{
429 theseven 456
    struct scheduler_thread* t;
14 theseven 457
    bool needsswitch = false;
458
 
429 theseven 459
    if (!thread) thread = current_thread;
460
    if (thread->state == THREAD_RUNNING) needsswitch = true;
461
    else if (thread->state == THREAD_BLOCKED)
14 theseven 462
    {
429 theseven 463
        if (thread->block_type == THREAD_BLOCK_MUTEX)
464
            mutex_remove_from_queue((struct mutex*)t->blocked_by, thread);
465
        else if (thread->block_type == THREAD_BLOCK_WAKEUP)
466
            ((struct wakeup*)thread->blocked_by)->waiter = NULL;
467
    }
468
    for (t = head_thread; t && t->thread_next != thread; t = t->thread_next);
469
    if (t) t->thread_next = thread->thread_next;
470
 
471
    leave_critical_section(mode);
472
 
465 theseven 473
    library_release_all_of_thread(thread);
85 theseven 474
#ifdef HAVE_STORAGE
429 theseven 475
    close_all_of_process(thread);
476
    closedir_all_of_process(thread);
85 theseven 477
#endif
130 theseven 478
#ifdef HAVE_BUTTON
429 theseven 479
    button_unregister_all_of_thread(thread);
130 theseven 480
#endif
429 theseven 481
    free_all_of_thread(thread);
14 theseven 482
 
593 theseven 483
    if (needsswitch) yield();
14 theseven 484
 
429 theseven 485
    return THREAD_OK;
14 theseven 486
}
487
 
541 theseven 488
// Terminate a thread (NULL = the calling thread). Thin wrapper that enters
// the critical section thread_terminate_internal expects to be handed.
int thread_terminate(struct scheduler_thread* thread)
{
    uint32_t mode = enter_critical_section();
    int result = thread_terminate_internal(thread, mode);
    return result;
}
493
 
423 theseven 494
// Terminate every thread whose type is <= the given level; the calling
// thread is spared unless killself is set. Returns the number of threads
// terminated.
int thread_killlevel(enum thread_type type, bool killself)
{
    struct scheduler_thread* t;
    int count = 0;
    while (true)
    {
        bool found = false;
        uint32_t mode = enter_critical_section();
        // thread_terminate_internal leaves the critical section and frees
        // list nodes, so kill at most one thread per scan and restart.
        for (t = head_thread; t; t = t->thread_next)
            if (t->type <= type && (killself || current_thread != t))
            {
                thread_terminate_internal(t, mode);
                found = true;
                count++;
                break;
            }
        if (found) continue;
        leave_critical_section(mode);
        break;
    }
    return count;
}
516
 
429 theseven 517
enum thread_state thread_get_state(struct scheduler_thread* thread)
249 theseven 518
{
429 theseven 519
    return thread->state;
249 theseven 520
}
521
 
14 theseven 522
// Terminate the calling thread. Installed by thread_create as the initial
// LR, so a thread function that returns lands here.
void thread_exit()
{
    thread_terminate(NULL);
}
71 theseven 526
 
527
int* __errno()
528
{
529
    return &current_thread->err_no;
530
}