//
//
//    Copyright 2010 TheSeven
//
//
//    This file is part of emCORE.
//
//    emCORE is free software: you can redistribute it and/or
//    modify it under the terms of the GNU General Public License as
//    published by the Free Software Foundation, either version 2 of the
//    License, or (at your option) any later version.
//
//    emCORE is distributed in the hope that it will be useful,
//    but WITHOUT ANY WARRANTY; without even the implied warranty of
//    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//    See the GNU General Public License for more details.
//
//    You should have received a copy of the GNU General Public License along
//    with emCORE.  If not, see <http://www.gnu.org/licenses/>.
//
//
22
 
23
 
24
// Kernel services used by the scheduler implementation below.
#include "global.h"
#include "thread.h"
#include "timer.h"
#include "panic.h"
#include "util.h"
#include "malloc.h"
#include "library.h"
#ifdef HAVE_STORAGE
#include "dir.h"
#include "file.h"
#endif
#ifdef HAVE_BUTTON
#include "button.h"
#endif
14 theseven 38
 
39
 
429 theseven 40
// Head of the singly-linked list of all threads (linked via thread_next).
struct scheduler_thread* head_thread IBSS_ATTR;
// The thread that is currently executing (or has been chosen to execute).
struct scheduler_thread* current_thread IBSS_ATTR;
// Fallback thread selected whenever nothing else is ready to run.
struct scheduler_thread idle_thread IBSS_ATTR;
// USEC_TIMER value of the last CPU-load accounting tick.
uint32_t last_tick IBSS_ATTR;
// While true, scheduler_switch always selects the idle thread.
bool scheduler_frozen IBSS_ATTR;
// Signalled to wake the debugger thread (e.g. on stack overflow).
extern struct wakeup dbgwakeup;
14 theseven 46
 
47
 
48
void mutex_init(struct mutex* obj)
49
{
50
    memset(obj, 0, sizeof(struct mutex));
51
}
52
 
53
void mutex_add_to_queue(struct mutex* obj, struct scheduler_thread* thread)
54
{
55
    struct scheduler_thread* t;
15 theseven 56
    if (!obj->waiters || obj->waiters->priority <= thread->priority)
14 theseven 57
    {
58
        thread->queue_next = obj->waiters;
59
        obj->waiters = thread;
60
    }
61
    else
62
    {
63
        t = obj->waiters;
64
        while (t->queue_next && t->queue_next->priority > thread->priority)
65
            t = t->queue_next;
66
        thread->queue_next = t->queue_next;
67
        t->queue_next = thread;
68
    }
69
}
70
 
71
void mutex_remove_from_queue(struct mutex* obj, struct scheduler_thread* thread)
72
{
73
    struct scheduler_thread* t;
74
    if (!obj->waiters) return;
75
    if (obj->waiters == thread) obj->waiters = thread->queue_next;
76
    else
77
    {
78
        t = obj->waiters;
79
        while (t->queue_next)
80
        {
81
            if (t->queue_next == thread) t->queue_next = thread->queue_next;
82
            t = t->queue_next;
83
        }
84
    }
85
}
86
 
87
// Acquire a recursive mutex.
//
// timeout: 0 = try-lock (fail immediately if contended); -1 = wait
//          forever (the scheduler treats -1 as "no timeout"); any other
//          value is a timeout in microseconds.
// Returns THREAD_OK on success, THREAD_TIMEOUT if the lock could not be
// acquired in time.
int mutex_lock(struct mutex* obj, int timeout)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (!obj->count)
    {
        // Uncontended: take ownership immediately.
        obj->count = 1;
        obj->owner = current_thread;
    }
    // Recursive acquisition by the current owner just bumps the count.
    else if (obj->owner == current_thread) obj->count++;
    else
    {
        if (timeout)
        {
            // Contended: block on the mutex. mutex_unlock hands us
            // ownership directly, or the scheduler wakes us (and removes
            // us from the waiter queue) when the timeout expires.
            current_thread->state = THREAD_BLOCKED;
            current_thread->block_type = THREAD_BLOCK_MUTEX;
            current_thread->blocked_by = obj;
            current_thread->timeout = timeout;
            current_thread->blocked_since = USEC_TIMER;
            mutex_add_to_queue(obj, current_thread);
            leave_critical_section(mode);
            yield();
            // If ownership was not transferred to us, the wait timed out.
            if (obj->owner != current_thread) return THREAD_TIMEOUT;
            return THREAD_OK;
        }
        else ret = THREAD_TIMEOUT;  // try-lock failed
    }

    leave_critical_section(mode);
    return ret;
}
119
 
120
// Release one level of a recursive mutex held by the calling thread.
// Panics (killing the thread) if the mutex is not locked at all or is
// owned by a different thread.
// Returns the remaining recursion count (THREAD_OK / 0 once fully
// released).
int mutex_unlock(struct mutex* obj)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (!obj->count)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Trying to unlock non-owned mutex! (%08X)", obj);
    }

    if (obj->owner != current_thread)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Trying to unlock mutex owned by different thread! (%08X)", obj);
    }

    // Still held recursively: report how many unlocks remain.
    if (--(obj->count)) ret = obj->count;
    else if (obj->waiters)
    {
        // Fully released with waiters queued: hand ownership directly to
        // the highest-priority waiter and make it runnable.
        obj->count = 1;
        obj->owner = obj->waiters;
        obj->waiters->state = THREAD_READY;
        obj->waiters->block_type = THREAD_NOT_BLOCKED;
        obj->waiters->blocked_by = NULL;
        obj->waiters->timeout = 0;
        obj->waiters = obj->waiters->queue_next;
    }

    leave_critical_section(mode);
    return ret;
}
152
 
153
void wakeup_init(struct wakeup* obj)
154
{
155
    memset(obj, 0, sizeof(struct wakeup));
156
}
157
 
158
// Wait for a wakeup object to be signalled. Only ONE thread may wait on
// a given wakeup at a time; a second concurrent waiter is a fatal error.
// A pending signal is consumed immediately without blocking.
//
// timeout: 0 = poll only; -1 = wait forever; otherwise microseconds.
// Returns THREAD_OK if signalled, THREAD_TIMEOUT otherwise.
int wakeup_wait(struct wakeup* obj, int timeout)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (obj->waiter)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Multiple threads waiting for single wakeup! (%08X)", obj);
    }

    // Consume an already-pending signal without blocking.
    if (obj->signalled) obj->signalled = false;
    else
    {
        if (timeout)
        {
            // Block until wakeup_signal readies us or the timeout expires.
            current_thread->state = THREAD_BLOCKED;
            current_thread->block_type = THREAD_BLOCK_WAKEUP;
            current_thread->blocked_by = obj;
            current_thread->timeout = timeout;
            current_thread->blocked_since = USEC_TIMER;
            obj->waiter = current_thread;
            leave_critical_section(mode);
            yield();
            obj->waiter = NULL;
            // No signal recorded => the scheduler woke us on timeout.
            if (!obj->signalled) return THREAD_TIMEOUT;
            obj->signalled = false;
            return THREAD_OK;
        }
        else ret = THREAD_TIMEOUT;  // poll: nothing pending
    }

    leave_critical_section(mode);
    return ret;
}
193
 
194
// Signal a wakeup object. The signal is latched, so a future
// wakeup_wait will consume it even if nobody is waiting right now.
// Returns THREAD_FOUND if a waiter was readied, THREAD_OK otherwise.
int wakeup_signal(struct wakeup* obj)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    obj->signalled = true;
    if (obj->waiter)
    {
        // Make the waiter runnable again.
        obj->waiter->state = THREAD_READY;
        obj->waiter->block_type = THREAD_NOT_BLOCKED;
        obj->waiter->blocked_by = NULL;
        obj->waiter->timeout = 0;
        ret = THREAD_FOUND;
        // If the idle thread is running (e.g. we are in an interrupt
        // handler that preempted idle), switch to the waiter right away.
        if (current_thread == &idle_thread)
            scheduler_switch(obj->waiter, NULL);
    }

    leave_critical_section(mode);
    return ret;
}
214
 
215
// Block the calling thread for usecs microseconds (-1 sleeps until
// explicitly woken). A zero duration just yields the CPU.
void sleep(int usecs)
{
    if (usecs)
    {
        uint32_t mode = enter_critical_section();
        current_thread->state = THREAD_BLOCKED;
        current_thread->block_type = THREAD_BLOCK_SLEEP;
        current_thread->timeout = usecs;
        current_thread->blocked_since = USEC_TIMER;
        leave_critical_section(mode);
    }
    yield();
}
228
 
229
void scheduler_init(void)
230
{
429 theseven 231
    last_tick = USEC_TIMER;
34 theseven 232
    scheduler_frozen = false;
429 theseven 233
    head_thread = &idle_thread;
234
    current_thread = &idle_thread;
235
    memset(&idle_thread, 0, sizeof(idle_thread));
236
    idle_thread.state = THREAD_RUNNING;
237
    idle_thread.startusec = last_tick;
542 theseven 238
    idle_thread.type = CORE_THREAD;
429 theseven 239
    idle_thread.name = "idle thread";
240
    idle_thread.stack = (uint32_t*)-1;
14 theseven 241
}
242
 
54 theseven 243
bool scheduler_freeze(bool value)
34 theseven 244
{
54 theseven 245
    bool old = scheduler_frozen;
34 theseven 246
    scheduler_frozen = value;
54 theseven 247
    return old;
34 theseven 248
}
249
 
389 theseven 250
void scheduler_pause_accounting()
251
{
252
    uint32_t usec = USEC_TIMER;
253
    current_thread->cputime_total += usec - current_thread->startusec;
254
    current_thread->cputime_current += usec - current_thread->startusec;
255
}
256
 
257
// Resume charging CPU time to the current thread from this moment on.
void scheduler_resume_accounting()
{
    current_thread->startusec = USEC_TIMER;
}
261
 
595 theseven 262
// Pick the next thread to run and make it current.
//
// thread: preferred thread to switch to (run if READY), or NULL to let
//         the scheduler choose.
// block:  a thread to deprioritize this round (it only runs if nothing
//         else is ready), or NULL.
// Must be called with interrupts disabled (from yield/IRQ context).
void scheduler_switch(struct scheduler_thread* thread, struct scheduler_thread* block)
{
    struct scheduler_thread* t;
    uint32_t score, best;
    uint32_t usec = USEC_TIMER;
    if (current_thread->state == THREAD_RUNNING) current_thread->state = THREAD_READY;
    // Stack canary check: thread stacks are filled with 0xaffebeaf on
    // creation; a clobbered bottom word means the thread overflowed.
    // (stack == -1 is the idle thread's sentinel: no check.)
    if ((int)current_thread->stack != -1 && *current_thread->stack != 0xaffebeaf)
    {
        // Freeze all user threads, mark the offender defunct and wake
        // the debugger.
        for (t = head_thread; t; t = t->thread_next)
            if (t->type == USER_THREAD)
                t->state = THREAD_SUSPENDED;
        current_thread->state = THREAD_DEFUNCT;
        current_thread->block_type = THREAD_DEFUNCT_STKOV;
        wakeup_signal(&dbgwakeup);
    }

    timer_kill_wakeup();

    // Periodic CPU-load accounting: convert each thread's accumulated
    // time over the past tick into a 0..255 load figure.
    if (usec - last_tick > SCHEDULER_TICK)
    {
        uint32_t diff = usec - last_tick;
        last_tick = usec;
        for (t = head_thread; t; t = t->thread_next)
        {
            t->cpuload = 255 * t->cputime_current / diff;
            t->cputime_current = 0;
        }
    }

    // Earliest pending timeout across all blocked threads (microseconds
    // from now); used to program the wakeup timer when going idle.
    uint32_t next_unblock = 0xffffffff;
    if (scheduler_frozen) thread = &idle_thread;
    else
    {
        for (t = head_thread; t; t = t->thread_next)
        {
            // Expire timed-out blocked threads (timeout == -1 means
            // "wait forever").
            if (t->state == THREAD_BLOCKED && t->timeout != -1
             && TIME_AFTER(usec, t->blocked_since + t->timeout))
            {
                if (t->block_type == THREAD_BLOCK_MUTEX)
                    mutex_remove_from_queue((struct mutex*)t->blocked_by, t);
                t->state = THREAD_READY;
                t->block_type = THREAD_NOT_BLOCKED;
                t->blocked_by = NULL;
                t->timeout = 0;
            }
            else if (t->state == THREAD_BLOCKED && t->timeout != -1)
            {
                uint32_t left = t->blocked_since + t->timeout - usec;
                if (left < next_unblock) next_unblock = left;
            }
        }

        // No usable preference: pick the READY thread with the lowest
        // cputime/priority score (priority 0 threads never run).
        if (!thread || thread->state != THREAD_READY)
        {
            thread = &idle_thread;
            best = 0xffffffff;
            for (t = head_thread; t; t = t->thread_next)
                if (t->state == THREAD_READY && t->priority)
                {
                    // The deprioritized thread gets a near-worst score.
                    if (t == block) score = 0xfffffffe;
                    else score = t->cputime_current / t->priority;
                    if (score < best)
                    {
                        best = score;
                        thread = t;
                    }
                }
        }

        // Going idle: wake exactly when the next timeout expires.
        // Otherwise: preempt after one system tick.
        if (thread == &idle_thread) timer_schedule_wakeup(next_unblock);
        else timer_schedule_wakeup(SYSTEM_TICK);
    }

    current_thread = thread;
    current_thread->state = THREAD_RUNNING;
}
338
 
429 theseven 339
// Create a new thread and link it into the scheduler's thread list.
//
// thread:    caller-provided control block, or NULL to malloc one.
// name:      display name (pointer is stored, not copied).
// code:      entry point the thread starts executing.
// stack:     caller-provided stack, or NULL to malloc stacksize bytes.
// stacksize: stack size in bytes.
// type:      thread class (CORE_THREAD / USER_THREAD ...).
// priority:  scheduling weight; 0 means the thread never gets picked.
// run:       start READY (true) or SUSPENDED (false).
// Returns the thread, or NULL if an allocation failed.
struct scheduler_thread* thread_create(struct scheduler_thread* thread, const char* name,
                                       const void* code, void* stack, int stacksize,
                                       enum thread_type type, int priority, bool run)
{
    bool stack_alloced = false;
    bool thread_alloced = false;
    if (!stack)
    {
        stack = malloc(stacksize);
        stack_alloced = true;
    }
    if (!stack) return NULL;
    if (!thread)
    {
        thread = (struct scheduler_thread*)malloc(sizeof(struct scheduler_thread));
        thread_alloced = true;
    }
    if (!thread)
    {
        // Don't leak the stack we just allocated.
        if (stack_alloced) free(stack);
        return NULL;
    }
    // Transfer ownership of our allocations to the new thread so they
    // are reclaimed by free_all_of_thread() on termination.
    if (thread_alloced) reownalloc(thread, thread);
    if (stack_alloced) reownalloc(stack, thread);

    // Fill the stack with the canary pattern checked in scheduler_switch.
    int i;
    for (i = 0; i < stacksize >> 2; i ++) ((uint32_t*)stack)[i] = 0xaffebeaf;

    memset(thread, 0, sizeof(struct scheduler_thread));
    thread->state = run ? THREAD_READY : THREAD_SUSPENDED;
    thread->type = type;
    thread->name = name;
    thread->priority = priority;
    // Initial register file: PC = entry point, LR = thread_exit (so a
    // plain return terminates the thread), SP = top of stack,
    // CPSR = 0x1f (ARM system mode).
    thread->cpsr = 0x1f;
    thread->regs[15] = (uint32_t)code;
    thread->regs[14] = (uint32_t)thread_exit;
    thread->regs[13] = (uint32_t)stack + stacksize;
    thread->stack = stack;

    // Insert right after the list head (the idle thread).
    uint32_t mode = enter_critical_section();
    thread->thread_next = head_thread->thread_next;
    head_thread->thread_next = thread;
    leave_critical_section(mode);

    return thread;
}
385
 
429 theseven 386
// Suspend a thread (NULL means the calling thread). A blocked thread
// has its remaining timeout preserved (timeout == -1 means forever) and
// a mutex waiter is removed from the waiter queue; thread_resume
// restores the blocked state.
// Returns THREAD_OK, or ALREADY_SUSPENDED.
int thread_suspend(struct scheduler_thread* thread)
{
    int ret = THREAD_OK;
    bool needsswitch = false;
    uint32_t mode = enter_critical_section();

    if (!thread) thread = current_thread;
    if (thread->state == THREAD_SUSPENDED) ret = ALREADY_SUSPENDED;
    if (ret == THREAD_OK)
    {
        // Suspending ourselves: must reschedule after dropping the lock.
        if (thread->state == THREAD_RUNNING) needsswitch = true;
        else if (thread->state == THREAD_BLOCKED)
        {
            if (thread->block_type == THREAD_BLOCK_SLEEP)
            {
                // Convert absolute wait into remaining time.
                if (thread->timeout != -1) thread->timeout -= USEC_TIMER - thread->blocked_since;
            }
            else if (thread->block_type == THREAD_BLOCK_MUTEX)
            {
                // Leave the waiter queue; thread_resume re-queues us.
                mutex_remove_from_queue((struct mutex*)thread->blocked_by, thread);
                if (thread->timeout != -1) thread->timeout -= USEC_TIMER - thread->blocked_since;
            }
            else if (thread->block_type == THREAD_BLOCK_WAKEUP)
            {
                if (thread->timeout != -1) thread->timeout -= USEC_TIMER - thread->blocked_since;
            }
        }
        thread->state = THREAD_SUSPENDED;
    }

    leave_critical_section(mode);

    if (needsswitch) yield();

    return ret;
}
422
 
429 theseven 423
int thread_resume(struct scheduler_thread* thread)
14 theseven 424
{
425
    int ret = THREAD_OK;
426
    bool needsswitch = false;
427
    uint32_t mode = enter_critical_section();
428
 
429 theseven 429
    if (!thread) thread = current_thread;
430
    if (thread->state != THREAD_SUSPENDED) ret = ALREADY_RESUMED;
14 theseven 431
    if (ret == THREAD_OK)
432
    {
429 theseven 433
        if (thread->block_type == THREAD_BLOCK_SLEEP)
434
            thread->blocked_since = USEC_TIMER;
435
        else if (thread->block_type == THREAD_BLOCK_MUTEX)
14 theseven 436
        {
429 theseven 437
            mutex_add_to_queue((struct mutex*)thread->blocked_by, thread);
438
            thread->blocked_since = USEC_TIMER;
439
            thread->state = THREAD_BLOCKED;
14 theseven 440
        }
429 theseven 441
        else if (thread->block_type == THREAD_BLOCK_WAKEUP)
14 theseven 442
        {
429 theseven 443
            thread->blocked_since = USEC_TIMER;
444
            thread->state = THREAD_BLOCKED;
14 theseven 445
        }
429 theseven 446
        else thread->state = THREAD_READY;
14 theseven 447
    }
448
 
449
    leave_critical_section(mode);
450
    return ret;
451
}
452
 
453 theseven 453
void thread_set_name(struct scheduler_thread* thread, char* name)
454
{
455
    uint32_t mode = enter_critical_section();
456
    if (!thread) thread = current_thread;
457
    thread->name = name;
458
    leave_critical_section(mode);
459
}
460
 
461
void thread_set_priority(struct scheduler_thread* thread, int priority)
462
{
463
    uint32_t mode = enter_critical_section();
464
    if (!thread) thread = current_thread;
465
    thread->priority = priority;
466
    leave_critical_section(mode);
467
}
468
 
541 theseven 469
int thread_terminate_internal(struct scheduler_thread* thread, uint32_t mode)
14 theseven 470
{
429 theseven 471
    struct scheduler_thread* t;
14 theseven 472
    bool needsswitch = false;
473
 
429 theseven 474
    if (!thread) thread = current_thread;
475
    if (thread->state == THREAD_RUNNING) needsswitch = true;
688 theseven 476
    else
14 theseven 477
    {
688 theseven 478
        if (thread->state == THREAD_BLOCKED)
479
        {
480
            if (thread->block_type == THREAD_BLOCK_MUTEX)
481
                mutex_remove_from_queue((struct mutex*)t->blocked_by, thread);
482
            else if (thread->block_type == THREAD_BLOCK_WAKEUP)
483
                ((struct wakeup*)thread->blocked_by)->waiter = NULL;
484
        }
485
        thread->state = THREAD_SUSPENDED;
429 theseven 486
    }
487
 
488
    leave_critical_section(mode);
489
 
465 theseven 490
    library_release_all_of_thread(thread);
85 theseven 491
#ifdef HAVE_STORAGE
429 theseven 492
    close_all_of_process(thread);
493
    closedir_all_of_process(thread);
85 theseven 494
#endif
130 theseven 495
#ifdef HAVE_BUTTON
429 theseven 496
    button_unregister_all_of_thread(thread);
130 theseven 497
#endif
429 theseven 498
    free_all_of_thread(thread);
14 theseven 499
 
688 theseven 500
    mode = enter_critical_section();
501
    for (t = head_thread; t && t->thread_next != thread; t = t->thread_next);
502
    if (t) t->thread_next = thread->thread_next;
503
    leave_critical_section(mode);
504
 
593 theseven 505
    if (needsswitch) yield();
14 theseven 506
 
429 theseven 507
    return THREAD_OK;
14 theseven 508
}
509
 
541 theseven 510
// Terminate a thread (NULL = calling thread). The critical section
// opened here is consumed by thread_terminate_internal.
int thread_terminate(struct scheduler_thread* thread)
{
    uint32_t mode = enter_critical_section();
    return thread_terminate_internal(thread, mode);
}
515
 
423 theseven 516
// Terminate every thread whose type is <= `type`, except the calling
// thread. If killself is true, the calling thread exits afterwards
// (and this function does not return in that case).
// Returns the number of threads terminated.
int thread_killlevel(enum thread_type type, bool killself)
{
    struct scheduler_thread* t;
    int count = 0;
    while (true)
    {
        bool found = false;
        uint32_t mode = enter_critical_section();
        // Terminating a thread mutates the list and drops the critical
        // section, so kill one victim per pass and restart the scan.
        for (t = head_thread; t; t = t->thread_next)
            if (t->type <= type && current_thread != t)
            {
                // Consumes `mode` (leaves the critical section).
                thread_terminate_internal(t, mode);
                found = true;
                count++;
                break;
            }
        if (found) continue;
        leave_critical_section(mode);
        break;
    }
    if (killself) thread_exit();
    return count;
}
539
 
429 theseven 540
// Report a thread's current scheduler state.
enum thread_state thread_get_state(struct scheduler_thread* thread)
{
    return thread->state;
}
544
 
14 theseven 545
// Terminate the calling thread (also the implicit return address of
// every thread created by thread_create).
void thread_exit()
{
    thread_terminate(NULL);
}
71 theseven 549
 
550
// Newlib hook: give each thread its own errno by pointing the C library
// at the per-thread err_no field.
int* __errno()
{
    return &current_thread->err_no;
}