Subversion Repositories freemyipod

Rev

Go to most recent revision | Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
14 theseven 1
//
2
//
3
//    Copyright 2010 TheSeven
4
//
5
//
427 farthen 6
//    This file is part of emCORE.
14 theseven 7
//
427 farthen 8
//    emCORE is free software: you can redistribute it and/or
14 theseven 9
//    modify it under the terms of the GNU General Public License as
10
//    published by the Free Software Foundation, either version 2 of the
11
//    License, or (at your option) any later version.
12
//
427 farthen 13
//    emCORE is distributed in the hope that it will be useful,
14 theseven 14
//    but WITHOUT ANY WARRANTY; without even the implied warranty of
15
//    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
16
//    See the GNU General Public License for more details.
17
//
18
//    You should have received a copy of the GNU General Public License along
427 farthen 19
//    with emCORE.  If not, see <http://www.gnu.org/licenses/>.
14 theseven 20
//
21
//
22
 
23
 
24
#include "global.h"
25
#include "thread.h"
26
#include "timer.h"
27
#include "panic.h"
28
#include "util.h"
429 theseven 29
#include "malloc.h"
85 theseven 30
#ifdef HAVE_STORAGE
58 theseven 31
#include "dir.h"
32
#include "file.h"
85 theseven 33
#endif
130 theseven 34
#ifdef HAVE_BUTTON
35
#include "button.h"
36
#endif
14 theseven 37
 
38
 
429 theseven 39
// Head of the global singly-linked thread list; thread_create pushes new
// threads at the front, so the newest thread is always first.
struct scheduler_thread* head_thread IBSS_ATTR;
// The thread currently owning the CPU.
struct scheduler_thread* current_thread IBSS_ATTR;
// Fallback thread that runs whenever no other thread is ready.
struct scheduler_thread idle_thread IBSS_ATTR;
// USEC_TIMER timestamp of the last CPU-load accounting tick.
uint32_t last_tick IBSS_ATTR;
// While true, scheduler_switch unconditionally selects the idle thread.
bool scheduler_frozen IBSS_ATTR;
// Signalled by scheduler_switch to wake the debugger on stack overflow.
extern struct wakeup dbgwakeup;
14 theseven 45
 
46
 
47
void mutex_init(struct mutex* obj)
48
{
49
    memset(obj, 0, sizeof(struct mutex));
50
}
51
 
52
void mutex_add_to_queue(struct mutex* obj, struct scheduler_thread* thread)
53
{
54
    struct scheduler_thread* t;
15 theseven 55
    if (!obj->waiters || obj->waiters->priority <= thread->priority)
14 theseven 56
    {
57
        thread->queue_next = obj->waiters;
58
        obj->waiters = thread;
59
    }
60
    else
61
    {
62
        t = obj->waiters;
63
        while (t->queue_next && t->queue_next->priority > thread->priority)
64
            t = t->queue_next;
65
        thread->queue_next = t->queue_next;
66
        t->queue_next = thread;
67
    }
68
}
69
 
70
void mutex_remove_from_queue(struct mutex* obj, struct scheduler_thread* thread)
71
{
72
    struct scheduler_thread* t;
73
    if (!obj->waiters) return;
74
    if (obj->waiters == thread) obj->waiters = thread->queue_next;
75
    else
76
    {
77
        t = obj->waiters;
78
        while (t->queue_next)
79
        {
80
            if (t->queue_next == thread) t->queue_next = thread->queue_next;
81
            t = t->queue_next;
82
        }
83
    }
84
}
85
 
86
int mutex_lock(struct mutex* obj, int timeout)
87
{
88
    int ret = THREAD_OK;
89
    uint32_t mode = enter_critical_section();
90
 
91
    if (!obj->count)
92
    {
93
        obj->count = 1;
94
        obj->owner = current_thread;
95
    }
96
    else if (obj->owner == current_thread) obj->count++;
97
    else
98
    {
99
        if (timeout)
100
        {
101
            current_thread->state = THREAD_BLOCKED;
102
            current_thread->block_type = THREAD_BLOCK_MUTEX;
103
            current_thread->blocked_by = obj;
104
            current_thread->timeout = timeout;
105
            current_thread->blocked_since = USEC_TIMER;
106
            mutex_add_to_queue(obj, current_thread);
107
            leave_critical_section(mode);
108
            context_switch();
109
            if (obj->owner != current_thread) return THREAD_TIMEOUT;
110
            return THREAD_OK;
111
        }
112
        else ret = THREAD_TIMEOUT;
113
    }
114
 
115
    leave_critical_section(mode);
116
    return ret;
117
}
118
 
119
int mutex_unlock(struct mutex* obj)
120
{
121
    int ret = THREAD_OK;
122
    uint32_t mode = enter_critical_section();
123
 
124
    if (!obj->count)
125
    {
126
        leave_critical_section(mode);
127
        panicf(PANIC_KILLTHREAD, "Trying to unlock non-owned mutex! (%08X)", obj);
128
    }
129
 
130
    if (obj->owner != current_thread)
131
    {
132
        leave_critical_section(mode);
133
        panicf(PANIC_KILLTHREAD, "Trying to unlock mutex owned by different thread! (%08X)", obj);
134
    }
135
 
136
    if (--(obj->count)) ret = obj->count;
137
    else if (obj->waiters)
138
    {
139
        obj->count = 1;
140
        obj->owner = obj->waiters;
141
        obj->waiters->state = THREAD_READY;
142
        obj->waiters->block_type = THREAD_NOT_BLOCKED;
143
        obj->waiters->blocked_by = NULL;
144
        obj->waiters->timeout = 0;
145
        obj->waiters = obj->waiters->queue_next;
146
    }
147
 
148
    leave_critical_section(mode);
149
    return ret;
150
}
151
 
152
void wakeup_init(struct wakeup* obj)
153
{
154
    memset(obj, 0, sizeof(struct wakeup));
155
}
156
 
157
int wakeup_wait(struct wakeup* obj, int timeout)
158
{
159
    int ret = THREAD_OK;
160
    uint32_t mode = enter_critical_section();
161
 
162
    if (obj->waiter)
163
    {
164
        leave_critical_section(mode);
165
        panicf(PANIC_KILLTHREAD, "Multiple threads waiting single wakeup! (%08X)", obj);
166
    }
167
 
168
    if (obj->signalled) obj->signalled = false;
169
    else
170
    {
171
        if (timeout)
172
        {
173
            current_thread->state = THREAD_BLOCKED;
174
            current_thread->block_type = THREAD_BLOCK_WAKEUP;
175
            current_thread->blocked_by = obj;
176
            current_thread->timeout = timeout;
177
            current_thread->blocked_since = USEC_TIMER;
178
            obj->waiter = current_thread;
179
            leave_critical_section(mode);
180
            context_switch();
15 theseven 181
            obj->waiter = NULL;
14 theseven 182
            if (!obj->signalled) return THREAD_TIMEOUT;
183
            obj->signalled = false;
184
            return THREAD_OK;
185
        }
186
        else ret = THREAD_TIMEOUT;
187
    }
188
 
189
    leave_critical_section(mode);
190
    return ret;
191
}
192
 
193
int wakeup_signal(struct wakeup* obj)
194
{
195
    int ret = THREAD_OK;
196
    uint32_t mode = enter_critical_section();
197
 
198
    obj->signalled = true;
199
    if (obj->waiter)
200
    {
201
        obj->waiter->state = THREAD_READY;
202
        obj->waiter->block_type = THREAD_NOT_BLOCKED;
203
        obj->waiter->blocked_by = NULL;
204
        obj->waiter->timeout = 0;
205
        ret = THREAD_FOUND;
206
    }
207
 
208
    leave_critical_section(mode);
209
    return ret;
210
}
211
 
212
void sleep(int usecs)
213
{
15 theseven 214
    if (usecs)
215
    {
216
        uint32_t mode = enter_critical_section();
217
        current_thread->state = THREAD_BLOCKED;
218
        current_thread->block_type = THREAD_BLOCK_SLEEP;
219
        current_thread->timeout = usecs;
220
        current_thread->blocked_since = USEC_TIMER;
221
        leave_critical_section(mode);
222
    }
14 theseven 223
    context_switch();
224
}
225
 
226
void scheduler_init(void)
227
{
429 theseven 228
    last_tick = USEC_TIMER;
34 theseven 229
    scheduler_frozen = false;
429 theseven 230
    head_thread = &idle_thread;
231
    current_thread = &idle_thread;
232
    memset(&idle_thread, 0, sizeof(idle_thread));
233
    idle_thread.state = THREAD_RUNNING;
234
    idle_thread.startusec = last_tick;
235
    idle_thread.name = "idle thread";
236
    idle_thread.stack = (uint32_t*)-1;
14 theseven 237
    setup_tick();
238
}
239
 
54 theseven 240
bool scheduler_freeze(bool value)
34 theseven 241
{
54 theseven 242
    bool old = scheduler_frozen;
34 theseven 243
    scheduler_frozen = value;
54 theseven 244
    return old;
34 theseven 245
}
246
 
389 theseven 247
void scheduler_pause_accounting()
248
{
249
    uint32_t usec = USEC_TIMER;
250
    current_thread->cputime_total += usec - current_thread->startusec;
251
    current_thread->cputime_current += usec - current_thread->startusec;
252
}
253
 
254
void scheduler_resume_accounting()
255
{
256
    current_thread->startusec = USEC_TIMER;
257
}
258
 
429 theseven 259
void scheduler_switch(struct scheduler_thread* thread)
14 theseven 260
{
429 theseven 261
    struct scheduler_thread* t;
14 theseven 262
    uint32_t score, best;
263
    uint32_t usec = USEC_TIMER;
264
    if (current_thread->state == THREAD_RUNNING) current_thread->state = THREAD_READY;
265
    if ((int)current_thread->stack != -1 && *current_thread->stack != 0xaffebeaf)
15 theseven 266
    {
429 theseven 267
        for (t = head_thread; t; t = t->thread_next)
268
            if (t->type == USER_THREAD)
269
                t->state = THREAD_SUSPENDED;
15 theseven 270
        current_thread->state = THREAD_DEFUNCT;
271
        current_thread->block_type = THREAD_DEFUNCT_STKOV;
272
        wakeup_signal(&dbgwakeup);
273
    }
14 theseven 274
 
275
    if (usec - last_tick > SCHEDULER_TICK)
276
    {
392 theseven 277
        uint32_t diff = usec - last_tick;
15 theseven 278
        last_tick = usec;
429 theseven 279
        for (t = head_thread; t; t = t->thread_next)
14 theseven 280
        {
429 theseven 281
            t->cpuload = 255 * t->cputime_current / diff;
282
            t->cputime_current = 0;
14 theseven 283
        }
284
    }
285
 
437 theseven 286
    if (scheduler_frozen) thread = &idle_thread;
14 theseven 287
    else
288
    {
429 theseven 289
        for (t = head_thread; t; t = t->thread_next)
290
            if (t->state == THREAD_BLOCKED && t->timeout != -1
291
             && TIME_AFTER(usec, t->blocked_since + t->timeout))
14 theseven 292
            {
429 theseven 293
                if (t->block_type == THREAD_BLOCK_MUTEX)
294
                    mutex_remove_from_queue((struct mutex*)t->blocked_by, t);
295
                t->state = THREAD_READY;
296
                t->block_type = THREAD_NOT_BLOCKED;
297
                t->blocked_by = NULL;
298
                t->timeout = 0;
34 theseven 299
            }
300
 
429 theseven 301
        if (thread && thread->state == THREAD_READY) current_thread = thread;
34 theseven 302
        else
303
        {
437 theseven 304
            thread = &idle_thread;
34 theseven 305
            best = 0xffffffff;
429 theseven 306
            for (t = head_thread; t; t = t->thread_next)
307
                if (t->state == THREAD_READY && t->priority)
14 theseven 308
                {
429 theseven 309
                    score = t->cputime_current / t->priority;
34 theseven 310
                    if (score < best)
311
                    {
312
                        best = score;
429 theseven 313
                        thread = t;
34 theseven 314
                    }
14 theseven 315
                }
34 theseven 316
        }
14 theseven 317
    }
318
 
429 theseven 319
    current_thread = thread;
14 theseven 320
    current_thread->state = THREAD_RUNNING;
321
}
322
 
429 theseven 323
struct scheduler_thread* thread_create(struct scheduler_thread* thread, const char* name,
324
                                       const void* code, void* stack, int stacksize,
325
                                       enum thread_type type, int priority, bool run)
14 theseven 326
{
429 theseven 327
    bool stack_alloced = false;
437 theseven 328
    bool thread_alloced = false;
429 theseven 329
    if (!stack)
330
    {
331
        stack = malloc(stacksize);
332
        stack_alloced = true;
333
    }
334
    if (!stack) return NULL;
335
    if (!thread)
336
    {
437 theseven 337
        thread = (struct scheduler_thread*)malloc(sizeof(struct scheduler_thread));
338
        thread_alloced = true;
339
    }
340
    if (!thread)
341
    {
429 theseven 342
        if (stack_alloced) free(stack);
343
        return NULL;
344
    }
437 theseven 345
    if (thread_alloced) reownalloc(thread, thread);
346
    if (stack_alloced) reownalloc(stack, thread);
429 theseven 347
 
14 theseven 348
    int i;
349
    for (i = 0; i < stacksize >> 2; i ++) ((uint32_t*)stack)[i] = 0xaffebeaf;
350
 
429 theseven 351
    memset(thread, 0, sizeof(struct scheduler_thread));
352
    thread->state = run ? THREAD_READY : THREAD_SUSPENDED;
353
    thread->type = type;
354
    thread->name = name;
355
    thread->priority = priority;
356
    thread->cpsr = 0x1f;
357
    thread->regs[15] = (uint32_t)code;
358
    thread->regs[14] = (uint32_t)thread_exit;
359
    thread->regs[13] = (uint32_t)stack + stacksize;
360
    thread->stack = stack;
361
 
14 theseven 362
    uint32_t mode = enter_critical_section();
429 theseven 363
    thread->thread_next = head_thread;
364
    head_thread = thread;
365
    leave_critical_section(mode);
14 theseven 366
 
429 theseven 367
    return thread;
14 theseven 368
}
369
 
429 theseven 370
int thread_suspend(struct scheduler_thread* thread)
14 theseven 371
{
372
    int ret = THREAD_OK;
373
    bool needsswitch = false;
374
    uint32_t mode = enter_critical_section();
375
 
429 theseven 376
    if (!thread) thread = current_thread;
377
    if (thread->state == THREAD_SUSPENDED) ret = ALREADY_SUSPENDED;
14 theseven 378
    if (ret == THREAD_OK)
379
    {
429 theseven 380
        if (thread->state == THREAD_RUNNING) needsswitch = true;
381
        else if (thread->state == THREAD_BLOCKED)
14 theseven 382
        {
429 theseven 383
            if (thread->block_type == THREAD_BLOCK_SLEEP)
15 theseven 384
            {
429 theseven 385
                if (thread->timeout != -1) thread->timeout -= USEC_TIMER - thread->blocked_since;
15 theseven 386
            }
429 theseven 387
            else if (thread->block_type == THREAD_BLOCK_MUTEX)
14 theseven 388
            {
429 theseven 389
                mutex_remove_from_queue((struct mutex*)thread->blocked_by, thread);
390
                if (thread->timeout != -1) thread->timeout -= USEC_TIMER - thread->blocked_since;
14 theseven 391
            }
429 theseven 392
            else if (thread->block_type == THREAD_BLOCK_WAKEUP)
15 theseven 393
            {
429 theseven 394
                if (thread->timeout != -1) thread->timeout -= USEC_TIMER - thread->blocked_since;
15 theseven 395
            }
14 theseven 396
        }
429 theseven 397
        thread->state = THREAD_SUSPENDED;
14 theseven 398
    }
399
 
400
    leave_critical_section(mode);
401
 
402
    if (needsswitch) context_switch();
403
 
404
    return ret;
405
}
406
 
429 theseven 407
int thread_resume(struct scheduler_thread* thread)
14 theseven 408
{
409
    int ret = THREAD_OK;
410
    bool needsswitch = false;
411
    uint32_t mode = enter_critical_section();
412
 
429 theseven 413
    if (!thread) thread = current_thread;
414
    if (thread->state != THREAD_SUSPENDED) ret = ALREADY_RESUMED;
14 theseven 415
    if (ret == THREAD_OK)
416
    {
429 theseven 417
        if (thread->block_type == THREAD_BLOCK_SLEEP)
418
            thread->blocked_since = USEC_TIMER;
419
        else if (thread->block_type == THREAD_BLOCK_MUTEX)
14 theseven 420
        {
429 theseven 421
            mutex_add_to_queue((struct mutex*)thread->blocked_by, thread);
422
            thread->blocked_since = USEC_TIMER;
423
            thread->state = THREAD_BLOCKED;
14 theseven 424
        }
429 theseven 425
        else if (thread->block_type == THREAD_BLOCK_WAKEUP)
14 theseven 426
        {
429 theseven 427
            thread->blocked_since = USEC_TIMER;
428
            thread->state = THREAD_BLOCKED;
14 theseven 429
        }
429 theseven 430
        else thread->state = THREAD_READY;
14 theseven 431
    }
432
 
433
    leave_critical_section(mode);
434
    return ret;
435
}
436
 
453 theseven 437
void thread_set_name(struct scheduler_thread* thread, char* name)
438
{
439
    uint32_t mode = enter_critical_section();
440
    if (!thread) thread = current_thread;
441
    thread->name = name;
442
    leave_critical_section(mode);
443
}
444
 
445
void thread_set_priority(struct scheduler_thread* thread, int priority)
446
{
447
    uint32_t mode = enter_critical_section();
448
    if (!thread) thread = current_thread;
449
    thread->priority = priority;
450
    leave_critical_section(mode);
451
}
452
 
429 theseven 453
int thread_terminate(struct scheduler_thread* thread)
14 theseven 454
{
429 theseven 455
    struct scheduler_thread* t;
14 theseven 456
    bool needsswitch = false;
457
    uint32_t mode = enter_critical_section();
458
 
429 theseven 459
    if (!thread) thread = current_thread;
460
    if (thread->state == THREAD_RUNNING) needsswitch = true;
461
    else if (thread->state == THREAD_BLOCKED)
14 theseven 462
    {
429 theseven 463
        if (thread->block_type == THREAD_BLOCK_MUTEX)
464
            mutex_remove_from_queue((struct mutex*)t->blocked_by, thread);
465
        else if (thread->block_type == THREAD_BLOCK_WAKEUP)
466
            ((struct wakeup*)thread->blocked_by)->waiter = NULL;
467
    }
468
    for (t = head_thread; t && t->thread_next != thread; t = t->thread_next);
469
    if (t) t->thread_next = thread->thread_next;
470
 
471
    leave_critical_section(mode);
472
 
85 theseven 473
#ifdef HAVE_STORAGE
429 theseven 474
    close_all_of_process(thread);
475
    closedir_all_of_process(thread);
85 theseven 476
#endif
130 theseven 477
#ifdef HAVE_BUTTON
429 theseven 478
    button_unregister_all_of_thread(thread);
130 theseven 479
#endif
429 theseven 480
    free_all_of_thread(thread);
14 theseven 481
 
482
    if (needsswitch) context_switch();
483
 
429 theseven 484
    return THREAD_OK;
14 theseven 485
}
486
 
423 theseven 487
int thread_killlevel(enum thread_type type, bool killself)
488
{
429 theseven 489
    struct scheduler_thread* t;
423 theseven 490
    int count = 0;
491
    uint32_t mode = enter_critical_section();
429 theseven 492
    for (t = head_thread; t; t = t->thread_next);
493
        if (t->type <= type && (killself || current_thread != t))
423 theseven 494
        {
429 theseven 495
            thread_terminate(t);
423 theseven 496
            count++;
497
        }
498
    leave_critical_section(mode);
499
    return count;
500
}
501
 
429 theseven 502
enum thread_state thread_get_state(struct scheduler_thread* thread)
249 theseven 503
{
429 theseven 504
    return thread->state;
249 theseven 505
}
506
 
14 theseven 507
void thread_exit()
508
{
429 theseven 509
    thread_terminate(NULL);
14 theseven 510
}
71 theseven 511
 
512
int* __errno()
513
{
514
    return &current_thread->err_no;
515
}