Subversion Repositories freemyipod

Rev

Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
14 theseven 1
//
2
//
3
//    Copyright 2010 TheSeven
4
//
5
//
6
//    This file is part of emBIOS.
7
//
8
//    emBIOS is free software: you can redistribute it and/or
9
//    modify it under the terms of the GNU General Public License as
10
//    published by the Free Software Foundation, either version 2 of the
11
//    License, or (at your option) any later version.
12
//
13
//    emBIOS is distributed in the hope that it will be useful,
14
//    but WITHOUT ANY WARRANTY; without even the implied warranty of
15
//    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
16
//    See the GNU General Public License for more details.
17
//
18
//    You should have received a copy of the GNU General Public License along
19
//    with emBIOS.  If not, see <http://www.gnu.org/licenses/>.
20
//
21
//
22
 
23
 
24
#include "global.h"
25
#include "thread.h"
26
#include "timer.h"
27
#include "panic.h"
28
#include "util.h"
29
 
30
 
15 theseven 31
// Thread table: slot 0 is the idle thread set up by scheduler_init().
struct scheduler_thread scheduler_threads[MAX_THREADS] IBSS_ATTR;
// The thread currently executing (or just selected by scheduler_switch()).
struct scheduler_thread* current_thread IBSS_ATTR;
// USEC_TIMER timestamp of the last CPU-load accounting tick.
uint32_t last_tick IBSS_ATTR;
// Defined elsewhere; signalled by scheduler_switch() when a stack overflow
// kills the user threads (presumably consumed by a debugger thread - verify).
extern struct wakeup dbgwakeup;
14 theseven 35
 
36
 
37
void mutex_init(struct mutex* obj)
38
{
39
    memset(obj, 0, sizeof(struct mutex));
40
}
41
 
42
// Insert 'thread' into the mutex's waiter list, which is kept sorted by
// descending priority so mutex_unlock() can hand the lock to the head.
// A new thread whose priority ties with the head is inserted ahead of it.
void mutex_add_to_queue(struct mutex* obj, struct scheduler_thread* thread)
{
    struct scheduler_thread* t;
    // Empty list, or the new thread outranks (or ties) the head: push front.
    if (!obj->waiters || obj->waiters->priority <= thread->priority)
    {
        thread->queue_next = obj->waiters;
        obj->waiters = thread;
    }
    else
    {
        // Walk past every node with strictly higher priority, then splice
        // the thread in before the first node it outranks or ties with.
        t = obj->waiters;
        while (t->queue_next && t->queue_next->priority > thread->priority)
            t = t->queue_next;
        thread->queue_next = t->queue_next;
        t->queue_next = thread;
    }
}
59
 
60
void mutex_remove_from_queue(struct mutex* obj, struct scheduler_thread* thread)
61
{
62
    struct scheduler_thread* t;
63
    if (!obj->waiters) return;
64
    if (obj->waiters == thread) obj->waiters = thread->queue_next;
65
    else
66
    {
67
        t = obj->waiters;
68
        while (t->queue_next)
69
        {
70
            if (t->queue_next == thread) t->queue_next = thread->queue_next;
71
            t = t->queue_next;
72
        }
73
    }
74
}
75
 
76
int mutex_lock(struct mutex* obj, int timeout)
77
{
78
    int ret = THREAD_OK;
79
    struct scheduler_thread* thread;
80
    uint32_t mode = enter_critical_section();
81
 
82
    if (!obj->count)
83
    {
84
        obj->count = 1;
85
        obj->owner = current_thread;
86
    }
87
    else if (obj->owner == current_thread) obj->count++;
88
    else
89
    {
90
        if (timeout)
91
        {
92
            current_thread->state = THREAD_BLOCKED;
93
            current_thread->block_type = THREAD_BLOCK_MUTEX;
94
            current_thread->blocked_by = obj;
95
            current_thread->timeout = timeout;
96
            current_thread->blocked_since = USEC_TIMER;
97
            mutex_add_to_queue(obj, current_thread);
98
            leave_critical_section(mode);
99
            context_switch();
100
            if (obj->owner != current_thread) return THREAD_TIMEOUT;
101
            return THREAD_OK;
102
        }
103
        else ret = THREAD_TIMEOUT;
104
    }
105
 
106
    leave_critical_section(mode);
107
    return ret;
108
}
109
 
110
// Release one nesting level of 'obj', which must be locked and owned by the
// calling thread (either condition failing kills the thread via panicf).
// When the outermost level is released and threads are waiting, ownership is
// handed directly to the highest-priority waiter, which is made runnable.
// Returns the remaining nesting count, i.e. THREAD_OK (0) when the mutex was
// fully released or handed over.
int mutex_unlock(struct mutex* obj)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (!obj->count)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Trying to unlock non-owned mutex! (%08X)", obj);
    }

    if (obj->owner != current_thread)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Trying to unlock mutex owned by different thread! (%08X)", obj);
    }

    // Still nested: just report the remaining count.
    if (--(obj->count)) ret = obj->count;
    else if (obj->waiters)
    {
        // Hand the mutex straight to the head waiter (the queue is kept
        // priority-sorted by mutex_add_to_queue) and unblock it.
        obj->count = 1;
        obj->owner = obj->waiters;
        obj->waiters->state = THREAD_READY;
        obj->waiters->block_type = THREAD_NOT_BLOCKED;
        obj->waiters->blocked_by = NULL;
        obj->waiters->timeout = 0;
        obj->waiters = obj->waiters->queue_next;
    }

    leave_critical_section(mode);
    return ret;
}
142
 
143
void wakeup_init(struct wakeup* obj)
144
{
145
    memset(obj, 0, sizeof(struct wakeup));
146
}
147
 
148
// Wait for 'obj' to be signalled, for at most 'timeout' microseconds
// (-1 = wait forever, 0 = just poll). A pending signal is consumed
// immediately without blocking. Only one thread may wait on a wakeup at a
// time; a second concurrent waiter is a fatal error (panicf).
// Returns THREAD_OK if the wakeup was signalled, THREAD_TIMEOUT otherwise.
int wakeup_wait(struct wakeup* obj, int timeout)
{
    int ret = THREAD_OK;
    uint32_t mode = enter_critical_section();

    if (obj->waiter)
    {
        leave_critical_section(mode);
        panicf(PANIC_KILLTHREAD, "Multiple threads waiting single wakeup! (%08X)", obj);
    }

    // Consume an already-pending signal without blocking.
    if (obj->signalled) obj->signalled = false;
    else
    {
        if (timeout)
        {
            // Register as the (single) waiter and block.
            current_thread->state = THREAD_BLOCKED;
            current_thread->block_type = THREAD_BLOCK_WAKEUP;
            current_thread->blocked_by = obj;
            current_thread->timeout = timeout;
            current_thread->blocked_since = USEC_TIMER;
            obj->waiter = current_thread;
            leave_critical_section(mode);
            context_switch();
            // Rescheduled: either wakeup_signal() ran or we timed out.
            // Deregister in both cases, and consume the signal if present.
            obj->waiter = NULL;
            if (!obj->signalled) return THREAD_TIMEOUT;
            obj->signalled = false;
            return THREAD_OK;
        }
        else ret = THREAD_TIMEOUT;  // poll found no pending signal
    }

    leave_critical_section(mode);
    return ret;
}
183
 
184
int wakeup_signal(struct wakeup* obj)
185
{
186
    int ret = THREAD_OK;
187
    uint32_t mode = enter_critical_section();
188
 
189
    obj->signalled = true;
190
    if (obj->waiter)
191
    {
192
        obj->waiter->state = THREAD_READY;
193
        obj->waiter->block_type = THREAD_NOT_BLOCKED;
194
        obj->waiter->blocked_by = NULL;
195
        obj->waiter->timeout = 0;
196
        ret = THREAD_FOUND;
197
    }
198
 
199
    leave_critical_section(mode);
200
    return ret;
201
}
202
 
203
void sleep(int usecs)
204
{
15 theseven 205
    if (usecs)
206
    {
207
        uint32_t mode = enter_critical_section();
208
        current_thread->state = THREAD_BLOCKED;
209
        current_thread->block_type = THREAD_BLOCK_SLEEP;
210
        current_thread->timeout = usecs;
211
        current_thread->blocked_since = USEC_TIMER;
212
        leave_critical_section(mode);
213
    }
14 theseven 214
    context_switch();
215
}
216
 
217
void scheduler_init(void)
218
{
219
    memset(scheduler_threads, 0, sizeof(scheduler_threads));
220
    last_tick = USEC_TIMER;
221
    current_thread = scheduler_threads;
222
    current_thread->state = THREAD_RUNNING;
223
    current_thread->startusec = last_tick;
224
    current_thread->name = "idle thread";
225
    current_thread->stack = (uint32_t*)-1;
226
    setup_tick();
227
}
228
 
229
// Core scheduling decision, run on every context switch.
// 'thread' is a hint: if it names a READY thread it is scheduled directly,
// otherwise the READY thread with the lowest cputime/priority ratio wins.
// Also does the per-switch bookkeeping: CPU-time accounting, stack-overflow
// detection, periodic CPU-load updates, and waking of timed-out threads.
void scheduler_switch(int thread)
{
    int i;
    uint32_t score, best;
    uint32_t usec = USEC_TIMER;
    if (current_thread->state == THREAD_RUNNING) current_thread->state = THREAD_READY;
    // Charge the timeslice that just ended to the outgoing thread.
    current_thread->cputime_total += usec - current_thread->startusec;
    current_thread->cputime_current += usec - current_thread->startusec;
    // Stack-overflow check: thread_create() paints the stack with 0xaffebeaf,
    // so a clobbered bottom word means the outgoing thread overflowed.
    // The idle thread (stack == (uint32_t*)-1) is exempt. On overflow,
    // suspend all user threads, mark the culprit defunct and signal dbgwakeup.
    if ((int)current_thread->stack != -1 && *current_thread->stack != 0xaffebeaf)
    {
        for (i = 0; i < MAX_THREADS; i++)
            if (scheduler_threads[i].type == USER_THREAD)
                scheduler_threads[i].state = THREAD_SUSPENDED;
        current_thread->state = THREAD_DEFUNCT;
        current_thread->block_type = THREAD_DEFUNCT_STKOV;
        wakeup_signal(&dbgwakeup);
    }

    // Once per SCHEDULER_TICK, convert the CPU time accumulated by each
    // thread into a load figure and restart the accounting window.
    if (usec - last_tick > SCHEDULER_TICK)
    {
        last_tick = usec;
        for (i = 0; i < MAX_THREADS; i++)
        {
            scheduler_threads[i].cpuload = scheduler_threads[i].cputime_current / SCHEDULER_TICK;
            scheduler_threads[i].cputime_current = 0;
        }
    }

    // Wake every blocked thread whose finite timeout has expired; a thread
    // still queued on a mutex must be unlinked from that queue first.
    for (i = 0; i < MAX_THREADS; i++)
        if (scheduler_threads[i].state == THREAD_BLOCKED
         && scheduler_threads[i].timeout != -1
         && TIME_AFTER(usec, scheduler_threads[i].blocked_since
                           + scheduler_threads[i].timeout))
        {
            if (scheduler_threads[i].block_type == THREAD_BLOCK_MUTEX)
                mutex_remove_from_queue((struct mutex*)scheduler_threads[i].blocked_by,
                                        &scheduler_threads[i]);
            scheduler_threads[i].state = THREAD_READY;
            scheduler_threads[i].block_type = THREAD_NOT_BLOCKED;
            scheduler_threads[i].blocked_by = NULL;
            scheduler_threads[i].timeout = 0;
        }

    // Honor the caller's hint if that thread is runnable...
    if (thread >= 0 && thread < MAX_THREADS && scheduler_threads[thread].state == THREAD_READY)
        current_thread = &scheduler_threads[thread];
    else
    {
        // ...otherwise pick the READY thread that has used the least CPU
        // time relative to its priority. Priority-0 threads (e.g. the idle
        // thread) are never chosen here; slot 0 is the fallback when no
        // prioritized thread is READY.
        thread = 0;
        best = 0xffffffff;
        for (i = 0; i < MAX_THREADS; i++)
            if (scheduler_threads[i].state == THREAD_READY && scheduler_threads[i].priority)
            {
                score = scheduler_threads[i].cputime_current / scheduler_threads[i].priority;
                if (score < best)
                {
                    best = score;
                    thread = i;
                }
            }
    }

    // 'thread' now indexes the winner in all paths.
    current_thread = &scheduler_threads[thread];
    current_thread->state = THREAD_RUNNING;
    current_thread->startusec = USEC_TIMER;
}
294
 
295
// Allocate a free slot in the thread table and set up a new thread on the
// caller-supplied stack. 'run' selects whether it starts READY or SUSPENDED.
// Returns the new thread's index, or NO_MORE_THREADS if the table is full.
// The stack is pre-filled with the 0xaffebeaf canary pattern that
// scheduler_switch() uses for overflow detection.
int thread_create(const char* name, const void* code, void* stack,
                  int stacksize, enum thread_type type, int priority, bool run)
{
    int ret = NO_MORE_THREADS;
    int i;

    // Paint the whole stack word-wise (stacksize is in bytes).
    for (i = 0; i < stacksize >> 2; i ++) ((uint32_t*)stack)[i] = 0xaffebeaf;

    uint32_t mode = enter_critical_section();

    for (i = 0; i < MAX_THREADS; i++)
        if (scheduler_threads[i].state == THREAD_FREE)
        {
            ret = i;
            memset(&scheduler_threads[i], 0, sizeof(struct scheduler_thread));
            scheduler_threads[i].state = run ? THREAD_READY : THREAD_SUSPENDED;
            scheduler_threads[i].type = type;
            scheduler_threads[i].name = name;
            scheduler_threads[i].priority = priority;
            // Initial register file (ARM convention: r15=pc, r14=lr, r13=sp):
            // pc = entry point, lr = thread_exit so a plain return terminates
            // the thread, sp = top of stack. cpsr 0x13 = supervisor mode.
            scheduler_threads[i].cpsr = 0x13;
            scheduler_threads[i].regs[15] = (uint32_t)code;
            scheduler_threads[i].regs[14] = (uint32_t)thread_exit;
            scheduler_threads[i].regs[13] = (uint32_t)stack + stacksize;
            scheduler_threads[i].stack = stack;
            break;
        }

    leave_critical_section(mode);
    return ret;
}
325
 
326
int thread_suspend(int thread)
327
{
328
    int ret = THREAD_OK;
329
    struct scheduler_thread* t = &scheduler_threads[thread];
330
    bool needsswitch = false;
331
    uint32_t mode = enter_critical_section();
332
 
333
    if (thread == -1) t = current_thread;
334
    else if (thread < 0 || thread >= MAX_THREADS) ret = INVALID_THREAD;
335
    else if (t->state == THREAD_FREE) ret = INVALID_THREAD;
336
    else if (t->state == THREAD_SUSPENDED) ret = ALREADY_SUSPENDED;
337
    if (ret == THREAD_OK)
338
    {
339
        if (t->state == THREAD_RUNNING) needsswitch = true;
340
        else if (t->state == THREAD_BLOCKED)
341
        {
342
            if (t->block_type == THREAD_BLOCK_SLEEP)
15 theseven 343
            {
344
                if (t->timeout != -1) t->timeout -= USEC_TIMER - t->blocked_since;
345
            }
14 theseven 346
            else if (t->block_type == THREAD_BLOCK_MUTEX)
347
            {
348
                mutex_remove_from_queue((struct mutex*)t->blocked_by, t);
15 theseven 349
                if (t->timeout != -1) t->timeout -= USEC_TIMER - t->blocked_since;
14 theseven 350
            }
351
            else if (t->block_type == THREAD_BLOCK_WAKEUP)
15 theseven 352
            {
353
                if (t->timeout != -1) t->timeout -= USEC_TIMER - t->blocked_since;
354
            }
14 theseven 355
        }
356
        t->state = THREAD_SUSPENDED;
357
    }
358
 
359
    leave_critical_section(mode);
360
 
361
    if (needsswitch) context_switch();
362
 
363
    return ret;
364
}
365
 
366
int thread_resume(int thread)
367
{
368
    int ret = THREAD_OK;
369
    struct scheduler_thread* t = &scheduler_threads[thread];
370
    bool needsswitch = false;
371
    uint32_t mode = enter_critical_section();
372
 
373
    if (thread == -1) t = current_thread;
374
    else if (thread < 0 || thread >= MAX_THREADS) ret = INVALID_THREAD;
375
    else if (t->state == THREAD_FREE) ret = INVALID_THREAD;
376
    else if (t->state != THREAD_SUSPENDED) ret = ALREADY_RESUMED;
377
    if (ret == THREAD_OK)
378
    {
379
        if (t->block_type == THREAD_BLOCK_SLEEP)
380
            t->blocked_since = USEC_TIMER;
381
        else if (t->block_type == THREAD_BLOCK_MUTEX)
382
        {
383
            mutex_add_to_queue((struct mutex*)t->blocked_by, t);
384
            t->blocked_since = USEC_TIMER;
385
            t->state = THREAD_BLOCKED;
386
        }
387
        else if (t->block_type == THREAD_BLOCK_WAKEUP)
388
        {
389
            t->blocked_since = USEC_TIMER;
390
            t->state = THREAD_BLOCKED;
391
        }
392
        else t->state = THREAD_READY;
393
    }
394
 
395
    leave_critical_section(mode);
396
    return ret;
397
}
398
 
399
int thread_terminate(int thread)
400
{
401
    int ret = THREAD_OK;
402
    struct scheduler_thread* t = &scheduler_threads[thread];
403
    bool needsswitch = false;
404
    uint32_t mode = enter_critical_section();
405
 
406
    if (thread == -1) t = current_thread;
407
    else if (thread < 0 || thread >= MAX_THREADS) ret = INVALID_THREAD;
408
    else if (t->state == THREAD_FREE) ret = INVALID_THREAD;
409
    if (ret == THREAD_OK)
410
    {
411
        if (t->state == THREAD_RUNNING) needsswitch = true;
412
        else if (t->state == THREAD_BLOCKED)
413
        {
414
            if (t->block_type == THREAD_BLOCK_MUTEX)
415
                mutex_remove_from_queue((struct mutex*)t->blocked_by, t);
416
            else if (t->block_type == THREAD_BLOCK_WAKEUP)
417
                ((struct wakeup*)t->blocked_by)->waiter = NULL;
418
        }
419
        t->state = THREAD_FREE;
420
    }
421
 
422
    leave_critical_section(mode);
423
 
424
    if (needsswitch) context_switch();
425
 
426
    return ret;
427
}
428
 
429
// Voluntary exit point: terminates the calling thread. Also installed as the
// return address (regs[14]) by thread_create(), so a thread function that
// simply returns ends up here.
void thread_exit()
{
    thread_terminate(-1);
}