path: root/core/thread/semaphore.c
blob: 6a2e4c13bcb74cb90f6429f5deb27bd24cc3b3bb
#include <sys/cpu.h>
#include "thread.h"

void sem_init(struct semaphore *sem, int count)
{
    sem->list.next = sem->list.prev = &sem->list;
    sem->count = count;
}
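
/*
 * For reference: this file relies on the list and blocking structures
 * declared in thread.h.  The sketch below is an assumption about their
 * shape, reconstructed from how the fields are used in this file; it is
 * not a copy of the real header:
 *
 *   struct thread_list {
 *       struct thread_list *next, *prev;
 *   };
 *
 *   struct semaphore {
 *       int count;                   // negative roughly means waiters
 *       struct thread_list list;     // circular queue of thread_blocks
 *   };
 *
 *   struct thread_block {
 *       struct thread_list list;     // links into semaphore->list
 *       struct thread *thread;
 *       struct semaphore *semaphore;
 *       mstime_t block_time;         // when we went to sleep
 *       mstime_t timeout;            // absolute deadline, 0 = none
 *       bool timed_out;
 *   };
 *
 * sem_init() above just points the list head at itself (an empty
 * circular list) and stores the initial count.
 */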

mstime_t __sem_down_slow(struct semaphore *sem, mstime_t timeout)
{
    irq_state_t irq;
    mstime_t rv;

    irq = irq_save();

    if (sem->count >= 0) {
	/* Something already freed the semaphore on us */
	rv = 0;
    } else if (timeout == -1) {
	/* Immediate timeout */
	sem->count++;
	rv = -1;
    } else {
	/* Put the thread to sleep... */

	struct thread_block block;
	struct thread *curr = current();
	mstime_t now = ms_timer();

	block.thread     = curr;
	block.semaphore  = sem;
	block.block_time = now;
	block.timeout    = timeout ? now+timeout : 0;	/* 0 = no deadline */
	block.timed_out  = false;

	curr->blocked    = &block;

	/* Add to the end of the wakeup list */
	block.list.prev       = sem->list.prev;
	block.list.next       = &sem->list;
	sem->list.prev        = &block.list;
	block.list.prev->next = &block.list;

	__schedule();

	/* -1 if we timed out, otherwise how long we were blocked */
	rv = block.timed_out ? -1 : ms_timer() - block.block_time;
    }

    irq_restore(irq);
    return rv;
}
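
/*
 * Usage sketch (an assumption, not taken from this file): sem_down() is
 * presumed to be the fast-path wrapper in thread.h that decrements the
 * count and only calls __sem_down_slow() when it has to block, with the
 * timeout in milliseconds given the mstime_t/ms_timer() naming:
 *
 *   struct semaphore sem;
 *   sem_init(&sem, 0);                  // starts out unavailable
 *
 *   mstime_t slept = sem_down(&sem, 1000);
 *   if (slept == -1)
 *       ...                             // nobody did sem_up() in time
 *   else
 *       ...                             // we got it; slept is roughly
 *                                       // how long we waited
 */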

void __sem_up_slow(struct semaphore *sem)
{
    irq_state_t irq;
    struct thread_list *l;

    irq = irq_save();

    /*
     * It's possible that something did a down on the semaphore, but
     * didn't get to add itself to the queue just yet.  In that case
     * we don't have to do anything, since the bailout clause in
     * __sem_down_slow will take care of it.
     */
    l = sem->list.next;
    if (l != &sem->list) {
	struct thread_block *block;
	block = container_of(l, struct thread_block, list);

	/* Remove the first waiter from the queue */
	sem->list.next = block->list.next;
	block->list.next->prev = &sem->list;

	/* Mark it unblocked; __schedule() gives it a chance to run */
	block->thread->blocked = NULL;

	__schedule();
    }

    irq_restore(irq);
}
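
/*
 * Illustrative sketch of the fast paths that normally reach the slow
 * paths above.  Wrappers like these are assumed to live in thread.h;
 * the definitions below are reconstructed from how this file behaves,
 * not copied from the real header.  Because the count is adjusted
 * outside the irq-protected slow paths, an interrupt-context sem_up()
 * can slip in between the decrement and the queue insertion; that is
 * exactly the window handled by the "something already freed the
 * semaphore" bailout in __sem_down_slow() and by the empty-queue check
 * in __sem_up_slow().
 *
 *   static inline mstime_t sem_down(struct semaphore *sem, mstime_t timeout)
 *   {
 *       sem->count--;
 *       if (sem->count < 0)
 *           return __sem_down_slow(sem, timeout); // contended: block
 *       return 0;                                 // got it immediately
 *   }
 *
 *   static inline void sem_up(struct semaphore *sem)
 *   {
 *       sem->count++;
 *       if (sem->count <= 0)
 *           __sem_up_slow(sem);                   // wake one waiter
 *   }
 */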