summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorH. Peter Anvin <hpa@zytor.com>2009-09-08 21:06:34 -0700
committerH. Peter Anvin <hpa@zytor.com>2009-09-08 21:11:49 -0700
commit0785d12409b73cad87d02cac735f7cbb79580201 (patch)
tree3e01d306543a8e49d224b960ea2040a20c1e6856
parent408e9890f28abc5f204e7cd3643e6417890e628d (diff)
downloadsyslinux-0785d12409b73cad87d02cac735f7cbb79580201.tar.gz
core: simple thread library
Simple thread library with the intent of making lwIP easier to port. Signed-off-by: H. Peter Anvin <hpa@zytor.com>
-rw-r--r--core/include/thread.h84
-rw-r--r--core/thread/exit_thread.c26
-rw-r--r--core/thread/idle_thread.c25
-rw-r--r--core/thread/kill_thread.c39
-rw-r--r--core/thread/root_thread.c9
-rw-r--r--core/thread/schedule.c39
-rw-r--r--core/thread/sem_asm.S15
-rw-r--r--core/thread/semaphore.c74
-rw-r--r--core/thread/start_thread.c33
-rw-r--r--core/thread/thread_asm.S33
-rw-r--r--core/thread/timeout.c39
11 files changed, 416 insertions, 0 deletions
diff --git a/core/include/thread.h b/core/include/thread.h
new file mode 100644
index 00000000..bba92d7c
--- /dev/null
+++ b/core/include/thread.h
@@ -0,0 +1,84 @@
+#ifndef _THREAD_H
+#define _THREAD_H
+
+#include <stddef.h>
+#include <inttypes.h>
+#include "core.h"
+
+struct semaphore;
+
+/*
+ * Callee-saved register state of a suspended thread.  The field order
+ * (ebx, esp, ebp, esi, edi at byte offsets 0, 4, 8, 12, 16) is
+ * hard-coded in __switch_to (thread_asm.S); do not reorder.
+ */
+struct thread_state {
+ uint32_t ebx, esp, ebp, esi, edi;
+};
+
+/* Node of a doubly linked circular list, embedded in threads and semaphores. */
+struct thread_list {
+ struct thread_list *next, *prev;
+};
+
+/*
+ * Record of a thread blocked on a semaphore.  Allocated on the blocked
+ * thread's own stack (see __sem_down_slow) and queued on the
+ * semaphore's wait list.
+ */
+struct thread_block {
+ struct thread_list list;
+ struct thread *thread; /* the blocked thread itself */
+ struct semaphore *semaphore; /* the semaphore it is waiting on */
+ jiffies_t block_time; /* when the thread went to sleep */
+ jiffies_t timeout; /* absolute expiry time; 0 = no timeout */
+ bool timed_out; /* set by __thread_process_timeouts or kill_thread */
+};
+
+struct thread {
+ struct thread_state state; /* must stay first: thread_asm.S addresses it at offset 0 */
+ struct thread_list list; /* membership in the circular ring of all threads */
+ struct thread_block *blocked; /* non-NULL while blocked on a semaphore */
+ int prio; /* lower value = higher priority (see __schedule) */
+};
+
+void __schedule(void);
+void __switch_to(struct thread *);
+void thread_yield(void);
+
+extern struct thread *__current;
+/* Return the currently running thread. */
+static inline struct thread *current(void)
+{
+ return __current;
+}
+
+/*
+ * Counting semaphore.  A negative count means -count threads are
+ * queued on the wait list (see sem_asm.S fast paths).
+ */
+struct semaphore {
+ int count;
+ struct thread_list list; /* queue of struct thread_block waiters */
+};
+
+jiffies_t sem_down(struct semaphore *, jiffies_t);
+void sem_up(struct semaphore *);
+void sem_init(struct semaphore *, int);
+
+typedef unsigned long irq_state_t;
+
+/* Read EFLAGS (including the interrupt flag) without changing it. */
+static inline irq_state_t irq_state(void)
+{
+ irq_state_t __st;
+
+ asm volatile("pushfl ; popl %0" : "=rm" (__st));
+ return __st;
+}
+
+/* Save EFLAGS and disable interrupts; pair with irq_restore(). */
+static inline irq_state_t irq_save(void)
+{
+ irq_state_t __st;
+
+ asm volatile("pushfl ; popl %0 ; cli" : "=rm" (__st));
+ return __st;
+}
+
+/* Restore EFLAGS previously obtained from irq_save()/irq_state(). */
+static inline void irq_restore(irq_state_t __st)
+{
+ asm volatile("pushl %0 ; popfl" : : "rm" (__st));
+}
+
+void start_thread(struct thread *t, void *stack, size_t stack_size, int prio,
+ void (*start_func)(void *), void *func_arg);
+void __exit_thread(void);
+void kill_thread(struct thread *);
+
+void start_idle_thread(void);
+void test_thread(void);
+
+#endif /* _THREAD_H */
diff --git a/core/thread/exit_thread.c b/core/thread/exit_thread.c
new file mode 100644
index 00000000..a5f12af4
--- /dev/null
+++ b/core/thread/exit_thread.c
@@ -0,0 +1,26 @@
+#include "thread.h"
+#include <limits.h>
+
+/*
+ * Terminate the current thread: unlink it from the thread ring and
+ * schedule away from it, never to return.  Must not be called on the
+ * last runnable thread.
+ */
+__noreturn __exit_thread(void)
+{
+ irq_state_t irq;
+ struct thread *curr = current();
+
+ irq = irq_save();
+
+ /* Remove from the linked list */
+ curr->list.prev->next = curr->list.next;
+ curr->list.next->prev = curr->list.prev;
+
+ /*
+ * Note: __schedule() can explicitly handle the case where
+ * curr isn't part of the linked list anymore, as long as
+ * curr->list.next is still valid.
+ */
+ __schedule();
+
+ /* We should never get here */
+ irq_restore(irq);
+ while (1)
+ asm volatile("hlt");
+}
diff --git a/core/thread/idle_thread.c b/core/thread/idle_thread.c
new file mode 100644
index 00000000..8a319ff4
--- /dev/null
+++ b/core/thread/idle_thread.c
@@ -0,0 +1,25 @@
+#include "thread.h"
+#include <limits.h>
+#include <sys/cpu.h>
+
+static struct thread idle_thread;
+
+static char idle_thread_stack[4096];
+
+/*
+ * Body of the idle thread (lowest priority, prio INT_MAX): give every
+ * other runnable thread a chance, then halt the CPU until the next
+ * interrupt, forever.
+ */
+static void idle_thread_func(void *dummy)
+{
+ (void)dummy;
+ sti(); /* the idle loop must run with interrupts enabled, or hlt would hang */
+
+ for (;;) {
+ thread_yield();
+ asm volatile("hlt");
+ }
+}
+
+/* Create and register the idle thread. */
+void start_idle_thread(void)
+{
+ start_thread(&idle_thread, idle_thread_stack, sizeof idle_thread_stack,
+ INT_MAX, idle_thread_func, NULL);
+}
+
diff --git a/core/thread/kill_thread.c b/core/thread/kill_thread.c
new file mode 100644
index 00000000..ed2e05f4
--- /dev/null
+++ b/core/thread/kill_thread.c
@@ -0,0 +1,39 @@
+#include "thread.h"
+#include <limits.h>
+
+/*
+ * Forcibly terminate another thread.  Works by overwriting the return
+ * address on the victim's saved stack so it runs __exit_thread the next
+ * time it is scheduled, and raising its priority so that happens as
+ * soon as possible.  Killing the current thread degenerates to
+ * __exit_thread().
+ */
+void kill_thread(struct thread *thread)
+{
+ irq_state_t irq;
+ struct thread_block *block;
+
+ if (thread == current())
+ __exit_thread();
+
+ irq = irq_save();
+
+ /*
+ * Muck with the stack so that the next time the thread is run then
+ * we end up going to __exit_thread.
+ */
+ *(size_t *)thread->state.esp = (size_t)__exit_thread;
+ thread->prio = INT_MIN; /* best possible priority: run (and die) next */
+
+ block = thread->blocked;
+ if (block) {
+ struct semaphore *sem = block->semaphore;
+ /* Remove us from the queue and increase the count */
+ block->list.next->prev = block->list.prev;
+ block->list.prev->next = block->list.next;
+ sem->count++;
+
+ thread->blocked = NULL;
+ block->timed_out = true; /* Fake an immediate timeout */
+ }
+
+ __schedule();
+
+ irq_restore(irq);
+}
+
+
+
diff --git a/core/thread/root_thread.c b/core/thread/root_thread.c
new file mode 100644
index 00000000..c5efd65b
--- /dev/null
+++ b/core/thread/root_thread.c
@@ -0,0 +1,9 @@
+#include "thread.h"
+
+/*
+ * The root thread: the execution context that existed before the
+ * thread library was initialized.  It starts out as the sole member of
+ * the circular thread ring (list points to itself).
+ */
+struct thread __root_thread = {
+ .list = { .next = &__root_thread.list, .prev = &__root_thread.list },
+ .blocked = NULL,
+ .prio = 0,
+};
+
+/* The currently running thread; at startup that is the root thread. */
+struct thread *__current = &__root_thread;
diff --git a/core/thread/schedule.c b/core/thread/schedule.c
new file mode 100644
index 00000000..b139c43d
--- /dev/null
+++ b/core/thread/schedule.c
@@ -0,0 +1,39 @@
+#include <sys/cpu.h>
+#include "thread.h"
+
+/*
+ * __schedule() should only be called with interrupts locked out!
+ */
+void __schedule(void)
+{
+ struct thread *curr = current();
+ struct thread *st, *nt, *best;
+
+ best = NULL;
+
+ /*
+ * The unusual form of this walk is because we have to start with
+ * the thread *following* curr, and curr may not actually be part
+ * of the list anymore (in the case of __exit_thread).
+ */
+ nt = st = container_of(curr->list.next, struct thread, list);
+ do {
+ if (!nt->blocked)
+ if (!best || nt->prio < best->prio)
+ best = nt;
+ nt = container_of(nt->list.next, struct thread, list);
+ } while (nt != st);
+
+ if (best != curr)
+ __switch_to(best);
+}
+
+/*
+ * This can be called from "normal" code...
+ */
+void thread_yield(void)
+{
+ irq_state_t irq = irq_save();
+ __schedule();
+ irq_restore(irq);
+}
diff --git a/core/thread/sem_asm.S b/core/thread/sem_asm.S
new file mode 100644
index 00000000..2b3014f2
--- /dev/null
+++ b/core/thread/sem_asm.S
@@ -0,0 +1,15 @@
+/*
+ * Semaphore fast paths.  The semaphore pointer arrives in %eax
+ * (presumably the regparm(3) convention used by the core -- the slow
+ * paths are reached by a conditional tail jump, so all argument
+ * registers are passed through to the C functions untouched).
+ */
+ .globl sem_down
+ .type sem_down, @function
+sem_down:
+ decl (%eax) /* count is at offset 0 of struct semaphore */
+ js __sem_down_slow /* count went negative: block in the C slow path */
+ ret
+ .size sem_down, .-sem_down
+
+ .globl sem_up
+ .type sem_up, @function
+sem_up:
+ incl (%eax)
+ jle __sem_up_slow /* count was negative: a waiter may need waking */
+ ret
+ .size sem_up, .-sem_up
diff --git a/core/thread/semaphore.c b/core/thread/semaphore.c
new file mode 100644
index 00000000..1fada459
--- /dev/null
+++ b/core/thread/semaphore.c
@@ -0,0 +1,74 @@
+#include <sys/cpu.h>
+#include "thread.h"
+
+/* Initialize a semaphore: empty (self-pointing) wait list, given count. */
+void sem_init(struct semaphore *sem, int count)
+{
+ sem->count = count;
+ sem->list.next = &sem->list;
+ sem->list.prev = &sem->list;
+}
+
+/*
+ * Slow path of sem_down(): the count went negative, so the current
+ * thread has to sleep until __sem_up_slow releases it or the timeout
+ * (in jiffies, 0 = wait forever) expires.
+ *
+ * Returns -1 on timeout, otherwise the number of jiffies spent asleep.
+ * Reached via a tail jump from sem_asm.S with the count already
+ * decremented.
+ */
+jiffies_t __sem_down_slow(struct semaphore *sem, jiffies_t timeout)
+{
+ struct thread *curr;
+ struct thread_block block;
+ irq_state_t irq;
+ jiffies_t now;
+
+ irq = irq_save();
+
+ /* Check if something already freed the semaphore on us */
+ if (sem->count >= 0) {
+ /*
+ * Bug fix: this path used to execute sti(), unconditionally
+ * enabling interrupts even when the caller entered with them
+ * disabled.  Restore the state saved by irq_save() instead,
+ * matching the normal exit path below.
+ */
+ irq_restore(irq);
+ return 0;
+ }
+
+ curr = current();
+ now = jiffies();
+
+ /* The block record lives on our own stack; it stays valid because
+ we do not return until we have been unlinked from the queue. */
+ block.thread = curr;
+ block.semaphore = sem;
+ block.block_time = now;
+ block.timeout = timeout ? now+timeout : 0;
+ block.timed_out = false;
+
+ curr->blocked = &block;
+
+ /* Add to the end of the wakeup list */
+ block.list.prev = sem->list.prev;
+ block.list.next = &sem->list;
+ sem->list.prev = &block.list;
+ block.list.prev->next = &block.list;
+
+ __schedule();
+
+ irq_restore(irq);
+ return block.timed_out ? -1 : jiffies() - block.block_time;
+}
+
+/*
+ * Slow path of sem_up(): the count was negative before the increment,
+ * so a thread may be queued on the wait list; wake the first one.
+ */
+void __sem_up_slow(struct semaphore *sem)
+{
+ irq_state_t irq;
+ struct thread_list *l;
+
+ irq = irq_save();
+
+ /*
+ * It's possible that something did a down on the semaphore, but
+ * didn't get to add themselves to the queue just yet. In that case
+ * we don't have to do anything, since the bailout clause in
+ * __sem_down_slow will take care of it.
+ */
+ l = sem->list.next;
+ if (l != &sem->list) {
+ struct thread_block *block = container_of(l, struct thread_block, list);
+
+ /* Unlink the first waiter from the head of the queue */
+ sem->list.next = block->list.next;
+ block->list.next->prev = &sem->list;
+
+ block->thread->blocked = NULL; /* mark the waiter runnable again */
+
+ __schedule();
+ }
+
+ irq_restore(irq);
+}
diff --git a/core/thread/start_thread.c b/core/thread/start_thread.c
new file mode 100644
index 00000000..f07984ff
--- /dev/null
+++ b/core/thread/start_thread.c
@@ -0,0 +1,33 @@
+#include <string.h>
+#include "thread.h"
+
+/*
+ * Assembly trampoline that a new thread "returns" into on its first
+ * switch (defined as a label in thread_asm.S).  Declared as an object
+ * here so that &__start_thread evaluates to the label's address.
+ */
+extern void (*__start_thread)(void);
+
+/*
+ * Initialize thread t to run start_func(func_arg) on the given stack,
+ * insert it into the thread ring right after the current thread, and
+ * call the scheduler (so a higher-priority new thread starts at once).
+ */
+void start_thread(struct thread *t, void *stack, size_t stack_size, int prio,
+ void (*start_func)(void *), void *func_arg)
+{
+ irq_state_t irq;
+ struct thread *curr;
+
+ memset(t, 0, sizeof *t);
+
+ /* Top of stack, 4-byte aligned, with one slot for the fake return
+ address that the first __switch_to's ret will pop. */
+ t->state.esp = (((size_t)stack + stack_size) & ~3) - 4;
+ *(size_t *)t->state.esp = (size_t)&__start_thread;
+
+ /* __start_thread picks these registers up after the first switch:
+ %esi = function to run, %edi = its argument, %ebx = EFLAGS. */
+ t->state.esi = (size_t)start_func;
+ t->state.edi = (size_t)func_arg;
+ t->state.ebx = irq_state(); /* Inherit the IRQ state from the spawner */
+ t->prio = prio;
+
+ irq = irq_save();
+ curr = current();
+
+ /* Link t in immediately after the current thread */
+ t->list.prev = &curr->list;
+ t->list.next = curr->list.next;
+ curr->list.next = &t->list;
+ t->list.next->prev = &t->list;
+
+ __schedule();
+
+ irq_restore(irq);
+}
diff --git a/core/thread/thread_asm.S b/core/thread/thread_asm.S
new file mode 100644
index 00000000..64f9c9b9
--- /dev/null
+++ b/core/thread/thread_asm.S
@@ -0,0 +1,33 @@
+/*
+ * __switch_to(struct thread *new) -- new thread pointer in %eax.
+ * Saves the callee-saved registers of the current thread into
+ * __current->state (struct thread_state sits at offset 0 of struct
+ * thread), loads the new thread's state, and returns -- on the NEW
+ * thread's stack.
+ */
+ .globl __switch_to
+ .type __switch_to, @function
+__switch_to:
+ movl __current, %edx
+ movl %ebx, (%edx)
+ movl %esp, 4(%edx)
+ movl %ebp, 8(%edx)
+ movl %esi, 12(%edx)
+ movl %edi, 16(%edx)
+
+ movl (%eax), %ebx
+ movl 4(%eax), %esp
+ movl 8(%eax), %ebp
+ movl 12(%eax), %esi
+ movl 16(%eax), %edi
+ movl %eax, __current
+ ret
+ .size __switch_to, .-__switch_to
+
+/*
+ * First-run trampoline for a new thread (see start_thread):
+ * %esi = thread function, %edi = its argument, %ebx = initial EFLAGS.
+ */
+ .globl __start_thread
+ .type __start_thread, @function
+__start_thread:
+ movl %edi, %eax /* Thread function argument */
+
+ pushl $0 /* For gdb's benefit */
+ movl %esp, %ebp /* For gdb's benefit */
+
+ pushl %ebx /* Set up the flags/interrupt state */
+ popfl
+
+ call *%esi /* Run the desired function */
+ jmp __exit_thread /* If we get here, kill the thread */
+ .size __start_thread, .-__start_thread
diff --git a/core/thread/timeout.c b/core/thread/timeout.c
new file mode 100644
index 00000000..ff01ebe5
--- /dev/null
+++ b/core/thread/timeout.c
@@ -0,0 +1,39 @@
+/*
+ * timeout.c
+ *
+ */
+
+#include "thread.h"
+
+/*
+ * __thread_process_timeouts()
+ *
+ * Look for threads that have timed out. This should be called
+ * under interrupt lock, before calling __schedule().
+ */
+void __thread_process_timeouts(void)
+{
+ struct thread *curr = current();
+ struct thread_list *tp;
+ struct thread *t;
+ jiffies_t now = jiffies();
+ struct thread_block *block;
+ jiffies_t timeout;
+
+ /* The current thread is obviously running, so no need to check... */
+ for (tp = curr->list.next; tp != &curr->list; tp = tp->next) {
+ t = container_of(tp, struct thread, list);
+ /* Only threads blocked with a nonzero timeout can expire */
+ if ((block = t->blocked) && (timeout = block->timeout)) {
+ /* Signed difference compare so jiffies wraparound is tolerated */
+ if ((signed int)(timeout - now) <= 0) {
+ struct semaphore *sem = block->semaphore;
+ /* Remove us from the queue and increase the count */
+ block->list.next->prev = block->list.prev;
+ block->list.prev->next = block->list.next;
+ sem->count++;
+
+ t->blocked = NULL;
+ block->timed_out = true;
+ }
+ }
+ }
+}