Diffstat (limited to 'src')
-rw-r--r-- | src/Kconfig                       | 16
-rw-r--r-- | src/arch/armv7/include/arch/cpu.h | 5
-rw-r--r-- | src/arch/x86/include/arch/cpu.h   | 5
-rw-r--r-- | src/include/thread.h              | 83
-rw-r--r-- | src/lib/Makefile.inc              | 1
-rw-r--r-- | src/lib/hardwaremain.c            | 3
-rw-r--r-- | src/lib/thread.c                  | 376
7 files changed, 489 insertions, 0 deletions
diff --git a/src/Kconfig b/src/Kconfig
index ce5a048bcc..4366bc809b 100644
--- a/src/Kconfig
+++ b/src/Kconfig
@@ -324,6 +324,22 @@ config TIMER_QUEUE
 	help
 	  Provide a timer queue for performing time-based callbacks.
 
+config COOP_MULTITASKING
+	def_bool n
+	depends on TIMER_QUEUE
+	help
+	  Cooperative multitasking allows callbacks to be multiplexed on the
+	  main thread of ramstage. When enabled, multiple execution paths can
+	  make progress concurrently by yielding to one another at udelay()
+	  calls within their code.
+
+config NUM_THREADS
+	int
+	default 4
+	depends on COOP_MULTITASKING
+	help
+	  How many execution threads to cooperatively multitask with.
+
 config HIGH_SCRATCH_MEMORY_SIZE
 	hex
 	default 0x0
diff --git a/src/arch/armv7/include/arch/cpu.h b/src/arch/armv7/include/arch/cpu.h
index 20a12c929e..5621aeda55 100644
--- a/src/arch/armv7/include/arch/cpu.h
+++ b/src/arch/armv7/include/arch/cpu.h
@@ -30,9 +30,14 @@ struct cpu_driver {
 	struct cpu_device_id *id_table;
 };
 
+struct thread;
+
 struct cpu_info {
 	device_t cpu;
 	unsigned long index;
+#if CONFIG_COOP_MULTITASKING
+	struct thread *thread;
+#endif
 };
 
 struct cpuinfo_arm {
diff --git a/src/arch/x86/include/arch/cpu.h b/src/arch/x86/include/arch/cpu.h
index 890c77f4f1..1fe12e5230 100644
--- a/src/arch/x86/include/arch/cpu.h
+++ b/src/arch/x86/include/arch/cpu.h
@@ -159,9 +159,14 @@ struct cpu_driver {
 
 struct cpu_driver *find_cpu_driver(struct device *cpu);
 
+struct thread;
+
 struct cpu_info {
 	device_t cpu;
 	unsigned int index;
+#if CONFIG_COOP_MULTITASKING
+	struct thread *thread;
+#endif
 };
 
 static inline struct cpu_info *cpu_info(void)
diff --git a/src/include/thread.h b/src/include/thread.h
new file mode 100644
index 0000000000..148c448bb2
--- /dev/null
+++ b/src/include/thread.h
@@ -0,0 +1,83 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef THREAD_H_
+#define THREAD_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <bootstate.h>
+#include <timer.h>
+#include <arch/cpu.h>
+
+#if CONFIG_COOP_MULTITASKING && !defined(__SMM__) && !defined(__PRE_RAM__)
+
+struct thread {
+	int id;
+	uintptr_t stack_current;
+	uintptr_t stack_orig;
+	struct thread *next;
+	void (*entry)(void *);
+	void *entry_arg;
+	int can_yield;
+};
+
+void threads_initialize(void);
+/* Run func(arg) on a new thread. Return 0 on successful start of thread, < 0
+ * when thread could not be started. Note that the thread will block the
+ * current state in the boot state machine until it is complete. */
+int thread_run(void (*func)(void *), void *arg);
+/* thread_run_until is the same as thread_run() except that it blocks state
+ * transitions from occurring in the (state, seq) pair of the boot state
+ * machine. */
+int thread_run_until(void (*func)(void *), void *arg,
+                     boot_state_t state, boot_state_sequence_t seq);
+/* Return 0 on successful yield for the given amount of time, < 0 when thread
+ * did not yield. */
+int thread_yield_microseconds(unsigned microsecs);
+
+/* Allow and prevent thread cooperation on the currently running thread. By
+ * default all threads are marked to be cooperative. That means a thread can
+ * yield to another thread at a pre-determined switch point. Currently there
+ * is only a single place where switching may occur: a call to udelay(). */
+void thread_cooperate(void);
+void thread_prevent_coop(void);
+
+static inline void thread_init_cpu_info_non_bsp(struct cpu_info *ci)
+{
+	ci->thread = NULL;
+}
+
+/* Architecture specific thread functions. */
+void asmlinkage switch_to_thread(uintptr_t new_stack, uintptr_t *saved_stack);
+/* Set up the stack frame for a new thread so that a switch_to_thread() call
+ * will enter the thread_entry() function with arg as a parameter. The
+ * saved_stack field in the struct thread needs to be updated accordingly. */
+void arch_prepare_thread(struct thread *t,
+                         void asmlinkage (*thread_entry)(void *), void *arg);
+#else
+static inline void threads_initialize(void) {}
+static inline int thread_run(void (*func)(void *), void *arg) { return -1; }
+static inline int thread_yield_microseconds(unsigned microsecs) { return -1; }
+static inline void thread_cooperate(void) {}
+static inline void thread_prevent_coop(void) {}
+struct cpu_info;
+static inline void thread_init_cpu_info_non_bsp(struct cpu_info *ci) { }
+#endif
+
+#endif /* THREAD_H_ */
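
To make the intended use concrete, here is a minimal hypothetical caller of this API; the worker function and the ready flag below are illustrative, not part of this commit:

#include <thread.h>
#include <console/console.h>

/* Hypothetical worker: wait for a slow device while letting other
 * threads (including the boot state machine) make progress. */
static void wait_for_device(void *arg)
{
	int *ready = arg;

	while (!*ready) {
		/* Returns 0 when the yield succeeded; < 0 means this
		 * context is not allowed to yield. */
		if (thread_yield_microseconds(100) < 0)
			break;
	}
	printk(BIOS_DEBUG, "device wait finished\n");
}

static void start_device_wait(int *ready_flag)
{
	/* The current boot state is blocked until the worker returns. */
	if (thread_run(wait_for_device, ready_flag) < 0)
		wait_for_device(ready_flag); /* Fall back to running inline. */
}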
diff --git a/src/lib/Makefile.inc b/src/lib/Makefile.inc
index 7306e6d857..2600aa50f9 100644
--- a/src/lib/Makefile.inc
+++ b/src/lib/Makefile.inc
@@ -90,6 +90,7 @@ ramstage-$(CONFIG_COLLECT_TIMESTAMPS) += timestamp.c
 ramstage-$(CONFIG_COVERAGE) += libgcov.c
 ramstage-$(CONFIG_MAINBOARD_DO_NATIVE_VGA_INIT) += edid.c
 ramstage-y += memrange.c
+ramstage-$(CONFIG_COOP_MULTITASKING) += thread.c
 ramstage-$(CONFIG_TIMER_QUEUE) += timer_queue.c
 
 # The CBMEM implementations are chosen based on CONFIG_DYNAMIC_CBMEM.
diff --git a/src/lib/hardwaremain.c b/src/lib/hardwaremain.c
index 99b4a069b4..14b3fff9ba 100644
--- a/src/lib/hardwaremain.c
+++ b/src/lib/hardwaremain.c
@@ -39,6 +39,7 @@
 #endif
 #include <timer.h>
 #include <timestamp.h>
+#include <thread.h>
 
 #if BOOT_STATE_DEBUG
 #define BS_DEBUG_LVL BIOS_DEBUG
@@ -459,6 +460,8 @@ void hardwaremain(void)
 
 	post_code(POST_CONSOLE_BOOT_MSG);
 
+	threads_initialize();
+
 	/* Schedule the static boot state entries. */
 	boot_state_schedule_static_entries();
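
With threads_initialize() run before the state machine starts, a boot state callback can also defer work with thread_run_until(). A sketch, assuming the boot states declared in hardwaremain.c (BS_PAYLOAD_LOAD, BS_ON_ENTRY); the probe function is hypothetical:

#include <bootstate.h>
#include <thread.h>

/* Hypothetical long-running probe; it yields whenever it waits. */
static void probe_slow_device(void *arg)
{
	/* ... work interleaved with udelay()/thread_yield_microseconds() ... */
}

/* Called from some boot state callback (registration not shown). */
static void start_background_probe(void)
{
	/* The state machine keeps advancing; only entry to
	 * BS_PAYLOAD_LOAD stalls until the probe finishes. */
	if (thread_run_until(probe_slow_device, NULL,
			     BS_PAYLOAD_LOAD, BS_ON_ENTRY) < 0)
		probe_slow_device(NULL); /* No free thread: run inline. */
}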
diff --git a/src/lib/thread.c b/src/lib/thread.c
new file mode 100644
index 0000000000..6508bfad14
--- /dev/null
+++ b/src/lib/thread.c
@@ -0,0 +1,376 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <arch/cpu.h>
+#include <bootstate.h>
+#include <console/console.h>
+#include <thread.h>
+
+static void idle_thread_init(void);
+
+/* There needs to be at least one thread to run the ramstage state machine. */
+#define TOTAL_NUM_THREADS (CONFIG_NUM_THREADS + 1)
+extern char thread_stacks[CONFIG_NUM_THREADS * CONFIG_STACK_SIZE];
+
+/* Storage space for the thread structs. */
+static struct thread all_threads[TOTAL_NUM_THREADS];
+
+/* All runnable (but not running) and free threads are kept on their
+ * respective lists. */
+static struct thread *runnable_threads;
+static struct thread *free_threads;
+
+static inline struct cpu_info *thread_cpu_info(const struct thread *t)
+{
+	return (void *)(t->stack_orig);
+}
+
+static inline int thread_can_yield(const struct thread *t)
+{
+	return (t != NULL && t->can_yield);
+}
+
+/* Assumes current cpu info can switch. */
+static inline struct thread *cpu_info_to_thread(const struct cpu_info *ci)
+{
+	return ci->thread;
+}
+
+static inline struct thread *current_thread(void)
+{
+	return cpu_info_to_thread(cpu_info());
+}
+
+static inline int thread_list_empty(struct thread **list)
+{
+	return *list == NULL;
+}
+
+static inline struct thread *pop_thread(struct thread **list)
+{
+	struct thread *t;
+
+	t = *list;
+	*list = t->next;
+	t->next = NULL;
+	return t;
+}
+
+static inline void push_thread(struct thread **list, struct thread *t)
+{
+	t->next = *list;
+	*list = t;
+}
+
+static inline void push_runnable(struct thread *t)
+{
+	push_thread(&runnable_threads, t);
+}
+
+static inline struct thread *pop_runnable(void)
+{
+	return pop_thread(&runnable_threads);
+}
+
+static inline struct thread *get_free_thread(void)
+{
+	struct thread *t;
+	struct cpu_info *ci;
+	struct cpu_info *new_ci;
+
+	if (thread_list_empty(&free_threads))
+		return NULL;
+
+	t = pop_thread(&free_threads);
+
+	ci = cpu_info();
+
+	/* Initialize the cpu_info structure on the new stack. */
+	new_ci = thread_cpu_info(t);
+	*new_ci = *ci;
+	new_ci->thread = t;
+
+	/* Reset the current stack value to the original. */
+	t->stack_current = t->stack_orig;
+
+	return t;
+}
+
+static inline void free_thread(struct thread *t)
+{
+	push_thread(&free_threads, t);
+}
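
Why thread_cpu_info() is just a cast of stack_orig: coreboot keeps a struct cpu_info at the top of every CONFIG_STACK_SIZE-sized stack, and cpu_info() recovers it from the stack pointer. A rough sketch of that convention, an assumption modeled on the x86 implementation (the real cpu_info() is architecture specific):

#include <arch/cpu.h>
#include <stdint.h>

/* Given any address within a CONFIG_STACK_SIZE-aligned stack, locate the
 * cpu_info that lives just below the top of that stack. This is why each
 * struct thread records stack_orig pointing at its cpu_info, and why
 * get_free_thread() copies the current cpu_info onto the new stack. */
static struct cpu_info *cpu_info_for_stack(uintptr_t sp)
{
	uintptr_t stack_top;

	stack_top = (sp & ~(uintptr_t)(CONFIG_STACK_SIZE - 1)) +
		    CONFIG_STACK_SIZE;
	return (struct cpu_info *)(stack_top - sizeof(struct cpu_info));
}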
+/* The idle thread is run whenever there isn't anything else that is runnable.
+ * Its sole responsibility is to ensure progress is made by running the timer
+ * callbacks. */
+static void idle_thread(void *unused)
+{
+	/* This thread never voluntarily yields. */
+	thread_prevent_coop();
+	while (1) {
+		timers_run();
+	}
+}
+
+static void schedule(struct thread *t)
+{
+	struct thread *current = current_thread();
+
+	/* If t is NULL, find a new runnable thread. */
+	if (t == NULL) {
+		if (thread_list_empty(&runnable_threads))
+			die("Runnable thread list is empty!\n");
+		t = pop_runnable();
+	} else {
+		/* current is still runnable. */
+		push_runnable(current);
+	}
+	switch_to_thread(t->stack_current, &current->stack_current);
+}
+
+static void terminate_thread(struct thread *t)
+{
+	free_thread(t);
+	schedule(NULL);
+}
+
+static void asmlinkage call_wrapper(void *unused)
+{
+	struct thread *current = current_thread();
+
+	current->entry(current->entry_arg);
+	terminate_thread(current);
+}
+
+/* Block the current state transitions until thread is complete. */
+static void asmlinkage call_wrapper_block_current(void *unused)
+{
+	struct thread *current = current_thread();
+
+	boot_state_current_block();
+	current->entry(current->entry_arg);
+	boot_state_current_unblock();
+	terminate_thread(current);
+}
+
+struct block_boot_state {
+	boot_state_t state;
+	boot_state_sequence_t seq;
+};
+
+/* Block the provided state until thread is complete. */
+static void asmlinkage call_wrapper_block_state(void *arg)
+{
+	struct block_boot_state *bbs = arg;
+	struct thread *current = current_thread();
+
+	boot_state_block(bbs->state, bbs->seq);
+	current->entry(current->entry_arg);
+	boot_state_unblock(bbs->state, bbs->seq);
+	terminate_thread(current);
+}
+
+/* Prepare a thread so that it starts by executing thread_entry(thread_arg).
+ * Within thread_entry() it will call func(arg). */
+static void prepare_thread(struct thread *t, void *func, void *arg,
+			   void asmlinkage (*thread_entry)(void *),
+			   void *thread_arg)
+{
+	/* Stash the function and argument to run. */
+	t->entry = func;
+	t->entry_arg = arg;
+
+	/* All new threads can yield by default. */
+	t->can_yield = 1;
+
+	arch_prepare_thread(t, thread_entry, thread_arg);
+}
+
+static void thread_resume_from_timeout(struct timeout_callback *tocb)
+{
+	struct thread *to;
+
+	to = tocb->priv;
+	schedule(to);
+}
+
+static void idle_thread_init(void)
+{
+	struct thread *t;
+
+	t = get_free_thread();
+
+	if (t == NULL) {
+		die("No threads available for idle thread!\n");
+	}
+
+	/* Queue idle thread to run once all other threads have yielded. */
+	prepare_thread(t, idle_thread, NULL, call_wrapper, NULL);
+	push_runnable(t);
+	/* Mark the currently executing thread to cooperate. */
+	thread_cooperate();
+}
+
+/* Don't inline this function so the timeout_callback won't have its storage
+ * space on the stack cleaned up before the call to schedule(). */
+static int __attribute__((noinline))
+thread_yield_timed_callback(struct timeout_callback *tocb, unsigned microsecs)
+{
+	tocb->priv = current_thread();
+	tocb->callback = thread_resume_from_timeout;
+
+	if (timer_sched_callback(tocb, microsecs))
+		return -1;
+
+	/* The timer callback will wake up the current thread. */
+	schedule(NULL);
+	return 0;
+}
+
+static void *thread_alloc_space(struct thread *t, size_t bytes)
+{
+	/* Allocate the amount of space on the stack keeping the stack
+	 * aligned to the pointer size. */
+	t->stack_current -= ALIGN_UP(bytes, sizeof(uintptr_t));
+
+	return (void *)t->stack_current;
+}
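
Since udelay() is the one designated switch point, a udelay() implementation could hand its wait to the timer queue via thread_yield_microseconds() before falling back to spinning. A hypothetical sketch; this commit does not itself modify any udelay():

#include <delay.h>
#include <thread.h>

void udelay(unsigned usecs)
{
	/* If another thread can run, sleep on the timer queue instead of
	 * burning cycles; thread_yield_microseconds() returns < 0 when
	 * yielding is not possible (no threading, or coop prevented). */
	if (thread_yield_microseconds(usecs) == 0)
		return;

	/* Fall back to the usual busy wait. */
	/* ... architecture/platform specific spin loop ... */
}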
+void threads_initialize(void)
+{
+	int i;
+	struct thread *t;
+	char *stack_top;
+	struct cpu_info *ci;
+
+	/* Initialize the BSP thread first. The cpu_info structure is assumed
+	 * to be just under the top of the stack. */
+	t = &all_threads[0];
+	ci = cpu_info();
+	ci->thread = t;
+	t->stack_orig = (uintptr_t)ci;
+	t->id = 0;
+
+	stack_top = &thread_stacks[CONFIG_STACK_SIZE] - sizeof(struct cpu_info);
+	for (i = 1; i < TOTAL_NUM_THREADS; i++) {
+		t = &all_threads[i];
+		t->stack_orig = (uintptr_t)stack_top;
+		t->id = i;
+		stack_top += CONFIG_STACK_SIZE;
+		free_thread(t);
+	}
+
+	idle_thread_init();
+}
+
+int thread_run(void (*func)(void *), void *arg)
+{
+	struct thread *current;
+	struct thread *t;
+
+	current = current_thread();
+
+	if (!thread_can_yield(current)) {
+		printk(BIOS_ERR,
+		       "thread_run() called from non-yielding context!\n");
+		return -1;
+	}
+
+	t = get_free_thread();
+
+	if (t == NULL) {
+		printk(BIOS_ERR, "thread_run() No more threads!\n");
+		return -1;
+	}
+
+	prepare_thread(t, func, arg, call_wrapper_block_current, NULL);
+	schedule(t);
+
+	return 0;
+}
+
+int thread_run_until(void (*func)(void *), void *arg,
+		     boot_state_t state, boot_state_sequence_t seq)
+{
+	struct thread *current;
+	struct thread *t;
+	struct block_boot_state *bbs;
+
+	current = current_thread();
+
+	if (!thread_can_yield(current)) {
+		printk(BIOS_ERR,
+		       "thread_run_until() called from non-yielding context!\n");
+		return -1;
+	}
+
+	t = get_free_thread();
+
+	if (t == NULL) {
+		printk(BIOS_ERR, "thread_run_until() No more threads!\n");
+		return -1;
+	}
+
+	bbs = thread_alloc_space(t, sizeof(*bbs));
+	bbs->state = state;
+	bbs->seq = seq;
+	prepare_thread(t, func, arg, call_wrapper_block_state, bbs);
+	schedule(t);
+
+	return 0;
+}
+
+int thread_yield_microseconds(unsigned microsecs)
+{
+	struct thread *current;
+	struct timeout_callback tocb;
+
+	current = current_thread();
+
+	if (!thread_can_yield(current))
+		return -1;
+
+	if (thread_yield_timed_callback(&tocb, microsecs))
+		return -1;
+
+	return 0;
+}
+
+void thread_cooperate(void)
+{
+	struct thread *current;
+
+	current = current_thread();
+
+	if (current != NULL)
+		current->can_yield = 1;
+}
+
+void thread_prevent_coop(void)
+{
+	struct thread *current;
+
+	current = current_thread();
+
+	if (current != NULL)
+		current->can_yield = 0;
+}
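
Finally, a sketch of the cooperation controls: a sequence that must not be interleaved with other threads, yet still calls udelay(), can opt out of switching around the critical region. The register accesses here are hypothetical:

#include <delay.h>
#include <thread.h>

static void timing_critical_sequence(void)
{
	/* With cooperation off, the udelay() calls below busy-wait
	 * instead of switching threads, keeping the sequence tight. */
	thread_prevent_coop();

	/* write_reg(REG_STROBE, 1);	hypothetical device access */
	udelay(5);
	/* write_reg(REG_STROBE, 0); */
	udelay(5);

	thread_cooperate();
}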