diff --git a/arch/lkl/include/asm/cpu.h b/arch/lkl/include/asm/cpu.h new file mode 100644 index 00000000000000..1bffb16a51f467 --- /dev/null +++ b/arch/lkl/include/asm/cpu.h @@ -0,0 +1,14 @@ +#ifndef _ASM_LKL_CPU_H +#define _ASM_LKL_CPU_H + +int lkl_cpu_get(void); +void lkl_cpu_put(void); +int lkl_cpu_try_run_irq(int irq); +int lkl_cpu_init(void); +void lkl_cpu_shutdown(void); +void lkl_cpu_wait_shutdown(void); +void lkl_cpu_wakeup(void); +void lkl_cpu_change_owner(lkl_thread_t owner); +void lkl_cpu_set_irqs_pending(void); + +#endif /* _ASM_LKL_CPU_H */ diff --git a/arch/lkl/include/asm/irq.h b/arch/lkl/include/asm/irq.h index d1c87bb3404845..975894e1c00794 100644 --- a/arch/lkl/include/asm/irq.h +++ b/arch/lkl/include/asm/irq.h @@ -2,7 +2,10 @@ #define _ASM_LKL_IRQ_H #define IRQ_STATUS_BITS (sizeof(long) * 8) -#define NR_IRQS ((int)IRQ_STATUS_BITS * IRQ_STATUS_BITS) +#define NR_IRQS ((int)(IRQ_STATUS_BITS * IRQ_STATUS_BITS)) + +void run_irqs(void); +void set_irq_pending(int irq); #include diff --git a/arch/lkl/include/asm/setup.h b/arch/lkl/include/asm/setup.h index 36169133dc6767..831a2feca220cd 100644 --- a/arch/lkl/include/asm/setup.h +++ b/arch/lkl/include/asm/setup.h @@ -3,8 +3,4 @@ #define COMMAND_LINE_SIZE 4096 -#ifndef __ASSEMBLY__ -void wakeup_cpu(void); -#endif - #endif diff --git a/arch/lkl/include/asm/syscalls.h b/arch/lkl/include/asm/syscalls.h index b1e2d08e1bddf3..43956b4bbf0ad6 100644 --- a/arch/lkl/include/asm/syscalls.h +++ b/arch/lkl/include/asm/syscalls.h @@ -1,8 +1,8 @@ #ifndef _ASM_LKL_SYSCALLS_H #define _ASM_LKL_SYSCALLS_H -int initial_syscall_thread(void *); -void free_initial_syscall_thread(void); +int syscalls_init(void); +void syscalls_cleanup(void); long lkl_syscall(long no, long *params); #define sys_mmap sys_mmap_pgoff diff --git a/arch/lkl/include/asm/thread_info.h b/arch/lkl/include/asm/thread_info.h index 0f1ec2b8add739..cd4b91dd1464b6 100644 --- a/arch/lkl/include/asm/thread_info.h +++ b/arch/lkl/include/asm/thread_info.h @@ -18,6 +18,7 @@ struct thread_info { int preempt_count; mm_segment_t addr_limit; struct lkl_sem *sched_sem; + struct lkl_jmp_buf sched_jb; bool dead; lkl_thread_t tid; struct task_struct *prev_sched; @@ -46,7 +47,7 @@ static inline struct thread_info *current_thread_info(void) unsigned long *alloc_thread_stack_node(struct task_struct *, int node); void free_thread_stack(unsigned long *); -int threads_init(void); +void threads_init(void); void threads_cleanup(void); #define TIF_SYSCALL_TRACE 0 @@ -56,6 +57,24 @@ void threads_cleanup(void); #define TIF_RESTORE_SIGMASK 4 #define TIF_MEMDIE 5 #define TIF_NOHZ 6 +#define TIF_SCHED_JB 7 +#define TIF_SCHED_EXIT 8 +#define TIF_HOST_THREAD 9 + +static inline void set_ti_thread_flag(struct thread_info *ti, int flag); + +static inline int thread_set_sched_jmp(void) +{ + set_ti_thread_flag(current_thread_info(), TIF_SCHED_JB); + return lkl_ops->jmp_buf_set(&current_thread_info()->sched_jb); +} + +static inline void thread_set_sched_exit(void) +{ + set_ti_thread_flag(current_thread_info(), TIF_SCHED_EXIT); +} + +void switch_to_host_task(struct task_struct *); #define __HAVE_THREAD_FUNCTIONS diff --git a/arch/lkl/include/asm/unistd.h b/arch/lkl/include/asm/unistd.h index 39d62476b35f5a..c3451dfdb4e937 100644 --- a/arch/lkl/include/asm/unistd.h +++ b/arch/lkl/include/asm/unistd.h @@ -1,6 +1,5 @@ #include -__SYSCALL(__NR_create_syscall_thread, sys_create_syscall_thread) __SYSCALL(__NR_virtio_mmio_device_add, sys_virtio_mmio_device_add) #define __SC_ASCII(t, a) #t "," #a diff --git 
a/arch/lkl/include/uapi/asm/host_ops.h b/arch/lkl/include/uapi/asm/host_ops.h index 30d6f1dcedf456..9e352d932131e5 100644 --- a/arch/lkl/include/uapi/asm/host_ops.h +++ b/arch/lkl/include/uapi/asm/host_ops.h @@ -5,6 +5,9 @@ struct lkl_mutex; struct lkl_sem; typedef unsigned long lkl_thread_t; +struct lkl_jmp_buf { + unsigned long buf[32]; +}; /** * lkl_host_operations - host operations used by the Linux kernel @@ -25,7 +28,8 @@ typedef unsigned long lkl_thread_t; * @sem_up - perform an up operation on the semaphore * @sem_down - perform a down operation on the semaphore * - * @mutex_alloc - allocate and initialize a host mutex + * @mutex_alloc - allocate and initialize a host mutex; the recursive parameter + * determines whether the mutex is recursive * @mutex_free - free a host mutex * @mutex_lock - acquire the mutex * @mutex_unlock - release the mutex @@ -77,7 +81,7 @@ struct lkl_host_operations { void (*sem_up)(struct lkl_sem *sem); void (*sem_down)(struct lkl_sem *sem); - struct lkl_mutex *(*mutex_alloc)(void); + struct lkl_mutex *(*mutex_alloc)(int recursive); void (*mutex_free)(struct lkl_mutex *mutex); void (*mutex_lock)(struct lkl_mutex *mutex); void (*mutex_unlock)(struct lkl_mutex *mutex); @@ -86,6 +90,8 @@ struct lkl_host_operations { void (*thread_detach)(void); void (*thread_exit)(void); int (*thread_join)(lkl_thread_t tid); + lkl_thread_t (*thread_self)(void); + int (*thread_equal)(lkl_thread_t a, lkl_thread_t b); int (*tls_alloc)(unsigned int *key, void (*destructor)(void *)); int (*tls_free)(unsigned int key); @@ -106,6 +112,9 @@ struct lkl_host_operations { int write); long (*gettid)(void); + + int (*jmp_buf_set)(struct lkl_jmp_buf *jmpb); + void (*jmp_buf_longjmp)(struct lkl_jmp_buf *jmpb, int val); }; /** @@ -127,4 +136,7 @@ int lkl_start_kernel(struct lkl_host_operations *lkl_ops, */ int lkl_is_running(void); +int lkl_printf(const char *, ...); +void lkl_bug(const char *, ...); + #endif diff --git a/arch/lkl/include/uapi/asm/unistd.h b/arch/lkl/include/uapi/asm/unistd.h index ad47ffe9eb6dd3..654215e8189cc6 100644 --- a/arch/lkl/include/uapi/asm/unistd.h +++ b/arch/lkl/include/uapi/asm/unistd.h @@ -8,5 +8,4 @@ #include -#define __NR_create_syscall_thread (__NR_arch_specific_syscall + 0) -#define __NR_virtio_mmio_device_add (__NR_arch_specific_syscall + 1) +#define __NR_virtio_mmio_device_add (__NR_arch_specific_syscall + 0) diff --git a/arch/lkl/kernel/Makefile b/arch/lkl/kernel/Makefile index 3d9a324942149f..ef489f2f717618 100644 --- a/arch/lkl/kernel/Makefile +++ b/arch/lkl/kernel/Makefile @@ -1,3 +1,4 @@ extra-y := vmlinux.lds -obj-y = setup.o threads.o irq.o time.o syscalls.o misc.o console.o syscalls_32.o +obj-y = setup.o threads.o irq.o time.o syscalls.o misc.o console.o \ + syscalls_32.o cpu.o diff --git a/arch/lkl/kernel/cpu.c b/arch/lkl/kernel/cpu.c new file mode 100644 index 00000000000000..c99db15abe3f17 --- /dev/null +++ b/arch/lkl/kernel/cpu.c @@ -0,0 +1,244 @@ +#include +#include +#include +#include +#include +#include +#include + + +/* + * This structure is used to get access to the "LKL CPU" that allows us to run + * Linux code. Because we have to deal with various synchronization requirements + * between the idle thread, system calls, interrupts, "reentrancy", CPU shutdown, + * unbalanced wake up (i.e. acquire the CPU from one thread and release it from + * another), we can't use a simple synchronization mechanism such as (recursive) + * mutex or semaphore. Instead, we use a mutex and a bunch of status data plus a + * semaphore. 
+ */ +struct lkl_cpu { + /* lock that protects the CPU status data */ + struct lkl_mutex *lock; + /* + * Since we must free the cpu lock during shutdown, we need a + * synchronization algorithm between lkl_cpu_shutdown() and the CPU + * access functions, because lkl_cpu_get() gets called from thread + * destructor callback functions which may be scheduled after + * lkl_cpu_shutdown() has freed the cpu lock. + * + * An atomic counter is used to keep track of the number of running + * CPU access functions and allow the shutdown function to wait for + * them. + * + * The shutdown function adds MAX_THREADS to this counter, which allows + * the CPU access functions to check if the shutdown process has + * started. + * + * This algorithm assumes that we never have more than MAX_THREADS + * threads requesting CPU access. + */ + #define MAX_THREADS 1000000 + unsigned int shutdown_gate; + bool irqs_pending; + /* number of threads waiting for the CPU */ + unsigned int sleepers; + /* number of times the current thread got the CPU */ + unsigned int count; + /* current thread that owns the CPU */ + lkl_thread_t owner; + /* semaphore for threads waiting for the CPU */ + struct lkl_sem *sem; + /* semaphore for the idle thread */ + struct lkl_sem *idle_sem; + /* semaphore used for shutdown */ + struct lkl_sem *shutdown_sem; +} cpu; + +static int __cpu_try_get_lock(int n) +{ + lkl_thread_t self; + + if (__sync_fetch_and_add(&cpu.shutdown_gate, n) >= MAX_THREADS) + return -2; + + lkl_ops->mutex_lock(cpu.lock); + + if (cpu.shutdown_gate >= MAX_THREADS) + return -1; + + self = lkl_ops->thread_self(); + + if (cpu.owner && !lkl_ops->thread_equal(cpu.owner, self)) + return 0; + + cpu.owner = self; + cpu.count++; + + return 1; +} + +static void __cpu_try_get_unlock(int lock_ret, int n) +{ + if (lock_ret >= -1) + lkl_ops->mutex_unlock(cpu.lock); + __sync_fetch_and_sub(&cpu.shutdown_gate, n); +} + +void lkl_cpu_change_owner(lkl_thread_t owner) +{ + lkl_ops->mutex_lock(cpu.lock); + if (cpu.count > 1) + lkl_bug("bad count while changing owner\n"); + cpu.owner = owner; + lkl_ops->mutex_unlock(cpu.lock); +} + +int lkl_cpu_get(void) +{ + int ret; + + ret = __cpu_try_get_lock(1); + + while (ret == 0) { + cpu.sleepers++; + __cpu_try_get_unlock(ret, 0); + lkl_ops->sem_down(cpu.sem); + ret = __cpu_try_get_lock(0); + } + + __cpu_try_get_unlock(ret, 1); + + return ret; +} + +void lkl_cpu_put(void) +{ + lkl_ops->mutex_lock(cpu.lock); + + if (!cpu.count || !cpu.owner || + !lkl_ops->thread_equal(cpu.owner, lkl_ops->thread_self())) + lkl_bug("%s: unbalanced put\n", __func__); + + while (cpu.irqs_pending && !irqs_disabled()) { + cpu.irqs_pending = false; + lkl_ops->mutex_unlock(cpu.lock); + run_irqs(); + lkl_ops->mutex_lock(cpu.lock); + } + + if (need_resched()) { + if (test_thread_flag(TIF_HOST_THREAD)) { + if (cpu.count == 1 && !in_interrupt()) { + lkl_ops->mutex_unlock(cpu.lock); + set_current_state(TASK_UNINTERRUPTIBLE); + if (!thread_set_sched_jmp()) + schedule(); + return; + } + } else { + lkl_cpu_wakeup(); + } + } + + if (--cpu.count > 0) { + lkl_ops->mutex_unlock(cpu.lock); + return; + } + + if (cpu.sleepers) { + cpu.sleepers--; + lkl_ops->sem_up(cpu.sem); + } + + cpu.owner = 0; + + lkl_ops->mutex_unlock(cpu.lock); +} + +int lkl_cpu_try_run_irq(int irq) +{ + int ret; + + ret = __cpu_try_get_lock(1); + if (!ret) { + set_irq_pending(irq); + cpu.irqs_pending = true; + } + __cpu_try_get_unlock(ret, 1); + + return ret; +} + +void lkl_cpu_shutdown(void) +{ + __sync_fetch_and_add(&cpu.shutdown_gate, MAX_THREADS); +} + +void lkl_cpu_wait_shutdown(void) +{ + 
lkl_ops->sem_down(cpu.shutdown_sem); + lkl_ops->sem_free(cpu.shutdown_sem); +} + +static void lkl_cpu_cleanup(bool shutdown) +{ + while (__sync_fetch_and_add(&cpu.shutdown_gate, 0) > MAX_THREADS) + ; + + if (shutdown) + lkl_ops->sem_up(cpu.shutdown_sem); + else if (cpu.shutdown_sem) + lkl_ops->sem_free(cpu.shutdown_sem); + if (cpu.idle_sem) + lkl_ops->sem_free(cpu.idle_sem); + if (cpu.sem) + lkl_ops->sem_free(cpu.sem); + if (cpu.lock) + lkl_ops->mutex_free(cpu.lock); +} + +void arch_cpu_idle(void) +{ + if (cpu.shutdown_gate >= MAX_THREADS) { + + lkl_ops->mutex_lock(cpu.lock); + while (cpu.sleepers--) + lkl_ops->sem_up(cpu.sem); + lkl_ops->mutex_unlock(cpu.lock); + + lkl_cpu_cleanup(true); + + lkl_ops->thread_exit(); + } + + /* enable irqs now to allow direct irqs to run */ + local_irq_enable(); + + lkl_cpu_put(); + + lkl_ops->sem_down(cpu.idle_sem); + + lkl_cpu_get(); + + run_irqs(); +} + +void lkl_cpu_wakeup(void) +{ + lkl_ops->sem_up(cpu.idle_sem); +} + +int lkl_cpu_init(void) +{ + cpu.lock = lkl_ops->mutex_alloc(0); + cpu.sem = lkl_ops->sem_alloc(0); + cpu.idle_sem = lkl_ops->sem_alloc(0); + cpu.shutdown_sem = lkl_ops->sem_alloc(0); + + if (!cpu.lock || !cpu.sem || !cpu.idle_sem || !cpu.shutdown_sem) { + lkl_cpu_cleanup(false); + return -ENOMEM; + } + + return 0; +} diff --git a/arch/lkl/kernel/irq.c b/arch/lkl/kernel/irq.c index 526870bc0138a6..b5dbbaa7ebba39 100644 --- a/arch/lkl/kernel/irq.c +++ b/arch/lkl/kernel/irq.c @@ -8,6 +8,7 @@ #include #include #include +#include /* * To avoid much overhead we use an indirect approach: the irqs are marked using @@ -36,7 +37,7 @@ static inline unsigned long test_and_clear_irq_status(int index) return __sync_fetch_and_and(&irq_status[index], 0); } -static inline void set_irq_status(int irq) +void set_irq_pending(int irq) { int index = irq / IRQ_STATUS_BITS; int bit = irq % IRQ_STATUS_BITS; @@ -45,24 +46,54 @@ static inline void set_irq_status(int irq) __sync_fetch_and_or(&irq_index_status, BIT(index)); } - static struct irq_info { const char *user; } irqs[NR_IRQS]; +static bool irqs_enabled; + +static void run_irq(int irq) +{ + unsigned long flags; + + /* interrupt handlers need to run with interrupts disabled */ + local_irq_save(flags); + irq_enter(); + generic_handle_irq(irq); + irq_exit(); + local_irq_restore(flags); +} /** - * DO NOT run any linux calls (e.g. printk) here as they may race with the - * existing linux threads. + * This function can be called from arbitrary host threads, so do not + * issue any Linux calls (e.g. printk) unless lkl_cpu_get() has been + * called first. */ int lkl_trigger_irq(int irq) { + int ret; + if (!irq || irq > NR_IRQS) return -EINVAL; - set_irq_status(irq); + ret = lkl_cpu_try_run_irq(irq); + if (ret <= 0) + return ret; + + /* + * Since this can be called from Linux context (e.g. 
lkl_trigger_irq -> + * IRQ -> softirq -> lkl_trigger_irq) make sure we are actually allowed + * to run irqs at this point + */ + if (!irqs_enabled) { + set_irq_pending(irq); + lkl_cpu_put(); + return 0; + } + + run_irq(irq); - wakeup_cpu(); + lkl_cpu_put(); return 0; } @@ -81,9 +112,7 @@ static inline void for_each_bit(unsigned long word, void (*f)(int, int), int j) static inline void deliver_irq(int bit, int index) { - irq_enter(); - generic_handle_irq(index * IRQ_STATUS_BITS + bit); - irq_exit(); + run_irq(index * IRQ_STATUS_BITS + bit); } static inline void check_irq_status(int i, int unused) @@ -91,7 +120,7 @@ static inline void check_irq_status(int i, int unused) for_each_bit(test_and_clear_irq_status(i), deliver_irq, i); } -static void run_irqs(void) +void run_irqs(void) { for_each_bit(test_and_clear_irq_index_status(), check_irq_status, 0); } @@ -128,8 +157,6 @@ void lkl_put_irq(int i, const char *user) irqs[i].user = NULL; } -static bool irqs_enabled; - unsigned long arch_local_save_flags(void) { return irqs_enabled; diff --git a/arch/lkl/kernel/setup.c b/arch/lkl/kernel/setup.c index b329face1dbd0d..59add61bfb0eb6 100644 --- a/arch/lkl/kernel/setup.c +++ b/arch/lkl/kernel/setup.c @@ -3,21 +3,19 @@ #include #include #include -#include #include #include #include +#include #include #include #include #include +#include struct lkl_host_operations *lkl_ops; static char cmd_line[COMMAND_LINE_SIZE]; -static void *idle_sem; static void *init_sem; -static void *halt_sem; -static bool halt; static int is_running; void (*pm_power_off)(void) = NULL; static unsigned long mem_size; @@ -37,9 +35,8 @@ void __init setup_arch(char **cl) static void __init lkl_run_kernel(void *arg) { - /* Nobody will ever join us */ - lkl_ops->thread_detach(); - + threads_init(); + lkl_cpu_get(); start_kernel(); } @@ -63,33 +60,28 @@ int __init lkl_start_kernel(struct lkl_host_operations *ops, memcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE); - ret = threads_init(); - if (ret) - return ret; - init_sem = lkl_ops->sem_alloc(0); if (!init_sem) return -ENOMEM; - idle_sem = lkl_ops->sem_alloc(0); - if (!idle_sem) { - ret = -ENOMEM; + ret = lkl_cpu_init(); + if (ret) goto out_free_init_sem; - } ret = lkl_ops->thread_create(lkl_run_kernel, NULL); if (!ret) { ret = -ENOMEM; - goto out_free_idle_sem; + goto out_free_init_sem; } lkl_ops->sem_down(init_sem); + current_thread_info()->tid = lkl_ops->thread_self(); + lkl_cpu_change_owner(current_thread_info()->tid); + lkl_cpu_put(); is_running = 1; - return 0; -out_free_idle_sem: - lkl_ops->sem_free(idle_sem); + return 0; out_free_init_sem: lkl_ops->sem_free(init_sem); @@ -104,7 +96,7 @@ int lkl_is_running(void) void machine_halt(void) { - halt = true; + lkl_cpu_shutdown(); } void machine_power_off(void) @@ -122,52 +114,23 @@ long lkl_sys_halt(void) long err; long params[6] = { 0, }; - halt_sem = lkl_ops->sem_alloc(0); - if (!halt_sem) - return -ENOMEM; - err = lkl_syscall(__NR_reboot, params); - if (err < 0) { - lkl_ops->sem_free(halt_sem); + if (err < 0) return err; - } - - lkl_ops->sem_down(halt_sem); - lkl_ops->sem_free(halt_sem); - lkl_ops->sem_free(idle_sem); - lkl_ops->sem_free(init_sem); + is_running = false; - free_initial_syscall_thread(); + lkl_cpu_wait_shutdown(); + syscalls_cleanup(); + threads_cleanup(); + /* Shutdown the clockevents source. */ + tick_suspend_local(); free_mem(); return 0; } -void arch_cpu_idle(void) -{ - if (halt) { - threads_cleanup(); - - /* Shutdown the clockevents source. 
*/ - tick_suspend_local(); - - is_running = false; - lkl_ops->sem_up(halt_sem); - lkl_ops->thread_exit(); - } - - lkl_ops->sem_down(idle_sem); - - local_irq_enable(); -} - -void wakeup_cpu(void) -{ - if (!halt) - lkl_ops->sem_up(idle_sem); -} static int lkl_run_init(struct linux_binprm *bprm); @@ -192,9 +155,12 @@ static int lkl_run_init(struct linux_binprm *bprm) set_binfmt(&lkl_run_init_binfmt); - initial_syscall_thread(init_sem); + init_pid_ns.child_reaper = 0; + + syscalls_init(); - kernel_halt(); + lkl_ops->sem_up(init_sem); + lkl_ops->thread_exit(); return 0; } diff --git a/arch/lkl/kernel/syscalls.c b/arch/lkl/kernel/syscalls.c index 78b6a92973f864..790d6c9d4c42c8 100644 --- a/arch/lkl/kernel/syscalls.c +++ b/arch/lkl/kernel/syscalls.c @@ -13,9 +13,8 @@ #include #include #include +#include -struct syscall_thread_data; -static asmlinkage long sys_create_syscall_thread(struct syscall_thread_data *); static asmlinkage long sys_virtio_mmio_device_add(long base, long size, unsigned int irq); @@ -33,400 +32,126 @@ syscall_handler_t syscall_table[__NR_syscalls] = { #endif }; -struct syscall { - long no, *params, ret; -}; - -static struct syscall_thread_data { - struct syscall *s; - void *mutex, *completion; - int irq; - /* to be accessed from Linux context only */ - wait_queue_head_t wqh; - struct list_head list; - bool stop; - struct completion stopped; -} default_syscall_thread_data; - -static LIST_HEAD(syscall_threads); - -static struct syscall *dequeue_syscall(struct syscall_thread_data *data) -{ - - return (struct syscall *)__sync_fetch_and_and((long *)&data->s, 0); -} - -static long run_syscall(struct syscall *s) +static long run_syscall(long no, long *params) { long ret; - if (s->no < 0 || s->no >= __NR_syscalls) - ret = -ENOSYS; - else { - ret = syscall_table[s->no](s->params[0], s->params[1], - s->params[2], s->params[3], - s->params[4], s->params[5]); - } - s->ret = ret; + if (no < 0 || no >= __NR_syscalls) + return -ENOSYS; + + ret = syscall_table[no](params[0], params[1], params[2], params[3], + params[4], params[5]); task_work_run(); return ret; } -static irqreturn_t syscall_irq_handler(int irq, void *dev_id) -{ - struct syscall_thread_data *data = (struct syscall_thread_data *)dev_id; - - wake_up(&data->wqh); - return IRQ_HANDLED; -} +#define CLONE_FLAGS (CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_THREAD | \ + CLONE_SIGHAND | SIGCHLD) -static void cleanup_syscall_threads(void); +static int host_task_id; +static struct task_struct *host0; -int syscall_thread(void *_data) +static int new_host_task(struct task_struct **task) { - struct syscall_thread_data *data; - struct syscall *s; - int ret; - static int count; - - data = (struct syscall_thread_data *)_data; - init_waitqueue_head(&data->wqh); - list_add(&data->list, &syscall_threads); - init_completion(&data->stopped); - - snprintf(current->comm, sizeof(current->comm), "ksyscalld%d", count++); - - data->irq = lkl_get_free_irq("syscall"); - if (data->irq < 0) { - pr_err("lkl: %s: failed to allocate irq: %d\n", __func__, - data->irq); - return data->irq; - } - - ret = request_irq(data->irq, syscall_irq_handler, 0, current->comm, - data); - if (ret) { - pr_err("lkl: %s: failed to request irq %d: %d\n", __func__, - data->irq, ret); - lkl_put_irq(data->irq, "syscall"); - data->irq = -1; - return ret; - } - - pr_info("lkl: syscall thread %s initialized (irq%d)\n", current->comm, - data->irq); - - /* system call thread is ready */ - lkl_ops->sem_up(data->completion); - - while (1) { - wait_event(data->wqh, - (s = 
dequeue_syscall(data)) != NULL || data->stop); - - if (data->stop || s->no == __NR_reboot) - break; - - run_syscall(s); - - lkl_ops->sem_up(data->completion); - } + pid_t pid; - if (data == &default_syscall_thread_data) - cleanup_syscall_threads(); + switch_to_host_task(host0); - pr_info("lkl: exiting syscall thread %s\n", current->comm); + pid = kernel_thread(NULL, NULL, CLONE_FLAGS); + if (pid < 0) + return pid; - list_del(&data->list); + rcu_read_lock(); + *task = find_task_by_pid_ns(pid, &init_pid_ns); + rcu_read_unlock(); - free_irq(data->irq, data); - lkl_put_irq(data->irq, "syscall"); + host_task_id++; - if (data->stop) { - complete(&data->stopped); - } else { - s->ret = 0; - lkl_ops->sem_up(data->completion); - } + snprintf((*task)->comm, sizeof((*task)->comm), "host%d", host_task_id); return 0; } -static unsigned int syscall_thread_data_key; - -static int syscall_thread_data_init(struct syscall_thread_data *data, - void *completion) +static void del_host_task(void *arg) { - data->mutex = lkl_ops->sem_alloc(1); - if (!data->mutex) - return -ENOMEM; + struct task_struct *task = (struct task_struct *)arg; - if (!completion) - data->completion = lkl_ops->sem_alloc(0); - else - data->completion = completion; - if (!data->completion) { - lkl_ops->sem_free(data->mutex); - data->mutex = NULL; - return -ENOMEM; - } + if (lkl_cpu_get() < 0) + return; - return 0; + switch_to_host_task(task); + host_task_id--; + thread_set_sched_exit(); + do_exit(0); } -static long __lkl_syscall(struct syscall_thread_data *data, long no, - long *params) -{ - struct syscall s; - - s.no = no; - s.params = params; - - lkl_ops->sem_down(data->mutex); - data->s = &s; - lkl_trigger_irq(data->irq); - lkl_ops->sem_down(data->completion); - lkl_ops->sem_up(data->mutex); +static unsigned int task_key; - return s.ret; -} - -static struct syscall_thread_data *__lkl_create_syscall_thread(void) +long lkl_syscall(long no, long *params) { - struct syscall_thread_data *data; - long params[6], ret; - - if (!lkl_ops->tls_set) - return ERR_PTR(-ENOTSUPP); - - data = lkl_ops->mem_alloc(sizeof(*data)); - if (!data) - return ERR_PTR(-ENOMEM); - - memset(data, 0, sizeof(*data)); - - ret = syscall_thread_data_init(data, NULL); - if (ret < 0) - goto out_free; - - ret = lkl_ops->tls_set(syscall_thread_data_key, data); - if (ret < 0) - goto out_free; + struct task_struct *task = host0; + static int count; + long ret; - params[0] = (long)data; - ret = __lkl_syscall(&default_syscall_thread_data, - __NR_create_syscall_thread, params); + ret = lkl_cpu_get(); if (ret < 0) - goto out_free; - - lkl_ops->sem_down(data->completion); - - return data; - -out_free: - lkl_ops->sem_free(data->completion); - lkl_ops->sem_free(data->mutex); - lkl_ops->mem_free(data); - - return ERR_PTR(ret); -} - -int lkl_create_syscall_thread(void) -{ - struct syscall_thread_data *data = __lkl_create_syscall_thread(); - - if (IS_ERR(data)) - return PTR_ERR(data); - return 0; -} - -static int kernel_stop_syscall_thread(struct syscall_thread_data *data) -{ - data->stop = true; - wake_up(&data->wqh); - wait_for_completion(&data->stopped); - - return 0; -} - -static int __lkl_stop_syscall_thread(struct syscall_thread_data *data, - bool host) -{ - long ret, params[6]; - - if (host) - ret = __lkl_syscall(data, __NR_reboot, params); - else - ret = kernel_stop_syscall_thread(data); - if (ret) return ret; - lkl_ops->sem_free(data->completion); - lkl_ops->sem_free(data->mutex); - lkl_ops->mem_free(data); - - return 0; -} + count++; -int lkl_stop_syscall_thread(void) -{ - struct 
syscall_thread_data *data = NULL; - int ret; - - if (lkl_ops->tls_get) - data = lkl_ops->tls_get(syscall_thread_data_key); - if (!data) - return -EINVAL; - - ret = __lkl_stop_syscall_thread(data, true); - if (!ret && lkl_ops->tls_set) - lkl_ops->tls_set(syscall_thread_data_key, NULL); - return ret; -} - -static int auto_syscall_threads = true; -static int __init setup_auto_syscall_threads(char *str) -{ - get_option (&str, &auto_syscall_threads); - - return 1; -} -__setup("lkl_auto_syscall_threads=", setup_auto_syscall_threads); - - -long lkl_syscall(long no, long *params) -{ - struct syscall_thread_data *data = NULL; - - if (auto_syscall_threads && lkl_ops->tls_get) { - data = lkl_ops->tls_get(syscall_thread_data_key); - if (!data) { - data = __lkl_create_syscall_thread(); - if (!data) - lkl_puts("failed to create syscall thread\n"); + if (lkl_ops->tls_get) { + task = lkl_ops->tls_get(task_key); + if (!task) { + ret = new_host_task(&task); + if (ret) + goto out; + lkl_ops->tls_set(task_key, task); } } - if (!data || no == __NR_reboot) - data = &default_syscall_thread_data; - return __lkl_syscall(data, no, params); -} + switch_to_host_task(task); -static asmlinkage long -sys_create_syscall_thread(struct syscall_thread_data *data) -{ - pid_t pid; + ret = run_syscall(no, params); - pid = kernel_thread(syscall_thread, data, CLONE_VM | CLONE_FS | - CLONE_FILES | CLONE_THREAD | CLONE_SIGHAND | SIGCHLD); - if (pid < 0) - return pid; - - return 0; -} - - -/* - * A synchronization algorithm between cleanup_syscall_threads (which terminates - * all remaining syscall threads) and destructors functions (which frees a - * syscall thread as soon as the associated host thread terminates) is required - * since destructor functions run in host context and is not subject to kernel - * scheduling. - * - * An atomic counter is used to count the number of running destructor functions - * and allows the cleanup function to wait for the running destructor functions - * to complete. - * - * The cleanup functions adds MAX_SYSCALL_THREADS to this counter and this - * allows the destructor functions to check if the cleanup process has started - * and abort execution. This prevents "late" destructors from trying to free the - * syscall threads. - * - * This algorithm assumes that we never have more the MAX_SYSCALL_THREADS - * running. - */ -#define MAX_SYSCALL_THREADS 1000000 -static unsigned int destrs; - -/* - * This is called when the host thread terminates if auto_syscall_threads is - * enabled. We use it to remove the associated kernel syscall thread since it is - * not going to be used anymore. - * - * Note that this run in host context, not kernel context. - * - * To avoid races between the destructor and lkl_sys_halt we announce that a - * destructor is running and also check to see if lkl_sys_halt is running, in - * which case we bail out - the kernel thread is going to be / has been stopped - * by lkl_sys_halt. 
- */ -static void syscall_thread_destructor(void *_data) -{ - struct syscall_thread_data *data = _data; - - if (!data) - return; - - if (__sync_fetch_and_add(&destrs, 1) < MAX_SYSCALL_THREADS) - __lkl_stop_syscall_thread(data, true); - __sync_fetch_and_sub(&destrs, 1); -} - -static void cleanup_syscall_threads(void) -{ - struct syscall_thread_data *i = NULL, *aux; - - /* announce destructors that we are stopping */ - __sync_fetch_and_add(&destrs, MAX_SYSCALL_THREADS); + if (count > 1) { + set_current_state(TASK_UNINTERRUPTIBLE); + if (!thread_set_sched_jmp()) + schedule(); + count--; + return ret; + } - /* wait for any pending destructors to complete */ - while (__sync_fetch_and_add(&destrs, 0) > MAX_SYSCALL_THREADS) - schedule_timeout(1); +out: + count--; + lkl_cpu_put(); - /* no more destructors, we can safely remove the remaining threads */ - list_for_each_entry_safe(i, aux, &syscall_threads, list) { - if (i == &default_syscall_thread_data) - continue; - __lkl_stop_syscall_thread(i, false); - } + return ret; } -int initial_syscall_thread(void *sem) +int syscalls_init(void) { - void (*destr)(void *) = NULL; int ret = 0; - if (auto_syscall_threads) - destr = syscall_thread_destructor; - - if (lkl_ops->tls_alloc) - ret = lkl_ops->tls_alloc(&syscall_thread_data_key, destr); - if (ret) - return ret; - - init_pid_ns.child_reaper = 0; - - ret = syscall_thread_data_init(&default_syscall_thread_data, sem); - if (ret) - goto out; - - ret = syscall_thread(&default_syscall_thread_data); - -out: - if (lkl_ops->tls_free) - lkl_ops->tls_free(syscall_thread_data_key); + snprintf(current->comm, sizeof(current->comm), "host0"); + set_thread_flag(TIF_HOST_THREAD); + host0 = current; + if (lkl_ops->tls_alloc) { + ret = lkl_ops->tls_alloc(&task_key, del_host_task); + if (ret) + return ret; + } return ret; } -void free_initial_syscall_thread(void) +void syscalls_cleanup(void) { - /* NB: .completion is freed in lkl_sys_halt, because it is - * allocated in the LKL init routine. */ - lkl_ops->sem_free(default_syscall_thread_data.mutex); + if (lkl_ops->tls_free) + lkl_ops->tls_free(task_key); } SYSCALL_DEFINE3(virtio_mmio_device_add, long, base, long, size, unsigned int, diff --git a/arch/lkl/kernel/threads.c b/arch/lkl/kernel/threads.c index f8b673e838c0ae..049344b585209a 100644 --- a/arch/lkl/kernel/threads.c +++ b/arch/lkl/kernel/threads.c @@ -2,6 +2,7 @@ #include #include #include +#include static volatile int threads_counter; @@ -52,9 +53,11 @@ void setup_thread_stack(struct task_struct *p, struct task_struct *org) static void kill_thread(struct thread_info *ti) { - ti->dead = true; - lkl_ops->sem_up(ti->sched_sem); - lkl_ops->thread_join(ti->tid); + if (!test_ti_thread_flag(ti, TIF_HOST_THREAD)) { + ti->dead = true; + lkl_ops->sem_up(ti->sched_sem); + lkl_ops->thread_join(ti->tid); + } lkl_ops->sem_free(ti->sched_sem); } @@ -69,30 +72,41 @@ void free_thread_stack(unsigned long *stack) struct thread_info *_current_thread_info = &init_thread_union.thread_info; +/* + * schedule() expects the return of this function to be the task that we + * switched away from. 
Returning prev is not going to work because we are + * actually going to return the previous task that was scheduled before the + * task we are going to wake up, and not the current task, e.g.: + * + * swapper -> init: saved prev on swapper stack is swapper + * init -> ksoftirqd0: saved prev on init stack is init + * ksoftirqd0 -> swapper: returned prev is swapper + */ +static struct task_struct *abs_prev = &init_task; + struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next) { struct thread_info *_prev = task_thread_info(prev); struct thread_info *_next = task_thread_info(next); - /* - * schedule() expects the return of this function to be the task that we - * switched away from. Returning prev is not going to work because we - * are actually going to return the previous taks that was scheduled - * before the task we are going to wake up, and not the current task, - * e.g.: - * - * swapper -> init: saved prev on swapper stack is swapper - * init -> ksoftirqd0: saved prev on init stack is init - * ksoftirqd0 -> swapper: returned prev is swapper - */ - static struct task_struct *abs_prev = &init_task; + unsigned long _prev_flags = _prev->flags; _current_thread_info = task_thread_info(next); _next->prev_sched = prev; abs_prev = prev; + BUG_ON(!_next->tid); + lkl_cpu_change_owner(_next->tid); + lkl_ops->sem_up(_next->sched_sem); - lkl_ops->sem_down(_prev->sched_sem); + if (test_bit(TIF_SCHED_JB, &_prev_flags)) { + clear_ti_thread_flag(_prev, TIF_SCHED_JB); + lkl_ops->jmp_buf_longjmp(&_prev->sched_jb, 1); + } else if (test_bit(TIF_SCHED_EXIT, &_prev_flags)) { + lkl_ops->thread_exit(); + } else { + lkl_ops->sem_down(_prev->sched_sem); + } if (_prev->dead) { __sync_fetch_and_sub(&threads_counter, 1); @@ -102,6 +116,30 @@ struct task_struct *__switch_to(struct task_struct *prev, return abs_prev; } +void switch_to_host_task(struct task_struct *task) +{ + if (current == task) + return; + + if (WARN_ON(!test_tsk_thread_flag(task, TIF_HOST_THREAD))) + return; + + task_thread_info(task)->tid = lkl_ops->thread_self(); + + wake_up_process(task); + if (test_thread_flag(TIF_HOST_THREAD)) { + set_current_state(TASK_UNINTERRUPTIBLE); + if (!thread_set_sched_jmp()) + schedule(); + } else { + lkl_cpu_wakeup(); + lkl_cpu_put(); + } + + lkl_ops->sem_down(task_thread_info(task)->sched_sem); + schedule_tail(abs_prev); +} + struct thread_bootstrap_arg { struct thread_info *ti; int (*f)(void *); @@ -130,6 +168,11 @@ int copy_thread(unsigned long clone_flags, unsigned long esp, struct thread_info *ti = task_thread_info(p); struct thread_bootstrap_arg *tba; + if (!esp) { + set_ti_thread_flag(ti, TIF_HOST_THREAD); + return 0; + } + tba = kmalloc(sizeof(*tba), GFP_KERNEL); if (!tba) return -ENOMEM; @@ -153,26 +196,20 @@ void show_stack(struct task_struct *task, unsigned long *esp) { } -static inline void pr_early(const char *str) -{ - if (lkl_ops->print) - lkl_ops->print(str, strlen(str)); -} - /** * This is called before the kernel initializes, so no kernel calls (including * printk) can be made yet. 
*/ -int threads_init(void) +void threads_init(void) { - struct thread_info *ti = &init_thread_union.thread_info; int ret; + struct thread_info *ti = &init_thread_union.thread_info; ret = init_ti(ti); if (ret < 0) - pr_early("lkl: failed to allocate init schedule semaphore\n"); + lkl_printf("lkl: failed to allocate init schedule semaphore\n"); - return ret; + ti->tid = lkl_ops->thread_self(); } void threads_cleanup(void) diff --git a/tools/lkl/include/lkl.h b/tools/lkl/include/lkl.h index 8026a9c70b5597..eaac8f2eb4aa1c 100644 --- a/tools/lkl/include/lkl.h +++ b/tools/lkl/include/lkl.h @@ -340,24 +340,6 @@ void lkl_netdev_free(struct lkl_netdev *nd); */ int lkl_netdev_get_ifindex(int id); -/** - * lkl_create_syscall_thread - create an additional system call thread - * - * Create a new system call thread. All subsequent system calls issued from this - * host thread are queued to the newly created system call thread. - * - * System call threads must be stopped up by calling @lkl_stop_syscall_thread - * before @lkl_halt is called. - */ -int lkl_create_syscall_thread(void); - -/** - * lkl_stop_syscall_thread - stop the associated system call thread - * - * Stop the system call thread associated with this host thread, if any. - */ -int lkl_stop_syscall_thread(void); - /** * lkl_netdev_tap_create - create TAP net_device for the virtio net backend * diff --git a/tools/lkl/lib/Build b/tools/lkl/lib/Build index cf420db1f380c4..468878b0049fa6 100644 --- a/tools/lkl/lib/Build +++ b/tools/lkl/lib/Build @@ -5,6 +5,7 @@ CFLAGS_nt-host.o += -D_WIN32_WINNT=0x0600 lkl-y += fs.o lkl-y += iomem.o lkl-y += net.o +lkl-y += jmp_buf.o lkl-$(CONFIG_AUTO_LKL_POSIX_HOST) += posix-host.o lkl-$(CONFIG_AUTO_LKL_NT_HOST) += nt-host.o lkl-y += utils.o diff --git a/tools/lkl/lib/fs.c b/tools/lkl/lib/fs.c index c975b4b6d6df38..cfdea62d543a7e 100644 --- a/tools/lkl/lib/fs.c +++ b/tools/lkl/lib/fs.c @@ -72,7 +72,7 @@ int lkl_get_virtio_blkdev(int disk_id, uint32_t *pdevid) char sysfs_path[LKL_PATH_MAX]; int sysfs_path_len = 0; char buf[16] = { 0, }; - long fd, ret; + long fd, ret = 0; int major, minor; int opendir_ret; char *virtio_name = NULL; @@ -138,13 +138,15 @@ int lkl_get_virtio_blkdev(int disk_id, uint32_t *pdevid) } device_id = new_encode_dev(major, minor); + ret = 0; out_close: lkl_sys_close(fd); - *pdevid = device_id; + if (!ret) + *pdevid = device_id; - return 0; + return ret; } long lkl_mount_dev(unsigned int disk_id, const char *fs_type, int flags, diff --git a/tools/lkl/lib/jmp_buf.c b/tools/lkl/lib/jmp_buf.c new file mode 100644 index 00000000000000..6fd99f606ae041 --- /dev/null +++ b/tools/lkl/lib/jmp_buf.c @@ -0,0 +1,12 @@ +#include +#include + +int jmp_buf_set(struct lkl_jmp_buf *jmpb) +{ + return setjmp(*((jmp_buf *)jmpb->buf)); +} + +void jmp_buf_longjmp(struct lkl_jmp_buf *jmpb, int val) +{ + longjmp(*((jmp_buf *)jmpb->buf), val); +} diff --git a/tools/lkl/lib/jmp_buf.h b/tools/lkl/lib/jmp_buf.h new file mode 100644 index 00000000000000..59d7c7c78e7c5c --- /dev/null +++ b/tools/lkl/lib/jmp_buf.h @@ -0,0 +1,7 @@ +#ifndef _LKL_LIB_JMP_BUF_H +#define _LKL_LIB_JMP_BUF_H + +int jmp_buf_set(struct lkl_jmp_buf *jmpb); +void jmp_buf_longjmp(struct lkl_jmp_buf *jmpb, int val); + +#endif diff --git a/tools/lkl/lib/nt-host.c b/tools/lkl/lib/nt-host.c index b4859b89c5ad2a..e34f01c0d395c1 100644 --- a/tools/lkl/lib/nt-host.c +++ b/tools/lkl/lib/nt-host.c @@ -4,11 +4,13 @@ #undef s_addr #include #include "iomem.h" +#include "jmp_buf.h" #define DIFF_1601_TO_1970_IN_100NS (11644473600L * 10000000L) struct lkl_mutex { - 
HANDLE mutex; + int recursive; + HANDLE handle; }; struct lkl_sem { @@ -39,37 +41,48 @@ static void sem_free(struct lkl_sem *sem) free(sem); } -static struct lkl_mutex *mutex_alloc(void) +static struct lkl_mutex *mutex_alloc(int recursive) { struct lkl_mutex *_mutex = malloc(sizeof(struct lkl_mutex)); if (!_mutex) return NULL; - _mutex->mutex = CreateMutex(0, FALSE, 0); + if (recursive) + _mutex->handle = CreateMutex(0, FALSE, 0); + else + _mutex->handle = CreateSemaphore(NULL, 1, 100, NULL); + _mutex->recursive = recursive; return _mutex; } static void mutex_lock(struct lkl_mutex *mutex) { - WaitForSingleObject(mutex->mutex, INFINITE); + WaitForSingleObject(mutex->handle, INFINITE); } static void mutex_unlock(struct lkl_mutex *_mutex) { - ReleaseMutex(_mutex->mutex); + if (_mutex->recursive) + ReleaseMutex(_mutex->handle); + else + ReleaseSemaphore(_mutex->handle, 1, NULL); } static void mutex_free(struct lkl_mutex *_mutex) { - CloseHandle(_mutex->mutex); + CloseHandle(_mutex->handle); free(_mutex); } static lkl_thread_t thread_create(void (*fn)(void *), void *arg) { DWORD WINAPI (*win_fn)(LPVOID arg) = (DWORD WINAPI (*)(LPVOID))fn; + HANDLE h = CreateThread(NULL, 0, win_fn, arg, 0, NULL); - return (lkl_thread_t)CreateThread(NULL, 0, win_fn, arg, 0, NULL); + if (!h) + return 0; + + return GetThreadId(h); } static void thread_detach(void) @@ -83,9 +96,30 @@ static void thread_exit(void) static int thread_join(lkl_thread_t tid) { - /* TODO: error handling */ - WaitForSingleObject((void *)tid, INFINITE); - return 0; + int ret; + HANDLE h; + + h = OpenThread(SYNCHRONIZE, FALSE, tid); + if (!h) + lkl_printf("%s: can't get thread handle\n", __func__); + + ret = WaitForSingleObject(h, INFINITE); + if (ret) + lkl_printf("%s: %d\n", __func__, ret); + + CloseHandle(h); + + return ret ? -1 : 0; +} + +static lkl_thread_t thread_self(void) +{ + return GetThreadId(GetCurrentThread()); +} + +static int thread_equal(lkl_thread_t a, lkl_thread_t b) +{ + return a == b; } static int tls_alloc(unsigned int *key, void (*destructor)(void *)) @@ -215,6 +249,8 @@ struct lkl_host_operations lkl_host_ops = { .thread_detach = thread_detach, .thread_exit = thread_exit, .thread_join = thread_join, + .thread_self = thread_self, + .thread_equal = thread_equal, .sem_alloc = sem_alloc, .sem_free = sem_free, .sem_up = sem_up, @@ -238,6 +274,8 @@ struct lkl_host_operations lkl_host_ops = { .iomem_access = lkl_iomem_access, .virtio_devices = lkl_virtio_devs, .gettid = gettid, + .jmp_buf_set = jmp_buf_set, + .jmp_buf_longjmp = jmp_buf_longjmp, }; int handle_get_capacity(struct lkl_disk disk, unsigned long long *res) diff --git a/tools/lkl/lib/posix-host.c b/tools/lkl/lib/posix-host.c index 5976465548ac4a..9300aa0b1b6f6d 100644 --- a/tools/lkl/lib/posix-host.c +++ b/tools/lkl/lib/posix-host.c @@ -16,6 +16,7 @@ #include #include #include "iomem.h" +#include "jmp_buf.h" /* Let's see if the host has semaphore.h */ #include @@ -132,7 +133,7 @@ static void sem_down(struct lkl_sem *sem) #endif /* _POSIX_SEMAPHORES */ } -static struct lkl_mutex *mutex_alloc(void) +static struct lkl_mutex *mutex_alloc(int recursive) { struct lkl_mutex *_mutex = malloc(sizeof(struct lkl_mutex)); pthread_mutex_t *mutex = NULL; @@ -148,9 +149,13 @@ static struct lkl_mutex *mutex_alloc(void) * but has some overhead, so we provide an option to turn it * off. 
*/ #ifdef DEBUG - WARN_PTHREAD(pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK)); + if (!recursive) + WARN_PTHREAD(pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK)); #endif /* DEBUG */ + if (recursive) + WARN_PTHREAD(pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)); + WARN_PTHREAD(pthread_mutex_init(mutex, &attr)); return _mutex; @@ -201,6 +206,16 @@ static int thread_join(lkl_thread_t tid) return 0; } +static lkl_thread_t thread_self(void) +{ + return (lkl_thread_t)pthread_self(); +} + +static int thread_equal(lkl_thread_t a, lkl_thread_t b) +{ + return pthread_equal(a, b); +} + static int tls_alloc(unsigned int *key, void (*destructor)(void *)) { return pthread_key_create((pthread_key_t *)key, destructor); @@ -289,6 +304,8 @@ struct lkl_host_operations lkl_host_ops = { .thread_detach = thread_detach, .thread_exit = thread_exit, .thread_join = thread_join, + .thread_self = thread_self, + .thread_equal = thread_equal, .sem_alloc = sem_alloc, .sem_free = sem_free, .sem_up = sem_up, @@ -312,6 +329,8 @@ struct lkl_host_operations lkl_host_ops = { .iomem_access = lkl_iomem_access, .virtio_devices = lkl_virtio_devs, .gettid = _gettid, + .jmp_buf_set = jmp_buf_set, + .jmp_buf_longjmp = jmp_buf_longjmp, }; static int fd_get_capacity(struct lkl_disk disk, unsigned long long *res) diff --git a/tools/lkl/lib/utils.c b/tools/lkl/lib/utils.c index 7d2477be9a3837..57cffe411fc2d0 100644 --- a/tools/lkl/lib/utils.c +++ b/tools/lkl/lib/utils.c @@ -158,31 +158,50 @@ void lkl_perror(char *msg, int err) fprintf(stderr, "%s: %s\n", msg, err_msg); } -int lkl_printf(const char *fmt, ...) +static int lkl_vprintf(const char *fmt, va_list args) { - char *buffer; - va_list args, copy; int n; + char *buffer; + va_list copy; if (!lkl_host_ops.print) return 0; - va_start(args, fmt); va_copy(copy, args); n = vsnprintf(NULL, 0, fmt, copy); va_end(copy); buffer = lkl_host_ops.mem_alloc(n + 1); - if (!buffer) { - va_end(args); - return 0; - } + if (!buffer) + return -1; + vsnprintf(buffer, n + 1, fmt, args); - va_end(args); lkl_host_ops.print(buffer, n); - lkl_host_ops.mem_free(buffer); return n; } + +int lkl_printf(const char *fmt, ...) +{ + int n; + va_list args; + + va_start(args, fmt); + n = lkl_vprintf(fmt, args); + va_end(args); + + return n; +} + +void lkl_bug(const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + lkl_vprintf(fmt, args); + va_end(args); + + lkl_host_ops.panic(); +} diff --git a/tools/lkl/lib/virtio_net.c b/tools/lkl/lib/virtio_net.c index 3fd9e94565cd13..a06c2135ad2c62 100644 --- a/tools/lkl/lib/virtio_net.c +++ b/tools/lkl/lib/virtio_net.c @@ -197,7 +197,7 @@ static struct lkl_mutex **init_queue_locks(int num_queues) return NULL; for (i = 0; i < num_queues; i++) { - ret[i] = lkl_host_ops.mutex_alloc(); + ret[i] = lkl_host_ops.mutex_alloc(1); if (!ret[i]) { free_queue_locks(ret, i); return NULL; diff --git a/tools/lkl/scripts/checkpatch.sh b/tools/lkl/scripts/checkpatch.sh index e5483ecfd9d813..1941f72a372ae1 100755 --- a/tools/lkl/scripts/checkpatch.sh +++ b/tools/lkl/scripts/checkpatch.sh @@ -23,10 +23,11 @@ tmp=`mktemp -d` for c in `git log --no-merges --pretty=format:%h HEAD ^$origin_master ^$tag`; do git format-patch -1 -o $tmp $c - ./scripts/checkpatch.pl --ignore FILE_PATH_CHANGES $tmp/*.patch - rm $tmp/*.patch done +./scripts/checkpatch.pl --ignore FILE_PATH_CHANGES $tmp/*.patch +rm $tmp/*.patch + # checkpatch.pl does not know how to deal with 3 way diffs which would # be useful to check the conflict resolutions during merges... 
#for c in `git log --merges --pretty=format:%h HEAD ^$origin_master ^$tag`; do diff --git a/tools/lkl/tests/boot.c b/tools/lkl/tests/boot.c index 8b2376415d748d..047b2f257e541e 100644 --- a/tools/lkl/tests/boot.c +++ b/tools/lkl/tests/boot.c @@ -678,11 +678,20 @@ static int test_mutex(char *str, int len) * warn us on CI if we've made bad memory accesses. */ - struct lkl_mutex *mutex = lkl_host_ops.mutex_alloc(); + struct lkl_mutex *mutex; + + mutex = lkl_host_ops.mutex_alloc(0); lkl_host_ops.mutex_lock(mutex); lkl_host_ops.mutex_unlock(mutex); lkl_host_ops.mutex_free(mutex); + mutex = lkl_host_ops.mutex_alloc(1); + lkl_host_ops.mutex_lock(mutex); + lkl_host_ops.mutex_lock(mutex); + lkl_host_ops.mutex_unlock(mutex); + lkl_host_ops.mutex_unlock(mutex); + lkl_host_ops.mutex_free(mutex); + snprintf(str, len, "%ld", ret); return ret; diff --git a/tools/lkl/tests/lklfuse.sh b/tools/lkl/tests/lklfuse.sh index f050ae2c13c8e8..9131d5b199915e 100755 --- a/tools/lkl/tests/lklfuse.sh +++ b/tools/lkl/tests/lklfuse.sh @@ -64,8 +64,8 @@ if which stress-ng; then exclude="chmod,filename,link,mknod,symlink,xattr" fi stress-ng --class filesystem --all 0 --timeout 10 \ - --exclude fiemap,$exclude --fallocate-bytes 50m \ - --sync-file-bytes 50m + --exclude fiemap,$exclude --fallocate-bytes 10m \ + --sync-file-bytes 10m else echo "could not find stress-ng, skipping" fi
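
The shutdown_gate counter used by struct lkl_cpu in cpu.c above is worth seeing in isolation. Below is a minimal standalone sketch of the same idea, not part of the patch (gate_enter()/gate_leave()/gate_shutdown() are illustrative names), assuming only the GCC/Clang __sync builtins that cpu.c already uses: one atomic counter tracks how many threads are inside the guarded functions, and adding MAX_THREADS to it both turns away new entries and lets the shutdown path spin until in-flight callers drain, mirroring __cpu_try_get_lock() and lkl_cpu_cleanup().

#define MAX_THREADS 1000000

static unsigned int gate;

/* Returns 0 on success, -1 if shutdown already closed the gate. */
static int gate_enter(void)
{
	if (__sync_fetch_and_add(&gate, 1) >= MAX_THREADS) {
		__sync_fetch_and_sub(&gate, 1);
		return -1;
	}
	return 0;
}

static void gate_leave(void)
{
	__sync_fetch_and_sub(&gate, 1);
}

static void gate_shutdown(void)
{
	/* close the gate: all later gate_enter() calls fail */
	__sync_fetch_and_add(&gate, MAX_THREADS);
	/* spin until callers that entered before the gate closed have left */
	while (__sync_fetch_and_add(&gate, 0) > MAX_THREADS)
		;
}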
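The scheduler hand-off in __switch_to() relies on the new jmp_buf host operations having exactly the C setjmp()/longjmp() contract: jmp_buf_set() returns 0 on the direct call and the value passed to jmp_buf_longjmp() when control re-enters through the jump, which is why the "if (!thread_set_sched_jmp()) schedule();" pattern falls through past schedule() once another thread long-jumps the CPU back. A tiny userspace demonstration of that contract, using nothing beyond <setjmp.h>:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf jb;

int main(void)
{
	if (!setjmp(jb)) {	/* direct call: returns 0 */
		printf("first pass, taking the long jump\n");
		longjmp(jb, 1);	/* re-enter setjmp() above with value 1 */
	}
	/* reached only via longjmp(), like resuming after schedule() */
	printf("resumed via longjmp\n");
	return 0;
}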
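Finally, a sketch of how a host backend is expected to drive the reworked lkl_trigger_irq() from an arbitrary host thread: it now serializes against the LKL CPU itself, running the handler directly when it can acquire the CPU and otherwise marking the IRQ pending so the current owner runs it on lkl_cpu_put(). The poll loop and wait_for_device_event() below are hypothetical stand-ins for a real backend's event source; lkl_trigger_irq() and lkl_get_free_irq() are the interfaces from the patch.

#include <lkl.h>
#include <lkl_host.h>

/* Hypothetical host primitive, stubbed so the sketch is self-contained;
 * a real backend would block on poll()/epoll/IOCP here. */
static void wait_for_device_event(void)
{
}

/* Host-side poll loop for a device backend (illustrative only). */
static void device_poll_thread(void *arg)
{
	int irq = (int)(long)arg;	/* obtained earlier via lkl_get_free_irq() */

	for (;;) {
		wait_for_device_event();
		/*
		 * Safe from any host thread: either runs the handler now or
		 * records the IRQ as pending for the current CPU owner.
		 */
		lkl_trigger_irq(irq);
	}
}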