lkl: Direct irq and fix direct syscall degradation #257

Merged 2 commits on Nov 3, 2016
5 changes: 4 additions & 1 deletion arch/lkl/include/asm/cpu.h
@@ -7,8 +7,11 @@ int lkl_cpu_try_run_irq(int irq);
int lkl_cpu_init(void);
void lkl_cpu_shutdown(void);
void lkl_cpu_wait_shutdown(void);
void lkl_cpu_wakeup(void);
void lkl_cpu_wakeup_idle(void);
void lkl_cpu_change_owner(lkl_thread_t owner);
void lkl_cpu_set_irqs_pending(void);
void lkl_idle_tail_schedule(void);
int lkl_cpu_idle_pending(void);
extern void cpu_idle_loop(void);

#endif /* _ASM_LKL_CPU_H */
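
Note: the header change above renames lkl_cpu_wakeup() to lkl_cpu_wakeup_idle() and exposes the idle exit path (lkl_idle_tail_schedule(), lkl_cpu_idle_pending()). As the cpu.c diff below shows, the wakeup itself is a semaphore post that the parked idle thread waits on. A minimal standalone sketch of that pattern, using POSIX semaphores in place of the lkl_ops host operations (all names here are illustrative, not part of the patch):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t idle_sem;                  /* stands in for cpu.idle_sem */

/* Parked "idle thread": sleeps until someone posts the semaphore, the
 * way arch_cpu_idle() sleeps in lkl_ops->sem_down(cpu.idle_sem). */
static void *idle_thread(void *arg)
{
        sem_wait(&idle_sem);
        printf("idle thread woken\n");
        return NULL;
}

int main(void)
{
        pthread_t t;

        sem_init(&idle_sem, 0, 0);
        pthread_create(&t, NULL, idle_thread, NULL);

        sem_post(&idle_sem);            /* what lkl_cpu_wakeup_idle() does */
        pthread_join(t, NULL);
        return 0;
}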
1 change: 1 addition & 0 deletions arch/lkl/include/asm/thread_info.h
@@ -60,6 +60,7 @@ void threads_cleanup(void);
#define TIF_SCHED_JB 7
#define TIF_SCHED_EXIT 8
#define TIF_HOST_THREAD 9
#define TIF_IDLE 10

static inline void set_ti_thread_flag(struct thread_info *ti, int flag);

95 changes: 82 additions & 13 deletions arch/lkl/kernel/cpu.c
@@ -1,5 +1,8 @@
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <asm/host_ops.h>
#include <asm/cpu.h>
#include <asm/thread_info.h>
@@ -50,6 +53,10 @@ struct lkl_cpu {
        struct lkl_sem *sem;
        /* semaphore for the idle thread */
        struct lkl_sem *idle_sem;
        /* if the idle thread is pending */
        bool idle_pending;
        /* jmp_buf used for idle thread to restart */
        struct lkl_jmp_buf idle_jb;
        /* semaphore used for shutdown */
        struct lkl_sem *shutdown_sem;
} cpu;
@@ -126,18 +133,19 @@ void lkl_cpu_put(void)
                lkl_ops->mutex_lock(cpu.lock);
        }

        if (need_resched()) {
        if (need_resched() && cpu.count == 1) {
                if (in_interrupt())
                        lkl_bug("%s: in interrupt\n", __func__);
                lkl_ops->mutex_unlock(cpu.lock);
                if (test_thread_flag(TIF_HOST_THREAD)) {
                        if (cpu.count == 1 && !in_interrupt()) {
                                lkl_ops->mutex_unlock(cpu.lock);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                if (!thread_set_sched_jmp())
                                        schedule();
                                return;
                        }
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (!thread_set_sched_jmp())
                                schedule();
                } else {
                        lkl_cpu_wakeup();
                        if (!thread_set_sched_jmp())
                                lkl_idle_tail_schedule();
                }
                return;
        }

        if (--cpu.count > 0) {
@@ -210,20 +218,37 @@ void arch_cpu_idle(void)

                lkl_ops->thread_exit();
        }

        /* enable irqs now to allow direct irqs to run */
        local_irq_enable();

        if (need_resched())
                return;

        cpu.idle_pending = true;
        lkl_cpu_put();

        lkl_ops->sem_down(cpu.idle_sem);

        lkl_cpu_get();
        cpu.idle_pending = false;
        /* to match that of schedule_preempt_disabled() */
        preempt_disable();
        lkl_ops->jmp_buf_longjmp(&cpu.idle_jb, 1);
}

        run_irqs();
void arch_cpu_idle_prepare(void)
{
        set_ti_thread_flag(current_thread_info(), TIF_IDLE);
        /*
         * We hijack the idle loop here so that we can let the idle thread
         * jump back to the beginning.
         */
        while (1) {
                if (!lkl_ops->jmp_buf_set(&cpu.idle_jb))
                        cpu_idle_loop();
        }
}

void lkl_cpu_wakeup(void)
void lkl_cpu_wakeup_idle(void)
{
        lkl_ops->sem_up(cpu.idle_sem);
}
@@ -242,3 +267,47 @@ int lkl_cpu_init(void)

        return 0;
}

/*
 * Simulate the exit path of the idle loop so that we can schedule when
 * LKL is idle.
 * This duplicates code in idle.c, so a better way would be to refactor
 * idle.c to expose such a function.
 */
void lkl_idle_tail_schedule(void)
{

        if (!cpu.idle_pending ||
            !test_bit(TIF_IDLE, &current_thread_info()->flags))
                lkl_bug("%s: not in idle\n", __func__);

        start_critical_timings();
        __current_set_polling();

        if (WARN_ON_ONCE(irqs_disabled()))
                local_irq_enable();

        rcu_idle_exit();
        arch_cpu_idle_exit();
        preempt_set_need_resched();
        tick_nohz_idle_exit();
        __current_clr_polling();

        /*
         * memory barrier copied from idle.c
         */
        smp_mb__after_atomic();

        /*
         * Didn't find a way to include kernel/sched/sched.h for
         * sched_ttwu_pending().
         * Anyway, it's a no-op when CONFIG_SMP is not set.
         */

        schedule_preempt_disabled();
}

int lkl_cpu_idle_pending(void)
{
        return cpu.idle_pending;
}
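
Note: the arch_cpu_idle()/arch_cpu_idle_prepare() pair above restarts the idle loop with a long jump. prepare() records a jump target before entering cpu_idle_loop(), and a woken idle thread jumps back to it so the loop is re-entered from a fresh stack frame instead of unwinding normally. A standalone sketch of that control flow with plain setjmp()/longjmp() (the patch goes through lkl_ops->jmp_buf_set()/jmp_buf_longjmp(); the names below are made up for illustration):

#include <setjmp.h>
#include <stdio.h>

static jmp_buf idle_jb;         /* stands in for cpu.idle_jb */
static int wakeups;

/* Stands in for cpu_idle_loop(): pretends a wakeup arrived and jumps
 * back to the restart point, as arch_cpu_idle() does after sem_down(). */
static void fake_idle_loop(void)
{
        printf("idle loop pass %d\n", wakeups);
        if (++wakeups < 3)
                longjmp(idle_jb, 1);    /* restart the loop from the top */
}

int main(void)
{
        /* mirrors: while (1) { if (!jmp_buf_set(&cpu.idle_jb)) cpu_idle_loop(); } */
        while (wakeups < 3) {
                if (!setjmp(idle_jb))
                        fake_idle_loop();
        }
        return 0;
}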
12 changes: 0 additions & 12 deletions arch/lkl/kernel/syscalls.c
@@ -93,15 +93,12 @@ static unsigned int task_key;
long lkl_syscall(long no, long *params)
{
        struct task_struct *task = host0;
        static int count;
        long ret;

        ret = lkl_cpu_get();
        if (ret < 0)
                return ret;

        count++;

        if (lkl_ops->tls_get) {
                task = lkl_ops->tls_get(task_key);
                if (!task) {
@@ -116,16 +113,7 @@ long lkl_syscall(long no, long *params)

        ret = run_syscall(no, params);

        if (count > 1) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!thread_set_sched_jmp())
                        schedule();
                count--;
                return ret;
        }

out:
        count--;
        lkl_cpu_put();

        return ret;
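Note: with the static count gone, lkl_syscall() becomes a plain acquire/run/release bracket; the need_resched() handling moved into lkl_cpu_put(), which now reschedules only when dropping the last reference (cpu.count == 1). A toy sketch of why the last-reference check matters (illustrative only, not the patch's code):

#include <stdio.h>

static int count;       /* stands in for cpu.count */

static void cpu_get(void) { count++; }

static void cpu_put(void)
{
        /* Only the outermost put may schedule away, mirroring the
         * "need_resched() && cpu.count == 1" test in lkl_cpu_put(). */
        if (count == 1)
                printf("last reference dropped: safe to reschedule\n");
        count--;
}

int main(void)
{
        cpu_get();      /* outer syscall */
        cpu_get();      /* nested acquisition, e.g. an irq */
        cpu_put();      /* inner put: count == 2, must not schedule */
        cpu_put();      /* outer put: count == 1, may schedule */
        return 0;
}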
21 changes: 17 additions & 4 deletions arch/lkl/kernel/threads.c
@@ -90,17 +90,30 @@ struct task_struct *__switch_to(struct task_struct *prev,
        struct thread_info *_prev = task_thread_info(prev);
        struct thread_info *_next = task_thread_info(next);
        unsigned long _prev_flags = _prev->flags;
        bool wakeup_idle = test_bit(TIF_IDLE, &_next->flags) &&
                           lkl_cpu_idle_pending();

        _current_thread_info = task_thread_info(next);
        _next->prev_sched = prev;
        abs_prev = prev;

        BUG_ON(!_next->tid);
        lkl_cpu_change_owner(_next->tid);

        lkl_ops->sem_up(_next->sched_sem);
        if (test_bit(TIF_SCHED_JB, &_prev_flags)) {
                /* Atomic. Must be done before wakeup next */
                clear_ti_thread_flag(_prev, TIF_SCHED_JB);
        }
        if (wakeup_idle)
                schedule_tail(abs_prev);
        lkl_cpu_change_owner(_next->tid);

        /* No kernel code is allowed after wakeup next */
        if (wakeup_idle)
                lkl_cpu_wakeup_idle();
        else
                lkl_ops->sem_up(_next->sched_sem);

        if (test_bit(TIF_SCHED_JB, &_prev_flags)) {
                lkl_ops->jmp_buf_longjmp(&_prev->sched_jb, 1);
        } else if (test_bit(TIF_SCHED_EXIT, &_prev_flags)) {
                lkl_ops->thread_exit();
@@ -132,8 +145,8 @@ void switch_to_host_task(struct task_struct *task)
                if (!thread_set_sched_jmp())
                        schedule();
        } else {
                lkl_cpu_wakeup();
                lkl_cpu_put();
                if (!thread_set_sched_jmp())
                        lkl_idle_tail_schedule();
        }

        lkl_ops->sem_down(task_thread_info(task)->sched_sem);
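Note: the reordering in __switch_to() is about publication order. Everything the next thread depends on (the TIF_SCHED_JB clear, schedule_tail(), the CPU owner change) must happen before the wakeup, because the next thread may start running the instant the semaphore is posted; hence the "No kernel code is allowed after wakeup next" comment. A small pthread sketch of the same rule (illustrative names, not the patch's code):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t sched_sem;         /* stands in for _next->sched_sem */
static int cpu_owner;           /* state the next thread depends on */

static void *next_thread(void *arg)
{
        sem_wait(&sched_sem);
        /* May run the instant the post happens; all shared state must
         * already be published by then. */
        printf("next thread sees owner %d\n", cpu_owner);
        return NULL;
}

int main(void)
{
        pthread_t t;

        sem_init(&sched_sem, 0, 0);
        pthread_create(&t, NULL, next_thread, NULL);

        cpu_owner = 42;         /* lkl_cpu_change_owner() before wakeup */
        sem_post(&sched_sem);   /* the wakeup: touch nothing shared after this */

        pthread_join(t, NULL);
        return 0;
}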
3 changes: 2 additions & 1 deletion kernel/sched/idle.c
@@ -199,7 +199,7 @@ static void cpuidle_idle_call(void)
*
* Called with polling cleared.
*/
static void cpu_idle_loop(void)
void cpu_idle_loop(void)
{
        int cpu = smp_processor_id();

@@ -270,6 +270,7 @@ static void cpu_idle_loop(void)
                schedule_preempt_disabled();
        }
}
EXPORT_SYMBOL(cpu_idle_loop);

void cpu_startup_entry(enum cpuhp_state state)
{