
sysctl: promote sched_migration_cost_ns out of CONFIG_SCHED_DEBUG
Signed-off-by: Park Ju Hyung <qkrwngud825@gmail.com>
Signed-off-by: Adam W. Willis <return.of.octobot@gmail.com>
arter97 authored and 0ctobot committed Jul 13, 2019
1 parent 8cdff35 commit 11815a5
Showing 4 changed files with 6 additions and 4 deletions.
include/linux/sched/sysctl.h (2 changes: 1 addition & 1 deletion)
@@ -63,8 +63,8 @@ extern unsigned int sysctl_numa_balancing_scan_period_min;
 extern unsigned int sysctl_numa_balancing_scan_period_max;
 extern unsigned int sysctl_numa_balancing_scan_size;
 
-#ifdef CONFIG_SCHED_DEBUG
 extern __read_mostly unsigned int sysctl_sched_migration_cost;
+#ifdef CONFIG_SCHED_DEBUG
 extern __read_mostly unsigned int sysctl_sched_nr_migrate;
 extern __read_mostly unsigned int sysctl_sched_time_avg;
 extern unsigned int sysctl_sched_shares_window;
kernel/sched/fair.c (4 changes: 3 additions & 1 deletion)
@@ -135,7 +135,7 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
 
-const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+unsigned int __read_mostly sysctl_sched_migration_cost = 500000UL;
 
 /*
  * The exponential sliding window over which load is averaged for shares
@@ -641,6 +641,7 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
 	return rb_entry(next, struct sched_entity, run_node);
 }
 
+#ifdef CONFIG_SCHED_DEBUG
 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 {
 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
@@ -650,6 +651,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 
 	return rb_entry(last, struct sched_entity, run_node);
 }
+#endif
 
 /**************************************************************
  * Scheduling class statistics methods:
kernel/sched/sched.h (2 changes: 1 addition & 1 deletion)
@@ -1782,7 +1782,7 @@ extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
 
 extern const_debug unsigned int sysctl_sched_time_avg;
 extern const_debug unsigned int sysctl_sched_nr_migrate;
-extern const_debug unsigned int sysctl_sched_migration_cost;
+extern unsigned int __read_mostly sysctl_sched_migration_cost;
 
 static inline u64 sched_avg_period(void)
 {
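
Note (not part of the commit): the switch from const_debug to plain __read_mostly, here and in the kernel/sched/fair.c definition above, is what keeps the variable writable on non-debug builds. In mainline kernels of this era, const_debug collapses to const when CONFIG_SCHED_DEBUG is off, so a const_debug variable could not back a writable sysctl. The sketch below reproduces that helper macro from memory of kernel/sched/sched.h; treat the exact wording as an assumption rather than part of this diff.

/* Sketch of the const_debug helper (mainline kernel/sched/sched.h, ~4.14 era).
 * Shown for context only; spacing and comments may differ in this tree. */
#ifdef CONFIG_SCHED_DEBUG
# define const_debug	__read_mostly	/* tunable stays writable at runtime */
#else
# define const_debug	const		/* tunable becomes a build-time constant */
#endif
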
kernel/sysctl.c (2 changes: 1 addition & 1 deletion)
@@ -449,14 +449,14 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &min_sched_tunable_scaling,
 		.extra2		= &max_sched_tunable_scaling,
 	},
-#ifdef CONFIG_SCHED_DEBUG
 	{
 		.procname	= "sched_migration_cost_ns",
 		.data		= &sysctl_sched_migration_cost,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+#ifdef CONFIG_SCHED_DEBUG
 	{
 		.procname	= "sched_nr_migrate",
 		.data		= &sysctl_sched_nr_migrate,
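
Note (not part of the commit): with the ctl_table entry registered outside CONFIG_SCHED_DEBUG, the tunable is exposed at /proc/sys/kernel/sched_migration_cost_ns on every build. The path follows from the "sched_migration_cost_ns" procname above; the small userspace reader below is purely illustrative.

/* Illustrative reader for the now-unconditional sysctl.
 * Path derived from the kern_table entry shown in the diff above. */
#include <stdio.h>

int main(void)
{
	unsigned int cost_ns;
	FILE *f = fopen("/proc/sys/kernel/sched_migration_cost_ns", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%u", &cost_ns) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("sched_migration_cost_ns = %u\n", cost_ns);
	return 0;
}

Because the entry is registered with mode 0644 and proc_dointvec, the value can also be read or written (as root) with the sysctl utility as kernel.sched_migration_cost_ns, or by writing a new value to the same procfs file.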

