Skip to content

Commit

Permalink
deepin: KABI: KABI reservation for sched structures
Browse files Browse the repository at this point in the history
We reserve some fields in advance for sched structures that are prone to
change, so that scheduler features can be hot-added or modified later
without breaking the kernel ABI. After reservation, cache behavior is
normally unaffected because the reserved fields are never accessed.

Link: https://gitee.com/openeuler/kernel/issues/I8ZJI8
Link: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/commit/f11bec33927d12cf8a0f95b1ad10653d69897444

Signed-off-by: Phil Auld <pauld@redhat.com>
Signed-off-by: Guan Jing <guanjing6@huawei.com>
Signed-off-by: Wentao Guan <guanwentao@uniontech.com>
  • Loading branch information
opsiff committed Jan 8, 2025
1 parent d0a4257 commit 884f72d
Show file tree
Hide file tree
Showing 3 changed files with 85 additions and 0 deletions.
46 changes: 46 additions & 0 deletions include/linux/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@
#include <linux/rv.h>
#include <linux/livepatch_sched.h>
#include <asm/kmap_size.h>
#include <linux/deepin_kabi.h>

/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
Expand Down Expand Up @@ -391,6 +392,8 @@ struct sched_info {
/* When were we last queued to run? */
unsigned long long last_queued;

DEEPIN_KABI_RESERVE(1)
DEEPIN_KABI_RESERVE(2)
#endif /* CONFIG_SCHED_INFO */
};

Expand Down Expand Up @@ -504,6 +507,8 @@ struct sched_avg {
unsigned long runnable_avg;
unsigned long util_avg;
struct util_est util_est;
DEEPIN_KABI_RESERVE(1)
DEEPIN_KABI_RESERVE(2)
} ____cacheline_aligned;

struct sched_statistics {
Expand Down Expand Up @@ -545,6 +550,14 @@ struct sched_statistics {
#ifdef CONFIG_SCHED_CORE
u64 core_forceidle_sum;
#endif
DEEPIN_KABI_RESERVE(1)
DEEPIN_KABI_RESERVE(2)
DEEPIN_KABI_RESERVE(3)
DEEPIN_KABI_RESERVE(4)
DEEPIN_KABI_RESERVE(5)
DEEPIN_KABI_RESERVE(6)
DEEPIN_KABI_RESERVE(7)
DEEPIN_KABI_RESERVE(8)
#endif /* CONFIG_SCHEDSTATS */
} ____cacheline_aligned;

Expand Down Expand Up @@ -587,6 +600,10 @@ struct sched_entity {
*/
struct sched_avg avg;
#endif
DEEPIN_KABI_RESERVE(1)
DEEPIN_KABI_RESERVE(2)
DEEPIN_KABI_RESERVE(3)
DEEPIN_KABI_RESERVE(4)
};

struct sched_rt_entity {
Expand All @@ -605,6 +622,8 @@ struct sched_rt_entity {
/* rq "owned" by this entity/group: */
struct rt_rq *my_q;
#endif
DEEPIN_KABI_RESERVE(1)
DEEPIN_KABI_RESERVE(2)
} __randomize_layout;

struct sched_dl_entity {
Expand Down Expand Up @@ -743,6 +762,12 @@ struct kmap_ctrl {
#endif
};

/*
 * Empty placeholder struct reserved for future Deepin-specific task_struct
 * extensions; keeping it defined now lets fields be added later without
 * changing the task_struct ABI layout.
 * NOTE(review): presumably the target of DEEPIN_KABI_AUX_PTR(task_struct)
 * below — confirm against include/linux/deepin_kabi.h.
 */
struct task_struct_deepin {
};

/*
 * Empty placeholder pointed to by task_struct::task_struct_extend (declared
 * in the randomized portion of task_struct); reserved so new fields can be
 * hot-added behind the pointer without altering task_struct's size/layout.
 */
struct task_struct_extend {
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
Expand Down Expand Up @@ -1541,12 +1566,33 @@ struct task_struct {
struct user_event_mm *user_event_mm;
#endif

/* reserve extra field for randomized used */
struct task_struct_extend *task_struct_extend;

/*
* New fields for task_struct should be added above here, so that
* they are included in the randomized portion of task_struct.
*/
randomized_struct_fields_end

DEEPIN_KABI_RESERVE(1)
DEEPIN_KABI_RESERVE(2)
DEEPIN_KABI_RESERVE(3)
DEEPIN_KABI_RESERVE(4)
DEEPIN_KABI_RESERVE(5)
DEEPIN_KABI_RESERVE(6)
DEEPIN_KABI_RESERVE(7)
DEEPIN_KABI_RESERVE(8)
DEEPIN_KABI_RESERVE(9)
DEEPIN_KABI_RESERVE(10)
DEEPIN_KABI_RESERVE(11)
DEEPIN_KABI_RESERVE(12)
DEEPIN_KABI_RESERVE(13)
DEEPIN_KABI_RESERVE(14)
DEEPIN_KABI_RESERVE(15)
DEEPIN_KABI_RESERVE(16)
DEEPIN_KABI_AUX_PTR(task_struct)

/* CPU-specific state of this task: */
struct thread_struct thread;

Expand Down
5 changes: 5 additions & 0 deletions include/linux/sched/signal.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>
#include <linux/deepin_kabi.h>

/*
* Types defining task->signal and task->sighand and APIs using them:
Expand Down Expand Up @@ -245,6 +246,10 @@ struct signal_struct {
* and may have inconsistent
* permissions.
*/
DEEPIN_KABI_RESERVE(1)
DEEPIN_KABI_RESERVE(2)
DEEPIN_KABI_RESERVE(3)
DEEPIN_KABI_RESERVE(4)
} __randomize_layout;

/*
Expand Down
34 changes: 34 additions & 0 deletions kernel/sched/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -284,6 +284,8 @@ struct rt_bandwidth {
u64 rt_runtime;
struct hrtimer rt_period_timer;
unsigned int rt_period_active;
DEEPIN_KABI_RESERVE(1)
DEEPIN_KABI_RESERVE(2)
};

static inline int dl_bandwidth_enabled(void)
Expand Down Expand Up @@ -356,6 +358,14 @@ struct cfs_bandwidth {
int nr_burst;
u64 throttled_time;
u64 burst_time;
DEEPIN_KABI_RESERVE(1)
DEEPIN_KABI_RESERVE(2)
DEEPIN_KABI_RESERVE(3)
DEEPIN_KABI_RESERVE(4)
DEEPIN_KABI_RESERVE(5)
DEEPIN_KABI_RESERVE(6)
DEEPIN_KABI_RESERVE(7)
DEEPIN_KABI_RESERVE(8)
#endif
};

Expand Down Expand Up @@ -649,6 +659,14 @@ struct cfs_rq {
#endif
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
DEEPIN_KABI_RESERVE(1)
DEEPIN_KABI_RESERVE(2)
DEEPIN_KABI_RESERVE(3)
DEEPIN_KABI_RESERVE(4)
DEEPIN_KABI_RESERVE(5)
DEEPIN_KABI_RESERVE(6)
DEEPIN_KABI_RESERVE(7)
DEEPIN_KABI_RESERVE(8)
};

static inline int rt_bandwidth_enabled(void)
Expand Down Expand Up @@ -695,6 +713,8 @@ struct rt_rq {
struct rq *rq;
struct task_group *tg;
#endif
DEEPIN_KABI_RESERVE(1)
DEEPIN_KABI_RESERVE(2)
};

static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
Expand Down Expand Up @@ -889,6 +909,8 @@ struct root_domain {
* CPUs of the rd. Protected by RCU.
*/
struct perf_domain __rcu *pd;
DEEPIN_KABI_RESERVE(1)
DEEPIN_KABI_RESERVE(2)
};

extern void init_defrootdomain(void);
Expand Down Expand Up @@ -1167,6 +1189,14 @@ struct rq {
call_single_data_t cfsb_csd;
struct list_head cfsb_csd_list;
#endif
DEEPIN_KABI_RESERVE(1)
DEEPIN_KABI_RESERVE(2)
DEEPIN_KABI_RESERVE(3)
DEEPIN_KABI_RESERVE(4)
DEEPIN_KABI_RESERVE(5)
DEEPIN_KABI_RESERVE(6)
DEEPIN_KABI_RESERVE(7)
DEEPIN_KABI_RESERVE(8)
};

#ifdef CONFIG_FAIR_GROUP_SCHED
Expand Down Expand Up @@ -1908,6 +1938,8 @@ struct sched_group {
struct sched_group_capacity *sgc;
int asym_prefer_cpu; /* CPU of highest priority in group */
int flags;
DEEPIN_KABI_RESERVE(1)
DEEPIN_KABI_RESERVE(2)

/*
* The CPUs this group covers.
Expand Down Expand Up @@ -2299,6 +2331,8 @@ struct sched_class {
#ifdef CONFIG_SCHED_CORE
int (*task_is_throttled)(struct task_struct *p, int cpu);
#endif
DEEPIN_KABI_RESERVE(1)
DEEPIN_KABI_RESERVE(2)
};

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
Expand Down

0 comments on commit 884f72d

Please sign in to comment.