powerpc/64s: avoid reloading (H)SRR registers if they are still valid
When an interrupt is taken, the SRR registers are set to return to where
the interrupted context left off. Unless the registers themselves are
clobbered in the meantime, or the saved return address or MSR is
modified, there is no need to reload these registers when returning
from interrupt.

Introduce per-CPU flags that track the validity of SRR and HSRR
registers. These are cleared when returning from interrupt, when
using the registers for something else (e.g., OPAL calls), when
adjusting the return address or MSR of a context, and when context
switching (which changes the return address and MSR).

This improves the performance of interrupt returns.
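
In outline, the scheme looks like the following C sketch. This is a
simplified illustration, not code from the patch: the real entry and
exit paths are in assembly, and interrupt_entry()/interrupt_exit() are
hypothetical stand-ins for them.

/*
 * Minimal sketch of the validity tracking, using the paca fields this
 * commit introduces. interrupt_entry() and interrupt_exit() are
 * hypothetical stand-ins for the asm entry/exit paths.
 */
static void interrupt_entry(void)
{
	/* Hardware just loaded SRR0/SRR1 with the interrupted NIP/MSR,
	 * so they match the context being saved. */
	local_paca->srr_valid = 1;
}

static void interrupt_exit(struct pt_regs *regs)
{
	/* Fast path: skip the expensive mtspr pair when nothing has
	 * clobbered the SRRs since entry. */
	if (!local_paca->srr_valid) {
		mtspr(SPRN_SRR0, regs->nip);
		mtspr(SPRN_SRR1, regs->msr);
	}
	/* The values are consumed by rfid, so they are no longer
	 * known to match any saved context. */
	local_paca->srr_valid = 0;
	/* ... rfid ... */
}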

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Fold in fixup patch from Nick]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210617155116.2167984-5-npiggin@gmail.com
npiggin authored and mpe committed Jun 24, 2021
1 parent 1df7d5e · commit 59dc5bf
Showing 47 changed files with 418 additions and 179 deletions.
4 changes: 4 additions & 0 deletions arch/powerpc/Kconfig.debug
@@ -85,6 +85,10 @@ config MSI_BITMAP_SELFTEST
config PPC_IRQ_SOFT_MASK_DEBUG
bool "Include extra checks for powerpc irq soft masking"

config PPC_RFI_SRR_DEBUG
bool "Include extra checks for RFI SRR register validity"
depends on PPC_BOOK3S_64

config XMON
bool "Include xmon kernel debugger"
depends on DEBUG_KERNEL
10 changes: 9 additions & 1 deletion arch/powerpc/include/asm/hw_irq.h
@@ -389,7 +389,15 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !(regs->msr & MSR_EE);
}

static inline void may_hard_irq_enable(void) { }
static inline bool may_hard_irq_enable(void)
{
return false;
}

static inline void do_hard_irq_enable(void)
{
BUILD_BUG();
}

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
14 changes: 13 additions & 1 deletion arch/powerpc/include/asm/interrupt.h
@@ -73,13 +73,25 @@
#include <asm/kprobes.h>
#include <asm/runlatch.h>

#ifdef CONFIG_PPC_BOOK3S_64
static inline void srr_regs_clobbered(void)
{
local_paca->srr_valid = 0;
local_paca->hsrr_valid = 0;
}
#else
static inline void srr_regs_clobbered(void)
{
}
#endif

static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
/* Can avoid a test-and-clear because NMIs do not call this */
clear_thread_local_flags(_TLF_NAPPING);
regs->nip = (unsigned long)power4_idle_nap_return;
regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
}
#endif
}
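
srr_regs_clobbered() is the hook for the "using the registers for
something else" case from the commit message. A hypothetical call site
might look like the sketch below; enter_firmware() is made up for
illustration (the real OPAL wrappers live elsewhere in the tree).

/* After anything that reuses the SRRs, e.g. a firmware call, drop the
 * cached validity so the exit path reloads SRR0/SRR1 before rfid. */
static long firmware_call_example(void)
{
	long rc = enter_firmware();	/* hypothetical; clobbers (H)SRRs */

	srr_regs_clobbered();
	return rc;
}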
2 changes: 1 addition & 1 deletion arch/powerpc/include/asm/livepatch.h
@@ -16,7 +16,7 @@ static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
{
struct pt_regs *regs = ftrace_get_regs(fregs);

regs->nip = ip;
regs_set_return_ip(regs, ip);
}

#define klp_get_ftrace_location klp_get_ftrace_location
4 changes: 4 additions & 0 deletions arch/powerpc/include/asm/paca.h
@@ -167,6 +167,10 @@ struct paca_struct {
u64 saved_msr; /* MSR saved here by enter_rtas */
#ifdef CONFIG_PPC_BOOK3E
u16 trap_save; /* Used when bad stack is encountered */
#endif
#ifdef CONFIG_PPC_BOOK3S_64
u8 hsrr_valid; /* HSRRs set for HRFID */
u8 srr_valid; /* SRRs set for RFID */
#endif
u8 irq_soft_mask; /* mask for irq soft masking */
u8 irq_happened; /* irq happened while soft-disabled */
4 changes: 2 additions & 2 deletions arch/powerpc/include/asm/probes.h
@@ -34,14 +34,14 @@ typedef u32 ppc_opcode_t;
/* Enable single stepping for the current task */
static inline void enable_single_step(struct pt_regs *regs)
{
regs->msr |= MSR_SINGLESTEP;
regs_set_return_msr(regs, regs->msr | MSR_SINGLESTEP);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
* We turn off Critical Input Exception(CE) to ensure that the single
* step will be for the instruction we have the probe on; if we don't,
* it is possible we'd get the single step reported for CE.
*/
regs->msr &= ~MSR_CE;
regs_set_return_msr(regs, regs->msr & ~MSR_CE);
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
#ifdef CONFIG_PPC_47x
isync();
52 changes: 42 additions & 10 deletions arch/powerpc/include/asm/ptrace.h
@@ -123,6 +123,47 @@ struct pt_regs
#endif /* __powerpc64__ */

#ifndef __ASSEMBLY__
#include <asm/paca.h>

#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#else
#define profile_pc(regs) instruction_pointer(regs)
#endif

long do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_leave(struct pt_regs *regs);

static inline void regs_set_return_ip(struct pt_regs *regs, unsigned long ip)
{
regs->nip = ip;
#ifdef CONFIG_PPC_BOOK3S_64
local_paca->hsrr_valid = 0;
local_paca->srr_valid = 0;
#endif
}

static inline void regs_set_return_msr(struct pt_regs *regs, unsigned long msr)
{
regs->msr = msr;
#ifdef CONFIG_PPC_BOOK3S_64
local_paca->hsrr_valid = 0;
local_paca->srr_valid = 0;
#endif
}

static inline void set_return_regs_changed(void)
{
#ifdef CONFIG_PPC_BOOK3S_64
local_paca->hsrr_valid = 0;
local_paca->srr_valid = 0;
#endif
}

static inline void regs_add_return_ip(struct pt_regs *regs, long offset)
{
regs_set_return_ip(regs, regs->nip + offset);
}

static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
@@ -132,7 +173,7 @@ static inline unsigned long instruction_pointer(struct pt_regs *regs)
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
regs->nip = val;
regs_set_return_ip(regs, val);
}

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
@@ -145,15 +186,6 @@ static inline unsigned long frame_pointer(struct pt_regs *regs)
return 0;
}

#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#else
#define profile_pc(regs) instruction_pointer(regs)
#endif

long do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_leave(struct pt_regs *regs);

#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#else
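
These accessors matter because a direct store to regs->nip or regs->msr
would leave the validity flags stale, and the fast exit path would rfid
back to the old state. A sketch of the hazard, with a hypothetical
redirect_example():

/* Hypothetical handler that redirects the interrupted context. */
static void redirect_example(struct pt_regs *regs, unsigned long new_ip)
{
	/*
	 * Buggy under this scheme: SRR0 still holds the old NIP and
	 * srr_valid is still set, so the exit fast path would return
	 * to the old address:
	 *
	 *	regs->nip = new_ip;
	 */

	/* Correct: updates the saved context and invalidates the
	 * (H)SRRs, forcing the exit path to reload them. The MSR
	 * accessor, regs_set_return_msr(), works the same way. */
	regs_set_return_ip(regs, new_ip);
}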
4 changes: 4 additions & 0 deletions arch/powerpc/kernel/asm-offsets.c
@@ -190,6 +190,10 @@ int main(void)
OFFSET(PACATOC, paca_struct, kernel_toc);
OFFSET(PACAKBASE, paca_struct, kernelbase);
OFFSET(PACAKMSR, paca_struct, kernel_msr);
#ifdef CONFIG_PPC_BOOK3S_64
OFFSET(PACAHSRR_VALID, paca_struct, hsrr_valid);
OFFSET(PACASRR_VALID, paca_struct, srr_valid);
#endif
OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask);
OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
OFFSET(PACA_FTRACE_ENABLED, paca_struct, ftrace_enabled);
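
For context on why asm-offsets.c is touched: OFFSET() is the standard
kbuild mechanism that turns struct offsets into assembler constants, so
the entry code can address the new paca fields relative to r13 (the
paca pointer), as in "stb r10,PACASRR_VALID(r13)". Roughly, from
include/linux/kbuild.h:

#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))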
92 changes: 85 additions & 7 deletions arch/powerpc/kernel/entry_64.S
@@ -64,6 +64,30 @@ exception_marker:
.section ".text"
.align 7

.macro DEBUG_SRR_VALID srr
#ifdef CONFIG_PPC_RFI_SRR_DEBUG
.ifc \srr,srr
mfspr r11,SPRN_SRR0
ld r12,_NIP(r1)
100: tdne r11,r12
EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
mfspr r11,SPRN_SRR1
ld r12,_MSR(r1)
100: tdne r11,r12
EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
.else
mfspr r11,SPRN_HSRR0
ld r12,_NIP(r1)
100: tdne r11,r12
EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
mfspr r11,SPRN_HSRR1
ld r12,_MSR(r1)
100: tdne r11,r12
EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
.endif
#endif
.endm

#ifdef CONFIG_PPC_BOOK3S
.macro system_call_vectored name trapnr
.globl system_call_vectored_\name
@@ -286,6 +310,11 @@ END_BTB_FLUSH_SECTION
ld r11,exception_marker@toc(r2)
std r11,-16(r10) /* "regshere" marker */

#ifdef CONFIG_PPC_BOOK3S
li r11,1
stb r11,PACASRR_VALID(r13)
#endif

/*
* We always enter kernel from userspace with irq soft-mask enabled and
* nothing pending. system_call_exception() will call
Expand All @@ -306,18 +335,27 @@ END_BTB_FLUSH_SECTION
bl syscall_exit_prepare

ld r2,_CCR(r1)
ld r6,_LINK(r1)
mtlr r6

#ifdef CONFIG_PPC_BOOK3S
lbz r4,PACASRR_VALID(r13)
cmpdi r4,0
bne 1f
li r4,0
stb r4,PACASRR_VALID(r13)
#endif
ld r4,_NIP(r1)
ld r5,_MSR(r1)
ld r6,_LINK(r1)
mtspr SPRN_SRR0,r4
mtspr SPRN_SRR1,r5
1:
DEBUG_SRR_VALID srr

BEGIN_FTR_SECTION
stdcx. r0,0,r1 /* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

mtspr SPRN_SRR0,r4
mtspr SPRN_SRR1,r5
mtlr r6

cmpdi r3,0
bne .Lsyscall_restore_regs
/* Zero volatile regs that may contain sensitive kernel data */
@@ -673,19 +711,40 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
kuap_user_restore r3, r4
#endif
.Lfast_user_interrupt_return_\srr\():
ld r11,_NIP(r1)
ld r12,_MSR(r1)

BEGIN_FTR_SECTION
ld r10,_PPR(r1)
mtspr SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

#ifdef CONFIG_PPC_BOOK3S
.ifc \srr,srr
lbz r4,PACASRR_VALID(r13)
.else
lbz r4,PACAHSRR_VALID(r13)
.endif
cmpdi r4,0
li r4,0
bne 1f
#endif
ld r11,_NIP(r1)
ld r12,_MSR(r1)
.ifc \srr,srr
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
stb r4,PACASRR_VALID(r13)
#endif
.else
mtspr SPRN_HSRR0,r11
mtspr SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
stb r4,PACAHSRR_VALID(r13)
#endif
.endif
DEBUG_SRR_VALID \srr

BEGIN_FTR_SECTION
stdcx. r0,0,r1 /* to clear the reservation */
@@ -730,15 +789,34 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

.Lfast_kernel_interrupt_return_\srr\():
cmpdi cr1,r3,0
#ifdef CONFIG_PPC_BOOK3S
.ifc \srr,srr
lbz r4,PACASRR_VALID(r13)
.else
lbz r4,PACAHSRR_VALID(r13)
.endif
cmpdi r4,0
li r4,0
bne 1f
#endif
ld r11,_NIP(r1)
ld r12,_MSR(r1)
.ifc \srr,srr
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
stb r4,PACASRR_VALID(r13)
#endif
.else
mtspr SPRN_HSRR0,r11
mtspr SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
stb r4,PACAHSRR_VALID(r13)
#endif
.endif
DEBUG_SRR_VALID \srr

BEGIN_FTR_SECTION
stdcx. r0,0,r1 /* to clear the reservation */
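
In C terms, the DEBUG_SRR_VALID macro above asserts that, at the point
of return, the live (H)SRR contents still match the saved NIP and MSR;
tdne plus an EMIT_BUG_ENTRY with BUGFLAG_WARNING | BUGFLAG_ONCE behaves
roughly like WARN_ON_ONCE(). A sketch, not kernel code:

/* What DEBUG_SRR_VALID checks when CONFIG_PPC_RFI_SRR_DEBUG=y. */
static void debug_srr_valid(struct pt_regs *regs)
{
	WARN_ON_ONCE(mfspr(SPRN_SRR0) != regs->nip);
	WARN_ON_ONCE(mfspr(SPRN_SRR1) != regs->msr);
}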
27 changes: 27 additions & 0 deletions arch/powerpc/kernel/exceptions-64s.S
@@ -485,6 +485,20 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
std r0,GPR0(r1) /* save r0 in stackframe */
std r10,GPR1(r1) /* save r1 in stackframe */

/* Mark our [H]SRRs valid for return */
li r10,1
.if IHSRR_IF_HVMODE
BEGIN_FTR_SECTION
stb r10,PACAHSRR_VALID(r13)
FTR_SECTION_ELSE
stb r10,PACASRR_VALID(r13)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
.elseif IHSRR
stb r10,PACAHSRR_VALID(r13)
.else
stb r10,PACASRR_VALID(r13)
.endif

.if ISET_RI
li r10,MSR_RI
mtmsrd r10,1 /* Set MSR_RI */
@@ -584,10 +598,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
.macro EXCEPTION_RESTORE_REGS hsrr=0
/* Move original SRR0 and SRR1 into the respective regs */
ld r9,_MSR(r1)
li r10,0
.if \hsrr
mtspr SPRN_HSRR1,r9
stb r10,PACAHSRR_VALID(r13)
.else
mtspr SPRN_SRR1,r9
stb r10,PACASRR_VALID(r13)
.endif
ld r9,_NIP(r1)
.if \hsrr
@@ -1718,6 +1735,8 @@ EXC_COMMON_BEGIN(hdecrementer_common)
*
* Be careful to avoid touching the kernel stack.
*/
li r10,0
stb r10,PACAHSRR_VALID(r13)
ld r10,PACA_EXGEN+EX_CTR(r13)
mtctr r10
mtcrf 0x80,r9
@@ -2513,6 +2532,8 @@ BEGIN_FTR_SECTION
ld r10,PACA_EXGEN+EX_CFAR(r13)
mtspr SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
li r10,0
stb r10,PACAHSRR_VALID(r13)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
@@ -2673,6 +2694,12 @@ masked_interrupt:
ori r11,r11,PACA_IRQ_HARD_DIS
stb r11,PACAIRQHAPPENED(r13)
2: /* done */
li r10,0
.if \hsrr
stb r10,PACAHSRR_VALID(r13)
.else
stb r10,PACASRR_VALID(r13)
.endif
ld r10,PACA_EXGEN+EX_CTR(r13)
mtctr r10
mtcrf 0x80,r9
(diff for the remaining changed files not shown)
