arm64: xchg: patch in lse instructions when supported by the CPU
On CPUs which support the LSE atomic instructions introduced in ARMv8.1,
it makes sense to use them in preference to ll/sc sequences.

This patch introduces runtime patching of our xchg primitives so that
the LSE swp instruction (yes, you read right!) is used instead.

Reviewed-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
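
Editor's note on the mechanism (a sketch, not part of the commit): ARM64_LSE_ATOMIC_INSN() is built on the arm64 alternatives framework. The LL/SC sequence is assembled into the kernel text as the default; the LSE sequence is stored in a replacement section and, at boot, apply_alternatives() copies it over the default on CPUs that report the ARM64_HAS_LSE_ATOMICS capability. Both sequences must occupy the same number of instructions, which is why the single swp instruction in each case below is padded with three nops to match the four-instruction ldxr/stlxr/cbnz/dmb sequence. Roughly, assuming the <asm/lse.h> definitions added earlier in this series (details may differ from what is shown here):

/* Sketch of <asm/lse.h>: use the patching variant only when the
 * toolchain and config support LSE. */
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
/* Default to the LL/SC sequence; record the LSE sequence as a boot-time
 * replacement keyed on the ARM64_HAS_LSE_ATOMICS CPU capability. */
#define ARM64_LSE_ATOMIC_INSN(llsc, lse)				\
	ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
#else
/* No LSE support built in: always emit the LL/SC sequence. */
#define ARM64_LSE_ATOMIC_INSN(llsc, lse)	llsc
#endif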
wildea01 committed Jul 27, 2015
1 parent 084f903 commit c8366ba
Showing 1 changed file with 33 additions and 5 deletions.
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -22,44 +22,73 @@
 #include <linux/mmdebug.h>
 
 #include <asm/barrier.h>
+#include <asm/lse.h>
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 {
 	unsigned long ret, tmp;
 
 	switch (size) {
 	case 1:
-		asm volatile("// __xchg1\n"
+		asm volatile(ARM64_LSE_ATOMIC_INSN(
+		/* LL/SC */
 		"1: ldxrb %w0, %2\n"
 		" stlxrb %w1, %w3, %2\n"
 		" cbnz %w1, 1b\n"
+		" dmb ish",
+		/* LSE atomics */
+		" nop\n"
+		" swpalb %w3, %w0, %2\n"
+		" nop\n"
+		" nop")
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
 			: "r" (x)
 			: "memory");
 		break;
 	case 2:
-		asm volatile("// __xchg2\n"
+		asm volatile(ARM64_LSE_ATOMIC_INSN(
+		/* LL/SC */
 		"1: ldxrh %w0, %2\n"
 		" stlxrh %w1, %w3, %2\n"
 		" cbnz %w1, 1b\n"
+		" dmb ish",
+		/* LSE atomics */
+		" nop\n"
+		" swpalh %w3, %w0, %2\n"
+		" nop\n"
+		" nop")
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
 			: "r" (x)
 			: "memory");
 		break;
 	case 4:
-		asm volatile("// __xchg4\n"
+		asm volatile(ARM64_LSE_ATOMIC_INSN(
+		/* LL/SC */
 		"1: ldxr %w0, %2\n"
 		" stlxr %w1, %w3, %2\n"
 		" cbnz %w1, 1b\n"
+		" dmb ish",
+		/* LSE atomics */
+		" nop\n"
+		" swpal %w3, %w0, %2\n"
+		" nop\n"
+		" nop")
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
 			: "r" (x)
 			: "memory");
 		break;
 	case 8:
-		asm volatile("// __xchg8\n"
+		asm volatile(ARM64_LSE_ATOMIC_INSN(
+		/* LL/SC */
 		"1: ldxr %0, %2\n"
 		" stlxr %w1, %3, %2\n"
 		" cbnz %w1, 1b\n"
+		" dmb ish",
+		/* LSE atomics */
+		" nop\n"
+		" swpal %3, %0, %2\n"
+		" nop\n"
+		" nop")
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
 			: "r" (x)
 			: "memory");
@@ -68,7 +97,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 		BUILD_BUG();
 	}
 
-	smp_mb();
 	return ret;
 }

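A note on the second hunk: the trailing smp_mb() can go because the LL/SC alternative now ends in its own dmb ish, while the LSE swpal/swpalb/swpalh forms carry acquire and release semantics of their own. For illustration only (not part of the commit, and using a hypothetical helper name), the contract __xchg() implements can be written with C11 atomics as a fully ordered exchange:

#include <stdatomic.h>

/*
 * Illustration: a fully ordered atomic exchange, i.e. the semantics the
 * kernel's __xchg() provides.  Built for ARMv8.1 this may compile down to
 * a single swpal, mirroring the LSE path patched in above.
 */
static unsigned long xchg_sketch(_Atomic unsigned long *ptr,
                                 unsigned long newval)
{
	return atomic_exchange_explicit(ptr, newval, memory_order_seq_cst);
}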