diff --git a/arch/powerpc/include/asm/elf_util.h b/arch/powerpc/include/asm/elf_util.h
index 37372559fe6295..b98a1611e2ac53 100644
--- a/arch/powerpc/include/asm/elf_util.h
+++ b/arch/powerpc/include/asm/elf_util.h
@@ -39,11 +39,12 @@ typedef unsigned long func_desc_t;
 typedef struct ppc64_opd_entry func_desc_t;
 #endif /* PPC64_ELF_ABI_v2 */
 
-/* Like PPC32, we need little trampolines to do > 24-bit jumps (into
-   the kernel itself).  But on PPC64, these need to be used for every
-   jump, actually, to reset r2 (TOC+0x8000). */
-struct ppc64_stub_entry
-{
+/*
+ * Like PPC32, we need little trampolines to do > 24-bit jumps (into
+ * the kernel itself). But on PPC64, these need to be used for every
+ * jump, actually, to reset r2 (TOC+0x8000).
+ */
+struct ppc64_stub_entry {
 	/* 28 byte jump instruction sequence (7 instructions). We only
 	 * need 6 instructions on ABIv2 but we always allocate 7 so
 	 * so we don't have to modify the trampoline load instruction. */
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index f2073115d518b9..c1aa2673c1dd1c 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -14,12 +14,14 @@
 #include 
 #include 
 
-/* Both low and high 16 bits are added as SIGNED additions, so if low
-   16 bits has high bit set, high 16 bits must be adjusted.  These
-   macros do that (stolen from binutils). */
+/*
+ * Both low and high 16 bits are added as SIGNED additions, so if low 16 bits
+ * has high bit set, high 16 bits must be adjusted. These macros do that
+ * (stolen from binutils).
+ */
 #define PPC_LO(v) ((v) & 0xffff)
 #define PPC_HI(v) (((v) >> 16) & 0xffff)
-#define PPC_HA(v) PPC_HI ((v) + 0x8000)
+#define PPC_HA(v) PPC_HI((v) + 0x8000)
 
 #ifndef __powerpc64__
 /*
diff --git a/arch/powerpc/kernel/elf_util_64.c b/arch/powerpc/kernel/elf_util_64.c
index decad2c34f3822..b64a06554e7460 100644
--- a/arch/powerpc/kernel/elf_util_64.c
+++ b/arch/powerpc/kernel/elf_util_64.c
@@ -33,9 +33,11 @@ struct module;
 
 static unsigned int local_entry_offset(const Elf64_Sym *sym)
 {
-	/* sym->st_other indicates offset to local entry point
+	/*
+	 * sym->st_other indicates offset to local entry point
 	 * (otherwise it will assume r12 is the address of the start
-	 * of function and try to derive r2 from it). */
+	 * of function and try to derive r2 from it).
+	 */
 	return PPC64_LOCAL_ENTRY_OFFSET(sym->st_other);
 }
 #else
@@ -176,19 +178,21 @@ int elf64_apply_relocate_add(const struct elf_info *elf_info,
 			/* FIXME: Handle weak symbols here --RR */
 			if (sym->st_shndx == SHN_UNDEF) {
 				/* External: go via stub */
-				value = stub_for_addr(elf_info, value, obj_name);
+				value = stub_for_addr(elf_info, value,
+						      obj_name);
 				if (!value)
 					return -ENOENT;
 				if (!restore_r2((u32 *)location + 1, obj_name))
 					return -ENOEXEC;
 
-				squash_toc_save_inst(strtab + sym->st_name, value);
+				squash_toc_save_inst(strtab + sym->st_name,
+						     value);
 			} else
 				value += local_entry_offset(sym);
 
 			/* Convert value to relative */
 			value -= (unsigned long)location;
-			if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0){
+			if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0) {
 				pr_err("%s: REL24 %li out of range!\n",
 				       obj_name, (long int)value);
 				return -ENOEXEC;
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index b929560ecd5b9f..11aaacf7fdaa03 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -377,10 +377,11 @@ static inline int create_stub(const struct elf_info *elf_info,
 unsigned long stub_for_addr(const struct elf_info *elf_info,
			    unsigned long addr, const char *obj_name)
 {
-	struct elf_shdr *stubs_sec = &elf_info->sechdrs[elf_info->stubs_section];
+	struct elf_shdr *stubs_sec;
 	struct ppc64_stub_entry *stubs;
 	unsigned int i, num_stubs;
 
+	stubs_sec = &elf_info->sechdrs[elf_info->stubs_section];
 	num_stubs = stubs_sec->sh_size / sizeof(*stubs);
 
 	/* Find this stub, or if that fails, the next avail. entry */
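
For reference, a minimal userspace sketch (illustration only, not part of the
kernel sources) of the arithmetic behind the PPC_HA() cleanup above: the low
halfword of a relocated value ends up in instructions that sign-extend their
16-bit immediate (addi, D-form load/store displacements), so when bit 15 of
the low half is set the high half must be bumped by one, which is exactly what
adding 0x8000 before shifting does. The PPC_LO/PPC_HI/PPC_HA definitions are
copied from the module.h hunk; the file name and everything else here is made
up for the demonstration.

/* build with: cc -o ppc_ha_demo ppc_ha_demo.c */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI((v) + 0x8000)

int main(void)
{
	uint32_t samples[] = {
		0x00000000, 0x00007fff, 0x00008000, 0x12348000,
		0xdeadbeef, 0xffffffff,
	};
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint32_t v = samples[i];
		uint32_t lo = PPC_LO(v);
		/* sign-extend the low halfword, the way addi would */
		uint32_t lo_sext = (lo ^ 0x8000) - 0x8000;
		/* adjusted high half plus sign-extended low half */
		uint32_t rebuilt = ((uint32_t)PPC_HA(v) << 16) + lo_sext;

		printf("v=%#010x lo=%#06x ha=%#06x rebuilt=%#010x\n",
		       (unsigned int)v, (unsigned int)lo,
		       (unsigned int)PPC_HA(v), (unsigned int)rebuilt);
		assert(rebuilt == v);
	}

	return 0;
}

The REL24 test later in the elf_util_64.c hunk (value + 0x2000000 > 0x3ffffff)
is the companion range check: a 24-bit branch field shifted left by two only
reaches +/-32 MB of the call site, which is why calls to external symbols are
routed through the ppc64_stub_entry trampolines declared in elf_util.h.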