parisc: Add static branch and JUMP_LABEL feature

Signed-off-by: Helge Deller <deller@gmx.de>
Helge Deller 2019-05-03 23:51:00 +02:00
parent bdca5d64ee
commit 62217beb39
5 changed files with 104 additions and 0 deletions
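For context (not part of the patch): the jump label support added here lets kernel code use the static-key API and have the conditional compiled down to a single patchable instruction. A minimal usage sketch with hypothetical names:

#include <linux/jump_label.h>

void my_slow_feature(void);			/* hypothetical helper */

/* Hypothetical key, disabled by default. */
static DEFINE_STATIC_KEY_FALSE(my_feature_key);

void my_hot_path(void)
{
	/* Emits the NOP from arch_static_branch(); it is live-patched into
	 * a "b,n" branch once the key is enabled. */
	if (static_branch_unlikely(&my_feature_key))
		my_slow_feature();
}

void my_feature_setup(void)
{
	static_branch_enable(&my_feature_key);	/* patches every call site */
}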

arch/parisc/Kconfig

@@ -45,6 +45,8 @@ config PARISC
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HASH
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_REGS_AND_STACK_ACCESS_API

arch/parisc/include/asm/jump_label.h

@@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_PARISC_JUMP_LABEL_H
#define _ASM_PARISC_JUMP_LABEL_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <asm/assembly.h>

#define JUMP_LABEL_NOP_SIZE 4

static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
	asm_volatile_goto("1:\n\t"
		 "nop\n\t"
		 ".pushsection __jump_table, \"aw\"\n\t"
		 ".word 1b - ., %l[l_yes] - .\n\t"
		 __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
		 ".popsection\n\t"
		 : : "i" (&((char *)key)[branch]) : : l_yes);

	return false;
l_yes:
	return true;
}

static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
	asm_volatile_goto("1:\n\t"
		 "b,n %l[l_yes]\n\t"
		 ".pushsection __jump_table, \"aw\"\n\t"
		 ".word 1b - ., %l[l_yes] - .\n\t"
		 __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
		 ".popsection\n\t"
		 : : "i" (&((char *)key)[branch]) : : l_yes);

	return false;
l_yes:
	return true;
}

#endif /* __ASSEMBLY__ */
#endif

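A side note on the table emitted above (not part of the patch): together with HAVE_ARCH_JUMP_LABEL_RELATIVE from the Kconfig hunk, the two 32-bit ".word" offsets plus the word-sized key offset are read by the generic jump-label core as roughly the following entry; a sketch based on include/linux/jump_label.h:

/* Sketch of the relative jump_entry layout used with
 * CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE; each field is an offset
 * measured from that field's own address. */
struct jump_entry {
	s32 code;	/* the NOP/branch site ("1:" above)                 */
	s32 target;	/* the l_yes label                                  */
	long key;	/* struct static_key; the low bit carries the
			 * "branch" flag encoded via &((char *)key)[branch] */
};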
arch/parisc/kernel/Makefile

@@ -33,5 +33,6 @@ obj-$(CONFIG_64BIT) += perf.o perf_asm.o $(obj64-y)
obj-$(CONFIG_PARISC_CPU_TOPOLOGY) += topology.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KPROBES) += kprobes.o

arch/parisc/kernel/jump_label.c

@@ -0,0 +1,55 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2019 Helge Deller <deller@gmx.de>
 *
 * Based on arch/arm64/kernel/jump_label.c
 */
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <linux/bug.h>
#include <asm/alternative.h>
#include <asm/patch.h>

static inline int reassemble_17(int as17)
{
	return (((as17 & 0x10000) >> 16) |
		((as17 & 0x0f800) << 5) |
		((as17 & 0x00400) >> 8) |
		((as17 & 0x003ff) << 3));
}

void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	void *addr = (void *)jump_entry_code(entry);
	u32 insn;

	if (type == JUMP_LABEL_JMP) {
		void *target = (void *)jump_entry_target(entry);
		int distance = target - addr;
		/*
		 * Encode the PA1.1 "b,n" instruction with a 17-bit
		 * displacement. In case we hit the BUG(), we could use
		 * another branch instruction with a 22-bit displacement on
		 * 64-bit CPUs instead. But this seems sufficient for now.
		 */
		distance -= 8;
		BUG_ON(distance > 262143 || distance < -262144);
		insn = 0xe8000002 | reassemble_17(distance >> 2);
	} else {
		insn = INSN_NOP;
	}

	patch_text(addr, insn);
}

void arch_jump_label_transform_static(struct jump_entry *entry,
				      enum jump_label_type type)
{
	/*
	 * We use the architected NOP in arch_static_branch, so there's no
	 * need to patch an identical NOP over the top of it here. The core
	 * will call arch_jump_label_transform from a module notifier if the
	 * NOP needs to be replaced by a branch.
	 */
}

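An aside on the BUG_ON() bounds above (not part of the patch): the b,n encoding holds a 17-bit signed word displacement taken relative to the branch address plus 8, which is where the roughly +/-256 KiB limit comes from. A hypothetical helper spelling out the same range check:

/* Hypothetical sketch: a 17-bit signed word displacement covers
 * -65536 .. +65535 instruction words, i.e. roughly +/- 256 KiB. */
static bool b17_reachable(void *addr, void *target)
{
	long distance = (long)(target - addr) - 8;	/* branch base is addr + 8 */
	long words = distance >> 2;			/* encoded as a word count */

	return words >= -65536 && words <= 65535;
}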
arch/parisc/kernel/vmlinux.lds.S

@@ -18,6 +18,9 @@
*(.data..vm0.pgd) \
*(.data..vm0.pte)
/* No __ro_after_init data in the .rodata section - which will always be ro */
#define RO_AFTER_INIT_DATA
#include <asm-generic/vmlinux.lds.h>
/* needed for the processor specific cache alignment size */
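For background (not part of the patch): defining RO_AFTER_INIT_DATA as empty keeps __ro_after_init objects out of parisc's .rodata, which, as the comment above notes, is always mapped read-only. A hypothetical example of data affected by that placement decision:

#include <linux/cache.h>

/* Hypothetical example: written once during boot, read-only afterwards.
 * On parisc it must not land in the always-read-only .rodata section. */
static unsigned long boot_tunable __ro_after_init = 42;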