riscv: make use of the barrier functions from Linux

Replace the barrier functions in arch/riscv/include/asm/io.h with those
defined in barrier.h, which is imported from Linux. The imported version
is modified to drop the include of asm-generic/barrier.h, which is
neither available in U-Boot nor required.
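
As an illustration (not part of the patch itself), mb() now expands to a
real fence instruction covering both memory accesses and device I/O:

	__asm__ __volatile__ ("fence iorw,iorw" : : : "memory");

whereas the old dmb() in io.h was only a compiler barrier and emitted no
instruction at all.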

Signed-off-by: Lukas Auer <lukas.auer@aisec.fraunhofer.de>
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
Reviewed-by: Rick Chen <rick@andestech.com>
Author: Lukas Auer, 2018-11-22 11:26:18 +01:00 (committed by Andes)
parent b2c860c6dc
commit fc8c76f42e
2 changed files with 71 additions and 7 deletions

arch/riscv/include/asm/barrier.h (new file)

@@ -0,0 +1,67 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2013 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * Taken from Linux arch/riscv/include/asm/barrier.h, which is based on
 * arch/arm/include/asm/barrier.h
 */

#ifndef _ASM_RISCV_BARRIER_H
#define _ASM_RISCV_BARRIER_H

#ifndef __ASSEMBLY__

#define nop()		__asm__ __volatile__ ("nop")

#define RISCV_FENCE(p, s) \
	__asm__ __volatile__ ("fence " #p "," #s : : : "memory")

/* These barriers need to enforce ordering on both devices or memory. */
#define mb()		RISCV_FENCE(iorw,iorw)
#define rmb()		RISCV_FENCE(ir,ir)
#define wmb()		RISCV_FENCE(ow,ow)

/* These barriers do not need to enforce ordering on devices, just memory. */
#define __smp_mb()	RISCV_FENCE(rw,rw)
#define __smp_rmb()	RISCV_FENCE(r,r)
#define __smp_wmb()	RISCV_FENCE(w,w)

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	RISCV_FENCE(rw,w);						\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	RISCV_FENCE(r,rw);						\
	___p1;								\
})

/*
 * This is a very specific barrier: it's currently only used in two places in
 * the kernel, both in the scheduler. See include/linux/spinlock.h for the two
 * orderings it guarantees, but the "critical section is RCsc" guarantee
 * mandates a barrier on RISC-V. The sequence looks like:
 *
 *    lr.aq lock
 *    sc    lock <= LOCKED
 *    smp_mb__after_spinlock()
 *    // critical section
 *    lr    lock
 *    sc.rl lock <= UNLOCKED
 *
 * The AQ/RL pair provides a RCpc critical section, but there's not really any
 * way we can take advantage of that here because the ordering is only enforced
 * on that one lock. Thus, we're just doing a full fence.
 */
#define smp_mb__after_spinlock()	RISCV_FENCE(rw,rw)

#endif /* __ASSEMBLY__ */

#endif /* _ASM_RISCV_BARRIER_H */
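
To make the intended use concrete, here is a minimal sketch of a driver
publishing a DMA descriptor and then ringing an MMIO doorbell. Everything in
it (the desc layout, DESC_READY, ring_doorbell()) is hypothetical and only
illustrates wmb() ordering normal memory stores ahead of a device write:

#include <asm/io.h>		/* writel(); pulls in asm/barrier.h for wmb() */
#include <linux/types.h>	/* u32, u64 */

/* Hypothetical descriptor layout, for illustration only. */
struct desc {
	u64 buf_addr;
	u32 len;
	u32 flags;
};

#define DESC_READY	(1U << 0)	/* hypothetical "ready for device" flag */

static void ring_doorbell(struct desc *d, void __iomem *doorbell)
{
	d->flags |= DESC_READY;	/* publish the descriptor in memory */
	wmb();			/* fence ow,ow: the stores above complete
				 * before the doorbell write below */
	writel(1, doorbell);	/* device may now fetch the descriptor */
}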

arch/riscv/include/asm/io.h

@@ -10,6 +10,7 @@
 #ifdef __KERNEL__
 #include <linux/types.h>
+#include <asm/barrier.h>
 #include <asm/byteorder.h>
 static inline void sync(void)
@@ -91,13 +92,9 @@ static inline phys_addr_t virt_to_phys(void *vaddr)
 #define __raw_readl(a)	__arch_getl(a)
 #define __raw_readq(a)	__arch_getq(a)
-/*
- * TODO: The kernel offers some more advanced versions of barriers, it might
- * have some advantages to use them instead of the simple one here.
- */
-#define dmb()		__asm__ __volatile__ ("" : : : "memory")
-#define __iormb()	dmb()
-#define __iowmb()	dmb()
+#define dmb()		mb()
+#define __iormb()	rmb()
+#define __iowmb()	wmb()
 static inline void writeb(u8 val, volatile void __iomem *addr)
 {
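
The writeX()/readX() accessors (writeb() is truncated above) are where
__iowmb() and __iormb() take effect, so every MMIO access is now accompanied
by a real fence. A sketch of the resulting behavior, assuming the usual
U-Boot accessor pattern:

static inline void writeb(u8 val, volatile void __iomem *addr)
{
	__iowmb();		/* wmb(): fence ow,ow before the device store */
	__arch_putb(val, addr);
}

static inline u8 readb(const volatile void __iomem *addr)
{
	u8 val = __arch_getb(addr);

	__iormb();		/* rmb(): fence ir,ir after the device load */
	return val;
}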