Mirror of https://github.com/brain-hackers/linux-brain.git (synced 2024-06-09 23:36:23 +09:00)
commit 2106b21a8a

commit fedb8da963 upstream.
For years I thought all parisc machines executed loads and stores in
order. However, Jeff Law recently indicated on gcc-patches that this is
not correct. There are various degrees of out-of-order execution all the
way back to the PA7xxx processor series (hit-under-miss). The PA8xxx
series has full out-of-order execution for both integer operations and
loads and stores.
This is described in the following article:
http://web.archive.org/web/20040214092531/http://www.cpus.hp.com/technical_references/advperf.shtml
For this reason, we need to define mb() and to insert a memory barrier
before the store that unlocks a spinlock. This ensures that all memory
accesses complete before the lock is released. The ldcw instruction used
to take the lock performs the same function on entry.
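
To make the fix concrete, here is a minimal sketch of the C-level unlock
path the message refers to (modeled on parisc's arch_spin_unlock; the
exact body is an assumption for illustration, not a quote of the patch):

    static inline void arch_spin_unlock(arch_spinlock_t *x)
    {
            volatile unsigned int *a = __ldcw_align(x);

            mb();   /* complete all prior memory accesses ... */
            *a = 1; /* ... before the store that releases the lock */
    }

No extra barrier is needed on the acquire side: the ldcw
(load-and-clear-word) instruction that takes the lock already acts as a
memory barrier on entry to the critical section.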
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: stable@vger.kernel.org # 4.0+
Signed-off-by: Helge Deller <deller@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
33 lines
785 B
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifndef __ASSEMBLY__

/* The synchronize caches instruction executes as a nop on systems in
   which all memory references are performed in order. */
#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")

#if defined(CONFIG_SMP)
#define mb() do { synchronize_caches(); } while (0)
#define rmb() mb()
#define wmb() mb()
#define dma_rmb() mb()
#define dma_wmb() mb()
#else
#define mb() barrier()
#define rmb() barrier()
#define wmb() barrier()
#define dma_rmb() barrier()
#define dma_wmb() barrier()
#endif

#define __smp_mb() mb()
#define __smp_rmb() mb()
#define __smp_wmb() mb()

#include <asm-generic/barrier.h>

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_BARRIER_H */
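
As a quick illustration of why mb() must expand to "sync" on SMP parisc,
consider the classic message-passing pattern below. This is a sketch, not
kernel code; flag and data are hypothetical shared variables used only to
show the intended ordering:

    /* CPU 0: publish data, then raise the flag. */
    data = 42;
    mb();           /* "sync": the store to data is ordered before flag */
    flag = 1;

    /* CPU 1: wait for the flag, then read data. */
    while (flag == 0)
            ;       /* spin */
    mb();           /* "sync": don't read data before observing flag */
    assert(data == 42);

Without the barriers, the PA8xxx's out-of-order loads and stores could let
CPU 1 observe flag == 1 while still reading a stale value of data.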