/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_BARRIER_H
#define _ASM_POWERPC_BARRIER_H

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory). The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 * across this point (nop on PPC).
 *
 * *mb() variants without smp_ prefix must order all types of memory
 * operations with one another. sync is the only instruction sufficient
 * to do this.
 *
 * For the smp_ barriers, ordering is for cacheable memory operations
 * only. We have to use the sync instruction for smp_mb(), since lwsync
 * doesn't order loads with respect to previous stores. Lwsync can be
 * used for smp_rmb() and smp_wmb().
 *
 * However, on CPUs that don't support lwsync, lwsync actually maps to a
 * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()	do { } while(0)
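
/*
 * Illustrative sketch, not part of the original header: a typical driver
 * use of wmb() is to order the cacheable stores that fill a descriptor
 * ahead of the non-cacheable doorbell store that tells the device to
 * read it. The descriptor layout, function, and register below are
 * hypothetical.
 */
#if 0
struct example_desc {
	unsigned long addr;
	unsigned long len;
};

static void example_post_buffer(struct example_desc *desc,
				volatile unsigned long *doorbell,
				unsigned long addr, unsigned long len)
{
	desc->addr = addr;	/* cacheable stores filling the descriptor */
	desc->len = len;
	wmb();			/* sync: descriptor stores are performed
				 * before the I/O store below */
	*doorbell = 1;		/* non-cacheable store: kick the device */
}
#endif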

#define set_mb(var, value)	do { var = value; mb(); } while (0)

#ifdef CONFIG_SMP

#ifdef __SUBARCH_HAS_LWSYNC
#    define SMPWMB	LWSYNC
#else
#    define SMPWMB	eieio
#endif

#define __lwsync()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")

#define smp_mb()	mb()
#define smp_rmb()	__lwsync()
#define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define __lwsync()	barrier()

#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known. For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
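
/*
 * Illustrative sketch, not part of the original header: data_barrier()
 * keeps later instructions from starting until one particular loaded
 * value is known. The twi never traps with these operands, but it makes
 * the isync depend on %0, so the CPU must resolve the load feeding 'f'
 * first. The function and variables are hypothetical.
 */
#if 0
static int example_read_after_flag(volatile int *flag, int *data)
{
	int f = *flag;		/* load whose value gates what follows */

	data_barrier(f);	/* nothing below starts until f is known */
	return f ? *data : -1;
}
#endif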

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__lwsync();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__lwsync();							\
	___p1;								\
})
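
/*
 * Illustrative sketch, not part of the original header: the
 * message-passing pattern these helpers implement. The lwsync in
 * smp_store_release() orders the message store before the flag store,
 * and the lwsync in smp_load_acquire() orders the flag load before the
 * message load, so a consumer that observes the flag set also observes
 * the message. The variables and functions are hypothetical.
 */
#if 0
static int example_msg;
static int example_ready;

static void example_producer(int msg)
{
	example_msg = msg;			/* plain store */
	smp_store_release(&example_ready, 1);	/* lwsync, then set flag */
}

static int example_consumer(void)
{
	if (smp_load_acquire(&example_ready))	/* read flag, then lwsync */
		return example_msg;		/* sees the producer's msg */
	return -1;
}
#endif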
86
Peter Zijlstrac6450732014-03-13 19:00:35 +010087#define smp_mb__before_atomic() smp_mb()
88#define smp_mb__after_atomic() smp_mb()
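
/*
 * Illustrative sketch, not part of the original header: non-value-
 * returning atomics such as atomic_inc() imply no barrier on powerpc,
 * so callers needing ordering bracket them with these helpers (each a
 * full smp_mb() here). The function and variables are hypothetical.
 */
#if 0
static void example_publish_and_count(int *slot, atomic_t *nr_published)
{
	*slot = 1;			/* make the update visible... */
	smp_mb__before_atomic();	/* ...before the counter moves */
	atomic_inc(nr_published);
}
#endif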

#endif /* _ASM_POWERPC_BARRIER_H */