#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

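/*
 * PA-RISC's only atomic read-modify-write primitive is ldcw, which
 * loads a word and atomically stores zero into it.  A lock word is
 * therefore 1 when free and 0 when held -- the inverse of most other
 * architectures.  ldcw also requires its operand to be 16-byte
 * aligned, which is what __ldcw_align() (from asm/system.h) digs out
 * of the lock structure.
 */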
static inline int __raw_spin_is_locked(raw_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
		do { cpu_relax(); } while (__raw_spin_is_locked(x))

static inline void __raw_spin_lock(raw_spinlock_t *x)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	/* Grab the lock with ldcw; on failure, spin with ordinary
	 * loads until the word goes non-zero again, so we don't
	 * hammer the cacheline with atomic operations while the
	 * holder is busy. */
	while (__ldcw(a) == 0)
		while (*a == 0)
			cpu_relax();
	mb();
}

static inline void __raw_spin_unlock(raw_spinlock_t *x)
{
	volatile unsigned int *a;
	mb();
	a = __ldcw_align(x);
	*a = 1;		/* an ordinary store of 1 releases the lock */
	mb();
}

static inline int __raw_spin_trylock(raw_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	mb();
	a = __ldcw_align(x);
	ret = __ldcw(a) != 0;	/* the old value was non-zero iff we got it */
	mb();

	return ret;
}

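/*
 * Note these __raw_* ops are not called directly; the generic layer
 * in include/linux/spinlock.h wraps them.  Ignoring debug and
 * preemption details, something like
 *
 *	spin_lock(&l);
 *
 * boils down to preempt_disable() followed by
 * __raw_spin_lock(&l->raw_lock), so the raw ops only have to supply
 * the architecture-specific atomicity and memory ordering.
 */
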
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 */

#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)

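/*
 * rw->counter is protected by rw->lock: a positive value counts the
 * current readers, 0 means unlocked, and -1 means a writer holds the
 * lock.
 */
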
/* read_lock, read_unlock are pretty straightforward.  Of course it is
 * unfortunate that we end up saving/restoring flags twice for
 * read_lock_irqsave and friends. */

static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_spin_lock(&rw->lock);

	rw->counter++;

	__raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_spin_lock(&rw->lock);

	rw->counter--;

	__raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/* write_lock is less trivial.  We optimistically grab the lock and check
 * if we surprised any readers.  If so we release the lock and wait until
 * they're all gone before trying again.
 *
 * Also note that we don't use the _irqsave / _irqrestore suffixes here.
 * If we're called with interrupts enabled and there are readers (or other
 * writers) in interrupt handlers, someone has made a mistake and we'd
 * deadlock sooner or later anyway. prumpf */

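/* Nothing prevents new readers from slipping in while a writer spins
 * in the retry loop below, so a steady stream of readers can in
 * principle starve a writer. */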
static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
{
retry:
	__raw_spin_lock(&rw->lock);

	if (rw->counter != 0) {
		/* this basically never happens */
		__raw_spin_unlock(&rw->lock);

		/* wait for the readers to drain, then try again */
		while (rw->counter != 0)
			cpu_relax();

		goto retry;
	}

	/* got it.  now leave without unlocking */
	rw->counter = -1; /* remember we are locked */
}

/* write_unlock is absolutely trivial - we don't have to wait for anything */

static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	rw->counter = 0;
	__raw_spin_unlock(&rw->lock);
}

static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);
	if (rw->counter != 0) {
		/* this basically never happens */
		__raw_spin_unlock(&rw->lock);

		return 0;
	}

	/* got it.  now leave without unlocking */
	rw->counter = -1; /* remember we are locked */
	return 1;
}

static __inline__ int __raw_is_read_locked(raw_rwlock_t *rw)
{
	return rw->counter > 0;
}

static __inline__ int __raw_is_write_locked(raw_rwlock_t *rw)
{
	return rw->counter < 0;
}

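/*
 * Typical use goes through the generic wrappers; e.g. (with a made-up
 * lock name, using the generic RW_LOCK_UNLOCKED initializer):
 *
 *	rwlock_t tbl_lock = RW_LOCK_UNLOCKED;
 *
 *	read_lock(&tbl_lock);		readers may run concurrently
 *	... lookup ...
 *	read_unlock(&tbl_lock);
 *
 *	write_lock(&tbl_lock);		a writer excludes everyone else
 *	... update ...
 *	write_unlock(&tbl_lock);
 */
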
#endif /* __ASM_SPINLOCK_H */