Skip to content

Commit 6541d98

Browse files
danglin44 authored and gregkh committed
parisc: Remove unnecessary barriers from spinlock.h
commit 3b885ac upstream. Now that mb() is an instruction barrier, it will slow performance if we issue unnecessary barriers. The spinlock defines have a number of unnecessary barriers.  The __ldcw() define is both a hardware and compiler barrier.  The mb() barriers in the routines using __ldcw() serve no purpose. The only barrier needed is the one in arch_spin_unlock().  We need to ensure all accesses are complete prior to releasing the lock. Signed-off-by: John David Anglin <dave.anglin@bell.net> Cc: stable@vger.kernel.org # 4.0+ Signed-off-by: Helge Deller <deller@gmx.de> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 3f59cf4 commit 6541d98

1 file changed

Lines changed: 2 additions & 6 deletions

File tree

arch/parisc/include/asm/spinlock.h

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
2020
{
2121
volatile unsigned int *a;
2222

23-
mb();
2423
a = __ldcw_align(x);
2524
while (__ldcw(a) == 0)
2625
while (*a == 0)
@@ -30,27 +29,24 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
3029
local_irq_disable();
3130
} else
3231
cpu_relax();
33-
mb();
3432
}
3533

3634
static inline void arch_spin_unlock(arch_spinlock_t *x)
3735
{
3836
volatile unsigned int *a;
39-
mb();
37+
4038
a = __ldcw_align(x);
41-
*a = 1;
4239
mb();
40+
*a = 1;
4341
}
4442

4543
static inline int arch_spin_trylock(arch_spinlock_t *x)
4644
{
4745
volatile unsigned int *a;
4846
int ret;
4947

50-
mb();
5148
a = __ldcw_align(x);
5249
ret = __ldcw(a) != 0;
53-
mb();
5450

5551
return ret;
5652
}

0 commit comments

Comments
 (0)