ACK: [SRU][Xenial][PATCH 1/1] UBUNTU: SAUCE: (no-up) s390: fix rwlock implementation

Kleber Souza kleber.souza at canonical.com
Fri May 11 10:53:36 UTC 2018


On 05/08/18 00:04, Joseph Salisbury wrote:
> From: Heiko Carstens <heiko.carstens at de.ibm.com>
> 
> BugLink: http://bugs.launchpad.net/bugs/1761674
> 
> Description:  kernel: fix rwlock implementation
> Symptom:      Kernel hangs, due to deadlock on an rwlock.
> Problem:      With upstream commit 94232a4332de ("s390/rwlock: improve writer
>               fairness") rwlock writer fairness was supposed to be
>               implemented. If a writer tries to take an rwlock, it
>               unconditionally sets the writer bit within the lock word and
>               waits until all readers have released the lock. This, however,
>               can lead to a deadlock, since rwlocks can be taken recursively
>               by readers. If e.g. CPU 0 holds the lock as a reader, and CPU 1
>               wants to write-lock the lock, then CPU 1 sets the writer bit and
>               afterwards busy waits for CPU 0 to release the lock. If CPU 0 now
>               tries to read-lock the lock again (recursively), it will also busy
>               wait until CPU 1 removes the writer bit, which will never happen,
>               since CPU 1 waits for the first reader on CPU 0 to release the lock.
> Solution:     Revert the rwlock writer fairness semantics again.
> 
> Signed-off-by: Heiko Carstens <heiko.carstens at de.ibm.com>
> Signed-off-by: Joseph Salisbury <joseph.salisbury at canonical.com>

Scope limited to a single arch (s390), tested by the vendor.
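
As additional context, the deadlock in the quoted Problem description comes
down to two lock-word conditions that can never both become true again. Below
is a minimal user-space sketch, not the kernel implementation; it only reuses
the bit layout visible in the patch (0x80000000 is the writer bit, the low 31
bits count readers), and all names in it are illustrative.

/*
 * Minimal user-space sketch (not the kernel code) of the lock-word
 * states described in the Problem section above.  The bit layout is
 * the one visible in the patch: 0x80000000 is the writer bit, the
 * low 31 bits count readers.  Function names are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define WRITER_BIT 0x80000000u

/* A reader may take (or re-take) the lock only while the writer bit
 * is clear; the kernel expresses this as "(int) old >= 0". */
static int reader_can_lock(uint32_t lock)
{
	return (lock & WRITER_BIT) == 0;
}

/* A waiting writer can only complete once no readers hold the lock,
 * i.e. "(old & 0x7fffffff) == 0". */
static int writer_can_complete(uint32_t lock)
{
	return (lock & 0x7fffffffu) == 0;
}

int main(void)
{
	/* CPU 0 takes the lock as a reader: reader count becomes 1. */
	uint32_t fair = 1;

	/* Writer fairness (the behaviour being reverted): CPU 1 sets
	 * the writer bit immediately and then busy waits for the
	 * reader count to drop to zero. */
	fair |= WRITER_BIT;
	printf("fairness: lock=%#010x recursive-read=%d write-complete=%d\n",
	       (unsigned int)fair, reader_can_lock(fair),
	       writer_can_complete(fair));
	/* Both conditions print 0: CPU 0 cannot re-enter as a reader
	 * and CPU 1 cannot finish taking the write lock -> deadlock. */

	/* Restored behaviour: CPU 1 leaves the lock word untouched and
	 * only attempts compare-and-swap(0 -> WRITER_BIT) once the
	 * word reads 0, so a waiting writer never blocks readers. */
	uint32_t restored = 1;
	printf("restored: lock=%#010x recursive-read=%d write-complete=%d\n",
	       (unsigned int)restored, reader_can_lock(restored),
	       writer_can_complete(restored));
	/* The recursive read lock still succeeds; once CPU 0 drops
	 * both read holds, the writer's CAS from 0 to WRITER_BIT wins. */

	return 0;
}

Built with any C compiler, this prints that under the fairness semantics
neither the recursive read nor the write acquisition can ever proceed, while
with the restored semantics the recursive read path stays open and the writer
simply keeps retrying its compare-and-swap.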

Acked-by: Kleber Sacilotto de Souza <kleber.souza at canonical.com>

> ---
>  arch/s390/lib/spinlock.c | 48 +++++++++---------------------------------------
>  1 file changed, 9 insertions(+), 39 deletions(-)
> 
> diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
> index 427aa44..99da25a 100644
> --- a/arch/s390/lib/spinlock.c
> +++ b/arch/s390/lib/spinlock.c
> @@ -182,42 +182,18 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
>  EXPORT_SYMBOL(_raw_read_trylock_retry);
>  
>  #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
> -
>  void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
> -{
> -	unsigned int owner, old;
> -	int count = spin_retry;
> -
> -	owner = 0;
> -	while (1) {
> -		if (count-- <= 0) {
> -			if (owner && !smp_vcpu_scheduled(~owner))
> -				smp_yield_cpu(~owner);
> -			count = spin_retry;
> -		}
> -		old = ACCESS_ONCE(rw->lock);
> -		owner = ACCESS_ONCE(rw->owner);
> -		smp_mb();
> -		if ((int) old >= 0) {
> -			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
> -			old = prev;
> -		}
> -		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
> -			break;
> -		if (MACHINE_HAS_CAD)
> -			_raw_compare_and_delay(&rw->lock, old);
> -	}
> -}
> -EXPORT_SYMBOL(_raw_write_lock_wait);
> -
> -#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
> -
> +#else
>  void _raw_write_lock_wait(arch_rwlock_t *rw)
> +#endif
>  {
> -	unsigned int owner, old, prev;
> +	unsigned int owner, old;
>  	int count = spin_retry;
>  
> -	prev = 0x80000000;
> +#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
> +	if ((int) prev > 0)
> +		__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
> +#endif
>  	owner = 0;
>  	while (1) {
>  		if (count-- <= 0) {
> @@ -227,12 +203,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
>  		}
>  		old = ACCESS_ONCE(rw->lock);
>  		owner = ACCESS_ONCE(rw->owner);
> -		if ((int) old >= 0 &&
> -		    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
> -			prev = old;
> -		else
> -			smp_mb();
> -		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
> +		if (old == 0 &&
> +		    _raw_compare_and_swap(&rw->lock, 0, 0x80000000))
>  			break;
>  		if (MACHINE_HAS_CAD)
>  			_raw_compare_and_delay(&rw->lock, old);
> @@ -240,8 +212,6 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
>  }
>  EXPORT_SYMBOL(_raw_write_lock_wait);
>  
> -#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
> -
>  int _raw_write_trylock_retry(arch_rwlock_t *rw)
>  {
>  	unsigned int old;
> 



