ACK: [PATCH] [Natty SRU] UBUNTU: SAUCE: fix yama_ptracer_del lockdep warning(v1)

Stefan Bader stefan.bader at canonical.com
Tue Jul 5 08:57:36 UTC 2011


On 29.06.2011 19:19, Ming Lei wrote:
> From a3bea763772d6f0f672b76f26dd6152d6d40812f Mon Sep 17 00:00:00 2001
> From: Ming Lei <ming.lei at canonical.com>
> Date: Wed, 29 Jun 2011 19:06:25 +0800
> Subject: [PATCH] [Natty SRU] UBUNTU: SAUCE: fix yama_ptracer_del lockdep warning(v1)
> 
> yama_ptracer_del can be called in softirq context, so
> ptracer_relations_lock may be held in softirq context.
> 
> This patch replaces spin_[un]lock with spin_[un]lock_bh for
> &ptracer_relations_lock to fix reported lockdep warning and
> avoid a possible deadlock.
> 
> SRU Justification:
> 
> Impact:
> 	- lockdep warning is triggered if lockdep config options are
> 	  enabled
> 	- a deadlock could probably be produced in the yama_ptracer_del path
> 
> Fix:
> 	- After applying the patch, the lockdep warning is fixed
> 
> BugLink: http://bugs.launchpad.net/bugs/791019
> 
> Signed-off-by: Ming Lei <ming.lei at canonical.com>
> ---
>  security/yama/yama_lsm.c |   12 ++++++------
>  1 files changed, 6 insertions(+), 6 deletions(-)
> 
> diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
> index 7c3591a..b200e33 100644
> --- a/security/yama/yama_lsm.c
> +++ b/security/yama/yama_lsm.c
> @@ -46,7 +46,7 @@ static int yama_ptracer_add(struct task_struct *tracer,
>  	struct ptrace_relation *entry, *relation = NULL;
>  
>  	added = kmalloc(sizeof(*added), GFP_KERNEL);
> -	spin_lock(&ptracer_relations_lock);
> +	spin_lock_bh(&ptracer_relations_lock);
>  	list_for_each_entry(entry, &ptracer_relations, node)
>  		if (entry->tracee == tracee) {
>  			relation = entry;
> @@ -64,7 +64,7 @@ static int yama_ptracer_add(struct task_struct *tracer,
>  	relation->tracer = tracer;
>  
>  unlock_out:
> -	spin_unlock(&ptracer_relations_lock);
> +	spin_unlock_bh(&ptracer_relations_lock);
>  	if (added && added != relation)
>  		kfree(added);
>  
> @@ -82,7 +82,7 @@ static void yama_ptracer_del(struct task_struct *tracer,
>  	struct ptrace_relation *relation;
>  	struct list_head *list, *safe;
>  
> -	spin_lock(&ptracer_relations_lock);
> +	spin_lock_bh(&ptracer_relations_lock);
>  	list_for_each_safe(list, safe, &ptracer_relations) {
>  		relation = list_entry(list, struct ptrace_relation, node);
>  		if (relation->tracee == tracee ||
> @@ -91,7 +91,7 @@ static void yama_ptracer_del(struct task_struct *tracer,
>  			kfree(relation);
>  		}
>  	}
> -	spin_unlock(&ptracer_relations_lock);
> +	spin_unlock_bh(&ptracer_relations_lock);
>  }
>  
>  /**
> @@ -205,7 +205,7 @@ static int ptracer_exception_found(struct task_struct *tracer,
>  	struct ptrace_relation *relation;
>  	struct task_struct *parent = NULL;
>  
> -	spin_lock(&ptracer_relations_lock);
> +	spin_lock_bh(&ptracer_relations_lock);
>  
>  	rcu_read_lock();
>  	read_lock(&tasklist_lock);
> @@ -221,7 +221,7 @@ static int ptracer_exception_found(struct task_struct *tracer,
>  
>  	if (task_is_descendant(parent, tracer))
>  		rc = 1;
> -	spin_unlock(&ptracer_relations_lock);
> +	spin_unlock_bh(&ptracer_relations_lock);
>  
>  	return rc;
>  }

Acked-by: Stefan Bader <stefan.bader at canonical.com>




More information about the kernel-team mailing list