[CVE-2015-7613][Vivid] Initialize msg/shm IPC objects before doing ipc_addid()

John Johansen john.johansen at canonical.com
Fri Oct 2 08:48:08 UTC 2015


On 10/02/2015 01:44 AM, Luis Henriques wrote:
> From: Linus Torvalds <torvalds at linux-foundation.org>
> 
> As reported by Dmitry Vyukov, we really shouldn't do ipc_addid() before
> having initialized the IPC object state.  Yes, we initialize the IPC
> object in a locked state, but with all the lockless RCU lookup work,
> that IPC object lock no longer means that the state cannot be seen.
> 
> We already did this for the IPC semaphore code (see commit e8577d1f0329:
> "ipc/sem.c: fully initialize sem_array before making it visible") but we
> clearly forgot about msg and shm.
> 
> Reported-by: Dmitry Vyukov <dvyukov at google.com>
> Cc: Manfred Spraul <manfred at colorfullife.com>
> Cc: Davidlohr Bueso <dbueso at suse.de>
> Cc: stable at vger.kernel.org
> Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
> (cherry picked from commit b9a532277938798b53178d5a66af6e2915cb27cf)
> BugLink: https://bugs.launchpad.net/bugs/1502032
> CVE-2015-7613
> Signed-off-by: Luis Henriques <luis.henriques at canonical.com>
Acked-by: John Johansen <john.johansen at canonical.com>
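
The ordering problem is easy to see in a stripped-down form. Here is a
minimal userspace sketch of the rule the patch enforces (my own
illustration, not kernel code; struct msq_like, publish_then_init(),
init_then_publish() and the field values are all invented): once an
object is reachable by lockless readers, every field a reader may
inspect must already be valid, so initialization has to happen before
publication.

/*
 * Userspace sketch only: shows why "publish, then initialize" is broken
 * once readers can find the object without taking its lock, and what
 * the safe ordering looks like.  struct msq_like and the helpers below
 * are invented for illustration.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct msq_like {
	long q_ctime;
	long q_cbytes;
};

/* Readers find the object through this pointer without any lock,
 * analogous to the RCU-protected idr lookup on the ipc side. */
static _Atomic(struct msq_like *) visible_obj;

/* Broken ordering (what newque()/newseg() used to do conceptually):
 * the object is reachable before its fields are set up, so a lockless
 * reader can observe uninitialized state. */
static void publish_then_init(struct msq_like *m)
{
	atomic_store_explicit(&visible_obj, m, memory_order_release);
	m->q_ctime = 12345;
	m->q_cbytes = 0;
}

/* Safe ordering (what the patch enforces): fully initialize first,
 * then make the object visible with release semantics. */
static void init_then_publish(struct msq_like *m)
{
	m->q_ctime = 12345;
	m->q_cbytes = 0;
	atomic_store_explicit(&visible_obj, m, memory_order_release);
}

/* A lockless reader. */
static void lookup(void)
{
	struct msq_like *m =
		atomic_load_explicit(&visible_obj, memory_order_acquire);

	if (m)
		printf("ctime=%ld cbytes=%ld\n", m->q_ctime, m->q_cbytes);
}

int main(void)
{
	struct msq_like *a = calloc(1, sizeof(*a));
	struct msq_like *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;

	/* Single-threaded driver just so the sketch compiles and runs;
	 * the ordering only matters once another thread calls lookup()
	 * concurrently with the publish. */
	publish_then_init(a);	/* the racy pattern being removed */
	lookup();
	init_then_publish(b);	/* the safe pattern being enforced */
	lookup();

	free(a);
	free(b);
	return 0;
}

The msg.c and shm.c hunks below are exactly this reordering, and the
util.c hunk moves the euid/egid assignment ahead of idr_alloc() for the
same reason.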

> ---
>  ipc/msg.c  | 14 +++++++-------
>  ipc/shm.c  | 13 +++++++------
>  ipc/util.c |  8 ++++----
>  3 files changed, 18 insertions(+), 17 deletions(-)
> 
> diff --git a/ipc/msg.c b/ipc/msg.c
> index a7261d5cbc89..1b8f36fa7a5f 100644
> --- a/ipc/msg.c
> +++ b/ipc/msg.c
> @@ -137,13 +137,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
>  		return retval;
>  	}
>  
> -	/* ipc_addid() locks msq upon success. */
> -	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
> -	if (id < 0) {
> -		ipc_rcu_putref(msq, msg_rcu_free);
> -		return id;
> -	}
> -
>  	msq->q_stime = msq->q_rtime = 0;
>  	msq->q_ctime = get_seconds();
>  	msq->q_cbytes = msq->q_qnum = 0;
> @@ -153,6 +146,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
>  	INIT_LIST_HEAD(&msq->q_receivers);
>  	INIT_LIST_HEAD(&msq->q_senders);
>  
> +	/* ipc_addid() locks msq upon success. */
> +	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
> +	if (id < 0) {
> +		ipc_rcu_putref(msq, msg_rcu_free);
> +		return id;
> +	}
> +
>  	ipc_unlock_object(&msq->q_perm);
>  	rcu_read_unlock();
>  
> diff --git a/ipc/shm.c b/ipc/shm.c
> index 19633b4a2350..dd5f68640771 100644
> --- a/ipc/shm.c
> +++ b/ipc/shm.c
> @@ -550,12 +550,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
>  	if (IS_ERR(file))
>  		goto no_file;
>  
> -	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
> -	if (id < 0) {
> -		error = id;
> -		goto no_id;
> -	}
> -
>  	shp->shm_cprid = task_tgid_vnr(current);
>  	shp->shm_lprid = 0;
>  	shp->shm_atim = shp->shm_dtim = 0;
> @@ -564,6 +558,13 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
>  	shp->shm_nattch = 0;
>  	shp->shm_file = file;
>  	shp->shm_creator = current;
> +
> +	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
> +	if (id < 0) {
> +		error = id;
> +		goto no_id;
> +	}
> +
>  	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
>  
>  	/*
> diff --git a/ipc/util.c b/ipc/util.c
> index 106bed0378ab..372dd6eca393 100644
> --- a/ipc/util.c
> +++ b/ipc/util.c
> @@ -237,6 +237,10 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
>  	rcu_read_lock();
>  	spin_lock(&new->lock);
>  
> +	current_euid_egid(&euid, &egid);
> +	new->cuid = new->uid = euid;
> +	new->gid = new->cgid = egid;
> +
>  	id = idr_alloc(&ids->ipcs_idr, new,
>  		       (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
>  		       GFP_NOWAIT);
> @@ -249,10 +253,6 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
>  
>  	ids->in_use++;
>  
> -	current_euid_egid(&euid, &egid);
> -	new->cuid = new->uid = euid;
> -	new->gid = new->cgid = egid;
> -
>  	if (next_id < 0) {
>  		new->seq = ids->seq++;
>  		if (ids->seq > IPCID_SEQ_MAX)
> 