[PATCH 01/11] net: add limit for socket backlog CVE-2010-4251

Paolo Pisati paolo.pisati at canonical.com
Mon Jul 11 08:17:38 UTC 2011


From: Zhu Yi <yi.zhu at intel.com>

BugLink: http://bugs.launchpad.net/bugs/807462

upstream commit 8eae939f1400326b06d0c9afe53d2a484a326871

We got a system OOM while running some UDP netperf tests over the
loopback device. The scenario is multiple senders sending a stream of
UDP packets to a single receiver via loopback on the local host.
Naturally, the receiver is not able to handle all the packets in time.
But we were surprised to find that these packets were not discarded
due to the receiver's sk->sk_rcvbuf limit. Instead, they kept queuing
up on sk->sk_backlog and eventually ate up all the memory. We believe
this is a security hole that allows a non-privileged user to crash the
system.
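
A minimal userspace sender illustrates the scenario (a hypothetical
sketch, not part of the patch; the port number and payload size are
arbitrary). Run several copies in parallel while a single slow
receiver reads from the same port on localhost:

	/* flood.c - blast UDP datagrams at a loopback receiver */
	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <string.h>
	#include <sys/socket.h>

	int main(void)
	{
		char buf[1024] = { 0 };		/* arbitrary payload */
		struct sockaddr_in dst;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&dst, 0, sizeof(dst));
		dst.sin_family = AF_INET;
		dst.sin_port = htons(9999);	/* assumed receiver port */
		dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

		for (;;)	/* send as fast as the CPU allows */
			sendto(fd, buf, sizeof(buf), 0,
			       (struct sockaddr *)&dst, sizeof(dst));
	}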

The root cause of this problem is that when the receiver is running
__release_sock() (i.e. after a userspace recv, kernel udp_recvmsg ->
skb_free_datagram_locked -> release_sock), it moves skbs from the
backlog to sk_receive_queue with softirqs enabled. In the above case,
multiple busy senders almost turn this into an endless loop, and the
skbs in the backlog end up eating all the system memory.
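
For context, the pre-patch drain loop in net/core/sock.c looks roughly
like this (an abridged sketch of __release_sock(), not verbatim
source). The inner loop runs with the socket spinlock dropped, so
softirq producers can refill sk_backlog.head faster than the receiver
drains it, and the outer loop never terminates:

	static void __release_sock(struct sock *sk)
	{
		struct sk_buff *skb = sk->sk_backlog.head;

		do {
			/* take the current backlog private ... */
			sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
			bh_unlock_sock(sk);

			do {
				struct sk_buff *next = skb->next;

				skb->next = NULL;
				sk_backlog_rcv(sk, skb);
				skb = next;
			} while (skb != NULL);

			bh_lock_sock(sk);
			/* ... but softirqs may have queued more by now */
		} while ((skb = sk->sk_backlog.head) != NULL);
	}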

The issue is not limited to UDP: any protocol that uses the socket
backlog is potentially affected. This patch adds a limit on the socket
backlog so that its size cannot grow endlessly.
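
sk_receive_skb() is converted in the diff below; per-protocol receive
paths (converted by the follow-up patches in this series) end up with
the same shape, roughly as follows (a sketch; the discard label is
illustrative). The backlog is capped at max(sk->sk_backlog.limit,
sk->sk_rcvbuf << 1), i.e. at least twice the receive buffer:

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		rc = sk_backlog_rcv(sk, skb);	/* owner absent: process now */
	} else if (sk_add_backlog_limited(sk, skb)) {
		/* backlog full: unlock, count the drop, free the skb */
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;		/* illustrative label */
	}
	bh_unlock_sock(sk);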

CVE-2010-4251

Reported-by: Alex Shi <alex.shi at intel.com>
Cc: David Miller <davem at davemloft.net>
Cc: Arnaldo Carvalho de Melo <acme at ghostprotocols.net>
Cc: Alexey Kuznetsov <kuznet at ms2.inr.ac.ru>
Cc: "Pekka Savola (ipv6)" <pekkas at netcore.fi>
Cc: Patrick McHardy <kaber at trash.net>
Cc: Vlad Yasevich <vladislav.yasevich at hp.com>
Cc: Sridhar Samudrala <sri at us.ibm.com>
Cc: Jon Maloy <jon.maloy at ericsson.com>
Cc: Allan Stephens <allan.stephens at windriver.com>
Cc: Andrew Hendry <andrew.hendry at gmail.com>
Signed-off-by: Zhu Yi <yi.zhu at intel.com>
Signed-off-by: Eric Dumazet <eric.dumazet at gmail.com>
Acked-by: Arnaldo Carvalho de Melo <acme at redhat.com>
Signed-off-by: David S. Miller <davem at davemloft.net>
Signed-off-by: Paolo Pisati <paolo.pisati at canonical.com>
---
 include/net/sock.h |   15 ++++++++++++++-
 net/core/sock.c    |   16 ++++++++++++++--
 2 files changed, 28 insertions(+), 3 deletions(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index 9f96394..5a4844a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -242,6 +242,8 @@ struct sock {
 	struct {
 		struct sk_buff *head;
 		struct sk_buff *tail;
+		int len;
+		int limit;
 	} sk_backlog;
 	wait_queue_head_t	*sk_sleep;
 	struct dst_entry	*sk_dst_cache;
@@ -561,7 +563,7 @@ static inline int sk_stream_memory_free(struct sock *sk)
 	return sk->sk_wmem_queued < sk->sk_sndbuf;
 }
 
-/* The per-socket spinlock must be held here. */
+/* OOB backlog add */
 static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
 	if (!sk->sk_backlog.tail) {
@@ -573,6 +575,17 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 	skb->next = NULL;
 }
 
+/* The per-socket spinlock must be held here. */
+static inline int sk_add_backlog_limited(struct sock *sk, struct sk_buff *skb)
+{
+	if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
+		return -ENOBUFS;
+
+	sk_add_backlog(sk, skb);
+	sk->sk_backlog.len += skb->truesize;
+	return 0;
+}
+
 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	return sk->sk_backlog_rcv(sk, skb);
diff --git a/net/core/sock.c b/net/core/sock.c
index 6605e75..5797dab 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -336,8 +336,12 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 		rc = sk_backlog_rcv(sk, skb);
 
 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-	} else
-		sk_add_backlog(sk, skb);
+	} else if (sk_add_backlog_limited(sk, skb)) {
+		bh_unlock_sock(sk);
+		atomic_inc(&sk->sk_drops);
+		goto discard_and_relse;
+	}
+
 	bh_unlock_sock(sk);
 out:
 	sock_put(sk);
@@ -1114,6 +1118,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 		sock_lock_init(newsk);
 		bh_lock_sock(newsk);
 		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
+		newsk->sk_backlog.len = 0;
 
 		atomic_set(&newsk->sk_rmem_alloc, 0);
 		/*
@@ -1517,6 +1522,12 @@ static void __release_sock(struct sock *sk)
 
 		bh_lock_sock(sk);
 	} while ((skb = sk->sk_backlog.head) != NULL);
+
+	/*
+	 * Doing the zeroing here guarantees we cannot loop forever
+	 * while a wild producer attempts to flood us.
+	 */
+	sk->sk_backlog.len = 0;
 }
 
 /**
@@ -1849,6 +1860,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	sk->sk_allocation	=	GFP_KERNEL;
 	sk->sk_rcvbuf		=	sysctl_rmem_default;
 	sk->sk_sndbuf		=	sysctl_wmem_default;
+	sk->sk_backlog.limit	=	sk->sk_rcvbuf << 1;
 	sk->sk_state		=	TCP_CLOSE;
 	sk_set_socket(sk, sock);
 
-- 
1.7.5.3