[PATCH 12/13] bpf: Don't refcount LISTEN sockets in sk_assign()
Kleber Souza
kleber.souza at canonical.com
Thu Sep 3 10:58:12 UTC 2020
On 31.08.20 06:03, Khalid Elmously wrote:
> From: Joe Stringer <joe at wand.net.nz>
This patch is missing:
BugLink: https://bugs.launchpad.net/bugs/1887740
>
> [ upstream commit 7ae215d23c12a939005f35d1848ca55b6109b9c0 ]
>
> Avoid taking a reference on listen sockets by checking the socket type
> in the sk_assign and in the corresponding skb_steal_sock() code in the
> transport layer, and by ensuring that the prefetch free (sock_pfree)
> function uses the same logic to check whether the socket is refcounted.
>
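For anyone following along on the list: the use case here is a tc/BPF program
that looks up a local listening socket and assigns it to the skb. A rough
sketch of that flow below (my own illustration, not part of this patch; the
program/section names are made up, helper usage is my reading of the upstream
bpf-helpers documentation). With this change, bpf_sk_assign() no longer takes
a reference when the looked-up socket is a TCP_LISTEN socket.

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int assign_to_listener(struct __sk_buff *skb)
{
        struct bpf_sock_tuple tuple = {};
        struct bpf_sock *sk;
        long err;

        /* Tuple (addresses/ports) would be filled from the packet here. */
        sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
                               BPF_F_CURRENT_NETNS, 0);
        if (!sk)
                return TC_ACT_SHOT;

        /* After this patch, no reference is taken for TCP_LISTEN sockets. */
        err = bpf_sk_assign(skb, sk, 0);
        bpf_sk_release(sk);

        return err ? TC_ACT_SHOT : TC_ACT_OK;
}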
> Suggested-by: Martin KaFai Lau <kafai at fb.com>
> Signed-off-by: Joe Stringer <joe at wand.net.nz>
> Signed-off-by: Alexei Starovoitov <ast at kernel.org>
> Acked-by: Martin KaFai Lau <kafai at fb.com>
> Link: https://lore.kernel.org/bpf/20200329225342.16317-4-joe@wand.net.nz
> Signed-off-by: Daniel Borkmann <daniel at iogearbox.net>
> Signed-off-by: Khalid Elmously <khalid.elmously at canonical.com>
> ---
>  include/net/sock.h | 25 +++++++++++++++++--------
>  net/core/filter.c  |  6 +++---
>  net/core/sock.c    |  3 ++-
> 3 files changed, 22 insertions(+), 12 deletions(-)
>
> diff --git a/include/net/sock.h b/include/net/sock.h
> index 6cb1f0efa01b..e13e9b7c34d1 100644
> --- a/include/net/sock.h
> +++ b/include/net/sock.h
> @@ -2492,6 +2492,21 @@ skb_sk_is_prefetched(struct sk_buff *skb)
> #endif /* CONFIG_INET */
> }
>
> +/* This helper checks if a socket is a full socket,
> + * ie _not_ a timewait or request socket.
> + */
> +static inline bool sk_fullsock(const struct sock *sk)
> +{
> + return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
> +}
> +
> +static inline bool
> +sk_is_refcounted(struct sock *sk)
> +{
> + /* Only full sockets have sk->sk_flags. */
> + return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
> +}
> +
> /**
> * skb_steal_sock
> * @skb to steal the socket from
> @@ -2504,6 +2519,8 @@ skb_steal_sock(struct sk_buff *skb, bool *refcounted)
> struct sock *sk = skb->sk;
>
> *refcounted = true;
> + if (skb_sk_is_prefetched(skb))
> + *refcounted = sk_is_refcounted(sk);
> skb->destructor = NULL;
> skb->sk = NULL;
> return sk;
> @@ -2512,14 +2529,6 @@ skb_steal_sock(struct sk_buff *skb, bool *refcounted)
> return NULL;
> }
>
> -/* This helper checks if a socket is a full socket,
> - * ie _not_ a timewait or request socket.
> - */
> -static inline bool sk_fullsock(const struct sock *sk)
> -{
> - return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
> -}
> -
> /* Checks if this SKB belongs to an HW offloaded socket
> * and whether any SW fallbacks are required based on dev.
> * Check decrypted mark in case skb_orphan() cleared socket.
> diff --git a/net/core/filter.c b/net/core/filter.c
> index 4033799dafd5..baaeab75327c 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -5468,8 +5468,7 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
>
> BPF_CALL_1(bpf_sk_release, struct sock *, sk)
> {
> - /* Only full sockets have sk->sk_flags. */
> - if (!sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE))
> + if (sk_is_refcounted(sk))
> sock_gen_put(sk);
> return 0;
> }
> @@ -5995,7 +5994,8 @@ BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags)
> return -ENETUNREACH;
> if (unlikely(sk->sk_reuseport))
> return -ESOCKTNOSUPPORT;
> - if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
> + if (sk_is_refcounted(sk) &&
> + unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
> return -ENOENT;
>
> skb_orphan(skb);
> diff --git a/net/core/sock.c b/net/core/sock.c
> index 036ba43d044a..bcb7affcc1e0 100644
> --- a/net/core/sock.c
> +++ b/net/core/sock.c
> @@ -2071,7 +2071,8 @@ EXPORT_SYMBOL(sock_efree);
> #ifdef CONFIG_INET
> void sock_pfree(struct sk_buff *skb)
> {
> - sock_gen_put(skb->sk);
> + if (sk_is_refcounted(skb->sk))
> + sock_gen_put(skb->sk);
> }
> EXPORT_SYMBOL(sock_pfree);
> #endif /* CONFIG_INET */
>
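To spell out why the three hunks have to agree, here is a simplified sketch of
the bpf_sk_assign()/sock_pfree() pairing (my own summary, not the literal
kernel code): a reference is only taken when sk_is_refcounted(), sock_pfree()
is installed as the skb destructor for the prefetched socket, and
skb_steal_sock()/sock_pfree() must make the same sk_is_refcounted() decision
so the count stays balanced for both full sockets and RCU-freed listeners.

#include <linux/skbuff.h>
#include <net/sock.h>

/* Simplified sketch (not the real bpf_sk_assign).  A TCP_LISTEN socket has
 * SOCK_RCU_FREE set, so no reference is taken here and sock_pfree() must
 * likewise skip the put; timewait/request minisocks remain reference-counted
 * and are released via sock_gen_put().
 */
static int assign_sketch(struct sk_buff *skb, struct sock *sk)
{
        if (sk_is_refcounted(sk) &&
            !refcount_inc_not_zero(&sk->sk_refcnt))
                return -ENOENT;

        skb_orphan(skb);
        skb->sk = sk;                   /* marks the socket as prefetched */
        skb->destructor = sock_pfree;   /* put only if a ref was taken */

        return 0;
}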