[SRU][Focal][PATCH] Revert "bpf: Zero-fill re-used per-cpu map element"

Kleber Souza kleber.souza at canonical.com
Mon Dec 7 13:50:12 UTC 2020


On 07.12.20 13:24, Thadeu Lima de Souza Cascardo wrote:
> On Fri, Dec 04, 2020 at 10:47:57AM -0800, Kamal Mostafa wrote:
>> BugLink: https://bugs.launchpad.net/bugs/1906866
>>
>> This reverts commit d946d4ddd6af531398201e7ce8f73bd1d8a98e2a.
>>
>> Reported upstream:
>>   https://lore.kernel.org/stable/20201204182846.27110-1-kamal@canonical.com/T/#u
>>
>> This v5.4.78 commit breaks the tools/testing/selftests/bpf build:
>>
>> [linux-5.4.y] c602ad2b52dc bpf: Zero-fill re-used per-cpu map element
>> [focal] d946d4ddd6af bpf: Zero-fill re-used per-cpu map element
>>
>> Like this:
>>
>>          prog_tests/map_init.c:5:10: fatal error: test_map_init.skel.h:
>> No such file or directory
>>              5 | #include "test_map_init.skel.h"
>>
>> Because tools/testing/selftests/bpf/Makefile in v5.4 does not have the
>> "skeleton header generation" stuff (circa v5.6).
>>
> 
> Is this just because of the new test? Why would it cause any existing tests to
> fail? It's unfortunate that the commit puts the kernel change and the new test
> together, because the kernel change seems legit and desirable from what I could
> see.

It breaks the build of the bpf selftests, which are also needed for the net tests.

> 
> Cascardo.
> 
>> Signed-off-by: Kamal Mostafa <kamal at canonical.com>
>> ---
>>   kernel/bpf/hashtab.c                          |  30 +--
>>   .../selftests/bpf/prog_tests/map_init.c       | 214 ------------------
>>   .../selftests/bpf/progs/test_map_init.c       |  33 ---
>>   3 files changed, 2 insertions(+), 275 deletions(-)
>>   delete mode 100644 tools/testing/selftests/bpf/prog_tests/map_init.c
>>   delete mode 100644 tools/testing/selftests/bpf/progs/test_map_init.c
>>
>> diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
>> index 03a67583f6fb..728ffec52cf3 100644
>> --- a/kernel/bpf/hashtab.c
>> +++ b/kernel/bpf/hashtab.c
>> @@ -709,32 +709,6 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
>>   	}
>>   }
>>   
>> -static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
>> -			    void *value, bool onallcpus)
>> -{
>> -	/* When using prealloc and not setting the initial value on all cpus,
>> -	 * zero-fill element values for other cpus (just as what happens when
>> -	 * not using prealloc). Otherwise, bpf program has no way to ensure
>> -	 * known initial values for cpus other than current one
>> -	 * (onallcpus=false always when coming from bpf prog).
>> -	 */
>> -	if (htab_is_prealloc(htab) && !onallcpus) {
>> -		u32 size = round_up(htab->map.value_size, 8);
>> -		int current_cpu = raw_smp_processor_id();
>> -		int cpu;
>> -
>> -		for_each_possible_cpu(cpu) {
>> -			if (cpu == current_cpu)
>> -				bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value,
>> -						size);
>> -			else
>> -				memset(per_cpu_ptr(pptr, cpu), 0, size);
>> -		}
>> -	} else {
>> -		pcpu_copy_value(htab, pptr, value, onallcpus);
>> -	}
>> -}
>> -
>>   static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
>>   {
>>   	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
>> @@ -805,7 +779,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
>>   			}
>>   		}
>>   
>> -		pcpu_init_value(htab, pptr, value, onallcpus);
>> +		pcpu_copy_value(htab, pptr, value, onallcpus);
>>   
>>   		if (!prealloc)
>>   			htab_elem_set_ptr(l_new, key_size, pptr);
>> @@ -1101,7 +1075,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
>>   		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
>>   				value, onallcpus);
>>   	} else {
>> -		pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
>> +		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
>>   				value, onallcpus);
>>   		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
>>   		l_new = NULL;
>> diff --git a/tools/testing/selftests/bpf/prog_tests/map_init.c b/tools/testing/selftests/bpf/prog_tests/map_init.c
>> deleted file mode 100644
>> index 14a31109dd0e..000000000000
>> --- a/tools/testing/selftests/bpf/prog_tests/map_init.c
>> +++ /dev/null
>> @@ -1,214 +0,0 @@
>> -// SPDX-License-Identifier: GPL-2.0-only
>> -/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */
>> -
>> -#include <test_progs.h>
>> -#include "test_map_init.skel.h"
>> -
>> -#define TEST_VALUE 0x1234
>> -#define FILL_VALUE 0xdeadbeef
>> -
>> -static int nr_cpus;
>> -static int duration;
>> -
>> -typedef unsigned long long map_key_t;
>> -typedef unsigned long long map_value_t;
>> -typedef struct {
>> -	map_value_t v; /* padding */
>> -} __bpf_percpu_val_align pcpu_map_value_t;
>> -
>> -
>> -static int map_populate(int map_fd, int num)
>> -{
>> -	pcpu_map_value_t value[nr_cpus];
>> -	int i, err;
>> -	map_key_t key;
>> -
>> -	for (i = 0; i < nr_cpus; i++)
>> -		bpf_percpu(value, i) = FILL_VALUE;
>> -
>> -	for (key = 1; key <= num; key++) {
>> -		err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST);
>> -		if (!ASSERT_OK(err, "bpf_map_update_elem"))
>> -			return -1;
>> -	}
>> -
>> -	return 0;
>> -}
>> -
>> -static struct test_map_init *setup(enum bpf_map_type map_type, int map_sz,
>> -			    int *map_fd, int populate)
>> -{
>> -	struct test_map_init *skel;
>> -	int err;
>> -
>> -	skel = test_map_init__open();
>> -	if (!ASSERT_OK_PTR(skel, "skel_open"))
>> -		return NULL;
>> -
>> -	err = bpf_map__set_type(skel->maps.hashmap1, map_type);
>> -	if (!ASSERT_OK(err, "bpf_map__set_type"))
>> -		goto error;
>> -
>> -	err = bpf_map__set_max_entries(skel->maps.hashmap1, map_sz);
>> -	if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
>> -		goto error;
>> -
>> -	err = test_map_init__load(skel);
>> -	if (!ASSERT_OK(err, "skel_load"))
>> -		goto error;
>> -
>> -	*map_fd = bpf_map__fd(skel->maps.hashmap1);
>> -	if (CHECK(*map_fd < 0, "bpf_map__fd", "failed\n"))
>> -		goto error;
>> -
>> -	err = map_populate(*map_fd, populate);
>> -	if (!ASSERT_OK(err, "map_populate"))
>> -		goto error_map;
>> -
>> -	return skel;
>> -
>> -error_map:
>> -	close(*map_fd);
>> -error:
>> -	test_map_init__destroy(skel);
>> -	return NULL;
>> -}
>> -
>> -/* executes bpf program that updates map with key, value */
>> -static int prog_run_insert_elem(struct test_map_init *skel, map_key_t key,
>> -				map_value_t value)
>> -{
>> -	struct test_map_init__bss *bss;
>> -
>> -	bss = skel->bss;
>> -
>> -	bss->inKey = key;
>> -	bss->inValue = value;
>> -	bss->inPid = getpid();
>> -
>> -	if (!ASSERT_OK(test_map_init__attach(skel), "skel_attach"))
>> -		return -1;
>> -
>> -	/* Let tracepoint trigger */
>> -	syscall(__NR_getpgid);
>> -
>> -	test_map_init__detach(skel);
>> -
>> -	return 0;
>> -}
>> -
>> -static int check_values_one_cpu(pcpu_map_value_t *value, map_value_t expected)
>> -{
>> -	int i, nzCnt = 0;
>> -	map_value_t val;
>> -
>> -	for (i = 0; i < nr_cpus; i++) {
>> -		val = bpf_percpu(value, i);
>> -		if (val) {
>> -			if (CHECK(val != expected, "map value",
>> -				  "unexpected for cpu %d: 0x%llx\n", i, val))
>> -				return -1;
>> -			nzCnt++;
>> -		}
>> -	}
>> -
>> -	if (CHECK(nzCnt != 1, "map value", "set for %d CPUs instead of 1!\n",
>> -		  nzCnt))
>> -		return -1;
>> -
>> -	return 0;
>> -}
>> -
>> -/* Add key=1 elem with values set for all CPUs
>> - * Delete elem key=1
>> - * Run bpf prog that inserts new key=1 elem with value=0x1234
>> - *   (bpf prog can only set value for current CPU)
>> - * Lookup Key=1 and check value is as expected for all CPUs:
>> - *   value set by bpf prog for one CPU, 0 for all others
>> - */
>> -static void test_pcpu_map_init(void)
>> -{
>> -	pcpu_map_value_t value[nr_cpus];
>> -	struct test_map_init *skel;
>> -	int map_fd, err;
>> -	map_key_t key;
>> -
>> -	/* max 1 elem in map so insertion is forced to reuse freed entry */
>> -	skel = setup(BPF_MAP_TYPE_PERCPU_HASH, 1, &map_fd, 1);
>> -	if (!ASSERT_OK_PTR(skel, "prog_setup"))
>> -		return;
>> -
>> -	/* delete element so the entry can be re-used*/
>> -	key = 1;
>> -	err = bpf_map_delete_elem(map_fd, &key);
>> -	if (!ASSERT_OK(err, "bpf_map_delete_elem"))
>> -		goto cleanup;
>> -
>> -	/* run bpf prog that inserts new elem, re-using the slot just freed */
>> -	err = prog_run_insert_elem(skel, key, TEST_VALUE);
>> -	if (!ASSERT_OK(err, "prog_run_insert_elem"))
>> -		goto cleanup;
>> -
>> -	/* check that key=1 was re-created by bpf prog */
>> -	err = bpf_map_lookup_elem(map_fd, &key, value);
>> -	if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
>> -		goto cleanup;
>> -
>> -	/* and has expected values */
>> -	check_values_one_cpu(value, TEST_VALUE);
>> -
>> -cleanup:
>> -	test_map_init__destroy(skel);
>> -}
>> -
>> -/* Add key=1 and key=2 elems with values set for all CPUs
>> - * Run bpf prog that inserts new key=3 elem
>> - *   (only for current cpu; other cpus should have initial value = 0)
>> - * Lookup Key=1 and check value is as expected for all CPUs
>> - */
>> -static void test_pcpu_lru_map_init(void)
>> -{
>> -	pcpu_map_value_t value[nr_cpus];
>> -	struct test_map_init *skel;
>> -	int map_fd, err;
>> -	map_key_t key;
>> -
>> -	/* Set up LRU map with 2 elements, values filled for all CPUs.
>> -	 * With these 2 elements, the LRU map is full
>> -	 */
>> -	skel = setup(BPF_MAP_TYPE_LRU_PERCPU_HASH, 2, &map_fd, 2);
>> -	if (!ASSERT_OK_PTR(skel, "prog_setup"))
>> -		return;
>> -
>> -	/* run bpf prog that inserts new key=3 element, re-using LRU slot */
>> -	key = 3;
>> -	err = prog_run_insert_elem(skel, key, TEST_VALUE);
>> -	if (!ASSERT_OK(err, "prog_run_insert_elem"))
>> -		goto cleanup;
>> -
>> -	/* check that key=3 replaced one of earlier elements */
>> -	err = bpf_map_lookup_elem(map_fd, &key, value);
>> -	if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
>> -		goto cleanup;
>> -
>> -	/* and has expected values */
>> -	check_values_one_cpu(value, TEST_VALUE);
>> -
>> -cleanup:
>> -	test_map_init__destroy(skel);
>> -}
>> -
>> -void test_map_init(void)
>> -{
>> -	nr_cpus = bpf_num_possible_cpus();
>> -	if (nr_cpus <= 1) {
>> -		printf("%s:SKIP: >1 cpu needed for this test\n", __func__);
>> -		test__skip();
>> -		return;
>> -	}
>> -
>> -	if (test__start_subtest("pcpu_map_init"))
>> -		test_pcpu_map_init();
>> -	if (test__start_subtest("pcpu_lru_map_init"))
>> -		test_pcpu_lru_map_init();
>> -}
>> diff --git a/tools/testing/selftests/bpf/progs/test_map_init.c b/tools/testing/selftests/bpf/progs/test_map_init.c
>> deleted file mode 100644
>> index c89d28ead673..000000000000
>> --- a/tools/testing/selftests/bpf/progs/test_map_init.c
>> +++ /dev/null
>> @@ -1,33 +0,0 @@
>> -// SPDX-License-Identifier: GPL-2.0
>> -/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */
>> -
>> -#include "vmlinux.h"
>> -#include <bpf/bpf_helpers.h>
>> -
>> -__u64 inKey = 0;
>> -__u64 inValue = 0;
>> -__u32 inPid = 0;
>> -
>> -struct {
>> -	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
>> -	__uint(max_entries, 2);
>> -	__type(key, __u64);
>> -	__type(value, __u64);
>> -} hashmap1 SEC(".maps");
>> -
>> -
>> -SEC("tp/syscalls/sys_enter_getpgid")
>> -int sysenter_getpgid(const void *ctx)
>> -{
>> -	/* Just do it for once, when called from our own test prog. This
>> -	 * ensures the map value is only updated for a single CPU.
>> -	 */
>> -	int cur_pid = bpf_get_current_pid_tgid() >> 32;
>> -
>> -	if (cur_pid == inPid)
>> -		bpf_map_update_elem(&hashmap1, &inKey, &inValue, BPF_NOEXIST);
>> -
>> -	return 0;
>> -}
>> -
>> -char _license[] SEC("license") = "GPL";
>> -- 
>> 2.17.1
>>
>>
>> -- 
>> kernel-team mailing list
>> kernel-team at lists.ubuntu.com
>> https://lists.ubuntu.com/mailman/listinfo/kernel-team
> 




More information about the kernel-team mailing list