Message-ID: <20201125030119.2864302-29-guro@fb.com>
Date: Tue, 24 Nov 2020 19:01:13 -0800
From: Roman Gushchin <guro@...com>
To: <bpf@...r.kernel.org>
CC: <ast@...nel.org>, <daniel@...earbox.net>, <netdev@...r.kernel.org>,
<andrii@...nel.org>, <akpm@...ux-foundation.org>,
<linux-mm@...ck.org>, <linux-kernel@...r.kernel.org>,
<kernel-team@...com>
Subject: [PATCH bpf-next v8 28/34] bpf: eliminate rlimit-based memory accounting for sockmap and sockhash maps

Do not use rlimit-based memory accounting for sockmap and sockhash maps.
It has been replaced with memcg-based memory accounting.

Signed-off-by: Roman Gushchin <guro@...com>
Acked-by: Song Liu <songliubraving@...com>
---
net/core/sock_map.c | 33 ++++++---------------------------
1 file changed, 6 insertions(+), 27 deletions(-)
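
With memcg-based accounting, map memory is charged to the memory cgroup of
the task creating the map at allocation time, typically by passing
__GFP_ACCOUNT to the allocator, so the explicit bpf_map_charge_init() /
bpf_map_charge_finish() bookkeeping removed below is no longer needed. A
minimal sketch of the idea (illustrative only: example_map_area_alloc() is a
made-up helper, not the actual bpf_map_area_alloc() implementation):

	/* Sketch: __GFP_ACCOUNT makes the allocator charge this memory to
	 * the memory cgroup of the current task, so no manual
	 * RLIMIT_MEMLOCK-based page accounting is required.
	 */
	static void *example_map_area_alloc(size_t size, int numa_node)
	{
		return kvmalloc_node(size, GFP_KERNEL | __GFP_ACCOUNT,
				     numa_node);
	}

The practical effect is that map memory is bounded by the creator's cgroup
limits rather than by a per-user RLIMIT_MEMLOCK page budget.
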
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 3ff635af737a..310e57f65a00 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -27,8 +27,6 @@ struct bpf_stab {
 static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_stab *stab;
-	u64 cost;
-	int err;
 
 	if (!capable(CAP_NET_ADMIN))
 		return ERR_PTR(-EPERM);
@@ -46,22 +44,15 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 	bpf_map_init_from_attr(&stab->map, attr);
 	raw_spin_lock_init(&stab->lock);
 
-	/* Make sure page count doesn't overflow. */
-	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
-	err = bpf_map_charge_init(&stab->map.memory, cost);
-	if (err)
-		goto free_stab;
-
 	stab->sks = bpf_map_area_alloc(stab->map.max_entries *
 				       sizeof(struct sock *),
 				       stab->map.numa_node);
-	if (stab->sks)
-		return &stab->map;
-	err = -ENOMEM;
-	bpf_map_charge_finish(&stab->map.memory);
-free_stab:
-	kfree(stab);
-	return ERR_PTR(err);
+	if (!stab->sks) {
+		kfree(stab);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return &stab->map;
 }
 
 int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
@@ -1104,7 +1095,6 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
 {
 	struct bpf_shtab *htab;
 	int i, err;
-	u64 cost;
 
 	if (!capable(CAP_NET_ADMIN))
 		return ERR_PTR(-EPERM);
@@ -1132,21 +1122,10 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
 		goto free_htab;
 	}
 
-	cost = (u64) htab->buckets_num * sizeof(struct bpf_shtab_bucket) +
-	       (u64) htab->elem_size * htab->map.max_entries;
-	if (cost >= U32_MAX - PAGE_SIZE) {
-		err = -EINVAL;
-		goto free_htab;
-	}
-	err = bpf_map_charge_init(&htab->map.memory, cost);
-	if (err)
-		goto free_htab;
-
 	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
 					   sizeof(struct bpf_shtab_bucket),
					   htab->map.numa_node);
 	if (!htab->buckets) {
-		bpf_map_charge_finish(&htab->map.memory);
 		err = -ENOMEM;
 		goto free_htab;
 	}
--
2.26.2