Message-ID: <17f37206-942a-4db4-99c2-a43412a08d33@linux.dev>
Date: Thu, 9 Jan 2025 15:36:59 -0800
From: Martin KaFai Lau <martin.lau@...ux.dev>
To: Amery Hung <ameryhung@...il.com>
Cc: bpf@...r.kernel.org, netdev@...r.kernel.org, daniel@...earbox.net,
andrii@...nel.org, alexei.starovoitov@...il.com, martin.lau@...nel.org,
sinquersw@...il.com, toke@...hat.com, jhs@...atatu.com, jiri@...nulli.us,
stfomichev@...il.com, ekarani.silvestre@....ufcg.edu.br,
yangpeihao@...u.edu.cn, xiyou.wangcong@...il.com, yepeilin.cs@...il.com,
amery.hung@...edance.com
Subject: Re: [PATCH bpf-next v2 14/14] selftests: Add a bpf fq qdisc to
selftest
On 12/20/24 11:55 AM, Amery Hung wrote:
> +static int fq_new_flow(void *flow_map, struct fq_stashed_flow **sflow, u64 hash)
> +{
> + struct fq_stashed_flow tmp = {};
> + struct fq_flow_node *flow;
> + int ret;
> +
> + flow = bpf_obj_new(typeof(*flow));
> + if (!flow)
> + return -ENOMEM;
> +
> + flow->credit = q.initial_quantum;
> + flow->qlen = 0;
> + flow->age = 1;
> + flow->time_next_packet = 0;
> +
> + ret = bpf_map_update_elem(flow_map, &hash, &tmp, 0);
> + if (ret == -ENOMEM) {
-E2BIG also needs to be checked here, to handle the case where the flow_map is full.
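i.e. something like this (untested sketch, only the error check changes, the rest of the retry path stays as is):

	ret = bpf_map_update_elem(flow_map, &hash, &tmp, 0);
	if (ret == -ENOMEM || ret == -E2BIG) {
		fq_gc();
		bpf_map_update_elem(&fq_nonprio_flows, &hash, &tmp, 0);
	}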
> + fq_gc();
> + bpf_map_update_elem(&fq_nonprio_flows, &hash, &tmp, 0);
> + }
> +
> + *sflow = bpf_map_lookup_elem(flow_map, &hash);
> + if (!*sflow) {
> + bpf_obj_drop(flow);
> + return -ENOMEM;
> + }
> +
> + bpf_kptr_xchg_back(&(*sflow)->flow, flow);
> + return 0;
> +}
[ ... ]
> +static int
> +fq_remove_flows(struct bpf_map *flow_map, u64 *hash,
> + struct fq_stashed_flow *sflow, struct remove_flows_ctx *ctx)
> +{
> + struct fq_flow_node *flow = NULL;
> +
> + flow = bpf_kptr_xchg(&sflow->flow, flow);
> + if (flow) {
> + if (!ctx->gc_only || fq_gc_candidate(flow)) {
> + bpf_obj_drop(flow);
afaik, the value for this hash (i.e. sflow here) is still left in the flow_map.
Instead of xchg and then drop, I think this should be
bpf_map_delete_elem(flow_map, hash), which removes the sflow value from the
flow_map and takes care of the bpf_obj_drop() as well.
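Something like this (untested), keeping the xchg only so that fq_gc_candidate() can still look at the flow before deciding:

	flow = bpf_kptr_xchg(&sflow->flow, flow);
	if (flow) {
		bool del = !ctx->gc_only || fq_gc_candidate(flow);

		/* put the kptr back; deleting the elem will drop it */
		bpf_kptr_xchg_back(&sflow->flow, flow);
		if (del) {
			bpf_map_delete_elem(flow_map, hash);
			ctx->reset_cnt++;
		}
	}

iiuc, deleting the current elem from within the bpf_for_each_map_elem() callback is fine for a hashmap.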
> + ctx->reset_cnt++;
> + } else {
> + bpf_kptr_xchg_back(&sflow->flow, flow);
> + }
> + }
> +
> + return ctx->reset_cnt < ctx->reset_max ? 0 : 1;
> +}
> +
> +static void fq_gc(void)
> +{
> + struct remove_flows_ctx cb_ctx = {
> + .gc_only = true,
> + .reset_cnt = 0,
> + .reset_max = FQ_GC_MAX,
> + };
> +
> + bpf_for_each_map_elem(&fq_nonprio_flows, fq_remove_flows, &cb_ctx, 0);
> +}
> +
> +SEC("struct_ops/bpf_fq_reset")
> +void BPF_PROG(bpf_fq_reset, struct Qdisc *sch)
> +{
> + struct unset_throttled_flows_ctx utf_ctx = {
> + .unset_all = true,
> + };
> + struct remove_flows_ctx rf_ctx = {
> + .gc_only = false,
> + .reset_cnt = 0,
> + .reset_max = NUM_QUEUE,
> + };
> + struct fq_stashed_flow *sflow;
> + u64 hash = 0;
> +
> + sch->q.qlen = 0;
> + sch->qstats.backlog = 0;
> +
> + bpf_for_each_map_elem(&fq_nonprio_flows, fq_remove_flows, &rf_ctx, 0);
> +
> + rf_ctx.reset_cnt = 0;
> + bpf_for_each_map_elem(&fq_prio_flows, fq_remove_flows, &rf_ctx, 0);
> + fq_new_flow(&fq_prio_flows, &sflow, hash);
> +
> + bpf_loop(NUM_QUEUE, fq_remove_flows_in_list, NULL, 0);
> + q.new_flow_cnt = 0;
> + q.old_flow_cnt = 0;
> +
> + bpf_loop(NUM_QUEUE, fq_unset_throttled_flows, &utf_ctx, 0);
> +
> + return;
> +}