Message-ID: <20250319215358.2287371-6-ameryhung@gmail.com>
Date: Wed, 19 Mar 2025 14:53:52 -0700
From: Amery Hung <ameryhung@...il.com>
To: netdev@...r.kernel.org
Cc: bpf@...r.kernel.org,
daniel@...earbox.net,
andrii@...nel.org,
alexei.starovoitov@...il.com,
martin.lau@...nel.org,
kuba@...nel.org,
edumazet@...gle.com,
xiyou.wangcong@...il.com,
jhs@...atatu.com,
sinquersw@...il.com,
toke@...hat.com,
juntong.deng@...look.com,
jiri@...nulli.us,
stfomichev@...il.com,
ekarani.silvestre@....ufcg.edu.br,
yangpeihao@...u.edu.cn,
yepeilin.cs@...il.com,
ameryhung@...il.com,
kernel-team@...a.com
Subject: [PATCH bpf-next v6 05/11] bpf: net_sched: Add a qdisc watchdog timer
From: Amery Hung <amery.hung@...edance.com>

Add a watchdog timer to bpf qdisc. The watchdog can be used to schedule
the execution of the qdisc through the kfunc bpf_qdisc_watchdog_schedule().
It is useful for building traffic-shaping scheduling algorithms, where the
time at which the next packet will be dequeued is known.
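
For illustration only (not part of this patch), a traffic-shaping
.dequeue program could use the kfunc roughly as follows. The struct_ops
program layout mirrors the bpf qdisc selftests; next_departure_ns is a
hypothetical value maintained by the shaper's own .enqueue:

  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  extern void bpf_qdisc_watchdog_schedule(struct Qdisc *sch, u64 expire,
                                          u64 delta_ns) __ksym;

  /* Hypothetical: absolute time (ns) at which the next packet may leave. */
  u64 next_departure_ns;

  SEC("struct_ops")
  struct sk_buff *BPF_PROG(shaper_dequeue, struct Qdisc *sch)
  {
          u64 now = bpf_ktime_get_ns();

          if (now < next_departure_ns) {
                  /* Too early: arm the watchdog so the qdisc is run
                   * again once the next packet may be sent, instead of
                   * returning NULL and never being rescheduled.
                   */
                  bpf_qdisc_watchdog_schedule(sch, next_departure_ns, 0);
                  return NULL;
          }

          /* ... pop and return the next skb from the shaper's queue ... */
          return NULL;
  }

  char _license[] SEC("license") = "GPL";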

The implementation relies on the struct_ops gen_prologue/gen_epilogue
hooks to patch the BPF programs provided by users. Op-specific
prologue/epilogue kfuncs are introduced instead of calling watchdog
kfuncs directly, so that the prologue/epilogue is easier to extend in
the future (new work can be added in C inside the kfuncs rather than as
emitted BPF bytecode).
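
Expressed as plain C rather than BPF bytecode (illustration only), the
patched programs end up behaving roughly like the following, where
user_init() and user_reset() stand in for the user-supplied bodies:

  /* .init: the prologue runs before the user's code. */
  static int patched_init(struct Qdisc *sch, struct nlattr *opt,
                          struct netlink_ext_ack *extack)
  {
          bpf_qdisc_init_prologue(sch);   /* initializes the watchdog */
          return user_init(sch, opt, extack);
  }

  /* .reset/.destroy: the epilogue runs after the user's code. */
  static void patched_reset(struct Qdisc *sch)
  {
          user_reset(sch);
          bpf_qdisc_reset_destroy_epilogue(sch);  /* cancels the watchdog */
  }
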
Signed-off-by: Amery Hung <amery.hung@...edance.com>
Acked-by: Toke Høiland-Jørgensen <toke@...hat.com>
---
net/sched/bpf_qdisc.c | 106 +++++++++++++++++++++++++++++++++++++++++-
1 file changed, 105 insertions(+), 1 deletion(-)
diff --git a/net/sched/bpf_qdisc.c b/net/sched/bpf_qdisc.c
index d812a72ca032..5f4ab4877535 100644
--- a/net/sched/bpf_qdisc.c
+++ b/net/sched/bpf_qdisc.c
@@ -13,6 +13,10 @@
static struct bpf_struct_ops bpf_Qdisc_ops;
+struct bpf_sched_data {
+ struct qdisc_watchdog watchdog;
+};
+
struct bpf_sk_buff_ptr {
struct sk_buff *skb;
};
@@ -142,6 +146,56 @@ static int bpf_qdisc_btf_struct_access(struct bpf_verifier_log *log,
return 0;
}
+BTF_ID_LIST(bpf_qdisc_init_prologue_ids)
+BTF_ID(func, bpf_qdisc_init_prologue)
+
+static int bpf_qdisc_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
+ const struct bpf_prog *prog)
+{
+ struct bpf_insn *insn = insn_buf;
+
+ if (prog->aux->attach_st_ops_member_off != offsetof(struct Qdisc_ops, init))
+ return 0;
+
+ /* r6 = r1; // r6 will be "u64 *ctx". r1 is "u64 *ctx".
+ * r1 = r1[0]; // r1 will be "struct Qdisc *sch"
+ * r0 = bpf_qdisc_init_prologue(r1);
+ * r1 = r6; // r1 will be "u64 *ctx".
+ */
+ *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
+ *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
+ *insn++ = BPF_CALL_KFUNC(0, bpf_qdisc_init_prologue_ids[0]);
+ *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
+ *insn++ = prog->insnsi[0];
+
+ return insn - insn_buf;
+}
+
+BTF_ID_LIST(bpf_qdisc_reset_destroy_epilogue_ids)
+BTF_ID(func, bpf_qdisc_reset_destroy_epilogue)
+
+static int bpf_qdisc_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
+ s16 ctx_stack_off)
+{
+ struct bpf_insn *insn = insn_buf;
+
+ if (prog->aux->attach_st_ops_member_off != offsetof(struct Qdisc_ops, reset) &&
+ prog->aux->attach_st_ops_member_off != offsetof(struct Qdisc_ops, destroy))
+ return 0;
+
+ /* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
+ * r1 = r1[0]; // r1 will be "struct Qdisc *sch"
+ * r0 = bpf_qdisc_reset_destroy_epilogue(r1);
+ * BPF_EXIT;
+ */
+ *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
+ *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
+ *insn++ = BPF_CALL_KFUNC(0, bpf_qdisc_reset_destroy_epilogue_ids[0]);
+ *insn++ = BPF_EXIT_INSN();
+
+ return insn - insn_buf;
+}
+
__bpf_kfunc_start_defs();
/* bpf_skb_get_hash - Get the flow hash of an skb.
@@ -170,6 +224,36 @@ __bpf_kfunc void bpf_qdisc_skb_drop(struct sk_buff *skb,
__qdisc_drop(skb, (struct sk_buff **)to_free_list);
}
+/* bpf_qdisc_watchdog_schedule - Schedule a qdisc to a later time using a timer.
+ * @sch: The qdisc to be scheduled.
+ * @expire: The expiry time of the timer.
+ * @delta_ns: The slack range of the timer.
+ */
+__bpf_kfunc void bpf_qdisc_watchdog_schedule(struct Qdisc *sch, u64 expire, u64 delta_ns)
+{
+ struct bpf_sched_data *q = qdisc_priv(sch);
+
+ qdisc_watchdog_schedule_range_ns(&q->watchdog, expire, delta_ns);
+}
+
+/* bpf_qdisc_init_prologue - Hidden kfunc called in prologue of .init. */
+__bpf_kfunc void bpf_qdisc_init_prologue(struct Qdisc *sch)
+{
+ struct bpf_sched_data *q = qdisc_priv(sch);
+
+ qdisc_watchdog_init(&q->watchdog, sch);
+}
+
+/* bpf_qdisc_reset_destroy_epilogue - Hidden kfunc called in epilogue of .reset
+ * and .destroy
+ */
+__bpf_kfunc void bpf_qdisc_reset_destroy_epilogue(struct Qdisc *sch)
+{
+ struct bpf_sched_data *q = qdisc_priv(sch);
+
+ qdisc_watchdog_cancel(&q->watchdog);
+}
+
__bpf_kfunc_end_defs();
BTF_KFUNCS_START(qdisc_kfunc_ids)
@@ -177,6 +261,9 @@ BTF_ID_FLAGS(func, bpf_skb_get_hash, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfree_skb, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_qdisc_skb_drop, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_dynptr_from_skb, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_qdisc_watchdog_schedule, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_qdisc_init_prologue, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_qdisc_reset_destroy_epilogue, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(qdisc_kfunc_ids)
BTF_SET_START(qdisc_common_kfunc_set)
@@ -187,16 +274,22 @@ BTF_SET_END(qdisc_common_kfunc_set)
BTF_SET_START(qdisc_enqueue_kfunc_set)
BTF_ID(func, bpf_qdisc_skb_drop)
+BTF_ID(func, bpf_qdisc_watchdog_schedule)
BTF_SET_END(qdisc_enqueue_kfunc_set)
+BTF_SET_START(qdisc_dequeue_kfunc_set)
+BTF_ID(func, bpf_qdisc_watchdog_schedule)
+BTF_SET_END(qdisc_dequeue_kfunc_set)
+
enum qdisc_ops_kf_flags {
QDISC_OPS_KF_COMMON = 0,
QDISC_OPS_KF_ENQUEUE = 1 << 0,
+ QDISC_OPS_KF_DEQUEUE = 1 << 1,
};
static const u32 qdisc_ops_context_flags[] = {
[QDISC_OP_IDX(enqueue)] = QDISC_OPS_KF_ENQUEUE,
- [QDISC_OP_IDX(dequeue)] = QDISC_OPS_KF_COMMON,
+ [QDISC_OP_IDX(dequeue)] = QDISC_OPS_KF_DEQUEUE,
[QDISC_OP_IDX(init)] = QDISC_OPS_KF_COMMON,
[QDISC_OP_IDX(reset)] = QDISC_OPS_KF_COMMON,
[QDISC_OP_IDX(destroy)] = QDISC_OPS_KF_COMMON,
@@ -219,6 +312,10 @@ static int bpf_qdisc_kfunc_filter(const struct bpf_prog *prog, u32 kfunc_id)
btf_id_set_contains(&qdisc_enqueue_kfunc_set, kfunc_id))
return 0;
+ if ((flags & QDISC_OPS_KF_DEQUEUE) &&
+ btf_id_set_contains(&qdisc_dequeue_kfunc_set, kfunc_id))
+ return 0;
+
if (btf_id_set_contains(&qdisc_common_kfunc_set, kfunc_id))
return 0;
@@ -235,6 +332,8 @@ static const struct bpf_verifier_ops bpf_qdisc_verifier_ops = {
.get_func_proto = bpf_base_func_proto,
.is_valid_access = bpf_qdisc_is_valid_access,
.btf_struct_access = bpf_qdisc_btf_struct_access,
+ .gen_prologue = bpf_qdisc_gen_prologue,
+ .gen_epilogue = bpf_qdisc_gen_epilogue,
};
static int bpf_qdisc_init_member(const struct btf_type *t,
@@ -250,6 +349,11 @@ static int bpf_qdisc_init_member(const struct btf_type *t,
moff = __btf_member_bit_offset(t, member) / 8;
switch (moff) {
+ case offsetof(struct Qdisc_ops, priv_size):
+ if (uqdisc_ops->priv_size)
+ return -EINVAL;
+ qdisc_ops->priv_size = sizeof(struct bpf_sched_data);
+ return 1;
case offsetof(struct Qdisc_ops, peek):
qdisc_ops->peek = qdisc_peek_dequeued;
return 0;
--
2.47.1