Message-Id: <20171103205630.1083-4-jakub.kicinski@netronome.com>
Date: Fri, 3 Nov 2017 13:56:18 -0700
From: Jakub Kicinski <jakub.kicinski@...ronome.com>
To: netdev@...r.kernel.org
Cc: oss-drivers@...ronome.com, alexei.starovoitov@...il.com,
daniel@...earbox.net, Jakub Kicinski <jakub.kicinski@...ronome.com>
Subject: [PATCH net-next v2 03/15] bpf: report offload info to user space
Extend struct bpf_prog_info to contain information about the program
being bound to a device. Since the netdev may get destroyed while the
program still exists, we need a flag to indicate that the program is
loaded for a device, even if the device is gone.
Signed-off-by: Jakub Kicinski <jakub.kicinski@...ronome.com>
Reviewed-by: Simon Horman <simon.horman@...ronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@...ronome.com>
---
 include/linux/bpf.h      |  1 +
 include/uapi/linux/bpf.h |  6 ++++++
 kernel/bpf/offload.c     | 12 ++++++++++++
 kernel/bpf/syscall.c     |  5 +++++
 4 files changed, 24 insertions(+)
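Usage note (not part of the patch): a minimal sketch of how user space could
read the new fields via BPF_OBJ_GET_INFO_BY_FD, assuming the updated uapi
header and a program fd obtained elsewhere (e.g. via BPF_PROG_GET_FD_BY_ID).
The bpf_obj_get_info_by_fd() wrapper below is a local helper written for the
example, not something this patch adds.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Thin wrapper around the bpf(2) syscall for BPF_OBJ_GET_INFO_BY_FD;
 * local helper for this example only.
 */
static int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = bpf_fd;
	attr.info.info_len = *info_len;
	attr.info.info = (__u64)(unsigned long)info;

	err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!err)
		*info_len = attr.info.info_len;
	return err;
}

/* prog_fd is assumed to come from elsewhere, e.g. BPF_PROG_GET_FD_BY_ID. */
static void show_offload_status(int prog_fd)
{
	struct bpf_prog_info info;
	__u32 len = sizeof(info);

	memset(&info, 0, sizeof(info));
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &len))
		return;

	if (info.status & BPF_PROG_STATUS_DEV_BOUND)
		printf("dev bound, ifindex: %u%s\n", info.ifindex,
		       info.ifindex ? "" : " (netdev gone)");
	else
		printf("not dev bound\n");
}

Because the kernel copies back at most its own sizeof(struct bpf_prog_info)
and reports the length it actually filled, the same binary run on a kernel
without this patch should simply see the two new fields left at zero.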
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e45d43f9ec92..98bacd0fa5cc 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -506,6 +506,7 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
 
 int bpf_prog_offload_compile(struct bpf_prog *prog);
 void bpf_prog_offload_destroy(struct bpf_prog *prog);
+u32 bpf_prog_offload_ifindex(struct bpf_prog *prog);
 
 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 727a3dba13e6..e92f62cf933a 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -894,6 +894,10 @@ enum sk_action {
 
 #define BPF_TAG_SIZE	8
 
+enum bpf_prog_status {
+	BPF_PROG_STATUS_DEV_BOUND	= (1 << 0),
+};
+
 struct bpf_prog_info {
 	__u32 type;
 	__u32 id;
@@ -907,6 +911,8 @@ struct bpf_prog_info {
 	__u32 nr_map_ids;
 	__aligned_u64 map_ids;
 	char name[BPF_OBJ_NAME_LEN];
+	__u32 ifindex;
+	__u32 status;
 } __attribute__((aligned(8)));
 
 struct bpf_map_info {
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 5553e0e2f8b1..2816feb38be1 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -144,6 +144,18 @@ int bpf_prog_offload_compile(struct bpf_prog *prog)
 	return bpf_prog_offload_translate(prog);
 }
 
+u32 bpf_prog_offload_ifindex(struct bpf_prog *prog)
+{
+	struct bpf_dev_offload *offload = prog->aux->offload;
+	u32 ifindex;
+
+	rtnl_lock();
+	ifindex = offload->netdev ? offload->netdev->ifindex : 0;
+	rtnl_unlock();
+
+	return ifindex;
+}
+
 const struct bpf_prog_ops bpf_offload_prog_ops = {
 };
 
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 1574b9f0f24e..3217c20ea91b 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1592,6 +1592,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 			return -EFAULT;
 	}
 
+	if (bpf_prog_is_dev_bound(prog->aux)) {
+		info.status |= BPF_PROG_STATUS_DEV_BOUND;
+		info.ifindex = bpf_prog_offload_ifindex(prog);
+	}
+
 done:
 	if (copy_to_user(uinfo, &info, info_len) ||
 	    put_user(info_len, &uattr->info.info_len))
--
2.14.1