Message-Id: <20181004045511.27733-3-jakub.kicinski@netronome.com>
Date: Wed, 3 Oct 2018 21:55:11 -0700
From: Jakub Kicinski <jakub.kicinski@...ronome.com>
To: netdev@...r.kernel.org
Cc: jiri@...nulli.us, gerlitz.or@...il.com, oss-drivers@...ronome.com,
john.hurley@...ronome.com,
Jakub Kicinski <jakub.kicinski@...ronome.com>
Subject: [RFC 2/2] nfp: register remote block callbacks for vxlan/geneve

From: John Hurley <john.hurley@...ronome.com>

Add a test stub to illustrate how the NFP driver could register for, and
receive callbacks from, remote (indirect) TC block setups on tunnel
devices such as vxlan and geneve.
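
For reference, the driver code below assumes that the core TC helpers
(introduced separately in this series) look roughly as follows; the
prototypes are inferred from how they are called here, so the callback
type and parameter names are guesses and the core patch may differ:

  /* Allocate/free the per-driver owner handle kept in nfp_flower_priv. */
  struct tcf_indr_block_owner *tc_indr_block_owner_create(void);
  void tc_indr_block_owner_clean(struct tcf_indr_block_owner *owner);

  /* Register/unregister an indirect block callback on a netdev the
   * driver does not own (e.g. a vxlan or geneve device).  The callback
   * has the same signature as nfp_flower_indr_setup_tc_cb() below.
   */
  int __tc_indr_block_cb_register(struct net_device *netdev, void *cb_priv,
                                  int (*cb)(struct net_device *netdev,
                                            void *cb_priv,
                                            enum tc_setup_type type,
                                            void *type_data),
                                  void *cb_ident,
                                  struct tcf_indr_block_owner *owner);
  void __tc_indr_block_cb_unregister(struct net_device *netdev,
                                     int (*cb)(struct net_device *netdev,
                                               void *cb_priv,
                                               enum tc_setup_type type,
                                               void *type_data),
                                     void *cb_ident);
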
Signed-off-by: John Hurley <john.hurley@...ronome.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@...ronome.com>
---
.../net/ethernet/netronome/nfp/flower/main.c | 12 ++
.../net/ethernet/netronome/nfp/flower/main.h | 10 ++
.../ethernet/netronome/nfp/flower/offload.c | 156 ++++++++++++++++++
.../netronome/nfp/flower/tunnel_conf.c | 8 +
4 files changed, 186 insertions(+)
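
A quick way to exercise the stub: once a vxlan or geneve netdev is
registered, nfp_tun_mac_event_handler() should call
nfp_flower_register_indr_block() for it, so binding an ingress block and
adding a flower rule on that device, for example

  tc qdisc add dev vxlan0 ingress
  tc filter add dev vxlan0 ingress protocol ip flower ip_proto udp action drop

is then expected to hit nfp_flower_setup_indr_block_cb() and only log
"Flower replace" (and later "Flower destroy"/"Flower stats") via
netdev_info(); nothing is offloaded to the NFP yet.  The device name and
filter above are purely illustrative.
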
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index e57d23746585..34b0c3602ab2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -587,8 +587,17 @@ static int nfp_flower_init(struct nfp_app *app)
goto err_cleanup_metadata;
}
+ app_priv->indir_cb_owner = tc_indr_block_owner_create();
+ if (!app_priv->indir_cb_owner)
+ goto err_cleanup_lag;
+
+ INIT_LIST_HEAD(&app_priv->nfp_indr_block_cb_list);
+
return 0;
+err_cleanup_lag:
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
+ nfp_flower_lag_cleanup(&app_priv->nfp_lag);
err_cleanup_metadata:
nfp_flower_metadata_cleanup(app);
err_free_app_priv:
@@ -607,6 +616,9 @@ static void nfp_flower_clean(struct nfp_app *app)
if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
nfp_flower_lag_cleanup(&app_priv->nfp_lag);
+ tc_indr_block_owner_clean(app_priv->indir_cb_owner);
+ nfp_flower_clean_indr_block_cbs(app_priv);
+
nfp_flower_metadata_cleanup(app);
vfree(app->priv);
app->priv = NULL;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 81d941ab895c..5f27318ecdbd 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -161,6 +161,7 @@ struct nfp_fl_lag {
* @reify_wait_queue: wait queue for repr reify response counting
* @mtu_conf: Configuration of repr MTU value
* @nfp_lag: Link aggregation data block
+ * @indir_cb_owner: Master structure for indirect TC block callback
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -191,6 +192,8 @@ struct nfp_flower_priv {
wait_queue_head_t reify_wait_queue;
struct nfp_mtu_conf mtu_conf;
struct nfp_fl_lag nfp_lag;
+ struct list_head nfp_indr_block_cb_list;
+ struct tcf_indr_block_owner *indir_cb_owner;
};
/**
@@ -293,5 +296,12 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
struct nfp_fl_pre_lag *pre_act);
int nfp_flower_lag_get_output_id(struct nfp_app *app,
struct net_device *master);
+void
+nfp_flower_register_indr_block(struct nfp_flower_priv *app_priv,
+ struct net_device *netdev);
+void
+nfp_flower_unregister_indr_block(struct nfp_flower_priv *app_priv,
+ struct net_device *netdev);
+void nfp_flower_clean_indr_block_cbs(struct nfp_flower_priv *app_priv);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index bd19624f10cf..14f1b91b7b90 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -707,3 +707,159 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
}
+
+struct indr_block_cb_priv {
+ struct net_device *netdev;
+ struct nfp_flower_priv *app_priv;
+ struct list_head list;
+};
+
+static struct indr_block_cb_priv *
+indr_block_cb_priv_lookup(struct nfp_flower_priv *app_priv,
+ struct net_device *netdev)
+{
+ struct indr_block_cb_priv *cb_priv;
+
+ /* All callback list access should be protected by RTNL. */
+ ASSERT_RTNL();
+
+ list_for_each_entry(cb_priv, &app_priv->nfp_indr_block_cb_list, list)
+ if (cb_priv->netdev == netdev)
+ return cb_priv;
+
+ return NULL;
+}
+
+void nfp_flower_clean_indr_block_cbs(struct nfp_flower_priv *app_priv)
+{
+ struct indr_block_cb_priv *cb_priv, *temp;
+
+ list_for_each_entry_safe(cb_priv, temp,
+ &app_priv->nfp_indr_block_cb_list, list)
+ kfree(cb_priv);
+}
+
+static int
+nfp_flower_indr_offload(struct net_device *netdev,
+ struct tc_cls_flower_offload *flower)
+{
+ if (flower->common.chain_index)
+ return -EOPNOTSUPP;
+
+ if (!eth_proto_is_802_3(flower->common.protocol))
+ return -EOPNOTSUPP;
+
+ switch (flower->command) {
+ case TC_CLSFLOWER_REPLACE:
+ netdev_info(netdev, "Flower replace\n");
+ break;
+ case TC_CLSFLOWER_DESTROY:
+ netdev_info(netdev, "Flower destroy\n");
+ break;
+ case TC_CLSFLOWER_STATS:
+ netdev_info(netdev, "Flower stats\n");
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct indr_block_cb_priv *priv = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return nfp_flower_indr_offload(priv->netdev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_flower_setup_indr_tc_block(struct net_device *netdev,
+ struct nfp_flower_priv *app_priv,
+ struct tc_block_offload *f)
+{
+ struct indr_block_cb_priv *cb_priv;
+ int err;
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ cb_priv = indr_block_cb_priv_lookup(app_priv, netdev);
+ if (cb_priv)
+ return -EEXIST;
+
+ cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
+ if (!cb_priv)
+ return -ENOMEM;
+
+ cb_priv->netdev = netdev;
+ cb_priv->app_priv = app_priv;
+ list_add(&cb_priv->list, &app_priv->nfp_indr_block_cb_list);
+
+ err = tcf_block_cb_register(f->block,
+ nfp_flower_setup_indr_block_cb,
+ netdev, cb_priv, f->extack);
+ if (err) {
+ list_del(&cb_priv->list);
+ kfree(cb_priv);
+ }
+
+ return err;
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block,
+ nfp_flower_setup_indr_block_cb,
+ netdev);
+ cb_priv = indr_block_cb_priv_lookup(app_priv, netdev);
+ if (cb_priv) {
+ list_del(&cb_priv->list);
+ kfree(cb_priv);
+ }
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
+ type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void
+nfp_flower_register_indr_block(struct nfp_flower_priv *app_priv,
+ struct net_device *netdev)
+{
+ int err;
+
+ err = __tc_indr_block_cb_register(netdev, app_priv,
+ nfp_flower_indr_setup_tc_cb,
+ netdev, app_priv->indir_cb_owner);
+
+ if (err)
+ nfp_flower_cmsg_warn(app_priv->app, "Failed to register remote block notifier for %s\n", netdev_name(netdev));
+}
+
+void
+nfp_flower_unregister_indr_block(struct nfp_flower_priv *app_priv,
+ struct net_device *netdev)
+{
+ __tc_indr_block_cb_unregister(netdev, nfp_flower_indr_setup_tc_cb,
+ netdev);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 382bb93cb090..49cf86f1a2da 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -697,6 +697,10 @@ static int nfp_tun_mac_event_handler(struct notifier_block *nb,
/* If non-nfp netdev then free its offload index. */
if (nfp_tun_is_netdev_to_offload(netdev))
nfp_tun_del_mac_idx(app, netdev->ifindex);
+
+ if (event == NETDEV_UNREGISTER &&
+ nfp_tun_is_netdev_to_offload(netdev))
+ nfp_flower_unregister_indr_block(app_priv, netdev);
} else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
event == NETDEV_REGISTER) {
app_priv = container_of(nb, struct nfp_flower_priv,
@@ -708,6 +712,10 @@ static int nfp_tun_mac_event_handler(struct notifier_block *nb,
/* Force a list write to keep NFP up to date. */
nfp_tunnel_write_macs(app);
+
+ if (event == NETDEV_REGISTER &&
+ nfp_tun_is_netdev_to_offload(netdev))
+ nfp_flower_register_indr_block(app_priv, netdev);
}
return NOTIFY_OK;
}
--
2.17.1