Message-Id: <20190415145603.32491-7-simon.horman@netronome.com>
Date:   Mon, 15 Apr 2019 16:55:58 +0200
From:   Simon Horman <simon.horman@...ronome.com>
To:     David Miller <davem@...emloft.net>,
        Jakub Kicinski <jakub.kicinski@...ronome.com>
Cc:     netdev@...r.kernel.org, oss-drivers@...ronome.com,
        John Hurley <john.hurley@...ronome.com>,
        Simon Horman <simon.horman@...ronome.com>
Subject: [PATCH net-next 06/11] nfp: flower: get flows by host context

From: John Hurley <john.hurley@...ronome.com>

Each flow is given a context ID that the fw uses (along with its cookie)
to identify the flow. The flow's stats are updated by the fw via this ID,
which is a reference to a pre-allocated array entry.
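
As a rough sketch of how that context ID is consumed on a stats update
(the stats array and stats_lock fields come from struct nfp_flower_priv
in main.h; the pkts/bytes/used members and the helper itself are
illustrative only, not driver code):

	/* Illustrative only: a fw stats update indexes the pre-allocated
	 * stats array by context ID under stats_lock.
	 */
	static void stats_update_sketch(struct nfp_flower_priv *priv,
					u32 ctx_id, u64 pkts, u64 bytes)
	{
		spin_lock(&priv->stats_lock);
		priv->stats[ctx_id].pkts += pkts;
		priv->stats[ctx_id].bytes += bytes;
		priv->stats[ctx_id].used = jiffies;
		spin_unlock(&priv->stats_lock);
	}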

In preparation for flow merge code, enable the nfp_fl_payload structure to
be accessed via this stats context ID. Rather than increasing the memory
requirements of the pre-allocated array, add a new rhashtable to associate
each active stats context ID with its rule payload.
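
A (hypothetical) caller in the flow merge code can then recover the rule
payload directly from a fw-reported context ID via the helper added
below, e.g.:

	/* Hypothetical caller: map a fw-reported stats context ID back to
	 * its rule payload through the new stats_ctx_table.
	 */
	struct nfp_fl_payload *flow;

	flow = nfp_flower_get_fl_payload_from_ctx(app, ctx_id);
	if (!flow)
		return;	/* context no longer active */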

While adding new code to the compile metadata function, slightly
restructure the existing code to allow for cleaner, easier-to-read
error handling.
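
In outline, each failure point now jumps to a label that unwinds only
the state acquired so far, as in this sketch (the struct and resource
helpers are placeholders, not driver APIs; the real version is in
nfp_compile_flow_metadata() below):

	static int compile_sketch(struct nfp_app *app)
	{
		struct sketch_entry *entry;		/* placeholder type */
		u32 ctx;
		int err;

		err = sketch_get_ctx(app, &ctx);	/* placeholder helper */
		if (err)
			return err;

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			err = -ENOMEM;
			goto err_release_ctx;
		}

		err = sketch_register(app, entry);	/* placeholder helper */
		if (err)
			goto err_free_entry;

		return 0;

	err_free_entry:
		kfree(entry);
	err_release_ctx:
		sketch_release_ctx(app, ctx);		/* placeholder helper */
		return err;
	}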

Signed-off-by: John Hurley <john.hurley@...ronome.com>
Signed-off-by: Simon Horman <simon.horman@...ronome.com>
---
 drivers/net/ethernet/netronome/nfp/flower/main.h   |   4 +
 .../net/ethernet/netronome/nfp/flower/metadata.c   | 101 +++++++++++++++++----
 2 files changed, 89 insertions(+), 16 deletions(-)

diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 485bdc0e1c20..9b34264197c2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -140,6 +140,7 @@ struct nfp_fl_internal_ports {
  * @flow_table:		Hash table used to store flower rules
  * @stats:		Stored stats updates for flower rules
  * @stats_lock:		Lock for flower rule stats updates
+ * @stats_ctx_table:	Hash table to map stats contexts to its flow rule
  * @cmsg_work:		Workqueue for control messages processing
  * @cmsg_skbs_high:	List of higher priority skbs for control message
  *			processing
@@ -170,6 +171,7 @@ struct nfp_flower_priv {
 	struct rhashtable flow_table;
 	struct nfp_fl_stats *stats;
 	spinlock_t stats_lock; /* lock stats */
+	struct rhashtable stats_ctx_table;
 	struct work_struct cmsg_work;
 	struct sk_buff_head cmsg_skbs_high;
 	struct sk_buff_head cmsg_skbs_low;
@@ -304,6 +306,8 @@ struct nfp_fl_payload *
 nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
 			   struct net_device *netdev);
 struct nfp_fl_payload *
+nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id);
+struct nfp_fl_payload *
 nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
 
 void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 492837b852b6..d68307e5bf16 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -24,6 +24,18 @@ struct nfp_fl_flow_table_cmp_arg {
 	unsigned long cookie;
 };
 
+struct nfp_fl_stats_ctx_to_flow {
+	struct rhash_head ht_node;
+	u32 stats_cxt;
+	struct nfp_fl_payload *flow;
+};
+
+static const struct rhashtable_params stats_ctx_table_params = {
+	.key_offset	= offsetof(struct nfp_fl_stats_ctx_to_flow, stats_cxt),
+	.head_offset	= offsetof(struct nfp_fl_stats_ctx_to_flow, ht_node),
+	.key_len	= sizeof(u32),
+};
+
 static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
 {
 	struct nfp_flower_priv *priv = app->priv;
@@ -285,25 +297,42 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
 			      struct nfp_fl_payload *nfp_flow,
 			      struct net_device *netdev)
 {
+	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
 	struct nfp_flower_priv *priv = app->priv;
 	struct nfp_fl_payload *check_entry;
 	u8 new_mask_id;
 	u32 stats_cxt;
+	int err;
 
-	if (nfp_get_stats_entry(app, &stats_cxt))
-		return -ENOENT;
+	err = nfp_get_stats_entry(app, &stats_cxt);
+	if (err)
+		return err;
 
 	nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
 	nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
 	nfp_flow->ingress_dev = netdev;
 
+	ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
+	if (!ctx_entry) {
+		err = -ENOMEM;
+		goto err_release_stats;
+	}
+
+	ctx_entry->stats_cxt = stats_cxt;
+	ctx_entry->flow = nfp_flow;
+
+	if (rhashtable_insert_fast(&priv->stats_ctx_table, &ctx_entry->ht_node,
+				   stats_ctx_table_params)) {
+		err = -ENOMEM;
+		goto err_free_ctx_entry;
+	}
+
 	new_mask_id = 0;
 	if (!nfp_check_mask_add(app, nfp_flow->mask_data,
 				nfp_flow->meta.mask_len,
 				&nfp_flow->meta.flags, &new_mask_id)) {
-		if (nfp_release_stats_entry(app, stats_cxt))
-			return -EINVAL;
-		return -ENOENT;
+		err = -ENOENT;
+		goto err_remove_rhash;
 	}
 
 	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
@@ -317,23 +346,31 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
 
 	check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
 	if (check_entry) {
-		if (nfp_release_stats_entry(app, stats_cxt))
-			return -EINVAL;
-
-		if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
-					   nfp_flow->meta.mask_len,
-					   NULL, &new_mask_id))
-			return -EINVAL;
-
-		return -EEXIST;
+		err = -EEXIST;
+		goto err_remove_mask;
 	}
 
 	return 0;
+
+err_remove_mask:
+	nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len,
+			      NULL, &new_mask_id);
+err_remove_rhash:
+	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
+					    &ctx_entry->ht_node,
+					    stats_ctx_table_params));
+err_free_ctx_entry:
+	kfree(ctx_entry);
+err_release_stats:
+	nfp_release_stats_entry(app, stats_cxt);
+
+	return err;
 }
 
 int nfp_modify_flow_metadata(struct nfp_app *app,
 			     struct nfp_fl_payload *nfp_flow)
 {
+	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
 	struct nfp_flower_priv *priv = app->priv;
 	u8 new_mask_id = 0;
 	u32 temp_ctx_id;
@@ -348,12 +385,36 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
 	/* Update flow payload with mask ids. */
 	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
 
-	/* Release the stats ctx id. */
+	/* Release the stats ctx id and ctx to flow table entry. */
 	temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
 
+	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &temp_ctx_id,
+					   stats_ctx_table_params);
+	if (!ctx_entry)
+		return -ENOENT;
+
+	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
+					    &ctx_entry->ht_node,
+					    stats_ctx_table_params));
+	kfree(ctx_entry);
+
 	return nfp_release_stats_entry(app, temp_ctx_id);
 }
 
+struct nfp_fl_payload *
+nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id)
+{
+	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
+	struct nfp_flower_priv *priv = app->priv;
+
+	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &ctx_id,
+					   stats_ctx_table_params);
+	if (!ctx_entry)
+		return NULL;
+
+	return ctx_entry->flow;
+}
+
 static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
 			    const void *obj)
 {
@@ -403,6 +464,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
 	if (err)
 		return err;
 
+	err = rhashtable_init(&priv->stats_ctx_table, &stats_ctx_table_params);
+	if (err)
+		goto err_free_flow_table;
+
 	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
 
 	/* Init ring buffer and unallocated mask_ids. */
@@ -410,7 +475,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
 		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
 			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
 	if (!priv->mask_ids.mask_id_free_list.buf)
-		goto err_free_flow_table;
+		goto err_free_stats_ctx_table;
 
 	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
 
@@ -447,6 +512,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
 	kfree(priv->mask_ids.last_used);
 err_free_mask_id:
 	kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_stats_ctx_table:
+	rhashtable_destroy(&priv->stats_ctx_table);
 err_free_flow_table:
 	rhashtable_destroy(&priv->flow_table);
 	return -ENOMEM;
@@ -461,6 +528,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
 
 	rhashtable_free_and_destroy(&priv->flow_table,
 				    nfp_check_rhashtable_empty, NULL);
+	rhashtable_free_and_destroy(&priv->stats_ctx_table,
+				    nfp_check_rhashtable_empty, NULL);
 	kvfree(priv->stats);
 	kfree(priv->mask_ids.mask_id_free_list.buf);
 	kfree(priv->mask_ids.last_used);
-- 
2.11.0
