Message-Id: <1457953578-7054-3-git-send-email-sunil.kovvuri@gmail.com>
Date: Mon, 14 Mar 2016 16:36:15 +0530
From: sunil.kovvuri@...il.com
To: netdev@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
davem@...emloft.net, sgoutham@...ium.com,
robert.richter@...iumnetworks.com
Subject: [PATCH v2 2/2] net: thunderx: Adjust nicvf structure to reduce cache misses

From: Sunil Goutham <sgoutham@...ium.com>

Adjusted the nicvf structure such that all elements used in the hot
path, like napi, xmit etc., fall into the same cache line. This
reduced the number of cache misses and resulted in a ~2% increase in
the number of packets handled on a core.

Also changed elements declared with the ':1' bitfield notation to
boolean, to be consistent with the other element definitions.

Signed-off-by: Sunil Goutham <sgoutham@...ium.com>
---
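Notes (not part of the patch): below is a minimal, self-contained
sketch of how the "hot members share a cache line" intent could be
sanity-checked at build time with offsetof() and a static assert.
The struct, member names and the 128-byte line size are illustrative
assumptions, not the actual nicvf layout.

    /* Illustrative only: hot members grouped at the top of the struct
     * so they land in the first cache line; names and the 128-byte
     * line size are assumptions, not the real driver layout.
     */
    #include <stddef.h>

    #define CACHE_LINE_SIZE 128     /* assumed L1 line size */

    struct demo_vf {
            /* hot path: touched on every napi poll / xmit */
            void *reg_base;
            void *qs;
            void *napi[8];

            /* cold path: config and stats, placed after the hot members */
            unsigned char node;
            unsigned int mtu;
            unsigned long long rx_drops;
    };

    /* Fail the build if a hot member ever slips past the first line. */
    _Static_assert(offsetof(struct demo_vf, napi) +
                   sizeof(((struct demo_vf *)0)->napi) <= CACHE_LINE_SIZE,
                   "hot members must stay within the first cache line");

In kernel code the same kind of check could be expressed with
BUILD_BUG_ON(), or the resulting layout inspected with pahole; either
way the point is only to make the "hot members first" intent
enforceable rather than implicit.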
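Likewise, a tiny standalone example of the second change, comparing a
':1' bitfield flag with a plain bool flag (the struct names here are
made up; the member names mirror the ones in the patch):

    #include <stdbool.h>
    #include <stdio.h>

    struct flags_bitfield {
            unsigned char sqs_mode:1;       /* single bit, packed */
            unsigned char tns_mode:1;
    };

    struct flags_bool {
            bool sqs_mode;                  /* one byte each */
            bool tns_mode;
    };

    int main(void)
    {
            printf("bitfield: %zu bytes, bool: %zu bytes\n",
                   sizeof(struct flags_bitfield),
                   sizeof(struct flags_bool));
            return 0;
    }

The bool version costs a byte per flag but reads and writes each flag
independently (no read-modify-write of a shared storage unit) and
matches the other boolean members of the struct, which is the
consistency the changelog refers to.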
drivers/net/ethernet/cavium/thunder/nic.h | 52 ++++++++++++++++------------
1 files changed, 30 insertions(+), 22 deletions(-)

diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 872b22d..83025bb 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -272,46 +272,54 @@ struct nicvf {
struct nicvf *pnicvf;
struct net_device *netdev;
struct pci_dev *pdev;
+ void __iomem *reg_base;
+ struct queue_set *qs;
+ struct nicvf_cq_poll *napi[8];
u8 vf_id;
- u8 node;
- u8 tns_mode:1;
- u8 sqs_mode:1;
- u8 loopback_supported:1;
+ u8 sqs_id;
+ bool sqs_mode;
bool hw_tso;
- u16 mtu;
- struct queue_set *qs;
+
+ /* Receive buffer alloc */
+ u32 rb_page_offset;
+ u16 rb_pageref;
+ bool rb_alloc_fail;
+ bool rb_work_scheduled;
+ struct page *rb_page;
+ struct delayed_work rbdr_work;
+ struct tasklet_struct rbdr_task;
+
+ /* Secondary Qset */
+ u8 sqs_count;
#define MAX_SQS_PER_VF_SINGLE_NODE 5
#define MAX_SQS_PER_VF 11
- u8 sqs_id;
- u8 sqs_count; /* Secondary Qset count */
struct nicvf *snicvf[MAX_SQS_PER_VF];
+
+ /* Queue count */
u8 rx_queues;
u8 tx_queues;
u8 max_queues;
- void __iomem *reg_base;
+
+ u8 node;
+ u8 cpi_alg;
+ u16 mtu;
bool link_up;
u8 duplex;
u32 speed;
- struct page *rb_page;
- u32 rb_page_offset;
- u16 rb_pageref;
- bool rb_alloc_fail;
- bool rb_work_scheduled;
- struct delayed_work rbdr_work;
- struct tasklet_struct rbdr_task;
- struct tasklet_struct qs_err_task;
- struct tasklet_struct cq_task;
- struct nicvf_cq_poll *napi[8];
+ bool tns_mode;
+ bool loopback_supported;
struct nicvf_rss_info rss_info;
- u8 cpi_alg;
+ struct tasklet_struct qs_err_task;
+ struct work_struct reset_task;
+
/* Interrupt coalescing settings */
u32 cq_coalesce_usecs;
-
u32 msg_enable;
+
+ /* Stats */
struct nicvf_hw_stats hw_stats;
struct nicvf_drv_stats drv_stats;
struct bgx_stats bgx_stats;
- struct work_struct reset_task;
/* MSI-X */
bool msix_enabled;
--
1.7.1