Message-Id: <20200908183909.4156744-5-awogbemila@google.com>
Date: Tue, 8 Sep 2020 11:39:04 -0700
From: David Awogbemila <awogbemila@...gle.com>
To: netdev@...r.kernel.org
Cc: Catherine Sullivan <csully@...gle.com>,
Yangchun Fu <yangchun@...gle.com>,
David Awogbemila <awogbemila@...gle.com>
Subject: [PATCH net-next v3 4/9] gve: Add support for dma_mask register
From: Catherine Sullivan <csully@...gle.com>
Add the dma_mask register and read it to set the device's DMA masks.

gve_alloc_page will now allocate pages with GFP_DMA32 when
priv->dma_mask is 32.

This helps in 32-bit device-address cases where the guest would
otherwise run out of SWIOTLB space: buffers allocated with GFP_DMA32
already sit below the 4GB boundary, so there is less pressure on the
SWIOTLB.
Reviewed-by: Yangchun Fu <yangchun@...gle.com>
Signed-off-by: Catherine Sullivan <csully@...gle.com>
Signed-off-by: David Awogbemila <awogbemila@...gle.com>
---
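For context, a minimal self-contained sketch of the two pieces this
patch adds, using hypothetical stand-ins (struct gve_dev,
gve_dev_read_dma_mask() and gve_dev_alloc_page()) for struct gve_priv
and gve_alloc_page(); the real change is in the diff below. The mask
width is read from a new device register (defaulting to 64 for devices
that don't set it), and a 32-bit width steers page allocations below
the 4GB boundary so DMA need not bounce through SWIOTLB:

#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical stand-in for struct gve_priv. */
struct gve_dev {
	u8 dma_mask;	/* DMA mask width in bits, e.g. 32 or 64 */
};

/* Record the device-reported mask width; older devices leave the
 * register zeroed, in which case we default to 64 bits.
 */
static void gve_dev_read_dma_mask(struct gve_dev *gdev, u8 __iomem *reg)
{
	gdev->dma_mask = readb(reg) ?: 64;
}

/* Allocate one page honoring the device's addressing limit.
 * GFP_DMA32 keeps the page below the 4GB boundary, so a 32-bit
 * device can DMA to it directly instead of bouncing via SWIOTLB.
 */
static struct page *gve_dev_alloc_page(struct gve_dev *gdev)
{
	gfp_t gfp_flags = GFP_KERNEL;

	if (gdev->dma_mask == 32)
		gfp_flags |= GFP_DMA32;

	return alloc_page(gfp_flags);
}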
drivers/net/ethernet/google/gve/gve.h | 6 ++-
drivers/net/ethernet/google/gve/gve_main.c | 40 ++++++++++++-------
.../net/ethernet/google/gve/gve_register.h | 3 +-
3 files changed, 32 insertions(+), 17 deletions(-)
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 55b34b437764..37a3bbced36a 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -232,6 +232,9 @@ struct gve_priv {
struct work_struct service_task;
unsigned long service_task_flags;
unsigned long state_flags;
+
+ /* gVNIC device's DMA mask width in bits, set during probe. */
+ u8 dma_mask;
};
enum gve_service_task_flags {
@@ -451,8 +454,7 @@ static inline bool gve_can_recycle_pages(struct net_device *dev)
}
/* buffers */
-int gve_alloc_page(struct gve_priv *priv, struct device *dev,
- struct page **page, dma_addr_t *dma,
+int gve_alloc_page(struct gve_priv *priv, struct device *dev, struct page **page, dma_addr_t *dma,
enum dma_data_direction);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 0fc68f844edf..c69ec044f47c 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -518,7 +518,12 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
enum dma_data_direction dir)
{
- *page = alloc_page(GFP_KERNEL);
+ gfp_t gfp_flags = GFP_KERNEL;
+
+ if (priv->dma_mask == 32)
+ gfp_flags |= GFP_DMA32;
+
+ *page = alloc_page(gfp_flags);
if (!*page) {
priv->page_alloc_fail++;
return -ENOMEM;
@@ -1083,6 +1088,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
__be32 __iomem *db_bar;
struct gve_registers __iomem *reg_bar;
struct gve_priv *priv;
+ u8 dma_mask;
int err;
err = pci_enable_device(pdev);
@@ -1095,19 +1101,6 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
- goto abort_with_pci_region;
- }
-
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pdev->dev,
- "Failed to set consistent dma mask: err=%d\n", err);
- goto abort_with_pci_region;
- }
-
reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
if (!reg_bar) {
dev_err(&pdev->dev, "Failed to map pci bar!\n");
@@ -1122,10 +1115,28 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto abort_with_reg_bar;
}
+ dma_mask = readb(&reg_bar->dma_mask);
+ /* Default to 64 if the register isn't set */
+ if (!dma_mask)
+ dma_mask = 64;
gve_write_version(&reg_bar->driver_version);
/* Get max queues to alloc etherdev */
max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
+ goto abort_with_reg_bar;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to set consistent dma mask: err=%d\n", err);
+ goto abort_with_reg_bar;
+ }
+
/* Alloc and setup the netdev and priv */
dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
if (!dev) {
@@ -1158,6 +1169,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->db_bar2 = db_bar;
priv->service_task_flags = 0x0;
priv->state_flags = 0x0;
+ priv->dma_mask = dma_mask;
gve_set_probe_in_progress(priv);
priv->gve_wq = alloc_ordered_workqueue("gve", 0);
diff --git a/drivers/net/ethernet/google/gve/gve_register.h b/drivers/net/ethernet/google/gve/gve_register.h
index 84ab8893aadd..fad8813d1bb1 100644
--- a/drivers/net/ethernet/google/gve/gve_register.h
+++ b/drivers/net/ethernet/google/gve/gve_register.h
@@ -16,7 +16,8 @@ struct gve_registers {
__be32 adminq_pfn;
__be32 adminq_doorbell;
__be32 adminq_event_counter;
- u8 reserved[3];
+ u8 reserved[2];
+ u8 dma_mask;
u8 driver_version;
};
--
2.28.0.526.ge36021eeef-goog