Message-ID: <20191231100331.6316-4-kishon@ti.com>
Date: Tue, 31 Dec 2019 15:33:29 +0530
From: Kishon Vijay Abraham I <kishon@...com>
To: Kishon Vijay Abraham I <kishon@...com>,
Lorenzo Pieralisi <lorenzo.pieralisi@....com>,
Bjorn Helgaas <bhelgaas@...gle.com>,
Andrew Murray <andrew.murray@....com>
CC: <linux-pci@...r.kernel.org>, <linux-kernel@...r.kernel.org>
Subject: [PATCH 3/5] PCI: endpoint: Protect concurrent access to memory allocation with mutex
pci-epc-mem uses a bitmap to manage the PCI address region. This address
region will be shared by multiple endpoint functions (in the case of a
multi-function endpoint) and has to be protected from concurrent access
to avoid ending up in an inconsistent state. Use a mutex to protect
bitmap updates.
Signed-off-by: Kishon Vijay Abraham I <kishon@...com>
---
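For reference, a minimal, self-contained userspace sketch of the same
locking pattern this patch applies (a pthread mutex stands in for the
kernel struct mutex, a plain bit mask stands in for the bitmap_*()
helpers; all names in the sketch are illustrative only and are not part
of this patch):

    #include <pthread.h>
    #include <stdio.h>

    #define NPAGES 32

    static unsigned long bitmap;             /* one bit per page */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Find and claim a free page; return its number, or -1 if none left. */
    static int alloc_page(void)
    {
            int pageno = -1;
            int i;

            pthread_mutex_lock(&lock);
            for (i = 0; i < NPAGES; i++) {
                    if (!(bitmap & (1UL << i))) {
                            bitmap |= 1UL << i;      /* mark page as used */
                            pageno = i;
                            break;
                    }
            }
            pthread_mutex_unlock(&lock);

            return pageno;
    }

    /* Release a previously allocated page. */
    static void release_page(int pageno)
    {
            pthread_mutex_lock(&lock);
            bitmap &= ~(1UL << pageno);
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            int p = alloc_page();

            printf("allocated page %d\n", p);
            release_page(p);
            return 0;
    }

As in the patch, both the search-and-set and the clear of the bitmap
happen entirely under the lock, so two functions allocating at the same
time can never claim the same region.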
drivers/pci/endpoint/pci-epc-mem.c | 10 ++++++++--
include/linux/pci-epc.h | 3 +++
2 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
index d2b174ce15de..abfac1109a13 100644
--- a/drivers/pci/endpoint/pci-epc-mem.c
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -79,6 +79,7 @@ int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size,
mem->page_size = page_size;
mem->pages = pages;
mem->size = size;
+ mutex_init(&mem->lock);
epc->mem = mem;
@@ -122,7 +123,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
phys_addr_t *phys_addr, size_t size)
{
int pageno;
- void __iomem *virt_addr;
+ void __iomem *virt_addr = NULL;
struct pci_epc_mem *mem = epc->mem;
unsigned int page_shift = ilog2(mem->page_size);
int order;
@@ -130,15 +131,18 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
size = ALIGN(size, mem->page_size);
order = pci_epc_mem_get_order(mem, size);
+ mutex_lock(&mem->lock);
pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
if (pageno < 0)
- return NULL;
+ goto ret;
*phys_addr = mem->phys_base + ((phys_addr_t)pageno << page_shift);
virt_addr = ioremap(*phys_addr, size);
if (!virt_addr)
bitmap_release_region(mem->bitmap, pageno, order);
+ret:
+ mutex_unlock(&mem->lock);
return virt_addr;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
@@ -164,7 +168,9 @@ void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
pageno = (phys_addr - mem->phys_base) >> page_shift;
size = ALIGN(size, mem->page_size);
order = pci_epc_mem_get_order(mem, size);
+ mutex_lock(&mem->lock);
bitmap_release_region(mem->bitmap, pageno, order);
+ mutex_unlock(&mem->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr);
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 97c78464013c..42c2b14ea2b4 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -72,6 +72,7 @@ struct pci_epc_ops {
* @bitmap: bitmap to manage the PCI address space
* @pages: number of bits representing the address region
* @page_size: size of each page
+ * @lock: mutex to protect bitmap
*/
struct pci_epc_mem {
phys_addr_t phys_base;
@@ -79,6 +80,8 @@ struct pci_epc_mem {
unsigned long *bitmap;
size_t page_size;
int pages;
+ /* mutex to protect against concurrent access to memory allocation */
+ struct mutex lock;
};
/**
--
2.17.1