Message-Id: <1357764194-12677-6-git-send-email-thierry.reding@avionic-design.de>
Date:	Wed,  9 Jan 2013 21:43:05 +0100
From:	Thierry Reding <thierry.reding@avionic-design.de>
To:	linux-tegra@vger.kernel.org
Cc:	Grant Likely <grant.likely@secretlab.ca>,
	Rob Herring <rob.herring@calxeda.com>,
	Russell King <linux@arm.linux.org.uk>,
	Stephen Warren <swarren@wwwdotorg.org>,
	Bjorn Helgaas <bhelgaas@google.com>,
	Andrew Murray <andrew.murray@arm.com>,
	Jason Gunthorpe <jgunthorpe@obsidianresearch.com>,
	Arnd Bergmann <arnd@arndb.de>,
	Thomas Petazzoni <thomas.petazzoni@free-electrons.com>,
	devicetree-discuss@lists.ozlabs.org, linux-kernel@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org, linux-pci@vger.kernel.org
Subject: [PATCH 05/14] lib: Add I/O map cache implementation

The I/O map cache maps a large region of physical memory in page-sized
chunks, of which only a bounded number are kept mapped at any one time,
to avoid running out of vmalloc()/ioremap() space.
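
For illustration, usage from a hypothetical caller could look roughly
like this (a sketch only, not part of this patch: the foo driver, its
memory resource and the 0x10000 offset are invented):

  #include <linux/err.h>
  #include <linux/io.h>
  #include <linux/ioport.h>
  #include <linux/platform_device.h>

  /* hypothetical driver, for illustration only */
  static int foo_probe(struct platform_device *pdev)
  {
          struct iomap_cache *cache;
          void __iomem *regs;
          struct resource *res;

          res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
          if (!res)
                  return -ENODEV;

          /* device-managed: the cache is freed when the driver detaches */
          cache = devm_iomap_cache_create(&pdev->dev, res);
          if (IS_ERR(cache))
                  return PTR_ERR(cache);

          /* only the page containing offset 0x10000 is actually mapped */
          regs = iomap_cache_map(cache, 0x10000);
          if (!regs)
                  return -ENOMEM;

          writel(0x1, regs);
          iomap_cache_unmap(cache, regs);

          return 0;
  }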

Signed-off-by: Thierry Reding <thierry.reding@avionic-design.de>
---
 include/linux/io.h |  12 +++
 lib/ioremap.c      | 269 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 281 insertions(+)
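
A sketch of the eviction behaviour, assuming max_pages were raised to 2
(the patch below hardcodes it to 1) and a 4 KiB page size; the offsets
are arbitrary:

  iomap_cache_map(cache, 0x0000); /* miss: page A mapped,     LRU: A    */
  iomap_cache_map(cache, 0x1000); /* miss: page B mapped,     LRU: A, B */
  iomap_cache_map(cache, 0x0800); /* hit in A, moved to tail, LRU: B, A */
  iomap_cache_map(cache, 0x2000); /* full: evict B, map C,    LRU: A, C */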

diff --git a/include/linux/io.h b/include/linux/io.h
index 069e407..c5d296c 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -76,4 +76,16 @@ void devm_ioremap_release(struct device *dev, void *res);
 #define arch_has_dev_port()     (1)
 #endif
 
+struct iomap_cache;
+struct resource;
+
+struct iomap_cache *iomap_cache_create(const struct resource *region);
+void iomap_cache_free(struct iomap_cache *cache);
+void __iomem *iomap_cache_map(struct iomap_cache *cache, unsigned long offset);
+void iomap_cache_unmap(struct iomap_cache *cache, void __iomem *addr);
+
+struct iomap_cache *devm_iomap_cache_create(struct device *dev,
+					    const struct resource *region);
+void devm_iomap_cache_free(struct device *dev, struct iomap_cache *cache);
+
 #endif /* _LINUX_IO_H */
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 0c9216c..8a13d97 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -5,11 +5,16 @@
  *
  * (C) Copyright 1995 1996 Linus Torvalds
  */
+
+#include <linux/device.h>
+#include <linux/err.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/io.h>
+#include <linux/ioport.h>
 #include <linux/export.h>
+#include <linux/slab.h>
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 
@@ -92,3 +97,267 @@ int ioremap_page_range(unsigned long addr,
 	return err;
 }
 EXPORT_SYMBOL_GPL(ioremap_page_range);
+
+/**
+ * struct iomap_cache_page - page in an I/O map cache
+ * @region: subregion mapped by the page
+ * @list: chain in cache list
+ * @virt: virtual address of mapped region
+ */
+struct iomap_cache_page {
+	struct resource *region;
+	struct list_head list;
+	void __iomem *virt;
+};
+
+static struct iomap_cache_page *iomap_cache_page_create(void)
+{
+	struct iomap_cache_page *page;
+
+	page = kzalloc(sizeof(*page), GFP_KERNEL);
+	if (!page)
+		return NULL;
+
+	INIT_LIST_HEAD(&page->list);
+
+	return page;
+}
+
+static void iomap_cache_page_unmap(struct iomap_cache_page *page)
+{
+	/* the page may already have been unmapped via iomap_cache_unmap() */
+	if (page->region)
+		release_resource(page->region);
+	page->region = NULL;
+
+	if (page->virt)
+		iounmap(page->virt);
+	page->virt = NULL;
+}
+
+static void iomap_cache_page_free(struct iomap_cache_page *page)
+{
+	iomap_cache_page_unmap(page);
+	list_del(&page->list);
+	kfree(page);
+}
+
+/**
+ * struct iomap_cache - cache of I/O mapped pages
+ * @region: region mapped by the cache
+ * @pages: list of pages in the cache
+ * @num_pages: number of pages in the cache
+ * @max_pages: maximum number of pages that the cache can map simultaneously
+ */
+struct iomap_cache {
+	struct resource region;
+	struct list_head pages;
+	unsigned int num_pages;
+	unsigned int max_pages;
+};
+
+/**
+ * iomap_cache_create() - create an I/O map cache
+ * @region: memory region to map
+ *
+ * Returns a new I/O map cache that can be used to map the given region on a
+ * page-by-page basis. On failure, an ERR_PTR()-encoded error code is returned.
+ */
+struct iomap_cache *iomap_cache_create(const struct resource *region)
+{
+	struct iomap_cache *cache;
+
+	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+	if (!cache)
+		return ERR_PTR(-ENOMEM);
+
+	memcpy(&cache->region, region, sizeof(*region));
+	INIT_LIST_HEAD(&cache->pages);
+	cache->num_pages = 0;
+	cache->max_pages = 1;
+
+	return cache;
+}
+
+/**
+ * iomap_cache_free() - free an I/O map cache
+ * @cache: I/O map cache
+ */
+void iomap_cache_free(struct iomap_cache *cache)
+{
+	struct iomap_cache_page *page, *tmp;
+
+	if (!cache)
+		return;
+
+	list_for_each_entry_safe(page, tmp, &cache->pages, list)
+		iomap_cache_page_free(page);
+
+	kfree(cache);
+}
+
+/**
+ * iomap_cache_map() - map a given offset in the cache's region
+ * @cache: I/O map cache
+ * @offset: offset into the cache's region of the address to map
+ *
+ * Returns the virtual address corresponding to @offset within the cache's
+ * region, or NULL if @offset is outside the region or if not enough memory
+ * is available to map the page.
+ */
+void __iomem *iomap_cache_map(struct iomap_cache *cache, unsigned long offset)
+{
+	struct iomap_cache_page *page;
+	struct resource *region;
+	unsigned long phys;
+
+	if (!cache || offset >= resource_size(&cache->region))
+		return NULL;
+
+	phys = cache->region.start + (offset & PAGE_MASK);
+
+	list_for_each_entry(page, &cache->pages, list) {
+		resource_size_t start, end;
+
+		if (!page->region || !page->virt)
+			continue;
+
+		start = page->region->start - cache->region.start;
+		end = page->region->end - cache->region.start;
+
+		/* address is within an already mapped page */
+		if (offset >= start && offset <= end) {
+			/* move page to end of the LRU list */
+			list_del_init(&page->list);
+			list_add_tail(&page->list, &cache->pages);
+			goto out;
+		}
+	}
+
+	/* find an unmapped page */
+	list_for_each_entry(page, &cache->pages, list) {
+		if (!page->region || !page->virt) {
+			list_del_init(&page->list);
+			break;
+		}
+	}
+
+	/* no unmapped page found */
+	if (&page->list == &cache->pages) {
+		/* add a new page if more space is available */
+		if (cache->num_pages < cache->max_pages) {
+			page = iomap_cache_page_create();
+			if (!page)
+				return NULL;
+
+			cache->num_pages++;
+		} else {
+			/*
+			 * If all pages are in use and there's no space left
+			 * for a new one, evict the first page in the list.
+			 */
+			page = list_first_entry(&cache->pages,
+						struct iomap_cache_page,
+						list);
+			iomap_cache_page_unmap(page);
+			list_del_init(&page->list);
+		}
+	}
+
+	/* insert page at the end of the LRU list */
+	list_add_tail(&page->list, &cache->pages);
+
+	region = __request_region(&cache->region, phys, PAGE_SIZE, NULL,
+				  cache->region.flags);
+	if (!region)
+		return NULL;
+
+	page->virt = ioremap(region->start, resource_size(region));
+	if (!page->virt) {
+		release_resource(region);
+		return NULL;
+	}
+
+	page->region = region;
+
+out:
+	return page->virt + (offset & ~PAGE_MASK);
+}
+
+/**
+ * iomap_cache_unmap() - remove a mapping from the cache
+ * @cache: I/O map cache
+ * @addr: virtual address of the mapping to remove
+ */
+void iomap_cache_unmap(struct iomap_cache *cache, void __iomem *addr)
+{
+	struct iomap_cache_page *page;
+
+	if (!cache)
+		return;
+
+	list_for_each_entry(page, &cache->pages, list) {
+		if (page->virt == addr) {
+			iomap_cache_page_unmap(page);
+			break;
+		}
+	}
+}
+
+static void devm_iomap_cache_release(struct device *dev, void *res)
+{
+	iomap_cache_free(*(struct iomap_cache **)res);
+}
+
+static int devm_iomap_cache_match(struct device *dev, void *res, void *data)
+{
+	struct iomap_cache **p = res;
+
+	if (WARN_ON(!p || !*p))
+		return 0;
+
+	return *p == data;
+}
+
+/**
+ * devm_iomap_cache_create() - create an I/O map cache
+ * @dev: device to attach this I/O map cache to
+ * @region: memory region to map
+ *
+ * Returns a new I/O map cache that can be used to map the given region on a
+ * page-by-page basis. On failure, an ERR_PTR()-encoded error code is returned.
+ *
+ * This function is the device-managed version of iomap_cache_create(); the
+ * cache is automatically freed when the device disappears.
+ */
+struct iomap_cache *devm_iomap_cache_create(struct device *dev,
+					    const struct resource *region)
+{
+	struct iomap_cache **ptr, *cache;
+
+	ptr = devres_alloc(devm_iomap_cache_release, sizeof(**ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	cache = iomap_cache_create(region);
+	if (IS_ERR(cache)) {
+		devres_free(ptr);
+		return cache;
+	}
+
+	*ptr = cache;
+	devres_add(dev, ptr);
+
+	return cache;
+}
+
+/**
+ * devm_iomap_cache_free() - free an I/O map cache
+ * @dev: device that this I/O map cache was attached to
+ * @cache: I/O map cache
+ */
+void devm_iomap_cache_free(struct device *dev, struct iomap_cache *cache)
+{
+	WARN_ON(devres_release(dev, devm_iomap_cache_release,
+			       devm_iomap_cache_match, cache));
+}
-- 
1.8.1
