Message-ID: <20080429234309.603251ff@daedalus.pq.iki.fi>
Date: Tue, 29 Apr 2008 23:43:09 +0300
From: Pekka Paalanen <pq@....fi>
To: Ingo Molnar <mingo@...e.hu>
Cc: Pekka Paalanen <pq@....fi>, linux-kernel@...r.kernel.org,
Christoph Hellwig <hch@...radead.org>,
Arjan van de Ven <arjan@...radead.org>,
Pavel Roskin <proski@....org>,
Steven Rostedt <rostedt@...dmis.org>,
Peter Zijlstra <a.p.zijlstra@...llo.nl>,
Andrew Morton <akpm@...ux-foundation.org>
Subject: [PATCH 1/3] x86 mmiotrace: fix page-unaligned ioremaps

From c306ab1bc1f40e74f7015a498784e1a39ec6229b Mon Sep 17 00:00:00 2001
From: Pekka Paalanen <pq@....fi>
Date: Tue, 29 Apr 2008 22:39:40 +0300
Subject: [PATCH] x86 mmiotrace: fix page-unaligned ioremaps

mmiotrace_ioremap() expects to receive the original, page-unaligned physical
address and size of the mapping. Also fix {un,}register_kmmio_probe() to
properly handle page-unaligned sizes.

Signed-off-by: Pekka Paalanen <pq@....fi>
---
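
Note for reviewers (kept below the '---' so it stays out of the commit
message): __ioremap_caller() rewrites phys_addr and size to whole pages
just before mapping, which is why the unaligned originals have to be
saved up front. Roughly, the pre-existing alignment code is:

	offset = phys_addr & ~PAGE_MASK;		/* offset within the first page */
	phys_addr &= PAGE_MASK;				/* align the base down */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;	/* round the length up */

The first hunk below then hands the saved values to mmiotrace_ioremap()
instead of the rewritten ones.
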
arch/x86/mm/ioremap.c | 4 +++-
arch/x86/mm/kmmio.c | 13 +++++++++++--
arch/x86/mm/mmio-mod.c | 1 +
3 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index ce73980..923621d 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -123,6 +123,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
{
unsigned long pfn, offset, vaddr;
resource_size_t last_addr;
+ const resource_size_t unaligned_phys_addr = phys_addr;
+ const unsigned long unaligned_size = size;
struct vm_struct *area;
unsigned long new_prot_val;
pgprot_t prot;
@@ -235,7 +237,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
}
ret_addr = (void __iomem *) (vaddr + offset);
- mmiotrace_ioremap(phys_addr, size, ret_addr);
+ mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
return ret_addr;
}
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 40567d8..c12be07 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -352,11 +352,19 @@ static void release_kmmio_fault_page(unsigned long page,
}
}
+/*
+ * With page-unaligned ioremaps, one or two armed pages may contain
+ * addresses from outside the intended mapping. Events for these
+ * addresses are currently silently dropped. Such events can only result
+ * from programming mistakes: accesses before the beginning or past the
+ * end of a mapping.
+ */
int register_kmmio_probe(struct kmmio_probe *p)
{
unsigned long flags;
int ret = 0;
unsigned long size = 0;
+ const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
spin_lock_irqsave(&kmmio_lock, flags);
if (get_kmmio_probe(p->addr)) {
@@ -365,7 +373,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
}
kmmio_count++;
list_add_rcu(&p->list, &kmmio_probes);
- while (size < p->len) {
+ while (size < size_lim) {
if (add_kmmio_fault_page(p->addr + size))
pr_err("kmmio: Unable to set page fault.\n");
size += PAGE_SIZE;
@@ -437,11 +445,12 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
{
unsigned long flags;
unsigned long size = 0;
+ const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
struct kmmio_fault_page *release_list = NULL;
struct kmmio_delayed_release *drelease;
spin_lock_irqsave(&kmmio_lock, flags);
- while (size < p->len) {
+ while (size < size_lim) {
release_kmmio_fault_page(p->addr + size, &release_list);
size += PAGE_SIZE;
}
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index a8d2a00..278998c 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -280,6 +280,7 @@ static void ioremap_trace_core(unsigned long offset, unsigned long size,
{
static atomic_t next_id;
struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
+ /* These are page-unaligned. */
struct mmiotrace_map map = {
.phys = offset,
.virt = (unsigned long)addr,
--
1.5.3.7
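
P.S. To illustrate the new size_lim arithmetic with a made-up example,
assuming PAGE_SIZE = 4096 (PAGE_MASK = ~0xfffUL):

	/*
	 * p->addr = 0xd0001e00, p->len = 0x400
	 *
	 * The region spans two pages, but the old loop condition
	 * (size < p->len) would stop after arming only the first:
	 *
	 *   size_lim = p->len + (p->addr & ~PAGE_MASK)
	 *            = 0x400 + 0xe00
	 *            = 0x1200
	 *
	 * With size_lim the loop arms the pages containing 0xd0001e00
	 * and 0xd0002e00, i.e. both pages the mapping touches.
	 */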