Message-ID: <tip-2a3746984c98b17b565e6a2c2bbaaaef757db1b4@git.kernel.org>
Date: Sun, 16 Nov 2014 02:56:45 -0800
From: tip-bot for Juergen Gross <tipbot@...or.com>
To: linux-tip-commits@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, jgross@...e.com, tglx@...utronix.de,
mingo@...nel.org, stefan.bader@...onical.com, hpa@...or.com
Subject: [tip:x86/mm] x86: Use new cache mode type in track_pfn_remap()
and track_pfn_insert()
Commit-ID: 2a3746984c98b17b565e6a2c2bbaaaef757db1b4
Gitweb: http://git.kernel.org/tip/2a3746984c98b17b565e6a2c2bbaaaef757db1b4
Author: Juergen Gross <jgross@...e.com>
AuthorDate: Mon, 3 Nov 2014 14:01:55 +0100
Committer: Thomas Gleixner <tglx@...utronix.de>
CommitDate: Sun, 16 Nov 2014 11:04:25 +0100
x86: Use new cache mode type in track_pfn_remap() and track_pfn_insert()
Instead of directly using the cache mode bits in the pte, switch to
using the cache mode type. As these functions are the main callers of
lookup_memtype(), change its return type to the cache mode type as well.
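
For readers who have not followed the earlier patches in this series, the
stand-alone sketch below illustrates the pattern the description refers to:
callers carry an abstract enum page_cache_mode and only translate to and
from PTE cache bits at the boundary. It is a simplified userspace model
with assumed bit values and hand-rolled helpers mirroring the idea behind
pgprot2cachemode()/cachemode2protval(); it is not the kernel implementation
and not part of this patch.

/*
 * Simplified sketch of the cache mode abstraction: assumed bit values,
 * not the kernel's tables or helpers.
 */
#include <stdio.h>

/* Assumed, simplified PTE cache attribute bits for illustration only */
#define MY_PAGE_PWT        0x1UL
#define MY_PAGE_PCD        0x2UL
#define MY_PAGE_CACHE_MASK (MY_PAGE_PWT | MY_PAGE_PCD)

/* Abstract cache mode type: callers reason in these terms, not raw bits */
enum page_cache_mode {
	_PAGE_CACHE_MODE_WB = 0,
	_PAGE_CACHE_MODE_WC,
	_PAGE_CACHE_MODE_UC_MINUS,
	_PAGE_CACHE_MODE_UC,
	_PAGE_CACHE_MODE_NUM
};

/* Translation table between abstract modes and the simplified PTE bits */
static const unsigned long cachemode_to_pte[_PAGE_CACHE_MODE_NUM] = {
	[_PAGE_CACHE_MODE_WB]       = 0,
	[_PAGE_CACHE_MODE_WC]       = MY_PAGE_PWT,
	[_PAGE_CACHE_MODE_UC_MINUS] = MY_PAGE_PCD,
	[_PAGE_CACHE_MODE_UC]       = MY_PAGE_PCD | MY_PAGE_PWT,
};

/* Convert an abstract cache mode to protection-value bits */
static unsigned long my_cachemode2protval(enum page_cache_mode pcm)
{
	return cachemode_to_pte[pcm];
}

/* Convert protection-value bits back to an abstract cache mode */
static enum page_cache_mode my_pgprot2cachemode(unsigned long protval)
{
	enum page_cache_mode pcm;

	for (pcm = 0; pcm < _PAGE_CACHE_MODE_NUM; pcm++)
		if (cachemode_to_pte[pcm] == (protval & MY_PAGE_CACHE_MASK))
			return pcm;
	return _PAGE_CACHE_MODE_UC_MINUS;	/* conservative fallback */
}

int main(void)
{
	/* A caller in the style of track_pfn_insert() holds a pcm ... */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;

	/* ... and only converts to PTE bits when building the final prot */
	unsigned long prot = my_cachemode2protval(pcm);

	printf("pcm=%d -> prot bits=%#lx -> pcm=%d\n",
	       pcm, prot, my_pgprot2cachemode(prot));
	return 0;
}

With this split, code such as track_pfn_remap() in the diff below can
compare cache modes directly (pcm != lookup_memtype(paddr)) and defer the
conversion to protection bits until the final __pgprot() is built.
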
Based-on-patch-by: Stefan Bader <stefan.bader@...onical.com>
Signed-off-by: Juergen Gross <jgross@...e.com>
Reviewed-by: Thomas Gleixner <tglx@...utronix.de>
Cc: stefan.bader@...onical.com
Cc: xen-devel@...ts.xensource.com
Cc: konrad.wilk@...cle.com
Cc: ville.syrjala@...ux.intel.com
Cc: david.vrabel@...rix.com
Cc: jbeulich@...e.com
Cc: toshi.kani@...com
Cc: plagnioj@...osoft.com
Cc: tomi.valkeinen@...com
Cc: bhelgaas@...gle.com
Link: http://lkml.kernel.org/r/1415019724-4317-10-git-send-email-jgross@suse.com
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
arch/x86/mm/pat.c | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 6d5a8e3..2f3744f 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -394,12 +394,12 @@ int free_memtype(u64 start, u64 end)
*
* Only to be called when PAT is enabled
*
- * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
- * _PAGE_CACHE_UC
+ * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
+ * or _PAGE_CACHE_MODE_UC
*/
-static unsigned long lookup_memtype(u64 paddr)
+static enum page_cache_mode lookup_memtype(u64 paddr)
{
- int rettype = _PAGE_CACHE_WB;
+ enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
struct memtype *entry;
if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
@@ -408,13 +408,13 @@ static unsigned long lookup_memtype(u64 paddr)
if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
struct page *page;
page = pfn_to_page(paddr >> PAGE_SHIFT);
- rettype = get_page_memtype(page);
+ rettype = pgprot2cachemode(__pgprot(get_page_memtype(page)));
/*
* -1 from get_page_memtype() implies RAM page is in its
* default state and not reserved, and hence of type WB
*/
if (rettype == -1)
- rettype = _PAGE_CACHE_WB;
+ rettype = _PAGE_CACHE_MODE_WB;
return rettype;
}
@@ -423,9 +423,9 @@ static unsigned long lookup_memtype(u64 paddr)
entry = rbt_memtype_lookup(paddr);
if (entry != NULL)
- rettype = entry->type;
+ rettype = pgprot2cachemode(__pgprot(entry->type));
else
- rettype = _PAGE_CACHE_UC_MINUS;
+ rettype = _PAGE_CACHE_MODE_UC_MINUS;
spin_unlock(&memtype_lock);
return rettype;
@@ -613,7 +613,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
if (!pat_enabled)
return 0;
- flags = lookup_memtype(paddr);
+ flags = cachemode2protval(lookup_memtype(paddr));
if (want_flags != flags) {
printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
current->comm, current->pid,
@@ -715,7 +715,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
unsigned long pfn, unsigned long addr, unsigned long size)
{
resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
- unsigned long flags;
+ enum page_cache_mode pcm;
/* reserve the whole chunk starting from paddr */
if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
@@ -734,18 +734,18 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
* For anything smaller than the vma size we set prot based on the
* lookup.
*/
- flags = lookup_memtype(paddr);
+ pcm = lookup_memtype(paddr);
/* Check memtype for the remaining pages */
while (size > PAGE_SIZE) {
size -= PAGE_SIZE;
paddr += PAGE_SIZE;
- if (flags != lookup_memtype(paddr))
+ if (pcm != lookup_memtype(paddr))
return -EINVAL;
}
*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
- flags);
+ cachemode2protval(pcm));
return 0;
}
@@ -753,15 +753,15 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
unsigned long pfn)
{
- unsigned long flags;
+ enum page_cache_mode pcm;
if (!pat_enabled)
return 0;
/* Set prot based on lookup */
- flags = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
+ pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
- flags);
+ cachemode2protval(pcm));
return 0;
}
--