From 3533b9e66c5144844a0b0864d0f57f43d57aea1a Mon Sep 17 00:00:00 2001
From: Thomas Schlichter
Date: Thu, 8 Oct 2009 21:24:07 +0200
Subject: [PATCH 2/2] Use MTRR for write combining mmap/ioremap if PAT is not available

X.org uses libpciaccess, which tries to mmap PCI resources with write
combining enabled via /sys/bus/pci/devices/*/resource0_wc.  Currently,
when PAT is not enabled, we silently fall back to an uncached mmap.
libpciaccess then thinks it succeeded in mapping with write combining
enabled and does not set up suitable MTRR entries itself. ;-(

So when falling back to an uncached mapping, we had better set up
matching MTRR entries automatically.  To match this modified PCI mmap
behavior, ioremap_wc() and set_memory_wc() are adjusted accordingly.

Signed-off-by: Thomas Schlichter
---
 arch/x86/mm/ioremap.c  |   15 ++++++++++-----
 arch/x86/mm/pageattr.c |   10 ++++++++--
 arch/x86/pci/i386.c    |    6 ++++++
 3 files changed, 24 insertions(+), 7 deletions(-)

diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 334e63c..abe40fa 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include
 
 #include "physaddr.h"
@@ -268,11 +269,15 @@
  */
 void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
 {
-        if (pat_enabled)
-                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
-                                        __builtin_return_address(0));
-        else
-                return ioremap_nocache(phys_addr, size);
+        if (!pat_enabled) {
+                void __iomem *ret = ioremap_nocache(phys_addr, size);
+                if (ret)
+                        mtrr_add_unaligned(phys_addr, size,
+                                           MTRR_TYPE_WRCOMB, false);
+                return ret;
+        }
+        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
+                                __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_wc);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index dd38bfb..c25f697 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -23,6 +23,7 @@
 #include
 #include
 #include
+#include
 
 /*
  * The current flushing context - we pass it instead of 5 arguments:
@@ -1010,8 +1011,13 @@ int set_memory_wc(unsigned long addr, int numpages)
 {
         int ret;
 
-        if (!pat_enabled)
-                return set_memory_uc(addr, numpages);
+        if (!pat_enabled) {
+                ret = set_memory_uc(addr, numpages);
+                if (!ret)
+                        mtrr_add_unaligned(__pa(addr), numpages * PAGE_SIZE,
+                                           MTRR_TYPE_WRCOMB, false);
+                return ret;
+        }
 
         ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
                 _PAGE_CACHE_WC, NULL);
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index b22d13b..8379e9b 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -33,6 +33,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -301,5 +302,10 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 
         vma->vm_ops = &pci_mmap_ops;
 
+        if (!pat_enabled && write_combine)
+                mtrr_add_unaligned(vma->vm_pgoff << PAGE_SHIFT,
+                                   vma->vm_end - vma->vm_start,
+                                   MTRR_TYPE_WRCOMB, false);
+
         return 0;
 }
-- 
1.6.5
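
For reference, this is roughly the userspace side the changelog describes: an X
driver using libpciaccess ends up mmap()ing the device's resource0_wc sysfs
file, and that mmap() reaches pci_mmap_page_range() with write_combine set.
The sketch below is illustrative only and not part of the patch; the device
address 0000:01:00.0 and the one-page length are made up.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        /* hypothetical device; a real driver gets the path from libpciaccess */
        const char *path = "/sys/bus/pci/devices/0000:01:00.0/resource0_wc";
        size_t len = 4096;      /* map just one page of BAR 0 for the example */
        void *p;
        int fd = open(path, O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /*
         * Without PAT this mapping used to fall back to uncached silently;
         * with the patch above an MTRR entry is added so the caller really
         * gets write combining.
         */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                close(fd);
                return 1;
        }

        /* ... write to the BAR through p ... */

        munmap(p, len);
        close(fd);
        return 0;
}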