Date:	Thu, 30 Apr 2015 14:11:49 +0200
From:	Jiri Slaby <jslaby@...e.cz>
To:	stable@...r.kernel.org
Cc:	linux-kernel@...r.kernel.org, Marc Zyngier <marc.zyngier@....com>,
	Shannon Zhao <shannon.zhao@...aro.org>,
	Jiri Slaby <jslaby@...e.cz>
Subject: [PATCH 3.12 20/63] ARM: KVM: introduce kvm_p*d_addr_end

From: Marc Zyngier <marc.zyngier@....com>

3.12-stable review patch.  If anyone has any objections, please let me know.

===============

commit a3c8bd31af260a17d626514f636849ee1cd1f63e upstream.

The use of p*d_addr_end with stage-2 translation is slightly dodgy,
as the IPA is 40 bits, while all the p*d_addr_end helpers take an
unsigned long (arm64 is fine with that, as unsigned long is 64 bits
there).

The fix is to introduce 64-bit clean versions of the same helpers,
and to use them in the stage-2 page table code.
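
To make the truncation concrete, here is a minimal userspace sketch (not part
of the patch; the 2 MiB PMD_SIZE and the addresses are stand-in values chosen
for illustration). With 32-bit arithmetic the helper silently drops bit 32 of
the IPA and returns a boundary 4 GiB below the real one; the u64 version
returns the expected boundary.

/*
 * Illustration only -- not part of this patch.  Shows how a 40-bit IPA
 * is truncated when an addr_end-style helper does its arithmetic in a
 * 32-bit type (ARM's unsigned long), and how a u64-based helper keeps
 * all 40 bits.  PMD_SIZE and the addresses below are stand-in values.
 */
#include <stdio.h>
#include <stdint.h>

#define PMD_SIZE	(1ULL << 21)		/* 2 MiB sections (stand-in) */
#define PMD_MASK	(~(PMD_SIZE - 1))

/* Mimics the generic pmd_addr_end(), forced to 32-bit arithmetic. */
static uint32_t pmd_addr_end_32(uint32_t addr, uint32_t end)
{
	uint32_t boundary = (addr + PMD_SIZE) & PMD_MASK;

	return (boundary - 1 < end - 1) ? boundary : end;
}

/* 64-bit clean version, in the spirit of kvm_pmd_addr_end() below. */
static uint64_t kvm_pmd_addr_end_64(uint64_t addr, uint64_t end)
{
	uint64_t boundary = (addr + PMD_SIZE) & PMD_MASK;

	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	uint64_t ipa = 0x1200ff000ULL;	/* a 40-bit guest IPA */
	uint64_t end = 0x121000000ULL;

	/* Passing the IPA through a 32-bit type silently drops bit 32. */
	printf("32-bit helper: 0x%llx\n",
	       (unsigned long long)pmd_addr_end_32((uint32_t)ipa, (uint32_t)end));
	printf("64-bit helper: 0x%llx\n",
	       (unsigned long long)kvm_pmd_addr_end_64(ipa, end));
	return 0;
}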

Signed-off-by: Marc Zyngier <marc.zyngier@....com>
Acked-by: Catalin Marinas <catalin.marinas@....com>
Reviewed-by: Christoffer Dall <christoffer.dall@...aro.org>
Signed-off-by: Shannon Zhao <shannon.zhao@...aro.org>
Signed-off-by: Jiri Slaby <jslaby@...e.cz>
---
 arch/arm/include/asm/kvm_mmu.h   | 13 +++++++++++++
 arch/arm/kvm/mmu.c               |  8 ++++----
 arch/arm64/include/asm/kvm_mmu.h |  4 ++++
 3 files changed, 21 insertions(+), 4 deletions(-)

diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index ba285d7c7c57..5c946dfdcb94 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -103,6 +103,19 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
 	pte_val(*pte) |= L_PTE_S2_RDWR;
 }
 
+/* Open coded p*d_addr_end that can deal with 64bit addresses */
+#define kvm_pgd_addr_end(addr, end)                                    \
+({	u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;            \
+	(__boundary - 1 < (end) - 1)? __boundary: (end);                \
+})
+
+#define kvm_pud_addr_end(addr,end)             (end)
+
+#define kvm_pmd_addr_end(addr, end)                                    \
+({	u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;                \
+	(__boundary - 1 < (end) - 1)? __boundary: (end);                \
+})
+
 struct kvm;
 
 static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 9e9260138ca0..e747dc10c033 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -134,13 +134,13 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 		pgd = pgdp + pgd_index(addr);
 		pud = pud_offset(pgd, addr);
 		if (pud_none(*pud)) {
-			addr = pud_addr_end(addr, end);
+			addr = kvm_pud_addr_end(addr, end);
 			continue;
 		}
 
 		pmd = pmd_offset(pud, addr);
 		if (pmd_none(*pmd)) {
-			addr = pmd_addr_end(addr, end);
+			addr = kvm_pmd_addr_end(addr, end);
 			continue;
 		}
 
@@ -151,10 +151,10 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 		/* If we emptied the pte, walk back up the ladder */
 		if (page_empty(pte)) {
 			clear_pmd_entry(kvm, pmd, addr);
-			next = pmd_addr_end(addr, end);
+			next = kvm_pmd_addr_end(addr, end);
 			if (page_empty(pmd) && !page_empty(pud)) {
 				clear_pud_entry(kvm, pud, addr);
-				next = pud_addr_end(addr, end);
+				next = kvm_pud_addr_end(addr, end);
 			}
 		}
 
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 99229a613cd1..802bd971f1de 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -115,6 +115,10 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
 	pte_val(*pte) |= PTE_S2_RDWR;
 }
 
+#define kvm_pgd_addr_end(addr, end)    pgd_addr_end(addr, end)
+#define kvm_pud_addr_end(addr, end)    pud_addr_end(addr, end)
+#define kvm_pmd_addr_end(addr, end)    pmd_addr_end(addr, end)
+
 struct kvm;
 
 #define kvm_flush_dcache_to_poc(a,l)   __flush_dcache_area((a), (l))
-- 
2.3.5

