Message-Id: <20230328095807.7014-7-songmuchun@bytedance.com>
Date:   Tue, 28 Mar 2023 17:58:07 +0800
From:   Muchun Song <songmuchun@...edance.com>
To:     glider@...gle.com, elver@...gle.com, dvyukov@...gle.com,
        akpm@...ux-foundation.org, jannh@...gle.com, sjpark@...zon.de,
        muchun.song@...ux.dev
Cc:     kasan-dev@...glegroups.com, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org,
        Muchun Song <songmuchun@...edance.com>
Subject: [PATCH 6/6] mm: kfence: replace ALIGN_DOWN(x, PAGE_SIZE) with PAGE_ALIGN_DOWN(x)

Replace ALIGN_DOWN(x, PAGE_SIZE) with PAGE_ALIGN_DOWN(x) to simplify
the code a bit.
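
For reference, the two spellings are equivalent: PAGE_ALIGN_DOWN() is just
ALIGN_DOWN() with PAGE_SIZE baked in. A minimal sketch of the relevant macro
definitions, assuming the then-current mainline headers:

	/* include/linux/align.h */
	#define ALIGN_DOWN(x, a)	__ALIGN_KERNEL((x) - ((a) - 1), (a))

	/* include/linux/mm.h */
	#define PAGE_ALIGN_DOWN(addr)	ALIGN_DOWN(addr, PAGE_SIZE)

So the conversion below changes readability only, not generated code.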

Signed-off-by: Muchun Song <songmuchun@...edance.com>
---
 mm/kfence/core.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index f205b860f460..dbfb79a4d624 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -230,17 +230,17 @@ static bool alloc_covered_contains(u32 alloc_stack_hash)
 
 static inline void kfence_protect(unsigned long addr)
 {
-	kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true);
+	kfence_protect_page(PAGE_ALIGN_DOWN(addr), true);
 }
 
 static inline void kfence_unprotect(unsigned long addr)
 {
-	kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false);
+	kfence_protect_page(PAGE_ALIGN_DOWN(addr), false);
 }
 
 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
 {
-	return ALIGN_DOWN(meta->addr, PAGE_SIZE);
+	return PAGE_ALIGN_DOWN(meta->addr);
 }
 
 /*
@@ -308,7 +308,7 @@ static inline bool check_canary_byte(u8 *addr)
 /* __always_inline this to ensure we won't do an indirect call to fn. */
 static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
 {
-	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
+	const unsigned long pageaddr = PAGE_ALIGN_DOWN(meta->addr);
 	unsigned long addr;
 
 	/*
@@ -455,7 +455,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
 	}
 
 	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
-	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
+	kcsan_begin_scoped_access((void *)PAGE_ALIGN_DOWN((unsigned long)addr), PAGE_SIZE,
 				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
 				  &assert_page_exclusive);
 
@@ -464,7 +464,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
 
 	/* Restore page protection if there was an OOB access. */
 	if (meta->unprotected_page) {
-		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
+		memzero_explicit((void *)PAGE_ALIGN_DOWN(meta->unprotected_page), PAGE_SIZE);
 		kfence_protect(meta->unprotected_page);
 		meta->unprotected_page = 0;
 	}
-- 
2.11.0
