Message-ID: <159675713636.3192.8550558633124756003.tip-bot2@tip-bot2>
Date: Thu, 06 Aug 2020 23:38:56 -0000
From: "tip-bot2 for Arvind Sankar" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Arvind Sankar <nivedita@...m.mit.edu>,
Ingo Molnar <mingo@...nel.org>, x86 <x86@...nel.org>,
LKML <linux-kernel@...r.kernel.org>
Subject: [tip: x86/kaslr] x86/kaslr: Simplify process_gb_huge_pages()

The following commit has been merged into the x86/kaslr branch of tip:

Commit-ID: be9e8d9541a95bdfac1c13d112cc032ea7fc745f
Gitweb: https://git.kernel.org/tip/be9e8d9541a95bdfac1c13d112cc032ea7fc745f
Author: Arvind Sankar <nivedita@...m.mit.edu>
AuthorDate: Tue, 28 Jul 2020 18:57:13 -04:00
Committer: Ingo Molnar <mingo@...nel.org>
CommitterDate: Fri, 31 Jul 2020 11:08:17 +02:00

x86/kaslr: Simplify process_gb_huge_pages()

Replace the loop to determine the number of 1GB pages with arithmetic.

Signed-off-by: Arvind Sankar <nivedita@...m.mit.edu>
Signed-off-by: Ingo Molnar <mingo@...nel.org>
Link: https://lore.kernel.org/r/20200728225722.67457-13-nivedita@alum.mit.edu
---
 arch/x86/boot/compressed/kaslr.c | 47 +++++++++++++------------------
 1 file changed, 21 insertions(+), 26 deletions(-)

diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 3727e97..00ef84b 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -547,49 +547,44 @@ static void store_slot_info(struct mem_vector *region, unsigned long image_size)
 static void
 process_gb_huge_pages(struct mem_vector *region, unsigned long image_size)
 {
-	unsigned long addr, size = 0;
+	unsigned long pud_start, pud_end, gb_huge_pages;
 	struct mem_vector tmp;
-	int i = 0;
 
 	if (!IS_ENABLED(CONFIG_X86_64) || !max_gb_huge_pages) {
 		store_slot_info(region, image_size);
 		return;
 	}
 
-	addr = ALIGN(region->start, PUD_SIZE);
-	/* Did we raise the address above the passed in memory entry? */
-	if (addr < region->start + region->size)
-		size = region->size - (addr - region->start);
-
-	/* Check how many 1GB huge pages can be filtered out: */
-	while (size >= PUD_SIZE && max_gb_huge_pages) {
-		size -= PUD_SIZE;
-		max_gb_huge_pages--;
-		i++;
-	}
+	/* Are there any 1GB pages in the region? */
+	pud_start = ALIGN(region->start, PUD_SIZE);
+	pud_end = ALIGN_DOWN(region->start + region->size, PUD_SIZE);
 
 	/* No good 1GB huge pages found: */
-	if (!i) {
+	if (pud_start >= pud_end) {
 		store_slot_info(region, image_size);
 		return;
 	}
 
-	/*
-	 * Skip those 'i'*1GB good huge pages, and continue checking and
-	 * processing the remaining head or tail part of the passed region
-	 * if available.
-	 */
-
-	if (addr >= region->start + image_size) {
+	/* Check if the head part of the region is usable. */
+	if (pud_start >= region->start + image_size) {
 		tmp.start = region->start;
-		tmp.size = addr - region->start;
+		tmp.size = pud_start - region->start;
 		store_slot_info(&tmp, image_size);
 	}
 
-	size = region->size - (addr - region->start) - i * PUD_SIZE;
-	if (size >= image_size) {
-		tmp.start = addr + i * PUD_SIZE;
-		tmp.size = size;
+	/* Skip the good 1GB pages. */
+	gb_huge_pages = (pud_end - pud_start) >> PUD_SHIFT;
+	if (gb_huge_pages > max_gb_huge_pages) {
+		pud_end = pud_start + (max_gb_huge_pages << PUD_SHIFT);
+		max_gb_huge_pages = 0;
+	} else {
+		max_gb_huge_pages -= gb_huge_pages;
+	}
+
+	/* Check if the tail part of the region is usable. */
+	if (region->start + region->size >= pud_end + image_size) {
+		tmp.start = pud_end;
+		tmp.size = region->start + region->size - pud_end;
 		store_slot_info(&tmp, image_size);
 	}
 }
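
[Editor's note] For readers who want to try the arithmetic outside the kernel, below is a small standalone sketch (not part of the patch) of the same computation. PUD_SHIFT, PUD_SIZE, ALIGN() and ALIGN_DOWN() are redefined locally with their usual x86-64 values so the example builds on its own, and the region numbers are made up for illustration.

/*
 * Standalone sketch (not kernel code) of the arithmetic used in the hunk
 * above: align the region start up and the region end down to 1GB
 * boundaries, then count whole 1GB pages with a shift instead of a loop.
 */
#include <stdio.h>

#define PUD_SHIFT		30
#define PUD_SIZE		(1UL << PUD_SHIFT)		/* 1GB */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))	/* round up   */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))		/* round down */

int main(void)
{
	/* Made-up example region: starts 256MB into memory, 5GB long. */
	unsigned long start = 256UL << 20;
	unsigned long size  = 5UL << 30;

	/* First and last 1GB boundaries that fall inside the region. */
	unsigned long pud_start = ALIGN(start, PUD_SIZE);
	unsigned long pud_end   = ALIGN_DOWN(start + size, PUD_SIZE);

	/* Whole 1GB pages between them, computed without a loop. */
	unsigned long gb_huge_pages = (pud_end > pud_start) ?
				      (pud_end - pud_start) >> PUD_SHIFT : 0;

	printf("head [%#lx, %#lx), %lu x 1GB pages, tail [%#lx, %#lx)\n",
	       start, pud_start, gb_huge_pages, pud_end, start + size);
	return 0;
}

For this example the sketch reports 4 whole 1GB pages; the hunk above additionally caps the number of pages it consumes against max_gb_huge_pages and stores the head and tail pieces of the region separately.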