Message-ID: <20260120195407.1163051-8-hpa@zytor.com>
Date: Tue, 20 Jan 2026 11:53:59 -0800
From: "H. Peter Anvin" <hpa@...or.com>
To: Thomas Gleixner <tglx@...nel.org>, Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Uros Bizjak <ubizjak@...il.com>, Petr Mladek <pmladek@...e.com>,
Andrew Morton <akpm@...ux-foundation.org>, Kees Cook <kees@...nel.org>,
"Peter Zijlstra (Intel)" <peterz@...radead.org>,
Nathan Chancellor <nathan@...nel.org>,
Kiryl Shutsemau <kas@...nel.org>,
Rick Edgecombe <rick.p.edgecombe@...el.com>
Cc: "H. Peter Anvin" <hpa@...or.com>, linux-kernel@...r.kernel.org,
linux-coco@...ts.linux.dev, x86@...nel.org
Subject: [PATCH v1 07/14] x86/boot: factor out the 16-bit startup code from header.S
Move the 16-bit startup code into its own assembly file instead of
mixing it into header.S, which is now a pure data file.
This also means the .code16 directive is no longer needed in
header.S.
Signed-off-by: H. Peter Anvin (Intel) <hpa@...or.com>
---
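Not part of the patch: a minimal C sketch of the real-mode address
arithmetic described in the header comment of the new start16.S below
("segment addresses must be multiplied by 16 to obtain their respective
linear addresses"). The helper name and example values are illustrative
only.

	#include <stdint.h>
	#include <stdio.h>

	/* Real mode: linear address = segment * 16 + offset */
	static uint32_t linear(uint16_t seg, uint16_t off)
	{
		return ((uint32_t)seg << 4) + off;
	}

	int main(void)
	{
		/* e.g. SYSSEG (0x1000):0x0000 -> linear 0x10000 */
		printf("0x%05x\n", linear(0x1000, 0x0000));
		return 0;
	}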
arch/x86/boot/Makefile | 4 +-
arch/x86/boot/header.S | 91 +--------------------------------
arch/x86/boot/start16.S | 108 ++++++++++++++++++++++++++++++++++++++++
3 files changed, 112 insertions(+), 91 deletions(-)
create mode 100644 arch/x86/boot/start16.S
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 3f9fb3698d66..d7944bf196b9 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -22,8 +22,8 @@ subdir- := compressed
setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpuflags.o cpucheck.o
setup-y += early_serial_console.o edd.o header.o main.o memory.o
-setup-y += pm.o pmjump.o printf.o regs.o string.o tty.o video.o
-setup-y += video-mode.o version.o
+setup-y += pm.o pmjump.o printf.o regs.o start16.o string.o tty.o
+setup-y += video.o video-mode.o version.o
setup-$(CONFIG_X86_APM_BOOT) += apm.o
# The link order of the video-*.o modules can matter. In particular,
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index d74db02928e6..10b2971320f3 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -25,7 +25,6 @@
#include "voffset.h"
#include "zoffset.h"
-BOOTSEG = 0x07C0 /* original address of boot-sector */
SYSSEG = 0x1000 /* historical load address >> 4 */
#ifndef SVGA_MODE
@@ -39,8 +38,6 @@ SYSSEG = 0x1000 /* historical load address >> 4 */
.set salign, 0x1000
.set falign, 0x200
- .code16
-
# EFI PECOFF header ##########################################################
.section ".header", "a"
@@ -277,6 +274,7 @@ type_of_loader: .byte 0 # 0 means ancient bootloader, newer
# assigned ids
# flags, unused bits must be zero (RFU) bit within loadflags
+ .globl loadflags
loadflags:
.byte LOADED_HIGH # The kernel is to be loaded high
@@ -301,6 +299,7 @@ ramdisk_size: .long 0 # its size in bytes
bootsect_kludge:
.long 0 # obsolete
+ .globl heap_end_ptr
heap_end_ptr: .word _end+STACK_SIZE-512
# (Header version 0x0201 or later)
# space from here (exclusive) down to
@@ -551,89 +550,3 @@ kernel_info_offset: .long ZO_kernel_info
end_of_bzheader:
# End of bzImage header ######################################################
-
- .section ".entrytext", "ax"
- .globl start_of_setup
-start_of_setup:
-# Force %es = %ds
- movw %ds, %ax
- movw %ax, %es
- cld
-
-# Apparently some ancient versions of LILO invoked the kernel with %ss != %ds,
-# which happened to work by accident for the old code. Recalculate the stack
-# pointer if %ss is invalid. Otherwise leave it alone, LOADLIN sets up the
-# stack behind its own code, so we can't blindly put it directly past the heap.
-
- movw %ss, %dx
- cmpw %ax, %dx # %ds == %ss?
- movw %sp, %dx
- je 2f # -> assume %sp is reasonably set
-
- # Invalid %ss, make up a new stack
- movw $_end, %dx
- testb $CAN_USE_HEAP, loadflags
- jz 1f
- movw heap_end_ptr, %dx
-1: addw $STACK_SIZE, %dx
- jnc 2f
- xorw %dx, %dx # Prevent wraparound
-
-2: # Now %dx should point to the end of our stack space
- andw $~3, %dx # dword align (might as well...)
- jnz 3f
- movw $0xfffc, %dx # Make sure we're not zero
-3: movw %ax, %ss
- movzwl %dx, %esp # Clear upper half of %esp
- sti # Now we should have a working stack
-
-# We will have entered with %cs = %ds+0x20, normalize %cs so
-# it is on par with the other segments.
- pushw %ds
- pushw $6f
- lretw
-6:
-
-# Check signature at end of setup
-SETUP_SIGNATURE = 0x5a5aaa55
- cmpl $SETUP_SIGNATURE, setup_sig
- jne setup_bad
-
-# Zero the bss
- movw $__bss_start, %di
- movw $_end+3, %cx
- xorl %eax, %eax
- subw %di, %cx
- shrw $2, %cx
- rep stosl
-
-# The C code uses %gs == 0 as invariant
- movw %ax, %gs
-
-# Jump to C code (should not return)
- calll main
-
-# Setup corrupt somehow...
-setup_bad:
- movl $setup_corrupt, %eax
- # Fall through...
-
- .globl die
- .type die, @function
-die:
- calll puts
-1:
- hlt
- jmp 1b
-
- .size die, .-die
-
- .section ".initdata", "a"
-setup_corrupt:
- .byte 7
- .string "No setup signature found...\n"
-
- .section ".signature", "a"
- .balign 4
-setup_sig:
- .long SETUP_SIGNATURE
diff --git a/arch/x86/boot/start16.S b/arch/x86/boot/start16.S
new file mode 100644
index 000000000000..3381dc0f4065
--- /dev/null
+++ b/arch/x86/boot/start16.S
@@ -0,0 +1,108 @@
+/*
+ * start16.S
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * Based on bootsect.S and setup.S
+ * modified by more people than can be counted
+ *
+ * Rewritten as a common file by H. Peter Anvin (Apr 2007)
+ *
+ * BIG FAT NOTE: We're in real mode using 64k segments. Therefore segment
+ * addresses must be multiplied by 16 to obtain their respective linear
+ * addresses. To avoid confusion, linear addresses are written using leading
+ * hex while segment addresses are written as segment:offset.
+ *
+ * This code must *immediately* follow the bzImage header, so DO NOT
+ * add alignment directives anywhere in the .entrytext section!
+ */
+
+#include <asm/bootparam.h>
+#include "boot.h"
+
+ .code16
+ .section ".entrytext", "ax"
+ .globl start_of_setup
+start_of_setup:
+# Force %es = %ds
+ movw %ds, %ax
+ movw %ax, %es
+ cld
+
+# Apparently some ancient versions of LILO invoked the kernel with %ss != %ds,
+# which happened to work by accident for the old code. Recalculate the stack
+# pointer if %ss is invalid. Otherwise leave it alone, LOADLIN sets up the
+# stack behind its own code, so we can't blindly put it directly past the heap.
+
+ movw %ss, %dx
+ cmpw %ax, %dx # %ds == %ss?
+ movw %sp, %dx
+ je 2f # -> assume %sp is reasonably set
+
+ # Invalid %ss, make up a new stack
+ movw $_end, %dx
+ testb $CAN_USE_HEAP, loadflags
+ jz 1f
+ movw heap_end_ptr, %dx
+1: addw $STACK_SIZE, %dx
+ jnc 2f
+ xorw %dx, %dx # Prevent wraparound
+
+2: # Now %dx should point to the end of our stack space
+ andw $~3, %dx # dword align (might as well...)
+ jnz 3f
+ movw $0xfffc, %dx # Make sure we're not zero
+3: movw %ax, %ss
+ movzwl %dx, %esp # Clear upper half of %esp
+ sti # Now we should have a working stack
+
+# We will have entered with %cs = %ds+0x20, normalize %cs so
+# it is on par with the other segments.
+ pushw %ds
+ pushw $6f
+ lretw
+6:
+
+# Check signature at end of setup
+SETUP_SIGNATURE = 0x5a5aaa55
+ cmpl $SETUP_SIGNATURE, setup_sig
+ jne setup_bad
+
+# Zero the bss
+ movw $__bss_start, %di
+ movw $_end+3, %cx
+ xorl %eax, %eax
+ subw %di, %cx
+ shrw $2, %cx
+ rep stosl
+
+# The C code uses %gs == 0 as invariant
+ movw %ax, %gs
+
+# Jump to C code (should not return)
+ calll main
+
+# Setup corrupt somehow...
+setup_bad:
+ movl $setup_corrupt, %eax
+ # Fall through...
+
+ .globl die
+ .type die, @function
+die:
+ calll puts
+1:
+ hlt
+ jmp 1b
+
+ .size die, .-die
+
+ .section ".initdata", "a"
+setup_corrupt:
+ .byte 7
+ .string "No setup signature found...\n"
+
+ .section ".signature", "a"
+ .balign 4
+setup_sig:
+ .long SETUP_SIGNATURE
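Illustration only, not part of the patch: the stack-pointer recovery at
start_of_setup above, rendered in C for readers who prefer it. The
function and parameter names are made up, and the 16-bit wraparound is
modelled with explicit-width types.

	#include <stdint.h>
	#include <stdbool.h>

	static uint16_t pick_stack_end(bool ss_equals_ds, uint16_t sp,
				       bool can_use_heap, uint16_t end_sym,
				       uint16_t heap_end, uint16_t stack_size)
	{
		uint16_t dx;

		if (ss_equals_ds) {
			dx = sp;	/* trust the loader's %sp */
		} else {
			/* invalid %ss: build a stack past _end or the heap */
			dx = can_use_heap ? heap_end : end_sym;
			uint32_t sum = (uint32_t)dx + stack_size;
			dx = (sum > 0xffff) ? 0 : (uint16_t)sum; /* clamp on carry */
		}

		dx &= ~3;		/* dword align */
		if (dx == 0)
			dx = 0xfffc;	/* never leave %sp at zero */
		return dx;
	}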
--
2.52.0