/* * * Automatically generated file; DO NOT EDIT. * Linux/arm64 4.4.9 Kernel Configuration * */ /* * Helper macros to use CONFIG_ options in C/CPP expressions. Note that * these only work with boolean and tristate options. */ /* * Getting something that works in C and CPP for an arg that may or may * not be defined is tricky. Here, if we have "#define CONFIG_BOOGER 1" * we match on the placeholder define, insert the "0," for arg1 and generate * the triplet (0, 1, 0). Then the last step cherry picks the 2nd arg (a one). * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when * the last step cherry picks the 2nd arg, we get a zero. * (A concrete sketch of this expansion appears further below.) */ /* * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0 * otherwise. For boolean options, this is equivalent to * IS_ENABLED(CONFIG_FOO). */ /* * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0 * otherwise. */ /* * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled * code can call a function defined in code compiled based on CONFIG_FOO. * This is similar to IS_ENABLED(), but returns false when invoked from * built-in code when CONFIG_FOO is set to 'm'. */ /* * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm', * 0 otherwise. */ /* * ld script to make ARM Linux kernel * taken from the i386 version by Russell King * Written by Martin Mares */ /* * Helper macros to support writing architecture specific * linker scripts. * * A minimal linker script has the following content: * [This is a sample, architectures may have special requirements] * * OUTPUT_FORMAT(...) * OUTPUT_ARCH(...) * ENTRY(...) * SECTIONS * { * . = START; * __init_begin = .; * HEAD_TEXT_SECTION * INIT_TEXT_SECTION(PAGE_SIZE) * INIT_DATA_SECTION(...) * PERCPU_SECTION(CACHELINE_SIZE) * __init_end = .; * * _stext = .; * TEXT_SECTION = 0 * _etext = .; * * _sdata = .; * RO_DATA_SECTION(PAGE_SIZE) * RW_DATA_SECTION(...) * _edata = .; * * EXCEPTION_TABLE(...) * NOTES * * BSS_SECTION(0, 0, 0) * _end = .; * * STABS_DEBUG * DWARF_DEBUG * * DISCARDS // must be the last * } * * [__init_begin, __init_end] is the init section that may be freed after init * // __init_begin and __init_end should be page aligned, so that we can * // free the whole .init memory * [_stext, _etext] is the text section * [_sdata, _edata] is the data section * * Some of the included output sections have their own set of constants. * Examples are: [__initramfs_start, __initramfs_end] for initramfs and * [__nosave_begin, __nosave_end] for the nosave data */ /* * Export symbols from the kernel to modules. Forked from module.h * to reduce the amount of pointless cruft we feed to gcc when only * exporting a simple symbol or two. * * Try not to add #includes here. It slows compilation and makes kernel * hackers place grumpy comments in header files. */ /* Some toolchains use a `_' prefix for all user symbols. */ /* Indirect, so macros are expanded before pasting. */ /* Align . to an 8 byte boundary, which equals the maximum function alignment. */ /* * Align to a 32 byte boundary, equal to the * alignment gcc 4.5 uses for a struct */ /* The actual configuration determines whether the init/exit sections * are handled as text/data or can be discarded (which * often happens at runtime) */ /* .data section */ /* * Data section helpers */ /* * Read only Data */ /* RODATA & RO_DATA provided for backward compatibility. * All archs are supposed to use RO_DATA() */
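/*
 * Illustrative sketch (not part of the generated script): the placeholder
 * trick described above is typically implemented along these lines. The
 * helper names below are assumptions chosen for clarity; only the
 * IS_BUILTIN/IS_MODULE/IS_ENABLED names come from the comments above.
 *
 *   #define __ARG_PLACEHOLDER_1 0,
 *   #define __take_second_arg(ignored, val, ...) val
 *
 *   // Indirection so the argument is macro-expanded before pasting:
 *   // a defined CONFIG_FOO becomes "1", selecting __ARG_PLACEHOLDER_1,
 *   // which inserts the leading "0," and makes the second arg a 1.
 *   #define __is_defined(x)       ___is_defined(x)
 *   #define ___is_defined(val)    ____is_defined(__ARG_PLACEHOLDER_##val)
 *   #define ____is_defined(junk)  __take_second_arg(junk 1, 0)
 *
 *   #define IS_BUILTIN(option) __is_defined(option)
 *   #define IS_MODULE(option)  __is_defined(option##_MODULE)
 *   #define IS_ENABLED(option) (IS_BUILTIN(option) || IS_MODULE(option))
 *
 * e.g. with "#define CONFIG_BOOGER 1", IS_BUILTIN(CONFIG_BOOGER) expands,
 * step by step, to __take_second_arg(0, 1, 0) and therefore to 1.
 */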
/* .text section. Map to function alignment to avoid address changes * during the second ld pass when generating System.map */ /* sched.text is aligned to function alignment to ensure we have the same * address even at the second ld pass when generating System.map */ /* spinlock.text is aligned to function alignment to ensure we have the same * address even at the second ld pass when generating System.map */ /* Section used for early init (in .S files) */ /* * Exception table */ /* * Init task */ /* init and exit section handling */ /* * bss (Block Started by Symbol) - uninitialized data * zeroed during startup */ /* * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra * sections to the front of bss. */ /* * DWARF debug sections. * Symbols in the DWARF debugging sections are relative to * the beginning of the section so we begin them at 0. */ /* Stabs debugging sections. */ /* * Default discarded sections. * * Some archs want to discard exit text/data at runtime rather than * link time due to cross-section references such as alt instructions, * bug table, eh_frame, etc. DISCARDS must be the last of output * section definitions so that such archs put those in earlier section * definitions. */ /** * PERCPU_INPUT - the percpu input sections * @cacheline: cacheline size * * The core percpu section names and core symbols which do not rely * directly upon load addresses. * * @cacheline is used to align subsections to avoid false cacheline * sharing between subsections for different purposes. */ /** * PERCPU_VADDR - define output section for percpu area * @cacheline: cacheline size * @vaddr: explicit base address (optional) * @phdr: destination PHDR (optional) * * Macro which expands to the output section for the percpu area. * * @cacheline is used to align subsections to avoid false cacheline * sharing between subsections for different purposes. * * If @vaddr is not blank, it specifies an explicit base address and all * percpu symbols will be offset from the given address. If blank, * @vaddr always equals @laddr + LOAD_OFFSET. * * @phdr defines the output PHDR to use if not blank. Be warned that * the output PHDR is sticky. If @phdr is specified, the next output * section in the linker script will go there too. @phdr should have * a leading colon. * * Note that this macro defines __per_cpu_load as an absolute symbol. * If there is no need to put the percpu section at a predetermined * address, use PERCPU_SECTION. */ /** * PERCPU_SECTION - define output section for percpu area, simple version * @cacheline: cacheline size * * Aligns to PAGE_SIZE and outputs the output section for the percpu area. This * macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and * __per_cpu_start will be identical. * * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,) * except that __per_cpu_load is defined as a relative symbol against * .data..percpu which is required for the relocatable x86_32 configuration. */ /* * Definition of the high level *_SECTION macros * They will fit only a subset of the architectures */ /* * Writeable data. * All sections are combined in a single .data section. * The sections following CONSTRUCTORS are arranged so their * typical alignment matches. * A cacheline is typically/always less than a PAGE_SIZE so * the sections that have this restriction (or similar) * are located before the ones requiring PAGE_SIZE alignment. * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which * matches the requirement of PAGE_ALIGNED_DATA. * * use 0 as page_align if page_aligned data is not used */
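/*
 * For illustration only (an assumption about how these input sections are
 * typically populated, not something defined in this script): C code using
 * the kernel's DEFINE_PER_CPU*() family places variables into the
 * .data..percpu* input sections that PERCPU_INPUT collects, e.g.:
 *
 *   #include <linux/percpu-defs.h>
 *
 *   DEFINE_PER_CPU(int, counter);                  // .data..percpu
 *   DEFINE_PER_CPU_READ_MOSTLY(long, limit);       // .data..percpu..read_mostly
 *   DEFINE_PER_CPU_SHARED_ALIGNED(long, hot_lock); // .data..percpu..shared_aligned
 *   DEFINE_PER_CPU_PAGE_ALIGNED(long, big_buf);    // .data..percpu..page_aligned
 *
 * The ALIGN() steps between those subsections in the .data..percpu output
 * section further below are what the @cacheline argument controls.
 */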
/* * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ /* * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ /* * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ /* * Memory returned by kmalloc() may be used for DMA, so we must make * sure that all such allocations are cache aligned. Otherwise, * unrelated code may cause parts of the buffer to be read into the * cache before the transfer is done, causing old data to be seen by * the CPU. */ /* * Kernel page table mapping * * Copyright (C) 2015 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ /* * The linear mapping and the start of memory are both 2M aligned (per * the arm64 booting.txt requirements). Hence we can use section mapping * with 4K (section size = 2M) but not with 16K (section size = 32M) or * 64K (section size = 512M). */ /* * The idmap and swapper page tables need some space reserved in the kernel * image. Both require pgd, pud (4 levels only) and pmd tables to (section) * map the kernel. With the 64K page configuration, swapper and idmap need to * map to pte level. The swapper also maps the FDT (see __create_page_tables * for more information). Note that the number of ID map translation levels * could be increased on the fly if system RAM is out of reach for the default * VA range, so pages required to map the highest possible PA are reserved in all * cases. */
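/*
 * Worked example for the configuration this script was generated with
 * (4K pages, 39-bit VA, section mapping), cross-checked against the
 * idmap_pg_dir/swapper_pg_dir reservations near the end of this script;
 * the breakdown itself is an illustration, not an extra definition:
 *
 *   swapper: a 39-bit VA with 4K pages needs (39 - 4) / (12 - 3) = 3
 *            translation levels; section mapping drops the PTE level,
 *            so 3 - 1 = 2 pages are reserved (one pgd, one pmd).
 *   idmap:   sized for the highest possible PA (48 bits), i.e.
 *            (48 - 4) / (12 - 3) = 4 levels, again minus one for
 *            section mapping, so 3 pages are reserved.
 */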
/* Initial memory map size */ /* The size of the initial kernel direct mapping */ /* * Initial memory map attributes. */ /* * Based on arch/arm/include/asm/thread_info.h * * Copyright (C) 2002 Russell King. * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ /* * Allow us to mark functions as 'deprecated' and have gcc emit a nice * warning for each use, in hopes of speeding the function's removal. * Usage is: * int __deprecated foo(void) */ /* * Allow us to avoid 'defined but not used' warnings on functions and data, * as well as force them to be emitted to the assembly file. * * As of gcc 3.4, static functions that are not marked with attribute((used)) * may be elided from the assembly file. As of gcc 3.4, static data not so * marked will not be elided, but this may change in a future gcc version. * * NOTE: Because distributions shipped with a backported unit-at-a-time * compiler in gcc 3.3, we must define __used to be __attribute__((used)) * for gcc >=3.3 instead of 3.4. * * In prior versions of gcc, such functions and data would be emitted, but * would be warned about except with attribute((unused)). * * Mark functions that are referenced only in inline assembly as __used so * the code is emitted even though it appears to be unreferenced. */ /* * Rather than using noinline to prevent stack consumption, use * noinline_for_stack instead. For documentation reasons. */ /* * From the GCC manual: * * Many functions do not examine any values except their arguments, * and have no effects except the return value. Basically this is * just a slightly more strict class than the `pure' attribute above, * since the function is not allowed to read global memory. * * Note that a function that has pointer arguments and examines the * data pointed to must _not_ be declared `const'. Likewise, a * function that calls a non-`const' function usually must not be * `const'. It does not make sense for a `const' function to return * `void'. */ /* * Tell gcc if a function is cold. The compiler will assume any path * directly leading to the call is unlikely. */ /* Simple shorthand for a section definition */ /* * Assume alignment of return value. */ /* Are two types/vars the same type (ignoring qualifiers)? */ /* Is this type a native word size -- useful for atomic operations */ /* Compile time object size, -1 for unknown */ /* * Sparse complains of variable sized arrays due to the temporary variable in * __compiletime_assert. Unfortunately we can't just expand it out to make * sparse see a constant array size without breaking compiletime_assert on old * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether. */ /** * compiletime_assert - break build and emit msg if condition is false * @condition: a compile-time constant condition to check * @msg: a message to emit if condition is false * * In the tradition of POSIX assert, this macro will break the build if the * supplied condition is *false*, emitting the supplied error message if the * compiler has support to do so. */
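/*
 * Hypothetical usage sketch for the compiletime_assert() interface
 * documented above; the struct and its expected size are made up for the
 * example, only the macro itself comes from the comment above:
 *
 *   #include <linux/compiler.h>
 *
 *   struct boot_block {
 *           unsigned long flags;
 *           char pad[56];
 *   };
 *
 *   static inline void check_boot_block_layout(void)
 *   {
 *           compiletime_assert(sizeof(struct boot_block) == 64,
 *                              "struct boot_block layout changed");
 *   }
 *
 * If the condition is false and the compiler supports it, the build is
 * broken with the supplied message instead of failing at run time.
 */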
/* * Prevent the compiler from merging or refetching accesses. The compiler * is also forbidden from reordering successive instances of ACCESS_ONCE(), * but only when the compiler is aware of some particular ordering. One way * to make the compiler aware of ordering is to put the two invocations of * ACCESS_ONCE() in different C statements. * * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE * on a union member will work as long as the size of the member matches the * size of the union and the size is smaller than word size. * * The major use cases of ACCESS_ONCE used to be (1) Mediating communication * between process-level code and irq/NMI handlers, all running on the same CPU, * and (2) Ensuring that the compiler does not fold, spindle, or otherwise * mutilate accesses that either do not require ordering or that interact * with an explicit memory barrier or atomic instruction that provides the * required ordering. * * If possible use READ_ONCE()/WRITE_ONCE() instead. */ /** * lockless_dereference() - safely load a pointer for later dereference * @p: The pointer to load * * Similar to rcu_dereference(), but for situations where the pointed-to * object's lifetime is managed by something other than RCU. That * "something other" might be reference counting or simple immortality. */ /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ /* * thread information flags: * TIF_SYSCALL_TRACE - syscall trace active * TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace * TIF_SYSCALL_AUDIT - syscall auditing * TIF_SECCOMP - syscall secure computing * TIF_SIGPENDING - signal pending * TIF_NEED_RESCHED - rescheduling necessary * TIF_NOTIFY_RESUME - callback before returning to user * TIF_USEDFPU - FPU was used by this task this quantum (SMP) */ /* * Based on arch/arm/include/asm/memory.h * * Copyright (C) 2000-2002 Russell King * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . * * Note: this file should not be included by non-asm/.h files */ /* const.h: Macros for dealing with constants. */ /* Some constant macros are used in both assembler and * C code. Therefore we cannot annotate them always with * 'UL' and other type specifiers unilaterally. We * use the following macros to deal with this. * * Similarly, _AT() will cast an expression with a type in C, but * leave it unchanged in asm. */ /* * int-ll64 is used everywhere now. */ /* * asm-generic/int-ll64.h * * Integer declarations for architectures which use "long long" * for 64-bit types. */ /* * asm-generic/int-ll64.h * * Integer declarations for architectures which use "long long" * for 64-bit types. */ /* * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ /* * There seems to be no way of detecting this automatically from user * space, so 64 bit architectures should override this in their * bitsperlong.h. In particular, an architecture that supports * both 32 and 64 bit user space must not rely on CONFIG_64BIT * to decide it, but rather check a compiler provided macro. */ /* * FIXME: The check currently breaks x86-64 build, so it's * temporarily disabled. Please fix x86-64 and reenable */ /* This is a placeholder, to be removed over time */ /* * include/linux/sizes.h * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* * Allow for constants defined here to be used from assembly code * by prepending the UL suffix only with actual C code compilation. */ /* * Size of the PCI I/O space. This must remain a power of two so that * IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses. */ /* * PAGE_OFFSET - the virtual address of the start of the kernel image (top * (VA_BITS - 1)) * VA_BITS - the maximum number of bits for virtual addresses. * VA_START - the first kernel virtual address. * TASK_SIZE - the maximum size of a user space task. * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. * The module space lives between the addresses given by TASK_SIZE * and PAGE_OFFSET - it must be within 128MB of the kernel text. */ /* * Physical vs virtual RAM address space conversion. These are * private definitions which should NOT be used outside memory.h * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. */ /* * Convert a page to/from a physical address */ /* * Memory types available. */ /* * Memory types for Stage-2 translation */ /* * Based on arch/arm/include/asm/page.h * * Copyright (C) 1995-2003 Russell King * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ /* PAGE_SHIFT determines the page size */ /* CONT_SHIFT determines the number of pages which can be tracked together */ /* * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
*/ /* * Copyright (C) 2015 ARM Limited * Author: Dave Martin * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ /* * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ /* * This error code is special: arch syscall entry code will return * -ENOSYS if users try to call a syscall that doesn't exist. To keep * failures of syscalls that really do exist distinguishable from * failures due to attempts to use a nonexistent syscall, syscall * implementations should refrain from returning -ENOSYS. */ /* for robust mutexes */ /* * These should never be seen by user programs. To return one of ERESTART* * codes, signal_pending() MUST be set. Note that ptrace can observe these * at syscall exit tracing, but they will never be left for the debugged user * process to see. */ /* Defined for the NFSv3 protocol */ /* * Copyright (C) 2013 - ARM Ltd * Author: Marc Zyngier * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ /* * Based on arch/arm/include/asm/memory.h * * Copyright (C) 2000-2002 Russell King * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
 * * Note: this file should not be included by non-asm/.h files */ /* Unallocated EC: 0x02 */ /* Unallocated EC: 0x09 - 0x0B */ /* Unallocated EC: 0x0d */ /* Unallocated EC: 0x0F - 0x10 */ /* Unallocated EC: 0x14 */ /* Unallocated EC: 0x19 - 0x1E */ /* Unallocated EC: 0x23 */ /* Unallocated EC: 0x27 */ /* Unallocated EC: 0x29 - 0x2B */ /* Unallocated EC: 0x2D - 0x2E */ /* Unallocated EC: 0x36 - 0x37 */ /* Unallocated EC: 0x39 */ /* Unallocated EC: 0x3B */ /* Unallocated EC: 0x3D - 0x3F */ /* ESR value templates for specific events */ /* BRK instruction trap from AArch64 state */ /* * Copyright (C) 2013 Huawei Ltd. * Author: Jiang Liu * * Copyright (C) 2014 Zi Shen Lim * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ /* A64 instructions are always 32 bits. */ /* * Based on arch/arm/include/asm/ptrace.h * * Copyright (C) 1996-2003 Russell King * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ /* * Based on arch/arm/include/asm/ptrace.h * * Copyright (C) 1996-2003 Russell King * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ /* * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ /* * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
 * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ /* * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP */ /* * PSR bits */ /* AArch32 CPSR bits */ /* AArch64 SPSR bits */ /* * Groups of PSR bits */ /* Current Exception Level values, as contained in CurrentEL */ /* AArch32-specific ptrace requests */ /* AArch32 CPSR bits */ /* * These are 'magic' values for PTRACE_PEEKUSR that return info about where a * process is located in memory. */ /* Low-level stepping controls. */ /* MDSCR_EL1 enabling bits */ /* AArch64 */ /* * Break point instruction encoding */ /* * #imm16 values used for BRK instruction generation * Allowed values for kgdb are 0x400 - 0x7ff * 0x100: for triggering a fault on purpose (reserved) * 0x400: for dynamic BRK instruction * 0x401: for compile time BRK instruction * 0x800: kernel-mode BUG() and WARN() traps */ /* * BRK instruction encoding * The #imm16 value should be placed at bits[20:5] within the BRK instruction * (an illustrative encoding sketch appears further below) */ /* * BRK instruction for provoking a fault on purpose * Unlike kgdb, an #imm16 value with no allocated handler is used for faulting. */ /* AArch32 */ /* * Based on arch/arm/include/asm/proc-fns.h * * Copyright (C) 1997-1999 Russell King * Copyright (C) 2000 Deep Blue Solutions Ltd * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ /* * Based on arch/arm/include/asm/memory.h * * Copyright (C) 2000-2002 Russell King * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . * * Note: this file should not be included by non-asm/.h files */ /* * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */
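/*
 * Illustrative sketch (an assumption, using a made-up helper name): given
 * the bits[20:5] placement described in the BRK encoding comment above, a
 * BRK instruction carrying a given #imm16 can be assembled as:
 *
 *   #include <linux/types.h>
 *
 *   #define A64_BRK_OPCODE_BASE 0xd4200000u   // BRK #0
 *
 *   static inline u32 a64_brk_insn(u16 imm16)
 *   {
 *           return A64_BRK_OPCODE_BASE | ((u32)imm16 << 5);
 *   }
 *
 * For example, the kernel-mode BUG()/WARN() trap value 0x800 listed above
 * would encode as 0xd4210000.
 */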
/* * Number of page-table levels required to address 'va_bits' wide * address, without section mapping. We resolve the top (va_bits - PAGE_SHIFT) * bits with (PAGE_SHIFT - 3) bits at each page table level. Hence: * * levels = DIV_ROUND_UP((va_bits - PAGE_SHIFT), (PAGE_SHIFT - 3)) * * where DIV_ROUND_UP(n, d) => (((n) + (d) - 1) / (d)) * * We cannot include linux/kernel.h which defines DIV_ROUND_UP here * due to build issues. So we open code DIV_ROUND_UP here: * * ((((va_bits) - PAGE_SHIFT) + (PAGE_SHIFT - 3) - 1) / (PAGE_SHIFT - 3)) * * which gets simplified as : * * (((va_bits) - 4) / (PAGE_SHIFT - 3)) * * (a worked example for this configuration appears further below) */ /* * Size mapped by an entry at level n ( 0 <= n <= 3) * We map (PAGE_SHIFT - 3) bits at all translation levels and PAGE_SHIFT bits * in the final page. The maximum number of translation levels supported by * the architecture is 4. Hence, starting at level n, we have further * ((4 - n) - 1) levels of translation excluding the offset within the page. * So, the total number of bits mapped by an entry at level n is : * * ((4 - n) - 1) * (PAGE_SHIFT - 3) + PAGE_SHIFT * * Rearranging it a bit we get : * (4 - n) * (PAGE_SHIFT - 3) + 3 */ /* * PMD_SHIFT determines the size a level 2 page table entry can map. */ /* * PUD_SHIFT determines the size a level 1 page table entry can map. */ /* * PGDIR_SHIFT determines the size a top-level page table entry can map * (depending on the configuration, this level can be 0, 1 or 2). */ /* * Section address mask and size definitions. */ /* * Contiguous page definitions. */ /* the numerical offset of the PTE within a range of CONT_PTES */ /* * Hardware page table definitions. * * Level 1 descriptor (PUD). */ /* * Level 2 descriptor (PMD). */ /* * Section */ /* * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). */ /* * Level 3 descriptor (PTE). */ /* * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). */ /* * 2nd stage PTE definitions */ /* * Memory Attribute override for Stage-2 (MemAttr[3:0]) */ /* * EL2/HYP PTE/PMD definitions */ /* * Highest possible physical address supported. */ /* * TCR flags. */ /* * Software defined PTE bits definition. */ /* * VMALLOC and SPARSEMEM_VMEMMAP ranges. * * VMEMMAP_SIZE: allows the whole linear region to be covered by a struct page array * (rounded up to PUD_SIZE). * VMALLOC_START: beginning of the kernel VA space * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space, * fixed mappings and modules */ /* * Linker script macros to generate Image header fields. * * Copyright (C) 2014 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ /* * There aren't any ELF relocations we can use to endian-swap values known only * at link time (e.g. the subtraction of two symbol addresses), so we must get * the linker to endian-swap certain values before emitting them. */ /* * These will be output as part of the Image header, which should be little-endian * regardless of the endianness of the kernel. While constant values could be * endian swapped in head.S, all are done here for consistency. */
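/*
 * Worked example, using the page-table comments above and the values this
 * script was generated with (PAGE_SHIFT = 12 and VA_BITS = 39, as implied
 * by the "(39) - 1" shift in the kernel VA base further below); the
 * arithmetic is illustrative only:
 *
 *   levels      = DIV_ROUND_UP(39 - 12, 12 - 3) = DIV_ROUND_UP(27, 9) = 3
 *   PGDIR_SHIFT = (4 - 1) * (12 - 3) + 3 = 30  -> a pgd entry maps 1GB
 *   PMD_SHIFT   = (4 - 2) * (12 - 3) + 3 = 21  -> a pmd entry (section) maps 2MB
 *
 * and the remaining 12 bits index bytes within the 4KB page.
 */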
/* * The EFI stub has its own symbol namespace prefixed by __efistub_, to * isolate it from the kernel proper. The following symbols are legally * accessed by the stub, so provide some aliases to make them accessible. * Only include data symbols here, or text symbols of functions that are * guaranteed to be safe when executed at another offset than they were * linked at. The routines below are all implemented in assembler in a * position-independent manner. */ __efistub_memcmp = __pi_memcmp; __efistub_memchr = __pi_memchr; __efistub_memcpy = __pi_memcpy; __efistub_memmove = __pi_memmove; __efistub_memset = __pi_memset; __efistub_strlen = __pi_strlen; __efistub_strcmp = __pi_strcmp; __efistub_strncmp = __pi_strncmp; __efistub___flush_dcache_area = __pi___flush_dcache_area; __efistub__text = _text; __efistub__end = _end; __efistub__edata = _edata; /* .exit.text needed in case of alternative patching */ OUTPUT_ARCH(aarch64) ENTRY(_text) jiffies = jiffies_64; /* * The size of the PE/COFF section that covers the kernel image, which * runs from stext to _edata, must be a round multiple of the PE/COFF * FileAlignment, which we set to its minimum value of 0x200. 'stext' * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned * boundary should be sufficient. */ PECOFF_FILE_ALIGNMENT = 0x200; SECTIONS { /* * XXX: The linker does not define how output sections are * assigned to input sections when there are multiple statements * matching the same input section name. There is no documented * order of matching. */ /DISCARD/ : { *(.exitcall.exit) *(.discard) *(.discard.*) } . = (0xffffffffffffffff << ((39) - 1)) + 0x00080000; .head.text : { _text = .; *(.head.text) } . = ALIGN(1<<12); .text : { /* Real text segment */ _stext = .; /* Text and read-only data */ __exception_text_start = .; *(.exception.text) __exception_text_end = .; . = ALIGN(8); __irqentry_text_start = .; *(.irqentry.text) __irqentry_text_end = .; . = ALIGN(8); *(.text.hot .text .text.fixup .text.unlikely) *(.ref.text) . = ALIGN(8); __sched_text_start = .; *(.sched.text) __sched_text_end = .; . = ALIGN(8); __lock_text_start = .; *(.spinlock.text) __lock_text_end = .; . = ALIGN(0x00001000); __hyp_idmap_text_start = .; *(.hyp.idmap.text) __hyp_idmap_text_end = .; __hyp_text_start = .; *(.hyp.text) __hyp_text_end = .; . = ALIGN(0x00001000); __idmap_text_start = .; *(.idmap.text) __idmap_text_end = .; *(.fixup) *(.gnu.warning) . = ALIGN(16); *(.got) /* Global offset table */ } . = ALIGN(1<<12); . = ALIGN(((1 << 12))); .rodata : AT(ADDR(.rodata) - 0) { __start_rodata = .; *(.rodata) *(.rodata.*) *(__vermagic) . = ALIGN(8); __start___tracepoints_ptrs = .; *(__tracepoints_ptrs) __stop___tracepoints_ptrs = .; *(__tracepoints_strings) } .rodata1 : AT(ADDR(.rodata1) - 0) { *(.rodata1) } . 
= ALIGN(8); __bug_table : AT(ADDR(__bug_table) - 0) { __start___bug_table = .; *(__bug_table) __stop___bug_table = .; } .pci_fixup : AT(ADDR(.pci_fixup) - 0) { __start_pci_fixups_early = .; *(.pci_fixup_early) __end_pci_fixups_early = .; __start_pci_fixups_header = .; *(.pci_fixup_header) __end_pci_fixups_header = .; __start_pci_fixups_final = .; *(.pci_fixup_final) __end_pci_fixups_final = .; __start_pci_fixups_enable = .; *(.pci_fixup_enable) __end_pci_fixups_enable = .; __start_pci_fixups_resume = .; *(.pci_fixup_resume) __end_pci_fixups_resume = .; __start_pci_fixups_resume_early = .; *(.pci_fixup_resume_early) __end_pci_fixups_resume_early = .; __start_pci_fixups_suspend = .; *(.pci_fixup_suspend) __end_pci_fixups_suspend = .; __start_pci_fixups_suspend_late = .; *(.pci_fixup_suspend_late) __end_pci_fixups_suspend_late = .; } .builtin_fw : AT(ADDR(.builtin_fw) - 0) { __start_builtin_fw = .; *(.builtin_fw) __end_builtin_fw = .; } __ksymtab : AT(ADDR(__ksymtab) - 0) { __start___ksymtab = .; *(SORT(___ksymtab+*)) __stop___ksymtab = .; } __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - 0) { __start___ksymtab_gpl = .; *(SORT(___ksymtab_gpl+*)) __stop___ksymtab_gpl = .; } __ksymtab_unused : AT(ADDR(__ksymtab_unused) - 0) { __start___ksymtab_unused = .; *(SORT(___ksymtab_unused+*)) __stop___ksymtab_unused = .; } __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - 0) { __start___ksymtab_unused_gpl = .; *(SORT(___ksymtab_unused_gpl+*)) __stop___ksymtab_unused_gpl = .; } __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - 0) { __start___ksymtab_gpl_future = .; *(SORT(___ksymtab_gpl_future+*)) __stop___ksymtab_gpl_future = .; } __kcrctab : AT(ADDR(__kcrctab) - 0) { __start___kcrctab = .; *(SORT(___kcrctab+*)) __stop___kcrctab = .; } __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - 0) { __start___kcrctab_gpl = .; *(SORT(___kcrctab_gpl+*)) __stop___kcrctab_gpl = .; } __kcrctab_unused : AT(ADDR(__kcrctab_unused) - 0) { __start___kcrctab_unused = .; *(SORT(___kcrctab_unused+*)) __stop___kcrctab_unused = .; } __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - 0) { __start___kcrctab_unused_gpl = .; *(SORT(___kcrctab_unused_gpl+*)) __stop___kcrctab_unused_gpl = .; } __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - 0) { __start___kcrctab_gpl_future = .; *(SORT(___kcrctab_gpl_future+*)) __stop___kcrctab_gpl_future = .; } __ksymtab_strings : AT(ADDR(__ksymtab_strings) - 0) { *(__ksymtab_strings) } __init_rodata : AT(ADDR(__init_rodata) - 0) { *(.ref.rodata) } __param : AT(ADDR(__param) - 0) { __start___param = .; *(__param) __stop___param = .; } __modver : AT(ADDR(__modver) - 0) { __start___modver = .; *(__modver) __stop___modver = .; . = ALIGN(((1 << 12))); __end_rodata = .; } . = ALIGN(((1 << 12))); . = ALIGN(8); __ex_table : AT(ADDR(__ex_table) - 0) { __start___ex_table = .; *(__ex_table) __stop___ex_table = .; } .notes : AT(ADDR(.notes) - 0) { __start_notes = .; *(.note.*) __stop_notes = .; } . = ALIGN(1<<12); _etext = .; /* End of text and rodata section */ . = ALIGN(1<<12); __init_begin = .; . = ALIGN(8); .init.text : AT(ADDR(.init.text) - 0) { _sinittext = .; *(.init.text) *(.meminit.text) _einittext = .; } .exit.text : { *(.exit.text) *(.memexit.text) } . = ALIGN(1<<12); .init.data : { *(.init.data) *(.meminit.data) . = ALIGN(8); __start_mcount_loc = .; *(__mcount_loc) __stop_mcount_loc = .; *(.init.rodata) . = ALIGN(8); __start_ftrace_events = .; *(_ftrace_events) __stop_ftrace_events = .; __start_ftrace_enum_maps = .; *(_ftrace_enum_map) __stop_ftrace_enum_maps = .; . 
= ALIGN(8); __start_syscalls_metadata = .; *(__syscalls_metadata) __stop_syscalls_metadata = .; *(.meminit.rodata) . = ALIGN(8); __clk_of_table = .; *(__clk_of_table) *(__clk_of_table_end) . = ALIGN(8); __reservedmem_of_table = .; *(__reservedmem_of_table) *(__reservedmem_of_table_end) . = ALIGN(8); __clksrc_of_table = .; *(__clksrc_of_table) *(__clksrc_of_table_end) . = ALIGN(8); __cpu_method_of_table = .; *(__cpu_method_of_table) *(__cpu_method_of_table_end) . = ALIGN(8); __cpuidle_method_of_table = .; *(__cpuidle_method_of_table) *(__cpuidle_method_of_table_end) . = ALIGN(32); __dtb_start = .; *(.dtb.init.rodata) __dtb_end = .; . = ALIGN(8); __irqchip_of_table = .; *(__irqchip_of_table) *(__irqchip_of_table_end) . = ALIGN(8); __irqchip_acpi_probe_table = .; *(__irqchip_acpi_probe_table) __irqchip_acpi_probe_table_end = .; . = ALIGN(8); __clksrc_acpi_probe_table = .; *(__clksrc_acpi_probe_table) __clksrc_acpi_probe_table_end = .; . = ALIGN(32); __earlycon_table = .; *(__earlycon_table) *(__earlycon_table_end) . = ALIGN(8); __earlycon_of_table = .; *(__earlycon_of_table) *(__earlycon_of_table_end) . = ALIGN(16); __setup_start = .; *(.init.setup) __setup_end = .; __initcall_start = .; *(.initcallearly.init) __initcall0_start = .; *(.initcall0.init) *(.initcall0s.init) __initcall1_start = .; *(.initcall1.init) *(.initcall1s.init) __initcall2_start = .; *(.initcall2.init) *(.initcall2s.init) __initcall3_start = .; *(.initcall3.init) *(.initcall3s.init) __initcall4_start = .; *(.initcall4.init) *(.initcall4s.init) __initcall5_start = .; *(.initcall5.init) *(.initcall5s.init) __initcallrootfs_start = .; *(.initcallrootfs.init) *(.initcallrootfss.init) __initcall6_start = .; *(.initcall6.init) *(.initcall6s.init) __initcall7_start = .; *(.initcall7.init) *(.initcall7s.init) __initcall_end = .; __con_initcall_start = .; *(.con_initcall.init) __con_initcall_end = .; __security_initcall_start = .; *(.security_initcall.init) __security_initcall_end = .; . = ALIGN(4); __initramfs_start = .; *(.init.ramfs) . = ALIGN(8); *(.init.ramfs.info) } .exit.data : { *(.exit.data) *(.memexit.data) *(.memexit.rodata) } . = ALIGN((1 << 12)); .data..percpu : AT(ADDR(.data..percpu) - 0) { __per_cpu_load = .; __per_cpu_start = .; *(.data..percpu..first) . = ALIGN((1 << 12)); *(.data..percpu..page_aligned) . = ALIGN((1 << 7)); *(.data..percpu..read_mostly) . = ALIGN((1 << 7)); *(.data..percpu) *(.data..percpu..shared_aligned) __per_cpu_end = .; } . = ALIGN((1 << 12)); __init_end = .; . = ALIGN(4); .altinstructions : { __alt_instructions = .; *(.altinstructions) __alt_instructions_end = .; } .altinstr_replacement : { *(.altinstr_replacement) } . = ALIGN((1 << 12)); _data = .; _sdata = .; . = ALIGN((1 << 12)); .data : AT(ADDR(.data) - 0) { . = ALIGN(16384); *(.data..init_task) . = ALIGN((1 << 12)); __nosave_begin = .; *(.data..nosave) . = ALIGN((1 << 12)); __nosave_end = .; . = ALIGN((1 << 12)); *(.data..page_aligned) . = ALIGN((1 << 7)); *(.data..cacheline_aligned) . = ALIGN((1 << 7)); *(.data..read_mostly) . = ALIGN((1 << 7)); *(.data) *(.ref.data) *(.data..shared_aligned) *(.data.unlikely) . = ALIGN(32); *(__tracepoints) . = ALIGN(8); __start___jump_table = .; *(__jump_table) __stop___jump_table = .; . = ALIGN(8); __start___verbose = .; *(__verbose) __stop___verbose = .; __start___trace_bprintk_fmt = .; *(__trace_printk_fmt) __stop___trace_bprintk_fmt = .; __start___tracepoint_str = .; *(__tracepoint_str) __stop___tracepoint_str = .; CONSTRUCTORS } .pecoff_edata_padding : { BYTE(0); . 
= ALIGN(PECOFF_FILE_ALIGNMENT); } _edata = .; . = ALIGN(0); __bss_start = .; . = ALIGN(0); .sbss : AT(ADDR(.sbss) - 0) { *(.sbss) *(.scommon) } . = ALIGN(0); .bss : AT(ADDR(.bss) - 0) { *(.bss..page_aligned) *(.dynbss) *(.bss) *(COMMON) } . = ALIGN(0); __bss_stop = .; . = ALIGN((1 << 12)); idmap_pg_dir = .; . += ((((((48)) - 4) / (12 - 3)) - 1) * (1 << 12)); swapper_pg_dir = .; . += ((3 - 1) * (1 << 12)); _end = .; .stab 0 : { *(.stab) } .stabstr 0 : { *(.stabstr) } .stab.excl 0 : { *(.stab.excl) } .stab.exclstr 0 : { *(.stab.exclstr) } .stab.index 0 : { *(.stab.index) } .stab.indexstr 0 : { *(.stab.indexstr) } .comment 0 : { *(.comment) } _kernel_size_le = ((_end - _text) & 0xffffffffffffffff); _kernel_offset_le = ((0x00080000) & 0xffffffffffffffff); _kernel_flags_le = ((((0 << 0) | (((12 - 10) / 2) << 1))) & 0xffffffffffffffff); } /* * The HYP init code and ID map text can't be longer than a page each, * and should not cross a page boundary. */ ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(0x00001000 - 1)) <= 0x00001000, "HYP init code too big or misaligned") ASSERT(__idmap_text_end - (__idmap_text_start & ~(0x00001000 - 1)) <= 0x00001000, "ID map text too big or misaligned") /* * If padding is applied before .head.text, virt<->phys conversions will fail. */ ASSERT(_text == ((0xffffffffffffffff << ((39) - 1)) + 0x00080000), "HEAD is misaligned")
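/*
 * For reference, the pre-expanded constants above work out as follows for
 * this configuration; treat this as an illustrative cross-check, not as
 * additional link-time definitions:
 *
 *   kernel VA base    = (0xffffffffffffffff << (39 - 1)) + 0x00080000
 *                     = 0xffffffc000000000 + 0x80000 (the text offset)
 *                     = 0xffffffc000080000
 *   _kernel_offset_le = 0x80000
 *   _kernel_flags_le  = (0 << 0) | (((12 - 10) / 2) << 1) = 0x2
 *                       (bit 0 = 0: little-endian kernel,
 *                        bits 2:1 = 1: 4K page size, per the arm64 boot
 *                        protocol's Image header flags)
 */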