[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <f9f8b946064a2c92e932c20f6e47a51381766725.1433937132.git.jpoimboe@redhat.com>
Date: Wed, 10 Jun 2015 07:06:10 -0500
From: Josh Poimboeuf <jpoimboe@...hat.com>
To: Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>
Cc: Michal Marek <mmarek@...e.cz>,
Peter Zijlstra <peterz@...radead.org>,
Andy Lutomirski <luto@...nel.org>,
Borislav Petkov <bp@...en8.de>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Andi Kleen <andi@...stfloor.org>, x86@...nel.org,
live-patching@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH v5 02/10] x86: Compile-time asm code validation
Add a new CONFIG_ASM_VALIDATION option which adds an asmvalidate host
tool which runs on every compiled .S file. Its goal is to enforce sane
rules on all asm code, so that stack debug metadata (frame/back chain
pointers and/or DWARF CFI metadata) can be made reliable.
It enforces the following rules:
1. Each callable function must be annotated with the ELF STT_FUNC type.
This is typically done using the ENTRY/ENDPROC macros. If
asmvalidate finds a return instruction outside of a function, it
flags an error, since that usually indicates callable code which
should be annotated accordingly.
2. Each callable function must never leave its own bounds (i.e. with a
jump to outside the function) except when returning.
3. Each callable non-leaf function must have frame pointer logic (if
required by CONFIG_FRAME_POINTER or the architecture's back chain
rules). This can be done with the new FP_SAVE/FP_RESTORE macros.
It currently only supports x86_64. It *almost* supports x86_32, but the
asmvalidate code doesn't yet know how to deal with 32-bit REL
relocations for the return whitelists. I tried to make the code generic
so that support for other architectures can be plugged in pretty easily.
As a first step, CONFIG_ASM_VALIDATION is disabled by default, and all
reported non-compliances result in warnings. Once we get them all
cleaned up, we can change the default to CONFIG_ASM_VALIDATION=y and
change the warnings to build errors so the asm code can stay clean.
Signed-off-by: Josh Poimboeuf <jpoimboe@...hat.com>
---
MAINTAINERS | 6 +
arch/Kconfig | 3 +
arch/x86/Kconfig | 1 +
arch/x86/Makefile | 6 +-
arch/x86/include/asm/func.h | 32 ++++
lib/Kconfig.debug | 21 +++
scripts/Makefile | 1 +
scripts/Makefile.build | 23 ++-
scripts/asmvalidate/Makefile | 17 ++
scripts/asmvalidate/arch-x86.c | 283 ++++++++++++++++++++++++++++++
scripts/asmvalidate/arch.h | 40 +++++
scripts/asmvalidate/asmvalidate.c | 324 ++++++++++++++++++++++++++++++++++
scripts/asmvalidate/elf.c | 354 ++++++++++++++++++++++++++++++++++++++
scripts/asmvalidate/elf.h | 74 ++++++++
scripts/asmvalidate/list.h | 217 +++++++++++++++++++++++
15 files changed, 1399 insertions(+), 3 deletions(-)
create mode 100644 scripts/asmvalidate/Makefile
create mode 100644 scripts/asmvalidate/arch-x86.c
create mode 100644 scripts/asmvalidate/arch.h
create mode 100644 scripts/asmvalidate/asmvalidate.c
create mode 100644 scripts/asmvalidate/elf.c
create mode 100644 scripts/asmvalidate/elf.h
create mode 100644 scripts/asmvalidate/list.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 469cd4d..831bf8b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1664,6 +1664,12 @@ S: Maintained
F: Documentation/hwmon/asc7621
F: drivers/hwmon/asc7621.c
+ASM VALIDATION
+M: Josh Poimboeuf <jpoimboe@...hat.com>
+S: Supported
+F: scripts/asmvalidate/
+F: arch/x86/include/asm/func.h
+
ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS
M: Corentin Chary <corentin.chary@...il.com>
L: acpi4asus-user@...ts.sourceforge.net
diff --git a/arch/Kconfig b/arch/Kconfig
index a65eafb..8d85326 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -499,6 +499,9 @@ config ARCH_HAS_ELF_RANDOMIZE
- arch_mmap_rnd()
- arch_randomize_brk()
+config HAVE_ASM_VALIDATION
+ bool
+
#
# ABI hall of shame
#
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 228aa35..a85e149 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -144,6 +144,7 @@ config X86
select VIRT_TO_BUS
select X86_DEV_DMA_OPS if X86_64
select X86_FEATURE_NAMES if PROC_FS
+ select HAVE_ASM_VALIDATION if X86_64
config INSTRUCTION_DECODER
def_bool y
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 118e6de..e2dd40e 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -174,9 +174,13 @@ KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
KBUILD_CFLAGS += $(mflags-y)
KBUILD_AFLAGS += $(mflags-y)
-archscripts: scripts_basic
+archscripts: scripts_basic $(objtree)/arch/x86/lib/inat-tables.c
$(Q)$(MAKE) $(build)=arch/x86/tools relocs
+# this file is needed early by scripts/asmvalidate
+$(objtree)/arch/x86/lib/inat-tables.c:
+ $(Q)$(MAKE) $(build)=arch/x86/lib $@
+
###
# Syscall table generation
diff --git a/arch/x86/include/asm/func.h b/arch/x86/include/asm/func.h
index 4d62782..52b3225 100644
--- a/arch/x86/include/asm/func.h
+++ b/arch/x86/include/asm/func.h
@@ -9,6 +9,14 @@
* every callable non-leaf asm function.
*/
.macro FP_SAVE name
+ .if CONFIG_ASM_VALIDATION
+ 163:
+ .pushsection __asmvalidate_fp_save, "ae"
+ _ASM_ALIGN
+ .long 163b - .
+ .popsection
+ .endif
+
.if CONFIG_FRAME_POINTER
push %_ASM_BP
_ASM_MOV %_ASM_SP, %_ASM_BP
@@ -19,6 +27,30 @@
.if CONFIG_FRAME_POINTER
pop %_ASM_BP
.endif
+
+ .if CONFIG_ASM_VALIDATION
+ 163:
+ .pushsection __asmvalidate_fp_restore, "ae"
+ _ASM_ALIGN
+ .long 163b - .
+ .popsection
+ .endif
+.endm
+
+/*
+ * This macro tells the asm validation script to ignore the instruction
+ * immediately after the macro. It should only be used in special cases where
+ * you're 100% sure that the asm validation constraints don't need to be
+ * adhered to. Use with caution!
+ */
+.macro ASMVALIDATE_IGNORE
+ .if CONFIG_ASM_VALIDATION
+ 163:
+ .pushsection __asmvalidate_ignore, "ae"
+ _ASM_ALIGN
+ .long 163b - .
+ .popsection
+ .endif
.endm
#endif /* _ASM_X86_FUNC_H */
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b908048..119dfd1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -332,6 +332,27 @@ config FRAME_POINTER
larger and slower, but it gives very useful debugging information
in case of kernel bugs. (precise oopses/stacktraces/warnings)
+
+config ASM_VALIDATION
+ bool "Enable compile-time asm code validation"
+ depends on HAVE_ASM_VALIDATION
+ default n
+ help
+ Add compile-time checks to enforce sane rules on all asm code, so
+ that stack debug metadata can be more reliable. Enforces the
+ following rules:
+
+ 1. Each callable function must be annotated with the ELF STT_FUNC
+ type.
+
+ 2. Each callable function must never leave its own bounds except when
+ returning.
+
+ 3. Each callable non-leaf function must have frame pointer logic (if
+ required by CONFIG_FRAME_POINTER or the architecture's back chain
+ rules).
+
+
config DEBUG_FORCE_WEAK_PER_CPU
bool "Force weak per-cpu definitions"
depends on DEBUG_KERNEL
diff --git a/scripts/Makefile b/scripts/Makefile
index 2016a64..c4c8350 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -37,6 +37,7 @@ subdir-y += mod
subdir-$(CONFIG_SECURITY_SELINUX) += selinux
subdir-$(CONFIG_DTC) += dtc
subdir-$(CONFIG_GDB_SCRIPTS) += gdb
+subdir-$(CONFIG_ASM_VALIDATION) += asmvalidate
# Let clean descend into subdirs
subdir- += basic kconfig package
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 01df30a..8bf1085 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -253,6 +253,25 @@ define rule_cc_o_c
mv -f $(dot-target).tmp $(dot-target).cmd
endef
+ifdef CONFIG_ASM_VALIDATION
+asmvalidate = $(objtree)/scripts/asmvalidate/asmvalidate
+cmd_asmvalidate = \
+ case $(@) in \
+ arch/x86/purgatory/*) ;; \
+ arch/x86/realmode/rm/*) ;; \
+ *) $(asmvalidate) "$(@)"; \
+ esac;
+endif
+
+define rule_as_o_S
+ $(call echo-cmd,as_o_S) $(cmd_as_o_S); \
+ $(cmd_asmvalidate) \
+ scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,as_o_S)' > \
+ $(dot-target).tmp; \
+ rm -f $(depfile); \
+ mv -f $(dot-target).tmp $(dot-target).cmd
+endef
+
# Built-in and composite module parts
$(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE
$(call cmd,force_checksrc)
@@ -290,8 +309,8 @@ $(obj)/%.s: $(src)/%.S FORCE
quiet_cmd_as_o_S = AS $(quiet_modtag) $@
cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
-$(obj)/%.o: $(src)/%.S FORCE
- $(call if_changed_dep,as_o_S)
+$(obj)/%.o: $(src)/%.S $(asmvalidate) FORCE
+ $(call if_changed_rule,as_o_S)
targets += $(real-objs-y) $(real-objs-m) $(lib-y)
targets += $(extra-y) $(MAKECMDGOALS) $(always)
diff --git a/scripts/asmvalidate/Makefile b/scripts/asmvalidate/Makefile
new file mode 100644
index 0000000..a202ad6
--- /dev/null
+++ b/scripts/asmvalidate/Makefile
@@ -0,0 +1,17 @@
+hostprogs-y := asmvalidate
+always := $(hostprogs-y)
+
+asmvalidate-objs := asmvalidate.o elf.o
+
+HOSTCFLAGS += -Werror
+HOSTLOADLIBES_asmvalidate := -lelf
+
+ifdef CONFIG_X86
+
+asmvalidate-objs += arch-x86.o
+
+HOSTCFLAGS_arch-x86.o := -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/
+
+$(obj)/arch-x86.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
+
+endif
diff --git a/scripts/asmvalidate/arch-x86.c b/scripts/asmvalidate/arch-x86.c
new file mode 100644
index 0000000..87e7073
--- /dev/null
+++ b/scripts/asmvalidate/arch-x86.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@...hat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdio.h>
+
+#define unlikely(cond) (cond)
+#include <asm/insn.h>
+#include <inat.c>
+#include <insn.c>
+
+#include "elf.h"
+#include "arch.h"
+
+static int is_x86_64(struct elf *elf)
+{
+ switch (elf->ehdr.e_machine) {
+ case EM_X86_64:
+ return 1;
+ case EM_386:
+ return 0;
+ default:
+ WARN("unexpected ELF machine type %d", elf->ehdr.e_machine);
+ return -1;
+ }
+}
+
+int arch_insn_decode(struct elf *elf, struct arch_insn *arch_insn,
+ unsigned long addr, unsigned int maxlen)
+{
+ struct insn insn;
+ int x86_64;
+ unsigned char op1, op2, ext;
+
+ x86_64 = is_x86_64(elf);
+ if (x86_64 == -1)
+ return 1;
+
+ insn_init(&insn, (void *)addr, maxlen, x86_64);
+ insn_get_length(&insn);
+ insn_get_opcode(&insn);
+ insn_get_modrm(&insn);
+ insn_get_immediate(&insn);
+
+ if (!insn.opcode.got)
+ return 1;
+
+ memset(arch_insn, 0, sizeof(*arch_insn));
+
+ arch_insn->len = insn.length;
+
+ if (insn.vex_prefix.nbytes)
+ return 0;
+
+ op1 = insn.opcode.bytes[0];
+ op2 = insn.opcode.bytes[1];
+
+ switch (op1) {
+ case 0x70 ... 0x7f:
+ arch_insn->jump = true;
+ break;
+ case 0x0f:
+ if (op2 >= 0x80 && op2 <= 0x8f)
+ arch_insn->jump = true;
+ break;
+ case 0xe3:
+ arch_insn->jump = true;
+ break;
+ case 0xe9:
+ case 0xeb:
+ arch_insn->jump = true;
+ arch_insn->unconditional = true;
+ break;
+
+ case 0xc2:
+ case 0xc3:
+ arch_insn->ret = true;
+ break;
+ case 0xe8:
+ arch_insn->call = true;
+ break;
+ case 0xff:
+ ext = X86_MODRM_REG(insn.modrm.bytes[0]);
+ if (ext == 2 || ext == 3)
+ arch_insn->call = true;
+ else if (ext == 4 || ext == 5) {
+ arch_insn->jump = true;
+ arch_insn->unconditional = true;
+ }
+ break;
+ }
+
+ if (arch_insn->jump && insn.immediate.value)
+ arch_insn->dest = addr + insn.length + insn.immediate.value;
+
+ return 0;
+}
+
+static struct rela *find_rela_at_offset(struct section *sec,
+ unsigned long offset)
+{
+ struct rela *rela;
+
+ list_for_each_entry(rela, &sec->relas, list)
+ if (rela->offset == offset)
+ return rela;
+
+ return NULL;
+}
+
+static struct symbol *find_containing_func(struct section *sec,
+ unsigned long offset)
+{
+ struct symbol *sym;
+ unsigned long addr = sec->start + offset;
+
+ list_for_each_entry(sym, &sec->symbols, list) {
+ if (sym->type != STT_FUNC)
+ continue;
+ if (addr >= sym->start && addr < sym->end)
+ return sym;
+ }
+
+ return NULL;
+}
+
+#define ALT_ENTRY_SIZE 13
+#define ALT_INSTR_OFFSET 0
+#define ALT_REPL_OFFSET 4
+#define ALT_REPL_LEN_OFFSET 11
+
+static int validate_alternative_insn(struct elf *elf, struct symbol *old_func,
+ struct section *replacesec,
+ unsigned long offset, int maxlen,
+ unsigned int *len)
+{
+ struct rela *dest_rela;
+ struct arch_insn insn;
+ int ret, warnings = 0;
+
+ ret = arch_insn_decode(elf, &insn, replacesec->start + offset, maxlen);
+ if (ret) {
+ WARN("can't decode instruction at %s+0x%lx", replacesec->name,
+ offset);
+ return -1;
+ }
+
+ *len = insn.len;
+
+ if (insn.call) {
+ WARN("call instruction in %s", replacesec->name);
+ return 1;
+ } else if (insn.jump) {
+ if (insn.dest) {
+ WARN("unexpected non-relocated jump dest at %s+0x%lx",
+ replacesec->name, offset);
+ return -1;
+ }
+
+ dest_rela = find_rela_at_offset(replacesec->rela, offset + 1);
+ if (!dest_rela) {
+ WARN("can't find rela for jump instruction at %s+0x%lx",
+ replacesec->name, offset);
+ return -1;
+ }
+
+ if (old_func &&
+ !(dest_rela->sym->sec == old_func->sec &&
+ dest_rela->addend+dest_rela->sym->start+4 >= old_func->start &&
+ dest_rela->addend+dest_rela->sym->start+4 < old_func->end)) {
+ WARN("alternative jump to outside the scope of original function %s",
+ old_func->name);
+ warnings++;
+ }
+ }
+
+ return warnings;
+}
+
+static int validate_alternative_entry(struct elf *elf, struct section *altsec,
+ struct section *replacesec, int entry)
+{
+ struct rela *old_rela, *new_rela;
+ struct symbol *old_func;
+ unsigned long alt_offset, insn_offset;
+ unsigned int insn_len;
+ unsigned char new_insns_len;
+ int ret, warnings = 0;
+
+ alt_offset = entry * ALT_ENTRY_SIZE;
+
+ old_rela = find_rela_at_offset(altsec->rela,
+ alt_offset + ALT_INSTR_OFFSET);
+ if (!old_rela) {
+ WARN("can't find altinstructions rela at offset 0x%lx",
+ alt_offset + ALT_INSTR_OFFSET);
+ return -1;
+ }
+
+ if (old_rela->sym->type != STT_SECTION) {
+ WARN("don't know how to deal with non-section symbol %s",
+ old_rela->sym->name);
+ return -1;
+ }
+
+ old_func = find_containing_func(old_rela->sym->sec, old_rela->addend);
+
+ new_rela = find_rela_at_offset(altsec->rela,
+ alt_offset + ALT_REPL_OFFSET);
+ if (!new_rela) {
+ WARN("can't find altinstructions rela at offset 0x%lx",
+ alt_offset + ALT_REPL_OFFSET);
+ return -1;
+ }
+
+ if (new_rela->sym != replacesec->sym) {
+ WARN("new_rela sym %s isn't .altinstr_replacement",
+ new_rela->sym->name);
+ return -1;
+ }
+
+ new_insns_len = *(unsigned char *)(altsec->start + alt_offset + ALT_REPL_LEN_OFFSET);
+
+ for (insn_offset = new_rela->addend; new_insns_len > 0;
+ insn_offset += insn_len, new_insns_len -= insn_len) {
+ ret = validate_alternative_insn(elf, old_func, replacesec,
+ insn_offset, new_insns_len,
+ &insn_len);
+ if (ret < 0)
+ return ret;
+
+ warnings += ret;
+ }
+
+ return warnings;
+}
+
+int arch_validate_alternatives(struct elf *elf)
+{
+ struct section *altsec, *replacesec;
+ int entry, ret, warnings = 0;
+ unsigned int nr_entries;
+
+ altsec = elf_find_section_by_name(elf, ".altinstructions");
+ if (!altsec)
+ return 0;
+
+ if ((altsec->end - altsec->start) % ALT_ENTRY_SIZE != 0) {
+ WARN(".altinstructions size not a multiple of %d",
+ ALT_ENTRY_SIZE);
+ return -1;
+ }
+
+ nr_entries = (altsec->end - altsec->start) / ALT_ENTRY_SIZE;
+
+ replacesec = elf_find_section_by_name(elf, ".altinstr_replacement");
+ if (!replacesec)
+ return 0;
+
+ for (entry = 0; entry < nr_entries; entry++) {
+ ret = validate_alternative_entry(elf, altsec, replacesec,
+ entry);
+ if (ret < 0)
+ return ret;
+
+ warnings += ret;
+ }
+
+ return warnings;
+}
diff --git a/scripts/asmvalidate/arch.h b/scripts/asmvalidate/arch.h
new file mode 100644
index 0000000..da85a18
--- /dev/null
+++ b/scripts/asmvalidate/arch.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@...hat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ARCH_H_
+#define _ARCH_H_
+
+#include <stdbool.h>
+#include "elf.h"
+
+struct arch_insn {
+ bool call, ret, jump;
+ unsigned int len;
+
+ /* jump-specific variables */
+ bool unconditional;
+ unsigned long dest;
+};
+
+/* decode an instruction and populate the arch_insn accordingly */
+int arch_insn_decode(struct elf *elf, struct arch_insn *arch_insn,
+ unsigned long addr, unsigned int maxlen);
+
+/* validate any .altinstructions sections */
+int arch_validate_alternatives(struct elf *elf);
+
+#endif /* _ARCH_H_ */
diff --git a/scripts/asmvalidate/asmvalidate.c b/scripts/asmvalidate/asmvalidate.c
new file mode 100644
index 0000000..2c586c0
--- /dev/null
+++ b/scripts/asmvalidate/asmvalidate.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@...hat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * asmvalidate:
+ *
+ * This tool runs on every compiled .S file. Its goal is to enforce sane rules
+ * on all asm code, so that stack debug metadata (frame/back chain pointers
+ * and/or DWARF CFI metadata) can be made reliable.
+ *
+ * It enforces the following rules:
+ *
+ * 1. Each callable function must be annotated with the ELF STT_FUNC type.
+ * This is typically done using the ENTRY/ENDPROC macros. If asmvalidate
+ * finds a return instruction outside of a function, it flags an error,
+ * since that usually indicates callable code which should be annotated
+ * accordingly.
+ *
+ * 2. Each callable function must never leave its own bounds (i.e. with a jump
+ * to outside the function) except when returning.
+ *
+ * 3. Each callable non-leaf function must have frame pointer logic (if
+ * required by CONFIG_FRAME_POINTER or the architecture's back chain rules).
+ * This should be done by the FP_SAVE/FP_RESTORE macros.
+ *
+ */
+
+#include <argp.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "elf.h"
+#include "arch.h"
+
+int warnings;
+
+struct args {
+ char *args[1];
+};
+static const char args_doc[] = "file.o";
+static struct argp_option options[] = {
+ {0},
+};
+static error_t parse_opt(int key, char *arg, struct argp_state *state)
+{
+ /* Get the input argument from argp_parse, which we
+ know is a pointer to our args structure. */
+ struct args *args = state->input;
+
+ switch (key) {
+ case ARGP_KEY_ARG:
+ if (state->arg_num >= 1)
+ /* Too many arguments. */
+ argp_usage(state);
+ args->args[state->arg_num] = arg;
+ break;
+ case ARGP_KEY_END:
+ if (state->arg_num < 1)
+ /* Not enough arguments. */
+ argp_usage(state);
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+ return 0;
+}
+static struct argp argp = { options, parse_opt, args_doc, 0 };
+
+static bool used_macro(struct elf *elf, struct section *sec,
+ unsigned long offset, const char *macro)
+{
+ struct section *macro_sec;
+ struct rela *rela;
+ char rela_sec[51];
+
+ strcpy(rela_sec, ".rela__asmvalidate_");
+ strncat(rela_sec, macro, 50 - strlen(rela_sec));
+
+ macro_sec = elf_find_section_by_name(elf, rela_sec);
+ if (!macro_sec)
+ return false;
+
+ list_for_each_entry(rela, &macro_sec->relas, list)
+ if (rela->sym->type == STT_SECTION &&
+ rela->sym == sec->sym &&
+ rela->addend == offset)
+ return true;
+
+ return false;
+}
+
+/*
+ * Check if the ASMVALIDATE_IGNORE macro was used at the given section offset.
+ */
+static bool ignore_macro(struct elf *elf, struct section *sec,
+ unsigned long offset)
+{
+ return used_macro(elf, sec, offset, "ignore");
+}
+
+/*
+ * Check if the FP_SAVE macro was used at the given section offset.
+ */
+static bool fp_save_macro(struct elf *elf, struct section *sec,
+ unsigned long offset)
+{
+ return used_macro(elf, sec, offset, "fp_save");
+}
+
+/*
+ * Check if the FP_RESTORE macro was used at the given section offset.
+ */
+static bool fp_restore_macro(struct elf *elf, struct section *sec,
+ unsigned long offset)
+{
+ return used_macro(elf, sec, offset, "fp_restore");
+}
+
+/*
+ * For the given collection of instructions which are outside an STT_FUNC
+ * function, ensure there are no (un-whitelisted) return instructions.
+ */
+static int validate_nonfunction(struct elf *elf, struct section *sec,
+ unsigned long offset, unsigned int len)
+{
+ unsigned long insn_offset;
+ struct arch_insn insn;
+ int ret, warnings = 0;
+
+ for (insn_offset = offset; len > 0;
+ insn_offset += insn.len, len -= insn.len) {
+ ret = arch_insn_decode(elf, &insn, sec->start + insn_offset,
+ len);
+ if (ret)
+ return -1;
+
+ if (insn.ret && !ignore_macro(elf, sec, insn_offset)) {
+ WARN("%s+0x%lx: return instruction outside of a function",
+ sec->name, insn_offset);
+ warnings++;
+ }
+ }
+
+ return warnings;
+}
+
+/*
+ * validate_function():
+ *
+ * 1. Ensure the function never leaves its own bounds.
+ *
+ * 2. If it's a non-leaf function, ensure that it uses the frame pointer macros
+ * (FP_SAVE and FP_RESTORE).
+ *
+ * Return value:
+ * -1: bad instruction
+ * 1: missing frame pointer logic
+ * 0: validation succeeded
+ */
+static int validate_function(struct elf *elf, struct symbol *func)
+{
+ struct section *sec = func->sec;
+ unsigned long addr;
+ struct arch_insn insn = {0};
+ bool leaf = true, fp;
+ int ret, warnings = 0;
+
+ fp = fp_save_macro(elf, sec, func->start - sec->start);
+
+ for (addr = func->start; addr < func->end; addr += insn.len) {
+ ret = arch_insn_decode(elf, &insn, addr, func->end - addr);
+ if (ret)
+ return -1;
+
+ if (insn.call)
+ leaf = false;
+ else if (insn.ret) {
+ if (fp &&
+ !fp_restore_macro(elf, sec, addr - sec->start)) {
+ WARN("%s()+0x%lx: return not preceded by FP_RESTORE macro",
+ func->name, addr - func->start);
+ warnings++;
+ }
+ }
+ else if (insn.jump &&
+ (insn.dest < func->start ||
+ insn.dest >= func->end) &&
+ !ignore_macro(elf, sec, addr - sec->start)) {
+ WARN("%s()+0x%lx: unsupported jump to outside of function",
+ func->name, addr - func->start);
+ warnings++;
+ }
+ }
+
+ if (!insn.ret &&
+ !(insn.unconditional &&
+ insn.dest >= func->start && insn.dest < func->end)) {
+ WARN("%s(): unsupported fallthrough at end of function",
+ func->name);
+ warnings++;
+ }
+
+ if (!leaf && !fp) {
+ WARN("%s(): missing FP_SAVE/RESTORE macros", func->name);
+ warnings++;
+ }
+
+ return warnings;
+}
+
+/*
+ * For the given section, find all functions and non-function regions and
+ * validate them accordingly.
+ */
+static int validate_section(struct elf *elf, struct section *sec)
+{
+ struct symbol *func, *last_func;
+ struct symbol null_func = {};
+ int ret, warnings = 0;
+
+ if (!(sec->sh.sh_flags & SHF_EXECINSTR))
+ return 0;
+
+ if (list_empty(&sec->symbols)) {
+ WARN("%s: no symbols", sec->name);
+ return -1;
+ }
+
+ /* alternatives are validated later by arch_validate_alternatives() */
+ if (!strcmp(sec->name, ".altinstr_replacement"))
+ return 0;
+
+ last_func = &null_func;
+ last_func->start = last_func->end = sec->start;
+ list_for_each_entry(func, &sec->symbols, list) {
+ if (func->type != STT_FUNC)
+ continue;
+
+ if (func->start != last_func->start &&
+ func->end != last_func->end &&
+ func->start < last_func->end) {
+ WARN("overlapping functions %s and %s",
+ last_func->name, func->name);
+ warnings++;
+ }
+
+ if (func->start > last_func->end) {
+ ret = validate_nonfunction(elf, sec,
+ last_func->end - sec->start,
+ func->start - last_func->end);
+ if (ret < 0)
+ return -1;
+
+ warnings += ret;
+ }
+
+ ret = validate_function(elf, func);
+ if (ret < 0)
+ return -1;
+
+ warnings += ret;
+
+ last_func = func;
+ }
+
+ if (last_func->end < sec->end) {
+ ret = validate_nonfunction(elf, sec,
+ last_func->end - sec->start,
+ sec->end - last_func->end);
+ if (ret < 0)
+ return -1;
+
+ warnings += ret;
+ }
+
+ return warnings;
+}
+
+int main(int argc, char *argv[])
+{
+ struct args args;
+ struct elf *elf;
+ struct section *sec;
+ int ret, warnings = 0;
+
+ argp_parse(&argp, argc, argv, 0, 0, &args);
+
+ elf = elf_open(args.args[0]);
+ if (!elf) {
+ fprintf(stderr, "error reading elf file %s\n", args.args[0]);
+ return 1;
+ }
+
+ list_for_each_entry(sec, &elf->sections, list) {
+ ret = validate_section(elf, sec);
+ if (ret < 0)
+ return 1;
+
+ warnings += ret;
+ }
+
+ ret = arch_validate_alternatives(elf);
+ if (ret < 0)
+ return 1;
+
+ warnings += ret;
+
+ /* ignore warnings for now until we get all the asm code cleaned up */
+ return 0;
+}
diff --git a/scripts/asmvalidate/elf.c b/scripts/asmvalidate/elf.c
new file mode 100644
index 0000000..b2b0986
--- /dev/null
+++ b/scripts/asmvalidate/elf.c
@@ -0,0 +1,354 @@
+/*
+ * elf.c - ELF access library
+ *
+ * Adapted from kpatch (https://github.com/dynup/kpatch):
+ * Copyright (C) 2013-2015 Josh Poimboeuf <jpoimboe@...hat.com>
+ * Copyright (C) 2014 Seth Jennings <sjenning@...hat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "elf.h"
+
+struct section *elf_find_section_by_name(struct elf *elf, const char *name)
+{
+ struct section *sec;
+
+ list_for_each_entry(sec, &elf->sections, list)
+ if (!strcmp(sec->name, name))
+ return sec;
+
+ return NULL;
+}
+
+static struct section *elf_find_section_by_index(struct elf *elf,
+ unsigned int index)
+{
+ struct section *sec;
+
+ list_for_each_entry(sec, &elf->sections, list)
+ if (sec->index == index)
+ return sec;
+
+ return NULL;
+}
+
+static struct symbol *elf_find_symbol_by_index(struct elf *elf,
+ unsigned int index)
+{
+ struct section *sec;
+ struct symbol *sym;
+
+ list_for_each_entry(sec, &elf->sections, list)
+ list_for_each_entry(sym, &sec->symbols, list)
+ if (sym->index == index)
+ return sym;
+
+ return NULL;
+}
+
+static int elf_read_sections(struct elf *elf)
+{
+ Elf_Scn *s = NULL;
+ struct section *sec;
+ size_t shstrndx, sections_nr;
+ int i;
+
+ if (elf_getshdrnum(elf->elf, &sections_nr)) {
+ perror("elf_getshdrnum");
+ return -1;
+ }
+
+ if (elf_getshdrstrndx(elf->elf, &shstrndx)) {
+ perror("elf_getshdrstrndx");
+ return -1;
+ }
+
+ for (i = 0; i < sections_nr; i++) {
+ sec = malloc(sizeof(*sec));
+ if (!sec) {
+ perror("malloc");
+ return -1;
+ }
+ memset(sec, 0, sizeof(*sec));
+
+ INIT_LIST_HEAD(&sec->symbols);
+ INIT_LIST_HEAD(&sec->relas);
+
+ list_add_tail(&sec->list, &elf->sections);
+
+ s = elf_getscn(elf->elf, i);
+ if (!s) {
+ perror("elf_getscn");
+ return -1;
+ }
+
+ sec->index = elf_ndxscn(s);
+
+ if (!gelf_getshdr(s, &sec->sh)) {
+ perror("gelf_getshdr");
+ return -1;
+ }
+
+ sec->name = elf_strptr(elf->elf, shstrndx, sec->sh.sh_name);
+ if (!sec->name) {
+ perror("elf_strptr");
+ return -1;
+ }
+
+ sec->data = elf_getdata(s, NULL);
+ if (!sec->data) {
+ perror("elf_getdata");
+ return -1;
+ }
+
+ if (sec->data->d_off != 0 ||
+ sec->data->d_size != sec->sh.sh_size) {
+ WARN("unexpected data attributes for %s", sec->name);
+ return -1;
+ }
+
+ sec->start = (unsigned long)sec->data->d_buf;
+ sec->end = sec->start + sec->data->d_size;
+ }
+
+ /* sanity check, one more call to elf_nextscn() should return NULL */
+ if (elf_nextscn(elf->elf, s)) {
+ WARN("section entry mismatch");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int elf_read_symbols(struct elf *elf)
+{
+ struct section *symtab;
+ struct symbol *sym;
+ struct list_head *entry, *tmp;
+ int symbols_nr, i;
+
+ symtab = elf_find_section_by_name(elf, ".symtab");
+ if (!symtab) {
+ WARN("missing symbol table");
+ return -1;
+ }
+
+ symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize;
+
+ for (i = 0; i < symbols_nr; i++) {
+ sym = malloc(sizeof(*sym));
+ if (!sym) {
+ perror("malloc");
+ return -1;
+ }
+ memset(sym, 0, sizeof(*sym));
+
+ sym->index = i;
+
+ if (!gelf_getsym(symtab->data, i, &sym->sym)) {
+ perror("gelf_getsym");
+ goto err;
+ }
+
+ sym->name = elf_strptr(elf->elf, symtab->sh.sh_link,
+ sym->sym.st_name);
+ if (!sym->name) {
+ perror("elf_strptr");
+ goto err;
+ }
+
+ sym->type = GELF_ST_TYPE(sym->sym.st_info);
+ sym->bind = GELF_ST_BIND(sym->sym.st_info);
+
+ if (sym->sym.st_shndx > SHN_UNDEF &&
+ sym->sym.st_shndx < SHN_LORESERVE) {
+ sym->sec = elf_find_section_by_index(elf,
+ sym->sym.st_shndx);
+ if (!sym->sec) {
+ WARN("couldn't find section for symbol %s",
+ sym->name);
+ goto err;
+ }
+ if (sym->type == STT_SECTION) {
+ sym->name = sym->sec->name;
+ sym->sec->sym = sym;
+ }
+ } else
+ sym->sec = elf_find_section_by_index(elf, 0);
+
+ sym->start = sym->sec->start + sym->sym.st_value;
+ sym->end = sym->start + sym->sym.st_size;
+
+ /* sorted insert into a per-section list */
+ entry = &sym->sec->symbols;
+ list_for_each_prev(tmp, &sym->sec->symbols) {
+ struct symbol *s;
+
+ s = list_entry(tmp, struct symbol, list);
+
+ if (sym->start > s->start) {
+ entry = tmp;
+ break;
+ }
+
+ if (sym->start == s->start && sym->end >= s->end) {
+ entry = tmp;
+ break;
+ }
+ }
+ list_add(&sym->list, entry);
+ }
+
+ return 0;
+
+err:
+ free(sym);
+ return -1;
+}
+
+static int elf_read_relas(struct elf *elf)
+{
+ struct section *sec;
+ struct rela *rela;
+ int i;
+ unsigned int symndx;
+
+ list_for_each_entry(sec, &elf->sections, list) {
+ if (sec->sh.sh_type != SHT_RELA)
+ continue;
+
+ sec->base = elf_find_section_by_name(elf, sec->name + 5);
+ if (!sec->base) {
+ WARN("can't find base section for rela section %s",
+ sec->name);
+ return -1;
+ }
+
+ sec->base->rela = sec;
+
+ for (i = 0; i < sec->sh.sh_size / sec->sh.sh_entsize; i++) {
+ rela = malloc(sizeof(*rela));
+ if (!rela) {
+ perror("malloc");
+ return -1;
+ }
+ memset(rela, 0, sizeof(*rela));
+
+ list_add_tail(&rela->list, &sec->relas);
+
+ if (!gelf_getrela(sec->data, i, &rela->rela)) {
+ perror("gelf_getrela");
+ return -1;
+ }
+
+ rela->type = GELF_R_TYPE(rela->rela.r_info);
+ rela->addend = rela->rela.r_addend;
+ rela->offset = rela->rela.r_offset;
+ symndx = GELF_R_SYM(rela->rela.r_info);
+ rela->sym = elf_find_symbol_by_index(elf, symndx);
+ if (!rela->sym) {
+ WARN("can't find rela entry symbol %d for %s",
+ symndx, sec->name);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+struct elf *elf_open(const char *name)
+{
+ struct elf *elf;
+
+ elf_version(EV_CURRENT);
+
+ elf = malloc(sizeof(*elf));
+ if (!elf) {
+ perror("malloc");
+ return NULL;
+ }
+ memset(elf, 0, sizeof(*elf));
+
+ INIT_LIST_HEAD(&elf->sections);
+
+ elf->name = strdup(name);
+ if (!elf->name) {
+ perror("strdup");
+ goto err;
+ }
+
+ elf->fd = open(name, O_RDONLY);
+ if (elf->fd == -1) {
+ perror("open");
+ goto err;
+ }
+
+ elf->elf = elf_begin(elf->fd, ELF_C_READ_MMAP, NULL);
+ if (!elf->elf) {
+ perror("elf_begin");
+ goto err;
+ }
+
+ if (!gelf_getehdr(elf->elf, &elf->ehdr)) {
+ perror("gelf_getehdr");
+ goto err;
+ }
+
+ if (elf_read_sections(elf))
+ goto err;
+
+ if (elf_read_symbols(elf))
+ goto err;
+
+ if (elf_read_relas(elf))
+ goto err;
+
+ return elf;
+
+err:
+ elf_close(elf);
+ return NULL;
+}
+
+void elf_close(struct elf *elf)
+{
+ struct section *sec, *tmpsec;
+ struct symbol *sym, *tmpsym;
+
+ list_for_each_entry_safe(sec, tmpsec, &elf->sections, list) {
+ list_for_each_entry_safe(sym, tmpsym, &sec->symbols, list) {
+ list_del(&sym->list);
+ free(sym);
+ }
+ list_del(&sec->list);
+ free(sec);
+ }
+ if (elf->name)
+ free(elf->name);
+ if (elf->fd > 0)
+ close(elf->fd);
+ if (elf->elf)
+ elf_end(elf->elf);
+ free(elf);
+}
diff --git a/scripts/asmvalidate/elf.h b/scripts/asmvalidate/elf.h
new file mode 100644
index 0000000..abfc902
--- /dev/null
+++ b/scripts/asmvalidate/elf.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@...hat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ELF_H_
+#define _ELF_H_
+
+#include <gelf.h>
+#include "list.h"
+
/*
 * Error reporting helper: prefixes the message with the tool name and the
 * file being validated.  NOTE: this expands 'elf->name', so it may only be
 * used where a 'struct elf *' named exactly 'elf' is in scope.
 */
#define WARN(format, ...) \
	fprintf(stderr, \
		"asmvalidate: %s: " format "\n", \
		elf->name, ##__VA_ARGS__)
+
/* One ELF section, linked into struct elf's 'sections' list. */
struct section {
	struct list_head list;		/* node in elf->sections */
	GElf_Shdr sh;			/* raw section header */
	struct list_head symbols;	/* struct symbol entries in this section */
	struct list_head relas;		/* struct rela entries (relocation sections) */
	struct section *base, *rela;	/* NOTE(review): presumably the section a
					 * .rela section applies to, and a section's
					 * .rela companion -- set outside this chunk,
					 * confirm against elf_read_sections() */
	struct symbol *sym;		/* NOTE(review): likely the section symbol */
	Elf_Data *data;			/* section contents (libelf descriptor) */
	char *name;
	int index;			/* index in the section header table */
	unsigned long start, end;	/* NOTE(review): bounds -- set elsewhere */
};
+
/* One ELF symbol; looked up by table index via elf_find_symbol_by_index(). */
struct symbol {
	struct list_head list;		/* node in section->symbols */
	GElf_Sym sym;			/* raw symbol table entry */
	struct section *sec;		/* section the symbol is defined in */
	char *name;
	int index;			/* symbol table index */
	unsigned char bind, type;	/* NOTE(review): presumably
					 * GELF_ST_BIND/GELF_ST_TYPE of st_info
					 * -- set outside this chunk, confirm */
	unsigned long start, end;	/* NOTE(review): bounds -- set elsewhere */
};
+
/* One relocation entry, parsed in elf_read_relas(). */
struct rela {
	struct list_head list;		/* node in section->relas */
	GElf_Rela rela;			/* raw relocation record */
	struct symbol *sym;		/* symbol referenced by r_info */
	unsigned int type;		/* GELF_R_TYPE(r_info) */
	int offset;			/* r_offset; NOTE(review): GElf_Rela fields
					 * are 64-bit -- 'int' truncates offsets
					 * beyond INT_MAX, confirm intended */
	int addend;			/* r_addend; same truncation caveat */
};
+
/* An open ELF file; created by elf_open(), released by elf_close(). */
struct elf {
	Elf *elf;			/* libelf handle from elf_begin() */
	GElf_Ehdr ehdr;			/* ELF header from gelf_getehdr() */
	int fd;				/* underlying file descriptor */
	char *name;			/* strdup'd file name (owned) */
	struct list_head sections;	/* list of struct section */
};
+
+
+struct elf *elf_open(const char *name);
+struct section *elf_find_section_by_name(struct elf *elf, const char *name);
+void elf_close(struct elf *elf);
+
+#endif /* _ELF_H_ */
diff --git a/scripts/asmvalidate/list.h b/scripts/asmvalidate/list.h
new file mode 100644
index 0000000..25716b5
--- /dev/null
+++ b/scripts/asmvalidate/list.h
@@ -0,0 +1,217 @@
+#ifndef _LIST_H
+#define _LIST_H
+
/*
 * Byte offset of MEMBER within TYPE.
 * NOTE(review): the null-pointer idiom is formally undefined behavior;
 * the standard offsetof from <stddef.h> would be the portable spelling.
 */
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)

/* Map a pointer to a member back to a pointer to its enclosing struct. */
#define container_of(ptr, type, member) ({ \
	const typeof(((type *)0)->member) *__mptr = (ptr); \
	(type *)((char *)__mptr - offsetof(type, member)); })
+
/*
 * Doubly-linked circular list, modeled on the kernel's <linux/list.h>.
 * A list head is a sentinel node; an empty list points at itself.
 */

/* Poison values stored in deleted entries to trap use-after-delete. */
#define LIST_POISON1  ((void *) 0x00100100)
#define LIST_POISON2  ((void *) 0x00200200)

struct list_head {
	struct list_head *next, *prev;
};

/* Static initializer: both links of an empty list point at itself. */
#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Define and initialize a list head in one step. */
#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)

/* Run-time initialization of an empty list. */
static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->prev = list;
	list->next = list;
}

/* Splice @node between the two known-adjacent entries @left and @right. */
static inline void __list_add(struct list_head *node,
			      struct list_head *left,
			      struct list_head *right)
{
	right->prev = node;
	node->next = right;
	node->prev = left;
	left->next = node;
}

/* Insert @node immediately after @head (stack-like push). */
static inline void list_add(struct list_head *node, struct list_head *head)
{
	__list_add(node, head, head->next);
}

/* Insert @node immediately before @head (queue-like append). */
static inline void list_add_tail(struct list_head *node,
				 struct list_head *head)
{
	__list_add(node, head->prev, head);
}

/* Bridge @left and @right, unlinking whatever lay between them. */
static inline void __list_del(struct list_head *left,
			      struct list_head *right)
{
	right->prev = left;
	left->next = right;
}

/* Unlink @entry, leaving its own link fields untouched. */
static inline void __list_del_entry(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
}

/* Unlink @entry and poison its links to catch stale use. */
static inline void list_del(struct list_head *entry)
{
	__list_del_entry(entry);
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}
+
+static inline void list_replace(struct list_head *old,
+ struct list_head *new)
+{
+ new->next = old->next;
+ new->next->prev = new;
+ new->prev = old->prev;
+ new->prev->next = new;
+}
+
+static inline void list_replace_init(struct list_head *old,
+ struct list_head *new)
+{
+ list_replace(old, new);
+ INIT_LIST_HEAD(old);
+}
+
+static inline void list_del_init(struct list_head *entry)
+{
+ __list_del_entry(entry);
+ INIT_LIST_HEAD(entry);
+}
+
+static inline void list_move(struct list_head *list, struct list_head *head)
+{
+ __list_del_entry(list);
+ list_add(list, head);
+}
+
+static inline void list_move_tail(struct list_head *list,
+ struct list_head *head)
+{
+ __list_del_entry(list);
+ list_add_tail(list, head);
+}
+
+static inline int list_is_last(const struct list_head *list,
+ const struct list_head *head)
+{
+ return list->next == head;
+}
+
+static inline int list_empty(const struct list_head *head)
+{
+ return head->next == head;
+}
+
+static inline int list_empty_careful(const struct list_head *head)
+{
+ struct list_head *next = head->next;
+
+ return (next == head) && (next == head->prev);
+}
+
+static inline void list_rotate_left(struct list_head *head)
+{
+ struct list_head *first;
+
+ if (!list_empty(head)) {
+ first = head->next;
+ list_move_tail(first, head);
+ }
+}
+
+static inline int list_is_singular(const struct list_head *head)
+{
+ return !list_empty(head) && (head->next == head->prev);
+}
+
/* Cast a member pointer back to its containing entry. */
#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)

/* First/last entry in a list; result is undefined on an empty list. */
#define list_first_entry(ptr, type, member) \
	list_entry((ptr)->next, type, member)

#define list_last_entry(ptr, type, member) \
	list_entry((ptr)->prev, type, member)

/* First entry, or NULL when the list is empty. */
#define list_first_entry_or_null(ptr, type, member) \
	(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)

/* Next/previous entry of the same type as @pos. */
#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)

#define list_prev_entry(pos, member) \
	list_entry((pos)->member.prev, typeof(*(pos)), member)

/* Iterate over raw list_head nodes, forward. */
#define list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)

/* Iterate over raw list_head nodes, backward. */
#define list_for_each_prev(pos, head) \
	for (pos = (head)->prev; pos != (head); pos = pos->prev)

/* As list_for_each, but safe against removal of @pos (@n is lookahead). */
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
	     pos = n, n = pos->next)

#define list_for_each_prev_safe(pos, n, head) \
	for (pos = (head)->prev, n = pos->prev; \
	     pos != (head); \
	     pos = n, n = pos->prev)

/* Iterate over typed entries, forward. */
#define list_for_each_entry(pos, head, member) \
	for (pos = list_first_entry(head, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = list_next_entry(pos, member))

/* Iterate over typed entries, backward. */
#define list_for_each_entry_reverse(pos, head, member) \
	for (pos = list_last_entry(head, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = list_prev_entry(pos, member))

/* Prepare @pos for list_for_each_entry_continue(): keep it, or the head. */
#define list_prepare_entry(pos, head, member) \
	((pos) ? : list_entry(head, typeof(*pos), member))

/* Continue iterating after the current @pos. */
#define list_for_each_entry_continue(pos, head, member) \
	for (pos = list_next_entry(pos, member); \
	     &pos->member != (head); \
	     pos = list_next_entry(pos, member))

/* Continue iterating backward before the current @pos. */
#define list_for_each_entry_continue_reverse(pos, head, member) \
	for (pos = list_prev_entry(pos, member); \
	     &pos->member != (head); \
	     pos = list_prev_entry(pos, member))

/* Iterate from the current @pos (inclusive) to the end. */
#define list_for_each_entry_from(pos, head, member) \
	for (; &pos->member != (head); \
	     pos = list_next_entry(pos, member))

/* Typed iteration safe against removal of @pos; @n holds the lookahead. */
#define list_for_each_entry_safe(pos, n, head, member) \
	for (pos = list_first_entry(head, typeof(*pos), member), \
	     n = list_next_entry(pos, member); \
	     &pos->member != (head); \
	     pos = n, n = list_next_entry(n, member))

/* Safe variant continuing after the current @pos. */
#define list_for_each_entry_safe_continue(pos, n, head, member) \
	for (pos = list_next_entry(pos, member), \
	     n = list_next_entry(pos, member); \
	     &pos->member != (head); \
	     pos = n, n = list_next_entry(n, member))

/* Safe variant starting from the current @pos (inclusive). */
#define list_for_each_entry_safe_from(pos, n, head, member) \
	for (n = list_next_entry(pos, member); \
	     &pos->member != (head); \
	     pos = n, n = list_next_entry(n, member))

/* Safe variant iterating backward. */
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
	for (pos = list_last_entry(head, typeof(*pos), member), \
	     n = list_prev_entry(pos, member); \
	     &pos->member != (head); \
	     pos = n, n = list_prev_entry(n, member))
+
+#endif /* _LIST_H */
--
2.1.0
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists