Message-ID: <1260258910.13131.26.camel@yhuang-dev.sh.intel.com>
Date:	Tue, 08 Dec 2009 15:55:10 +0800
From:	Huang Ying <ying.huang@...el.com>
To:	Len Brown <len.brown@...el.com>
Cc:	ACPI Devel Mailing List <linux-acpi@...r.kernel.org>,
	Andi Kleen <ak@...ux.intel.com>,
	"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: [PATCH 1/4] acpi, apei, APEI supporting infrastructure

APEI stands for ACPI Platform Error Interface, which allows the
platform to report errors (for example from the chipset) to the
operating system. This improves NMI handling in particular. In
addition, it supports error serialization and error injection.

For more information about APEI, please refer to ACPI Specification
version 4.0, chapter 17.

This patch provides some common functions used by more than one APEI
table, including:

- Framework of interpreter for EINJ and ERST

A machine-readable language is defined for EINJ and ERST, which the
OS executes to drive the firmware to fulfill the corresponding
functions. The machine languages for EINJ and ERST are compatible, so
a common interpreter framework is defined for them.
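
As a rough usage sketch, assuming a hypothetical instruction table
(the real ERST/EINJ tables are defined by later patches in this
series; the ACPI_ERST_* values are from ACPICA's <acpi/actbl1.h>, and
action_table/n_entries are illustrative):

	static struct apei_exec_ins_type ins_table[] = {
		[ACPI_ERST_READ_REGISTER] = {
			.flags	= APEI_EXEC_INS_ACCESS_REGISTER,
			.run	= apei_exec_read_register,
		},
		[ACPI_ERST_NOOP] = {
			.flags	= 0,
			.run	= apei_exec_noop,
		},
	};
	struct apei_exec_context ctx;
	u64 range;
	int rc;

	apei_exec_ctx_init(&ctx, ins_table, ARRAY_SIZE(ins_table),
			   action_table, n_entries);
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE);
	if (!rc)
		range = apei_exec_ctx_get_output(&ctx);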

- IO memory pre-mapping and atomic access

APEI ERST operations may be used for permanent storage in the
hardware error handler; that is, they may be called in atomic
contexts such as IRQ or NMI. ERST/EINJ implement their operations
via IO memory/port access, but the IO memory access method provided
by ACPI (acpi_read/acpi_write) maps the IO memory each time it is
accessed, so it cannot be used in atomic context. To solve this, the
IO memory is pre-mapped during EINJ/ERST initialization. A linked
list records which memory areas have been mapped; when memory is
accessed in the hardware error handler, the list is searched for the
virtual address corresponding to the given physical address.
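
A minimal sketch of the intended call sequence, using the functions
introduced by this patch (the "APEI ERST" description string and the
surrounding variables are illustrative):

	struct apei_resources resources;
	u64 status;
	int rc;

	/* process context, during ERST/EINJ initialization */
	apei_resources_init(&resources);
	rc = apei_exec_collect_resources(&ctx, &resources);
	if (!rc)
		rc = apei_resources_request(&resources, "APEI ERST");
	if (!rc)
		rc = apei_exec_pre_map_gars(&ctx);

	/* later, possibly in IRQ/NMI context: no ioremap is needed */
	rc = apei_read(&status, &entry->register_region);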

Signed-off-by: Huang Ying <ying.huang@...el.com>
---
 drivers/acpi/Kconfig              |    2 
 drivers/acpi/Makefile             |    2 
 drivers/acpi/apei/Kconfig         |    9 
 drivers/acpi/apei/Makefile        |    3 
 drivers/acpi/apei/apei-base.c     |  899 ++++++++++++++++++++++++++++++++++++++
 drivers/acpi/apei/apei-internal.h |  101 ++++
 6 files changed, 1016 insertions(+)
 create mode 100644 drivers/acpi/apei/Kconfig
 create mode 100644 drivers/acpi/apei/Makefile
 create mode 100644 drivers/acpi/apei/apei-base.c
 create mode 100644 drivers/acpi/apei/apei-internal.h

--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -360,4 +360,6 @@ config ACPI_SBS
 	  To compile this driver as a module, choose M here:
 	  the modules will be called sbs and sbshc.
 
+source "drivers/acpi/apei/Kconfig"
+
 endif	# ACPI
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -64,3 +64,5 @@ processor-y			+= processor_idle.o proces
 processor-$(CONFIG_CPU_FREQ)	+= processor_perflib.o
 
 obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
+
+obj-$(CONFIG_ACPI_APEI)		+= apei/
--- /dev/null
+++ b/drivers/acpi/apei/Kconfig
@@ -0,0 +1,9 @@
+config ACPI_APEI
+	tristate "ACPI Platform Error Interface (APEI)"
+	depends on X86
+	help
+	  APEI allows the platform to report errors (for example from
+	  the chipset) to the operating system. This improves NMI
+	  handling in particular. In addition it supports error
+	  serialization and error injection.
+
--- /dev/null
+++ b/drivers/acpi/apei/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ACPI_APEI)		+= apei.o
+
+apei-y := apei-base.o
--- /dev/null
+++ b/drivers/acpi/apei/apei-base.c
@@ -0,0 +1,899 @@
+/*
+ * apei-base.c - ACPI Platform Error Interface (APEI) supporting
+ * infrastructure
+ *
+ * APEI allows the platform to report errors (for example from the
+ * chipset) to the operating system. This improves NMI handling
+ * especially. In addition it supports error serialization and error
+ * injection.
+ *
+ * For more information about APEI, please refer to ACPI Specification
+ * version 4.0, chapter 17.
+ *
+ * This file implements common functions used by more than one APEI
+ * table: IO memory pre-mapping and access from the NMI handler, the
+ * interpreter framework for ERST and EINJ, and resource management
+ * for APEI registers.
+ *
+ * Copyright (C) 2009, Intel Corp.
+ *	Author: Huang Ying <ying.huang@...el.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/kref.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+
+#include "apei-internal.h"
+
+#define APEI_PFX "APEI: "
+
+struct dentry *apei_debug_dir;
+EXPORT_SYMBOL_GPL(apei_debug_dir);
+
+/*
+ * The following is used by the NMI handler to access IO memory
+ * areas, because ioremap/iounmap cannot be used in NMI context. The
+ * IO memory areas are pre-mapped in process context and then
+ * accessed in the NMI handler.
+ */
+
+static LIST_HEAD(apei_iomaps);
+/*
+ * apei_iomaps_lock provides mutual exclusion between writers of the
+ * apei_iomaps list; RCU provides synchronization between readers
+ * and writers.
+ */
+static DEFINE_SPINLOCK(apei_iomaps_lock);
+
+struct apei_iomap {
+	struct list_head list;
+	unsigned long paddr;
+	unsigned long size;
+	void __iomem *vaddr;
+	struct kref ref;
+};
+
+/* apei_iomaps_lock or RCU read lock must be held before calling */
+static struct apei_iomap *__apei_find_iomap(unsigned long paddr,
+					    unsigned long size)
+{
+	struct apei_iomap *map;
+
+	list_for_each_entry_rcu(map, &apei_iomaps, list) {
+		if (map->paddr + map->size >= paddr + size &&
+		    map->paddr <= paddr)
+			return map;
+	}
+	return NULL;
+}
+
+/*
+ * Atomic "ioremap" used by the NMI handler. If the specified IO
+ * memory area is not pre-mapped, NULL is returned.
+ *
+ * apei_iomaps_lock or the RCU read lock must be held before calling.
+ */
+static void __iomem *__apei_ioremap_fast(unsigned long paddr,
+					 unsigned long size)
+{
+	struct apei_iomap *map;
+
+	map = __apei_find_iomap(paddr, size);
+	if (map)
+		return map->vaddr + (paddr - map->paddr);
+	else
+		return NULL;
+}
+
+/* apei_iomaps_lock must be held before calling */
+static void __iomem *__apei_try_ioremap(unsigned long paddr,
+					unsigned long size)
+{
+	struct apei_iomap *map;
+
+	map = __apei_find_iomap(paddr, size);
+	if (map) {
+		kref_get(&map->ref);
+		return map->vaddr + (paddr - map->paddr);
+	} else
+		return NULL;
+}
+
+/*
+ * Pre-map the specified IO memory area. First check whether the
+ * area is already pre-mapped; if it is, increase its reference count
+ * (in __apei_try_ioremap) and return. Otherwise do the real ioremap
+ * and add the mapping to the apei_iomaps list.
+ */
+static void __iomem *apei_ioremap(unsigned long paddr,
+				  unsigned long size)
+{
+	void __iomem *vaddr;
+	struct apei_iomap *map;
+	unsigned long pg_off, pg_sz;
+	unsigned long flags;
+
+	spin_lock_irqsave(&apei_iomaps_lock, flags);
+	vaddr = __apei_try_ioremap(paddr, size);
+	spin_unlock_irqrestore(&apei_iomaps_lock, flags);
+	if (vaddr)
+		return vaddr;
+
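+	/* round the requested area out to whole pages */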
+	pg_off = paddr & PAGE_MASK;
+	pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
+	vaddr = ioremap(pg_off, pg_sz);
+	if (!vaddr)
+		return NULL;
+	map = kmalloc(sizeof(*map), GFP_KERNEL);
+	if (!map)
+		goto err_unmap;
+	INIT_LIST_HEAD(&map->list);
+	map->paddr = pg_off;
+	map->size = pg_sz;
+	map->vaddr = vaddr;
+	kref_init(&map->ref);
+
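+	/*
+	 * Re-check under the lock: another thread may have pre-mapped
+	 * the same area while we were doing the ioremap above.
+	 */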
+	spin_lock_irqsave(&apei_iomaps_lock, flags);
+	vaddr = __apei_try_ioremap(paddr, size);
+	if (vaddr) {
+		spin_unlock_irqrestore(&apei_iomaps_lock, flags);
+		iounmap(map->vaddr);
+		kfree(map);
+		return vaddr;
+	}
+	list_add_tail_rcu(&map->list, &apei_iomaps);
+	spin_unlock_irqrestore(&apei_iomaps_lock, flags);
+
+	return vaddr + (paddr - pg_off);
+err_unmap:
+	iounmap(vaddr);
+	return NULL;
+}
+
+/* apei_iomaps_lock must be held before calling */
+static void __apei_kref_del_iomap(struct kref *ref)
+{
+	struct apei_iomap *map;
+
+	map = container_of(ref, struct apei_iomap, ref);
+	list_del_rcu(&map->list);
+}
+
+/*
+ * Post-unmap the specified IO memory area. The iounmap is done only
+ * when the reference count reaches zero.
+ */
+static void apei_iounmap(unsigned long paddr, unsigned long size)
+{
+	struct apei_iomap *map;
+	unsigned long flags;
+	int del;
+
+	spin_lock_irqsave(&apei_iomaps_lock, flags);
+	map = __apei_find_iomap(paddr, size);
+	BUG_ON(!map);
+	del = kref_put(&map->ref, __apei_kref_del_iomap);
+	spin_unlock_irqrestore(&apei_iomaps_lock, flags);
+
+	if (!del)
+		return;
+
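+	/* wait for all RCU readers (e.g. the NMI handler) to finish */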
+	synchronize_rcu();
+	iounmap(map->vaddr);
+	kfree(map);
+}
+
+/* When called from an NMI handler, silent must be set to 1 */
+static int apei_check_gar(struct acpi_generic_address *reg,
+			  u64 *paddr, int silent)
+{
+	u32 width;
+
+	/* Handle possible alignment issues */
+	memcpy(paddr, &reg->address, sizeof(*paddr));
+	if (!*paddr) {
+		if (!silent)
+			pr_info(APEI_PFX
+			"Invalid physical address in GAR, firmware bug?\n");
+		return -EINVAL;
+	}
+
+	width = reg->bit_width;
+	if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
+		if (!silent)
+			pr_info(APEI_PFX
+			"Invalid bit width in GAR, firmware bug?\n");
+		return -EINVAL;
+	}
+
+	if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+	    reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
+		if (!silent)
+			pr_info(APEI_PFX
+			"Invalid address space type in GAR, firmware bug?\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Pre-map, working on GAR */
+int apei_ioremap_gar(struct acpi_generic_address *reg)
+{
+	u64 paddr;
+	void __iomem *vaddr;
+	int rc;
+
+	if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+		return 0;
+
+	rc = apei_check_gar(reg, &paddr, 0);
+	if (rc)
+		return rc;
+
+	vaddr = apei_ioremap(paddr, reg->bit_width / 8);
+	if (!vaddr)
+		return -EIO;
+
+	return 0;
+}
+
+/* Post-unmap, working on GAR */
+int apei_iounmap_gar(struct acpi_generic_address *reg)
+{
+	u64 paddr;
+	int rc;
+
+	if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+		return 0;
+
+	rc = apei_check_gar(reg, &paddr, 0);
+	if (rc)
+		return rc;
+
+	apei_iounmap(paddr, reg->bit_width / 8);
+
+	return 0;
+}
+
+/*
+ * Can be used in atomic (including NMI) or process context. The RCU
+ * read lock may only be released after the IO memory area access is
+ * complete.
+ */
+static int apei_read_mem(u64 paddr, u64 *val, u32 width)
+{
+	void __iomem *addr;
+	int rc = 0;
+
+	rcu_read_lock();
+	addr = __apei_ioremap_fast(paddr, width);
+	switch (width) {
+	case 8:
+		*val = readb(addr);
+		break;
+	case 16:
+		*val = readw(addr);
+		break;
+	case 32:
+		*val = readl(addr);
+		break;
+	case 64:
+		*val = readq(addr);
+		break;
+	default:
+		/* do not return with the RCU read lock still held */
+		rc = -EINVAL;
+		break;
+	}
+	rcu_read_unlock();
+
+	return rc;
+}
+
+static int apei_write_mem(u64 paddr, u64 val, u32 width)
+{
+	void __iomem *addr;
+	int rc = 0;
+
+	rcu_read_lock();
+	addr = __apei_ioremap_fast(paddr, width);
+	switch (width) {
+	case 8:
+		writeb(val, addr);
+		break;
+	case 16:
+		writew(val, addr);
+		break;
+	case 32:
+		writel(val, addr);
+		break;
+	case 64:
+		writeq(val, addr);
+		break;
+	default:
+		/* do not return with the RCU read lock still held */
+		rc = -EINVAL;
+		break;
+	}
+	rcu_read_unlock();
+
+	return rc;
+}
+
+static int apei_read_port(u64 port, u32 *val, u32 width)
+{
+	switch (width) {
+	case 8:
+		*val = inb(port);
+		break;
+	case 16:
+		*val = inw(port);
+		break;
+	case 32:
+		*val = inl(port);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int apei_write_port(u64 port, u32 val, u32 width)
+{
+	switch (width) {
+	case 8:
+		outb(val, port);
+		break;
+	case 16:
+		outw(val, port);
+		break;
+	case 32:
+		outl(val, port);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* GAR access in atomic (including NMI) or process context */
+int apei_read(u64 *val, struct acpi_generic_address *reg)
+{
+	u64 paddr;
+	int rc;
+
+	rc = apei_check_gar(reg, &paddr, 1);
+	if (rc)
+		return rc;
+
+	*val = 0;
+	switch (reg->space_id) {
+	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+		return apei_read_mem(paddr, val, reg->bit_width);
+	case ACPI_ADR_SPACE_SYSTEM_IO:
+		return apei_read_port(paddr, (u32 *)val, reg->bit_width);
+	default:
+		return -EINVAL;
+	}
+}
+
+int apei_write(u64 val, struct acpi_generic_address *reg)
+{
+	u64 paddr;
+	int rc;
+
+	rc = apei_check_gar(reg, &paddr, 1);
+	if (rc)
+		return rc;
+
+	switch (reg->space_id) {
+	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+		return apei_write_mem(paddr, val, reg->bit_width);
+	case ACPI_ADR_SPACE_SYSTEM_IO:
+		return apei_write_port(paddr, val, reg->bit_width);
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * APEI ERST (Error Record Serialization Table) and EINJ (Error
+ * INJection) interpreter framework.
+ */
+
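+/*
+ * Instruction entry flag from the ACPI specification: preserve the
+ * register bits outside the mask, i.e. do a read-modify-write.
+ */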
+#define APEI_EXEC_PRESERVE_REGISTER	0x1
+
+void apei_exec_ctx_init(struct apei_exec_context *ctx,
+			struct apei_exec_ins_type *ins_table,
+			u32 instructions,
+			struct acpi_whea_header *action_table,
+			u32 entries)
+{
+	ctx->ins_table = ins_table;
+	ctx->instructions = instructions;
+	ctx->action_table = action_table;
+	ctx->entries = entries;
+}
+EXPORT_SYMBOL_GPL(apei_exec_ctx_init);
+
+int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
+{
+	int rc;
+
+	rc = apei_read(val, &entry->register_region);
+	if (rc)
+		return rc;
+	*val >>= entry->register_region.bit_offset;
+	*val &= entry->mask;
+
+	return 0;
+}
+
+int apei_exec_read_register(struct apei_exec_context *ctx,
+			    struct acpi_whea_header *entry)
+{
+	int rc;
+	u64 val = 0;
+
+	rc = __apei_exec_read_register(entry, &val);
+	if (rc)
+		return rc;
+	ctx->value = val;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_read_register);
+
+int apei_exec_read_register_value(struct apei_exec_context *ctx,
+				  struct acpi_whea_header *entry)
+{
+	int rc;
+
+	rc = apei_exec_read_register(ctx, entry);
+	if (rc)
+		return rc;
+	ctx->value = (ctx->value == entry->value);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_read_register_value);
+
+int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
+{
+	int rc;
+
+	val &= entry->mask;
+	val <<= entry->register_region.bit_offset;
+	if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
+		u64 valr = 0;
+		rc = apei_read(&valr, &entry->register_region);
+		if (rc)
+			return rc;
+		valr &= ~(entry->mask << entry->register_region.bit_offset);
+		val |= valr;
+	}
+	rc = apei_write(val, &entry->register_region);
+
+	return rc;
+}
+
+int apei_exec_write_register(struct apei_exec_context *ctx,
+			     struct acpi_whea_header *entry)
+{
+	return __apei_exec_write_register(entry, ctx->value);
+}
+EXPORT_SYMBOL_GPL(apei_exec_write_register);
+
+int apei_exec_write_register_value(struct apei_exec_context *ctx,
+				   struct acpi_whea_header *entry)
+{
+	int rc;
+
+	ctx->value = entry->value;
+	rc = apei_exec_write_register(ctx, entry);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(apei_exec_write_register_value);
+
+int apei_exec_noop(struct apei_exec_context *ctx,
+		   struct acpi_whea_header *entry)
+{
+	return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_noop);
+
+/*
+ * Interpret the specified action. Go through the whole action table
+ * and execute all instructions belonging to the action.
+ */
+int apei_exec_run(struct apei_exec_context *ctx, u8 action)
+{
+	int rc;
+	u32 i, ip;
+	struct acpi_whea_header *entry;
+	apei_exec_ins_func_t run;
+
+	ctx->ip = 0;
+
+	/*
+	 * "ip" is the instruction pointer of the current instruction,
+	 * "ctx->ip" specifies the next instruction to be executed.
+	 * An instruction's "run" function may change "ctx->ip" to
+	 * implement "goto" semantics.
+	 */
+rewind:
+	ip = 0;
+	for (i = 0; i < ctx->entries; i++) {
+		entry = &ctx->action_table[i];
+		if (entry->action != action)
+			continue;
+		if (ip == ctx->ip) {
+			if (entry->instruction >= ctx->instructions ||
+			    !ctx->ins_table[entry->instruction].run) {
+				pr_info(APEI_PFX
+			"Invalid action table, unknown instruction type!\n");
+				return -EINVAL;
+			}
+			run = ctx->ins_table[entry->instruction].run;
+			rc = run(ctx, entry);
+			if (rc < 0)
+				return rc;
+			else if (rc != APEI_EXEC_SET_IP)
+				ctx->ip++;
+		}
+		ip++;
+		if (ctx->ip < ip)
+			goto rewind;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_run);
+
+typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
+				      struct acpi_whea_header *entry,
+				      void *data);
+
+static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
+				    apei_exec_entry_func_t func,
+				    void *data,
+				    int *end)
+{
+	u8 ins;
+	int i, rc;
+	struct acpi_whea_header *entry;
+	struct apei_exec_ins_type *ins_table = ctx->ins_table;
+
+	for (i = 0; i < ctx->entries; i++) {
+		entry = ctx->action_table + i;
+		ins = entry->instruction;
+		if (end)
+			*end = i;
+		if (ins >= ctx->instructions || !ins_table[ins].run) {
+			pr_info(APEI_PFX
+			"Invalid action table, unknown instruction type!\n");
+			return -EINVAL;
+		}
+		rc = func(ctx, entry, data);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+static int pre_map_gar_callback(struct apei_exec_context *ctx,
+				struct acpi_whea_header *entry,
+				void *data)
+{
+	u8 ins = entry->instruction;
+
+	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
+		return apei_ioremap_gar(&entry->register_region);
+
+	return 0;
+}
+
+/*
+ * Pre-map all GARs in the action table so that they can be accessed
+ * in the NMI handler.
+ */
+int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
+{
+	int rc, end;
+
+	rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
+				      NULL, &end);
+	if (rc) {
+		struct apei_exec_context ctx_unmap;
+		memcpy(&ctx_unmap, ctx, sizeof(*ctx));
+		ctx_unmap.entries = end;
+		apei_exec_post_unmap_gars(&ctx_unmap);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);
+
+static int post_unmap_gar_callback(struct apei_exec_context *ctx,
+				   struct acpi_whea_header *entry,
+				   void *data)
+{
+	u8 ins = entry->instruction;
+
+	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
+		apei_iounmap_gar(&entry->register_region);
+
+	return 0;
+}
+
+/* Post-unmap all GARs in the action table. */
+int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
+{
+	return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
+					NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);
+
+/*
+ * Resource management for GARs in APEI
+ */
+struct apei_res {
+	struct list_head list;
+	unsigned long start;
+	unsigned long end;
+};
+
+static int apei_res_add(struct list_head *res_list,
+			unsigned long start, unsigned long size)
+{
+	struct apei_res *res, *resn, *res_ins = NULL;
+	unsigned long end = start + size;
+
+	if (end <= start)
+		return 0;
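+	/*
+	 * Merge the new range with any existing range it overlaps or
+	 * touches; restart the scan after each merge because the
+	 * grown range may overlap further entries.
+	 */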
+repeat:
+	list_for_each_entry_safe(res, resn, res_list, list) {
+		if (res->start > end || res->end < start)
+			continue;
+		else if (end <= res->end && start >= res->start) {
+			kfree(res_ins);
+			return 0;
+		}
+		list_del(&res->list);
+		res->start = start = min(res->start, start);
+		res->end = end = max(res->end, end);
+		kfree(res_ins);
+		res_ins = res;
+		goto repeat;
+	}
+
+	if (res_ins)
+		list_add(&res_ins->list, res_list);
+	else {
+		res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
+		if (!res_ins)
+			return -ENOMEM;
+		res_ins->start = start;
+		res_ins->end = end;
+		list_add(&res_ins->list, res_list);
+	}
+
+	return 0;
+}
+
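+/*
+ * Subtract the ranges in res_list2 from those in res_list1: fully
+ * covered ranges are removed, partially covered ranges are trimmed,
+ * and a range that strictly contains a res_list2 range is split in
+ * two.
+ */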
+static int apei_res_sub(struct list_head *res_list1,
+			struct list_head *res_list2)
+{
+	struct apei_res *res1, *resn1, *res2, *res;
+	res1 = list_entry(res_list1->next, struct apei_res, list);
+	resn1 = list_entry(res1->list.next, struct apei_res, list);
+	while (&res1->list != res_list1) {
+		list_for_each_entry(res2, res_list2, list) {
+			if (res1->start >= res2->end ||
+			    res1->end <= res2->start)
+				continue;
+			else if (res1->end <= res2->end &&
+				 res1->start >= res2->start) {
+				list_del(&res1->list);
+				kfree(res1);
+				break;
+			} else if (res1->end > res2->end &&
+				   res1->start < res2->start) {
+				res = kmalloc(sizeof(*res), GFP_KERNEL);
+				if (!res)
+					return -ENOMEM;
+				res->start = res2->end;
+				res->end = res1->end;
+				res1->end = res2->start;
+				list_add(&res->list, &res1->list);
+				resn1 = res;
+			} else {
+				if (res1->start < res2->start)
+					res1->end = res2->start;
+				else
+					res1->start = res2->end;
+			}
+		}
+		res1 = resn1;
+		resn1 = list_entry(resn1->list.next, struct apei_res, list);
+	}
+
+	return 0;
+}
+
+static void apei_res_clean(struct list_head *res_list)
+{
+	struct apei_res *res, *resn;
+
+	list_for_each_entry_safe(res, resn, res_list, list) {
+		list_del(&res->list);
+		kfree(res);
+	}
+}
+
+void apei_resources_fini(struct apei_resources *resources)
+{
+	apei_res_clean(&resources->iomem);
+	apei_res_clean(&resources->ioport);
+}
+EXPORT_SYMBOL_GPL(apei_resources_fini);
+
+/*
+ * EINJ has two groups of GARs (EINJ table entries and trigger table
+ * entries), so the resources they share are subtracted from the
+ * trigger table resources before the second request.
+ */
+int apei_resources_sub(struct apei_resources *resources1,
+		       struct apei_resources *resources2)
+{
+	int rc;
+
+	rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
+	if (rc)
+		return rc;
+	return apei_res_sub(&resources1->ioport, &resources2->ioport);
+}
+EXPORT_SYMBOL_GPL(apei_resources_sub);
+
+/*
+ * The IO memory/port resource management mechanism is used to check
+ * whether the memory/port areas used by GARs conflict with normal
+ * memory or with the IO memory/port of devices.
+ */
+int apei_resources_request(struct apei_resources *resources,
+			   const char *desc)
+{
+	struct apei_res *res, *res_bak;
+	struct resource *r;
+
+	list_for_each_entry(res, &resources->iomem, list) {
+		r = request_mem_region(res->start, res->end - res->start,
+				       desc);
+		if (!r) {
+			pr_info(APEI_PFX
+		"Can not request iomem region <%016llx-%016llx> for GARs.\n",
+				(unsigned long long)res->start,
+				(unsigned long long)res->end);
+			res_bak = res;
+			goto err_unmap_iomem;
+		}
+	}
+
+	list_for_each_entry(res, &resources->ioport, list) {
+		r = request_region(res->start, res->end - res->start, desc);
+		if (!r) {
+			pr_info(APEI_PFX
+		"Can not request ioport region <%016llx-%016llx> for GARs.\n",
+				(unsigned long long)res->start,
+				(unsigned long long)res->end);
+			res_bak = res;
+			goto err_unmap_ioport;
+		}
+	}
+
+	return 0;
+err_unmap_ioport:
+	list_for_each_entry(res, &resources->ioport, list) {
+		if (res == res_bak)
+			break;
+		release_region(res->start, res->end - res->start);
+	}
+	res_bak = NULL;
+err_unmap_iomem:
+	list_for_each_entry(res, &resources->iomem, list) {
+		if (res == res_bak)
+			break;
+		release_mem_region(res->start, res->end - res->start);
+	}
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(apei_resources_request);
+
+void apei_resources_release(struct apei_resources *resources)
+{
+	struct apei_res *res;
+
+	list_for_each_entry(res, &resources->iomem, list)
+		release_mem_region(res->start, res->end - res->start);
+	list_for_each_entry(res, &resources->ioport, list)
+		release_region(res->start, res->end - res->start);
+}
+EXPORT_SYMBOL_GPL(apei_resources_release);
+
+static int collect_res_callback(struct apei_exec_context *ctx,
+				struct acpi_whea_header *entry,
+				void *data)
+{
+	struct apei_resources *resources = data;
+	struct acpi_generic_address *reg = &entry->register_region;
+	u8 ins = entry->instruction;
+	u64 paddr;
+	int rc;
+
+	if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
+		return 0;
+
+	rc = apei_check_gar(reg, &paddr, 0);
+	if (rc)
+		return rc;
+
+	switch (reg->space_id) {
+	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+		return apei_res_add(&resources->iomem, paddr,
+				    reg->bit_width / 8);
+	case ACPI_ADR_SPACE_SYSTEM_IO:
+		return apei_res_add(&resources->ioport, paddr,
+				    reg->bit_width / 8);
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * The same register may be used by multiple instructions in an
+ * action table, so resources are collected once before requesting.
+ */
+int apei_exec_collect_resources(struct apei_exec_context *ctx,
+				struct apei_resources *resources)
+{
+	return apei_exec_for_each_entry(ctx, collect_res_callback,
+					resources, NULL);
+}
+EXPORT_SYMBOL_GPL(apei_exec_collect_resources);
+
+static int __init apei_init(void)
+{
+	apei_debug_dir = debugfs_create_dir("apei", NULL);
+	if (!apei_debug_dir)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void __exit apei_exit(void)
+{
+	debugfs_remove_recursive(apei_debug_dir);
+	WARN_ON(!list_empty(&apei_iomaps));
+}
+
+module_init(apei_init);
+module_exit(apei_exit);
+
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("ACPI Platform Error Interface support");
+MODULE_LICENSE("GPL");
--- /dev/null
+++ b/drivers/acpi/apei/apei-internal.h
@@ -0,0 +1,101 @@
+/*
+ * apei-internal.h - ACPI Platform Error Interface internal
+ * definitions.
+ */
+
+#ifndef APEI_INTERNAL_H
+#define APEI_INTERNAL_H
+
+int apei_ioremap_gar(struct acpi_generic_address *reg);
+int apei_iounmap_gar(struct acpi_generic_address *reg);
+
+int apei_read(u64 *val, struct acpi_generic_address *reg);
+int apei_write(u64 val, struct acpi_generic_address *reg);
+
+struct apei_exec_context;
+
+typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
+				    struct acpi_whea_header *entry);
+
+#define APEI_EXEC_INS_ACCESS_REGISTER	0x0001
+
+struct apei_exec_ins_type {
+	u32 flags;
+	apei_exec_ins_func_t run;
+};
+
+struct apei_exec_context {
+	u32 ip;
+	u64 value;
+	u64 var1;
+	u64 var2;
+	u64 src_base;
+	u64 dst_base;
+	struct apei_exec_ins_type *ins_table;
+	u32 instructions;
+	struct acpi_whea_header *action_table;
+	u32 entries;
+};
+
+void apei_exec_ctx_init(struct apei_exec_context *ctx,
+			struct apei_exec_ins_type *ins_table,
+			u32 instructions,
+			struct acpi_whea_header *action_table,
+			u32 entries);
+
+static inline void apei_exec_ctx_set_input(struct apei_exec_context *ctx,
+					   u64 input)
+{
+	ctx->value = input;
+}
+
+static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx)
+{
+	return ctx->value;
+}
+
+int apei_exec_run(struct apei_exec_context *ctx, u8 action);
+
+/* Common instruction implementation */
+
+/* Returned by an instruction function when it has set ctx->ip itself */
+#define APEI_EXEC_SET_IP	1
+
+int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
+int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
+int apei_exec_read_register(struct apei_exec_context *ctx,
+			    struct acpi_whea_header *entry);
+int apei_exec_read_register_value(struct apei_exec_context *ctx,
+				  struct acpi_whea_header *entry);
+int apei_exec_write_register(struct apei_exec_context *ctx,
+			     struct acpi_whea_header *entry);
+int apei_exec_write_register_value(struct apei_exec_context *ctx,
+				   struct acpi_whea_header *entry);
+int apei_exec_noop(struct apei_exec_context *ctx,
+		   struct acpi_whea_header *entry);
+int apei_exec_pre_map_gars(struct apei_exec_context *ctx);
+int apei_exec_post_unmap_gars(struct apei_exec_context *ctx);
+
+struct apei_resources {
+	struct list_head iomem;
+	struct list_head ioport;
+};
+
+static inline void apei_resources_init(struct apei_resources *resources)
+{
+	INIT_LIST_HEAD(&resources->iomem);
+	INIT_LIST_HEAD(&resources->ioport);
+}
+
+void apei_resources_fini(struct apei_resources *resources);
+int apei_resources_sub(struct apei_resources *resources1,
+		       struct apei_resources *resources2);
+int apei_resources_request(struct apei_resources *resources,
+			   const char *desc);
+void apei_resources_release(struct apei_resources *resources);
+int apei_exec_collect_resources(struct apei_exec_context *ctx,
+				struct apei_resources *resources);
+
+struct dentry;
+extern struct dentry *apei_debug_dir;
+#endif

