Message-ID: <tip-aeb9dd1de98c1a5f2007ea5d2a154c1244caf8a0@git.kernel.org>
Date:   Tue, 21 Mar 2017 05:19:57 -0700
From:   tip-bot for Lu Baolu <tipbot@...or.com>
To:     linux-tip-commits@...r.kernel.org
Cc:     peterz@...radead.org, hpa@...or.com, baolu.lu@...ux.intel.com,
        linux-kernel@...r.kernel.org, mingo@...nel.org,
        gregkh@...uxfoundation.org, mathias.nyman@...ux.intel.com,
        tglx@...utronix.de, torvalds@...ux-foundation.org
Subject: [tip:x86/debug] usb/early: Add driver for xhci debug capability

Commit-ID:  aeb9dd1de98c1a5f2007ea5d2a154c1244caf8a0
Gitweb:     http://git.kernel.org/tip/aeb9dd1de98c1a5f2007ea5d2a154c1244caf8a0
Author:     Lu Baolu <baolu.lu@...ux.intel.com>
AuthorDate: Tue, 21 Mar 2017 16:01:30 +0800
Committer:  Ingo Molnar <mingo@...nel.org>
CommitDate: Tue, 21 Mar 2017 12:30:05 +0100

usb/early: Add driver for xhci debug capability

The xHCI debug capability (DbC) is an optional but standalone
piece of functionality provided by an xHCI host controller.
Software discovers this capability by walking through the
host's extended capability list. The xHCI specification
describes DbC in section 7.6.
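
For reference, the discovery walk works roughly as sketched
below. This is only an illustrative sketch (HCCPARAMS1 layout
and the next-pointer encoding are taken from the xHCI spec);
the driver added by this patch uses the existing
xhci_find_next_ext_cap() helper rather than open-coding it:

	#define XECP_ID(val)	((val) & 0xff)
	#define XECP_NEXT(val)	(((val) >> 8) & 0xff)	/* next pointer, in dwords */

	/* XHCI_EXT_CAPS_DEBUG comes from the existing xhci-ext-caps.h */
	static u32 find_dbc_ext_cap(void __iomem *mmio)
	{
		/* HCCPARAMS1 (offset 0x10), bits 31:16: xECP, in dwords from the base */
		u32 offset = (readl(mmio + 0x10) >> 16) << 2;
		u32 val;

		while (offset) {
			val = readl(mmio + offset);
			if (XECP_ID(val) == XHCI_EXT_CAPS_DEBUG)
				return offset;		/* DbC register block found */
			if (!XECP_NEXT(val))
				break;
			offset += XECP_NEXT(val) << 2;	/* next pointer is relative */
		}

		return 0;	/* this controller does not implement DbC */
	}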

This patch introduces the code to probe and initialize the
debug capability hardware during early boot. With the
hardware initialized, the debug target (the system on which
this code is running) presents a debug device through the
debug port (normally the first USB3 port). The debug device
is fully compliant with the USB framework and provides the
equivalent of a high-performance (USB3) full-duplex serial
link between the debug host and target. The DbC
functionality is independent of the xHCI host: there is no
precondition on the xHCI host side for the DbC to work.
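
On the debug host side nothing xHCI-specific is needed: once
the DbC is running, the target enumerates as an ordinary USB
device using the Linux Foundation vendor/product IDs defined
in xhci-dbc.h below, so it can be spotted with, for instance
(illustration only, not part of the patch):

	# on the debug host, after connecting to the target's debug port
	lsusb -d 1d6b:0004

A host-side USB serial driver or user-space tool can then
bind to that device and read the target's log stream.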

One use for this feature is kernel debugging, for example
when your machine crashes very early before the regular
console code is initialized. Other uses include simpler,
lockless logging instead of a full-blown printk console
driver and klogd.
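
On the target side this is enabled through the new Kconfig
option and an earlyprintk parameter that ends up in
early_xdbc_parse_parameter(); the command-line wiring is
added by a separate patch, so the exact syntax below is only
an assumed example:

	CONFIG_EARLY_PRINTK_USB_XDBC=y

	# assumed target kernel command line; the optional "keep" token
	# makes early_xdbc_parse_parameter() retain the console after
	# boot instead of dropping it as a CON_BOOT console
	earlyprintk=xdbc,keep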

Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
Acked-by: Thomas Gleixner <tglx@...utronix.de>
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Mathias Nyman <mathias.nyman@...ux.intel.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: linux-usb@...r.kernel.org
Link: http://lkml.kernel.org/r/1490083293-3792-3-git-send-email-baolu.lu@linux.intel.com
[ Small fix to the Kconfig help text. ]
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 arch/x86/Kconfig.debug        |   27 +-
 drivers/usb/Makefile          |    2 +-
 drivers/usb/early/Makefile    |    1 +
 drivers/usb/early/xhci-dbc.c  | 1014 +++++++++++++++++++++++++++++++++++++++++
 drivers/usb/early/xhci-dbc.h  |  211 +++++++++
 include/linux/usb/xhci-dbgp.h |   29 ++
 6 files changed, 1281 insertions(+), 3 deletions(-)

diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 63c1d13..fcb7604 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -5,6 +5,9 @@ config TRACE_IRQFLAGS_SUPPORT
 
 source "lib/Kconfig.debug"
 
+config EARLY_PRINTK_USB
+	bool
+
 config X86_VERBOSE_BOOTUP
 	bool "Enable verbose x86 bootup info messages"
 	default y
@@ -23,19 +26,20 @@ config EARLY_PRINTK
 	  This is useful for kernel debugging when your machine crashes very
 	  early before the console code is initialized. For normal operation
 	  it is not recommended because it looks ugly and doesn't cooperate
-	  with klogd/syslogd or the X server. You should normally N here,
+	  with klogd/syslogd or the X server. You should normally say N here,
 	  unless you want to debug such a crash.
 
 config EARLY_PRINTK_DBGP
 	bool "Early printk via EHCI debug port"
 	depends on EARLY_PRINTK && PCI
+	select EARLY_PRINTK_USB
 	---help---
 	  Write kernel log output directly into the EHCI debug port.
 
 	  This is useful for kernel debugging when your machine crashes very
 	  early before the console code is initialized. For normal operation
 	  it is not recommended because it looks ugly and doesn't cooperate
-	  with klogd/syslogd or the X server. You should normally N here,
+	  with klogd/syslogd or the X server. You should normally say N here,
 	  unless you want to debug such a crash. You need usb debug device.
 
 config EARLY_PRINTK_EFI
@@ -48,6 +52,25 @@ config EARLY_PRINTK_EFI
 	  This is useful for kernel debugging when your machine crashes very
 	  early before the console code is initialized.
 
+config EARLY_PRINTK_USB_XDBC
+	bool "Early printk via the xHCI debug port"
+	depends on EARLY_PRINTK && PCI
+	select EARLY_PRINTK_USB
+	---help---
+	  Write kernel log output directly into the xHCI debug port.
+
+	  One use for this feature is kernel debugging, for example when your
+	  machine crashes very early before the regular console code is
+	  initialized. Other uses include simpler, lockless logging instead of
+	  a full-blown printk console driver + klogd.
+
+	  For normal production environments this is normally not recommended,
+	  because it doesn't feed events into klogd/syslogd and doesn't try to
+	  print anything on the screen.
+
+	  You should normally say N here, unless you want to debug early
+	  crashes or need a very simple printk logging facility.
+
 config X86_PTDUMP_CORE
 	def_bool n
 
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 7791af6..53d1356 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_USB_MICROTEK)	+= image/
 obj-$(CONFIG_USB_SERIAL)	+= serial/
 
 obj-$(CONFIG_USB)		+= misc/
-obj-$(CONFIG_EARLY_PRINTK_DBGP)	+= early/
+obj-$(CONFIG_EARLY_PRINTK_USB)	+= early/
 
 obj-$(CONFIG_USB_ATM)		+= atm/
 obj-$(CONFIG_USB_SPEEDTOUCH)	+= atm/
diff --git a/drivers/usb/early/Makefile b/drivers/usb/early/Makefile
index 24bbe51..fcde228 100644
--- a/drivers/usb/early/Makefile
+++ b/drivers/usb/early/Makefile
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_EARLY_PRINTK_DBGP) += ehci-dbgp.o
+obj-$(CONFIG_EARLY_PRINTK_USB_XDBC) += xhci-dbc.o
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
new file mode 100644
index 0000000..1268818
--- /dev/null
+++ b/drivers/usb/early/xhci-dbc.c
@@ -0,0 +1,1014 @@
+/*
+ * xhci-dbc.c - xHCI debug capability early driver
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@...ux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ":%s: " fmt, __func__
+
+#include <linux/console.h>
+#include <linux/pci_regs.h>
+#include <linux/pci_ids.h>
+#include <linux/bootmem.h>
+#include <linux/io.h>
+#include <asm/pci-direct.h>
+#include <asm/fixmap.h>
+#include <linux/bcd.h>
+#include <linux/export.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+
+#include "../host/xhci.h"
+#include "xhci-dbc.h"
+
+static struct xdbc_state xdbc;
+static bool early_console_keep;
+
+#define XDBC_TRACE
+#ifdef XDBC_TRACE
+#define	xdbc_trace	trace_printk
+#else
+static inline void xdbc_trace(const char *fmt, ...) { }
+#endif /* XDBC_TRACE */
+
+static void __iomem * __init xdbc_map_pci_mmio(u32 bus, u32 dev, u32 func)
+{
+	u64 val64, sz64, mask64;
+	void __iomem *base;
+	u32 val, sz;
+	u8 byte;
+
+	val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
+	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, ~0);
+	sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
+	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, val);
+
+	if (val == 0xffffffff || sz == 0xffffffff) {
+		pr_notice("invalid mmio bar\n");
+		return NULL;
+	}
+
+	val64	= val & PCI_BASE_ADDRESS_MEM_MASK;
+	sz64	= sz & PCI_BASE_ADDRESS_MEM_MASK;
+	mask64	= PCI_BASE_ADDRESS_MEM_MASK;
+
+	if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
+		val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
+		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, ~0);
+		sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
+		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, val);
+
+		val64	|= (u64)val << 32;
+		sz64	|= (u64)sz << 32;
+		mask64	|= ~0ULL << 32;
+	}
+
+	sz64 &= mask64;
+
+	if (!sz64) {
+		pr_notice("invalid mmio address\n");
+		return NULL;
+	}
+
+	sz64 = 1ULL << __ffs64(sz64);
+
+	/* Check if the mem space is enabled: */
+	byte = read_pci_config_byte(bus, dev, func, PCI_COMMAND);
+	if (!(byte & PCI_COMMAND_MEMORY)) {
+		byte |= PCI_COMMAND_MEMORY;
+		write_pci_config_byte(bus, dev, func, PCI_COMMAND, byte);
+	}
+
+	xdbc.xhci_start = val64;
+	xdbc.xhci_length = sz64;
+	base = early_ioremap(val64, sz64);
+
+	return base;
+}
+
+static void * __init xdbc_get_page(dma_addr_t *dma_addr)
+{
+	void *virt;
+
+	virt = alloc_bootmem_pages_nopanic(PAGE_SIZE);
+	if (!virt)
+		return NULL;
+
+	if (dma_addr)
+		*dma_addr = (dma_addr_t)__pa(virt);
+
+	return virt;
+}
+
+static u32 __init xdbc_find_dbgp(int xdbc_num, u32 *b, u32 *d, u32 *f)
+{
+	u32 bus, dev, func, class;
+
+	for (bus = 0; bus < XDBC_PCI_MAX_BUSES; bus++) {
+		for (dev = 0; dev < XDBC_PCI_MAX_DEVICES; dev++) {
+			for (func = 0; func < XDBC_PCI_MAX_FUNCTION; func++) {
+
+				class = read_pci_config(bus, dev, func, PCI_CLASS_REVISION);
+				if ((class >> 8) != PCI_CLASS_SERIAL_USB_XHCI)
+					continue;
+
+				if (xdbc_num-- != 0)
+					continue;
+
+				*b = bus;
+				*d = dev;
+				*f = func;
+
+				return 0;
+			}
+		}
+	}
+
+	return -1;
+}
+
+static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait, int delay)
+{
+	u32 result;
+
+	do {
+		result = readl(ptr);
+		result &= mask;
+		if (result == done)
+			return 0;
+		udelay(delay);
+		wait -= delay;
+	} while (wait > 0);
+
+	return -ETIMEDOUT;
+}
+
+static void __init xdbc_bios_handoff(void)
+{
+	int offset, timeout;
+	u32 val;
+
+	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_LEGACY);
+	val = readl(xdbc.xhci_base + offset);
+
+	if (val & XHCI_HC_BIOS_OWNED) {
+		writel(val | XHCI_HC_OS_OWNED, xdbc.xhci_base + offset);
+		timeout = handshake(xdbc.xhci_base + offset, XHCI_HC_BIOS_OWNED, 0, 5000, 10);
+
+		if (timeout) {
+			pr_notice("failed to hand over xHCI control from BIOS\n");
+			writel(val & ~XHCI_HC_BIOS_OWNED, xdbc.xhci_base + offset);
+		}
+	}
+
+	/* Disable BIOS SMIs and clear all SMI events: */
+	val = readl(xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
+	val &= XHCI_LEGACY_DISABLE_SMI;
+	val |= XHCI_LEGACY_SMI_EVENTS;
+	writel(val, xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
+}
+
+static int __init
+xdbc_alloc_ring(struct xdbc_segment *seg, struct xdbc_ring *ring)
+{
+	seg->trbs = xdbc_get_page(&seg->dma);
+	if (!seg->trbs)
+		return -ENOMEM;
+
+	ring->segment = seg;
+
+	return 0;
+}
+
+static void __init xdbc_free_ring(struct xdbc_ring *ring)
+{
+	struct xdbc_segment *seg = ring->segment;
+
+	if (!seg)
+		return;
+
+	free_bootmem(seg->dma, PAGE_SIZE);
+	ring->segment = NULL;
+}
+
+static void xdbc_reset_ring(struct xdbc_ring *ring)
+{
+	struct xdbc_segment *seg = ring->segment;
+	struct xdbc_trb *link_trb;
+
+	memset(seg->trbs, 0, PAGE_SIZE);
+
+	ring->enqueue = seg->trbs;
+	ring->dequeue = seg->trbs;
+	ring->cycle_state = 1;
+
+	if (ring != &xdbc.evt_ring) {
+		link_trb = &seg->trbs[XDBC_TRBS_PER_SEGMENT - 1];
+		link_trb->field[0] = cpu_to_le32(lower_32_bits(seg->dma));
+		link_trb->field[1] = cpu_to_le32(upper_32_bits(seg->dma));
+		link_trb->field[3] = cpu_to_le32(TRB_TYPE(TRB_LINK)) | cpu_to_le32(LINK_TOGGLE);
+	}
+}
+
+static inline void xdbc_put_utf16(u16 *s, const char *c, size_t size)
+{
+	int i;
+
+	for (i = 0; i < size; i++)
+		s[i] = cpu_to_le16(c[i]);
+}
+
+static void xdbc_mem_init(void)
+{
+	struct xdbc_ep_context *ep_in, *ep_out;
+	struct usb_string_descriptor *s_desc;
+	struct xdbc_erst_entry *entry;
+	struct xdbc_strings *strings;
+	struct xdbc_context *ctx;
+	unsigned int max_burst;
+	u32 string_length;
+	int index = 0;
+	u32 dev_info;
+
+	xdbc_reset_ring(&xdbc.evt_ring);
+	xdbc_reset_ring(&xdbc.in_ring);
+	xdbc_reset_ring(&xdbc.out_ring);
+	memset(xdbc.table_base, 0, PAGE_SIZE);
+	memset(xdbc.out_buf, 0, PAGE_SIZE);
+
+	/* Initialize event ring segment table: */
+	xdbc.erst_size	= 16;
+	xdbc.erst_base	= xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
+	xdbc.erst_dma	= xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;
+
+	index += XDBC_ERST_ENTRY_NUM;
+	entry = (struct xdbc_erst_entry *)xdbc.erst_base;
+
+	entry->seg_addr		= cpu_to_le64(xdbc.evt_seg.dma);
+	entry->seg_size		= cpu_to_le32(XDBC_TRBS_PER_SEGMENT);
+	entry->__reserved_0	= 0;
+
+	/* Initialize ERST registers: */
+	writel(1, &xdbc.xdbc_reg->ersts);
+	xdbc_write64(xdbc.erst_dma, &xdbc.xdbc_reg->erstba);
+	xdbc_write64(xdbc.evt_seg.dma, &xdbc.xdbc_reg->erdp);
+
+	/* Debug capability contexts: */
+	xdbc.dbcc_size	= 64 * 3;
+	xdbc.dbcc_base	= xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
+	xdbc.dbcc_dma	= xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;
+
+	index += XDBC_DBCC_ENTRY_NUM;
+
+	/* Populate the strings: */
+	xdbc.string_size = sizeof(struct xdbc_strings);
+	xdbc.string_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
+	xdbc.string_dma	 = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;
+	strings		 = (struct xdbc_strings *)xdbc.string_base;
+
+	index += XDBC_STRING_ENTRY_NUM;
+
+	/* Serial string: */
+	s_desc			= (struct usb_string_descriptor *)strings->serial;
+	s_desc->bLength		= (strlen(XDBC_STRING_SERIAL) + 1) * 2;
+	s_desc->bDescriptorType	= USB_DT_STRING;
+
+	xdbc_put_utf16(s_desc->wData, XDBC_STRING_SERIAL, strlen(XDBC_STRING_SERIAL));
+	string_length = s_desc->bLength;
+	string_length <<= 8;
+
+	/* Product string: */
+	s_desc			= (struct usb_string_descriptor *)strings->product;
+	s_desc->bLength		= (strlen(XDBC_STRING_PRODUCT) + 1) * 2;
+	s_desc->bDescriptorType	= USB_DT_STRING;
+
+	xdbc_put_utf16(s_desc->wData, XDBC_STRING_PRODUCT, strlen(XDBC_STRING_PRODUCT));
+	string_length += s_desc->bLength;
+	string_length <<= 8;
+
+	/* Manufacturer string: */
+	s_desc			= (struct usb_string_descriptor *)strings->manufacturer;
+	s_desc->bLength		= (strlen(XDBC_STRING_MANUFACTURER) + 1) * 2;
+	s_desc->bDescriptorType	= USB_DT_STRING;
+
+	xdbc_put_utf16(s_desc->wData, XDBC_STRING_MANUFACTURER, strlen(XDBC_STRING_MANUFACTURER));
+	string_length += s_desc->bLength;
+	string_length <<= 8;
+
+	/* String0: */
+	strings->string0[0]	= 4;
+	strings->string0[1]	= USB_DT_STRING;
+	strings->string0[2]	= 0x09;
+	strings->string0[3]	= 0x04;
+
+	string_length += 4;
+
+	/* Populate info Context: */
+	ctx = (struct xdbc_context *)xdbc.dbcc_base;
+
+	ctx->info.string0	= cpu_to_le64(xdbc.string_dma);
+	ctx->info.manufacturer	= cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH);
+	ctx->info.product	= cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 2);
+	ctx->info.serial	= cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 3);
+	ctx->info.length	= cpu_to_le32(string_length);
+
+	/* Populate bulk out endpoint context: */
+	max_burst = DEBUG_MAX_BURST(readl(&xdbc.xdbc_reg->control));
+	ep_out = (struct xdbc_ep_context *)&ctx->out;
+
+	ep_out->ep_info1	= 0;
+	ep_out->ep_info2	= cpu_to_le32(EP_TYPE(BULK_OUT_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
+	ep_out->deq		= cpu_to_le64(xdbc.out_seg.dma | xdbc.out_ring.cycle_state);
+
+	/* Populate bulk in endpoint context: */
+	ep_in = (struct xdbc_ep_context *)&ctx->in;
+
+	ep_in->ep_info1		= 0;
+	ep_in->ep_info2		= cpu_to_le32(EP_TYPE(BULK_IN_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
+	ep_in->deq		= cpu_to_le64(xdbc.in_seg.dma | xdbc.in_ring.cycle_state);
+
+	/* Set DbC context and info registers: */
+	xdbc_write64(xdbc.dbcc_dma, &xdbc.xdbc_reg->dccp);
+
+	dev_info = cpu_to_le32((XDBC_VENDOR_ID << 16) | XDBC_PROTOCOL);
+	writel(dev_info, &xdbc.xdbc_reg->devinfo1);
+
+	dev_info = cpu_to_le32((XDBC_DEVICE_REV << 16) | XDBC_PRODUCT_ID);
+	writel(dev_info, &xdbc.xdbc_reg->devinfo2);
+
+	xdbc.in_buf = xdbc.out_buf + XDBC_MAX_PACKET;
+	xdbc.in_dma = xdbc.out_dma + XDBC_MAX_PACKET;
+}
+
+static void xdbc_do_reset_debug_port(u32 id, u32 count)
+{
+	void __iomem *ops_reg;
+	void __iomem *portsc;
+	u32 val, cap_length;
+	int i;
+
+	cap_length = readl(xdbc.xhci_base) & 0xff;
+	ops_reg = xdbc.xhci_base + cap_length;
+
+	id--;
+	for (i = id; i < (id + count); i++) {
+		portsc = ops_reg + 0x400 + i * 0x10;
+		val = readl(portsc);
+		if (!(val & PORT_CONNECT))
+			writel(val | PORT_RESET, portsc);
+	}
+}
+
+static void xdbc_reset_debug_port(void)
+{
+	u32 val, port_offset, port_count;
+	int offset = 0;
+
+	do {
+		offset = xhci_find_next_ext_cap(xdbc.xhci_base, offset, XHCI_EXT_CAPS_PROTOCOL);
+		if (!offset)
+			break;
+
+		val = readl(xdbc.xhci_base + offset);
+		if (XHCI_EXT_PORT_MAJOR(val) != 0x3)
+			continue;
+
+		val = readl(xdbc.xhci_base + offset + 8);
+		port_offset = XHCI_EXT_PORT_OFF(val);
+		port_count = XHCI_EXT_PORT_COUNT(val);
+
+		xdbc_do_reset_debug_port(port_offset, port_count);
+	} while (1);
+}
+
+static void
+xdbc_queue_trb(struct xdbc_ring *ring, u32 field1, u32 field2, u32 field3, u32 field4)
+{
+	struct xdbc_trb *trb, *link_trb;
+
+	trb = ring->enqueue;
+	trb->field[0] = cpu_to_le32(field1);
+	trb->field[1] = cpu_to_le32(field2);
+	trb->field[2] = cpu_to_le32(field3);
+	trb->field[3] = cpu_to_le32(field4);
+
+	++(ring->enqueue);
+	if (ring->enqueue >= &ring->segment->trbs[TRBS_PER_SEGMENT - 1]) {
+		link_trb = ring->enqueue;
+		if (ring->cycle_state)
+			link_trb->field[3] |= cpu_to_le32(TRB_CYCLE);
+		else
+			link_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
+
+		ring->enqueue = ring->segment->trbs;
+		ring->cycle_state ^= 1;
+	}
+}
+
+static void xdbc_ring_doorbell(int target)
+{
+	writel(DOOR_BELL_TARGET(target), &xdbc.xdbc_reg->doorbell);
+}
+
+static int xdbc_start(void)
+{
+	u32 ctrl, status;
+	int ret;
+
+	ctrl = readl(&xdbc.xdbc_reg->control);
+	writel(ctrl | CTRL_DBC_ENABLE | CTRL_PORT_ENABLE, &xdbc.xdbc_reg->control);
+	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, CTRL_DBC_ENABLE, 100000, 100);
+	if (ret) {
+		xdbc_trace("failed to initialize hardware\n");
+		return ret;
+	}
+
+	/* Reset port to avoid bus hang: */
+	if (xdbc.vendor == PCI_VENDOR_ID_INTEL)
+		xdbc_reset_debug_port();
+
+	/* Wait for port connection: */
+	ret = handshake(&xdbc.xdbc_reg->portsc, PORTSC_CONN_STATUS, PORTSC_CONN_STATUS, 5000000, 100);
+	if (ret) {
+		xdbc_trace("waiting for connection timed out\n");
+		return ret;
+	}
+
+	/* Wait for debug device to be configured: */
+	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_RUN, CTRL_DBC_RUN, 5000000, 100);
+	if (ret) {
+		xdbc_trace("waiting for device configuration timed out\n");
+		return ret;
+	}
+
+	/* Check port number: */
+	status = readl(&xdbc.xdbc_reg->status);
+	if (!DCST_DEBUG_PORT(status)) {
+		xdbc_trace("invalid root hub port number\n");
+		return -ENODEV;
+	}
+
+	xdbc.port_number = DCST_DEBUG_PORT(status);
+
+	xdbc_trace("DbC is running now, control 0x%08x port ID %d\n",
+		   readl(&xdbc.xdbc_reg->control), xdbc.port_number);
+
+	return 0;
+}
+
+static int xdbc_bulk_transfer(void *data, int size, bool read)
+{
+	struct xdbc_ring *ring;
+	struct xdbc_trb *trb;
+	u32 length, control;
+	u32 cycle;
+	u64 addr;
+
+	if (size > XDBC_MAX_PACKET) {
+		xdbc_trace("bad parameter, size %d\n", size);
+		return -EINVAL;
+	}
+
+	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED) ||
+	    !(xdbc.flags & XDBC_FLAGS_CONFIGURED) ||
+	    (!read && (xdbc.flags & XDBC_FLAGS_OUT_STALL)) ||
+	    (read && (xdbc.flags & XDBC_FLAGS_IN_STALL))) {
+
+		xdbc_trace("connection not ready, flags %08x\n", xdbc.flags);
+		return -EIO;
+	}
+
+	ring = (read ? &xdbc.in_ring : &xdbc.out_ring);
+	trb = ring->enqueue;
+	cycle = ring->cycle_state;
+	length = TRB_LEN(size);
+	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;
+
+	if (cycle)
+		control &= cpu_to_le32(~TRB_CYCLE);
+	else
+		control |= cpu_to_le32(TRB_CYCLE);
+
+	if (read) {
+		memset(xdbc.in_buf, 0, XDBC_MAX_PACKET);
+		addr = xdbc.in_dma;
+		xdbc.flags |= XDBC_FLAGS_IN_PROCESS;
+	} else {
+		memset(xdbc.out_buf, 0, XDBC_MAX_PACKET);
+		memcpy(xdbc.out_buf, data, size);
+		addr = xdbc.out_dma;
+		xdbc.flags |= XDBC_FLAGS_OUT_PROCESS;
+	}
+
+	xdbc_queue_trb(ring, lower_32_bits(addr), upper_32_bits(addr), length, control);
+
+	/*
+	 * Add a barrier between writes of trb fields and flipping
+	 * the cycle bit:
+	 */
+	wmb();
+	if (cycle)
+		trb->field[3] |= cpu_to_le32(cycle);
+	else
+		trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
+
+	xdbc_ring_doorbell(read ? IN_EP_DOORBELL : OUT_EP_DOORBELL);
+
+	return size;
+}
+
+static int xdbc_handle_external_reset(void)
+{
+	int ret = 0;
+
+	xdbc.flags = 0;
+	writel(0, &xdbc.xdbc_reg->control);
+	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 10);
+	if (ret)
+		goto reset_out;
+
+	xdbc_mem_init();
+
+	mmiowb();
+
+	ret = xdbc_start();
+	if (ret < 0)
+		goto reset_out;
+
+	xdbc_trace("dbc recovered\n");
+
+	xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;
+
+	xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
+
+	return 0;
+
+reset_out:
+	xdbc_trace("failed to recover from external reset\n");
+	return ret;
+}
+
+static int __init xdbc_early_setup(void)
+{
+	int ret;
+
+	writel(0, &xdbc.xdbc_reg->control);
+	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 100);
+	if (ret)
+		return ret;
+
+	/* Allocate the table page: */
+	xdbc.table_base = xdbc_get_page(&xdbc.table_dma);
+	if (!xdbc.table_base)
+		return -ENOMEM;
+
+	/* Get and store the transfer buffer: */
+	xdbc.out_buf = xdbc_get_page(&xdbc.out_dma);
+	if (!xdbc.out_buf)
+		return -ENOMEM;
+
+	/* Allocate the event ring: */
+	ret = xdbc_alloc_ring(&xdbc.evt_seg, &xdbc.evt_ring);
+	if (ret < 0)
+		return ret;
+
+	/* Allocate IN/OUT endpoint transfer rings: */
+	ret = xdbc_alloc_ring(&xdbc.in_seg, &xdbc.in_ring);
+	if (ret < 0)
+		return ret;
+
+	ret = xdbc_alloc_ring(&xdbc.out_seg, &xdbc.out_ring);
+	if (ret < 0)
+		return ret;
+
+	xdbc_mem_init();
+
+	mmiowb();
+
+	ret = xdbc_start();
+	if (ret < 0) {
+		writel(0, &xdbc.xdbc_reg->control);
+		return ret;
+	}
+
+	xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;
+
+	xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
+
+	return 0;
+}
+
+int __init early_xdbc_parse_parameter(char *s)
+{
+	unsigned long dbgp_num = 0;
+	u32 bus, dev, func, offset;
+	int ret;
+
+	if (!early_pci_allowed())
+		return -EPERM;
+
+	if (strstr(s, "keep"))
+		early_console_keep = true;
+
+	if (xdbc.xdbc_reg)
+		return 0;
+
+	if (*s && kstrtoul(s, 0, &dbgp_num))
+		dbgp_num = 0;
+
+	pr_notice("dbgp_num: %lu\n", dbgp_num);
+
+	/* Locate the host controller: */
+	ret = xdbc_find_dbgp(dbgp_num, &bus, &dev, &func);
+	if (ret) {
+		pr_notice("failed to locate xhci host\n");
+		return -ENODEV;
+	}
+
+	xdbc.vendor	= read_pci_config_16(bus, dev, func, PCI_VENDOR_ID);
+	xdbc.device	= read_pci_config_16(bus, dev, func, PCI_DEVICE_ID);
+	xdbc.bus	= bus;
+	xdbc.dev	= dev;
+	xdbc.func	= func;
+
+	/* Map the IO memory: */
+	xdbc.xhci_base = xdbc_map_pci_mmio(bus, dev, func);
+	if (!xdbc.xhci_base)
+		return -EINVAL;
+
+	/* Locate DbC registers: */
+	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
+	if (!offset) {
+		pr_notice("xhci host doesn't support debug capability\n");
+		early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
+		xdbc.xhci_base = NULL;
+		xdbc.xhci_length = 0;
+
+		return -ENODEV;
+	}
+	xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);
+
+	return 0;
+}
+
+int __init early_xdbc_setup_hardware(void)
+{
+	int ret;
+
+	if (!xdbc.xdbc_reg)
+		return -ENODEV;
+
+	xdbc_bios_handoff();
+
+	raw_spin_lock_init(&xdbc.lock);
+
+	ret = xdbc_early_setup();
+	if (ret) {
+		pr_notice("failed to setup the connection to host\n");
+
+		xdbc_free_ring(&xdbc.evt_ring);
+		xdbc_free_ring(&xdbc.out_ring);
+		xdbc_free_ring(&xdbc.in_ring);
+
+		if (xdbc.table_dma)
+			free_bootmem(xdbc.table_dma, PAGE_SIZE);
+
+		if (xdbc.out_dma)
+			free_bootmem(xdbc.out_dma, PAGE_SIZE);
+
+		xdbc.table_base = NULL;
+		xdbc.out_buf = NULL;
+	}
+
+	return ret;
+}
+
+static void xdbc_handle_port_status(struct xdbc_trb *evt_trb)
+{
+	u32 port_reg;
+
+	port_reg = readl(&xdbc.xdbc_reg->portsc);
+	if (port_reg & PORTSC_CONN_CHANGE) {
+		xdbc_trace("connect status change event\n");
+
+		/* Check whether cable unplugged: */
+		if (!(port_reg & PORTSC_CONN_STATUS)) {
+			xdbc.flags = 0;
+			xdbc_trace("cable unplugged\n");
+		}
+	}
+
+	if (port_reg & PORTSC_RESET_CHANGE)
+		xdbc_trace("port reset change event\n");
+
+	if (port_reg & PORTSC_LINK_CHANGE)
+		xdbc_trace("port link status change event\n");
+
+	if (port_reg & PORTSC_CONFIG_CHANGE)
+		xdbc_trace("config error change\n");
+
+	/* Write back the value to clear RW1C bits: */
+	writel(port_reg, &xdbc.xdbc_reg->portsc);
+}
+
+static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
+{
+	size_t remain_length;
+	u32 comp_code;
+	int ep_id;
+
+	comp_code	= GET_COMP_CODE(le32_to_cpu(evt_trb->field[2]));
+	remain_length	= EVENT_TRB_LEN(le32_to_cpu(evt_trb->field[2]));
+	ep_id		= TRB_TO_EP_ID(le32_to_cpu(evt_trb->field[3]));
+
+	switch (comp_code) {
+	case COMP_SUCCESS:
+		remain_length = 0;
+	case COMP_SHORT_PACKET:
+		break;
+	case COMP_TRB_ERROR:
+	case COMP_BABBLE_DETECTED_ERROR:
+	case COMP_USB_TRANSACTION_ERROR:
+	case COMP_STALL_ERROR:
+	default:
+		if (ep_id == XDBC_EPID_OUT)
+			xdbc.flags |= XDBC_FLAGS_OUT_STALL;
+		if (ep_id == XDBC_EPID_IN)
+			xdbc.flags |= XDBC_FLAGS_IN_STALL;
+
+		xdbc_trace("endpoint %d stalled\n", ep_id);
+		break;
+	}
+
+	if (ep_id == XDBC_EPID_IN) {
+		xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS;
+		xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
+	} else if (ep_id == XDBC_EPID_OUT) {
+		xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS;
+	} else {
+		xdbc_trace("invalid endpoint id %d\n", ep_id);
+	}
+}
+
+static void xdbc_handle_events(void)
+{
+	struct xdbc_trb *evt_trb;
+	bool update_erdp = false;
+	u32 reg;
+	u8 cmd;
+
+	cmd = read_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND);
+	if (!(cmd & PCI_COMMAND_MASTER)) {
+		cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
+		write_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND, cmd);
+	}
+
+	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
+		return;
+
+	/* Handle external reset events: */
+	reg = readl(&xdbc.xdbc_reg->control);
+	if (!(reg & CTRL_DBC_ENABLE)) {
+		if (xdbc_handle_external_reset()) {
+			xdbc_trace("failed to recover connection\n");
+			return;
+		}
+	}
+
+	/* Handle configure-exit event: */
+	reg = readl(&xdbc.xdbc_reg->control);
+	if (reg & CTRL_DBC_RUN_CHANGE) {
+		writel(reg, &xdbc.xdbc_reg->control);
+		if (reg & CTRL_DBC_RUN)
+			xdbc.flags |= XDBC_FLAGS_CONFIGURED;
+		else
+			xdbc.flags &= ~XDBC_FLAGS_CONFIGURED;
+	}
+
+	/* Handle endpoint stall event: */
+	reg = readl(&xdbc.xdbc_reg->control);
+	if (reg & CTRL_HALT_IN_TR) {
+		xdbc.flags |= XDBC_FLAGS_IN_STALL;
+	} else {
+		xdbc.flags &= ~XDBC_FLAGS_IN_STALL;
+		if (!(xdbc.flags & XDBC_FLAGS_IN_PROCESS))
+			xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
+	}
+
+	if (reg & CTRL_HALT_OUT_TR)
+		xdbc.flags |= XDBC_FLAGS_OUT_STALL;
+	else
+		xdbc.flags &= ~XDBC_FLAGS_OUT_STALL;
+
+	/* Handle the events in the event ring: */
+	evt_trb = xdbc.evt_ring.dequeue;
+	while ((le32_to_cpu(evt_trb->field[3]) & TRB_CYCLE) == xdbc.evt_ring.cycle_state) {
+		/*
+		 * Add a barrier between reading the cycle flag and any
+		 * reads of the event's flags/data below:
+		 */
+		rmb();
+
+		switch ((le32_to_cpu(evt_trb->field[3]) & TRB_TYPE_BITMASK)) {
+		case TRB_TYPE(TRB_PORT_STATUS):
+			xdbc_handle_port_status(evt_trb);
+			break;
+		case TRB_TYPE(TRB_TRANSFER):
+			xdbc_handle_tx_event(evt_trb);
+			break;
+		default:
+			break;
+		}
+
+		++(xdbc.evt_ring.dequeue);
+		if (xdbc.evt_ring.dequeue == &xdbc.evt_seg.trbs[TRBS_PER_SEGMENT]) {
+			xdbc.evt_ring.dequeue = xdbc.evt_seg.trbs;
+			xdbc.evt_ring.cycle_state ^= 1;
+		}
+
+		evt_trb = xdbc.evt_ring.dequeue;
+		update_erdp = true;
+	}
+
+	/* Update event ring dequeue pointer: */
+	if (update_erdp)
+		xdbc_write64(__pa(xdbc.evt_ring.dequeue), &xdbc.xdbc_reg->erdp);
+}
+
+static int xdbc_bulk_write(const char *bytes, int size)
+{
+	int ret, timeout = 0;
+	unsigned long flags;
+
+retry:
+	if (in_nmi()) {
+		if (!raw_spin_trylock_irqsave(&xdbc.lock, flags))
+			return -EAGAIN;
+	} else {
+		raw_spin_lock_irqsave(&xdbc.lock, flags);
+	}
+
+	xdbc_handle_events();
+
+	/* Check completion of the previous request: */
+	if ((xdbc.flags & XDBC_FLAGS_OUT_PROCESS) && (timeout < 2000000)) {
+		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
+		udelay(100);
+		timeout += 100;
+		goto retry;
+	}
+
+	if (xdbc.flags & XDBC_FLAGS_OUT_PROCESS) {
+		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
+		xdbc_trace("previous transfer not completed yet\n");
+
+		return -ETIMEDOUT;
+	}
+
+	ret = xdbc_bulk_transfer((void *)bytes, size, false);
+	raw_spin_unlock_irqrestore(&xdbc.lock, flags);
+
+	return ret;
+}
+
+static void early_xdbc_write(struct console *con, const char *str, u32 n)
+{
+	static char buf[XDBC_MAX_PACKET];
+	int chunk, ret;
+	int use_cr = 0;
+
+	if (!xdbc.xdbc_reg)
+		return;
+	memset(buf, 0, XDBC_MAX_PACKET);
+	while (n > 0) {
+		for (chunk = 0; chunk < XDBC_MAX_PACKET && n > 0; str++, chunk++, n--) {
+
+			if (!use_cr && *str == '\n') {
+				use_cr = 1;
+				buf[chunk] = '\r';
+				str--;
+				n++;
+				continue;
+			}
+
+			if (use_cr)
+				use_cr = 0;
+			buf[chunk] = *str;
+		}
+
+		if (chunk > 0) {
+			ret = xdbc_bulk_write(buf, chunk);
+			if (ret < 0)
+				xdbc_trace("missed message {%s}\n", buf);
+		}
+	}
+}
+
+static struct console early_xdbc_console = {
+	.name		= "earlyxdbc",
+	.write		= early_xdbc_write,
+	.flags		= CON_PRINTBUFFER,
+	.index		= -1,
+};
+
+void __init early_xdbc_register_console(void)
+{
+	if (early_console)
+		return;
+
+	early_console = &early_xdbc_console;
+	if (early_console_keep)
+		early_console->flags &= ~CON_BOOT;
+	else
+		early_console->flags |= CON_BOOT;
+	register_console(early_console);
+}
+
+static void xdbc_unregister_console(void)
+{
+	if (early_xdbc_console.flags & CON_ENABLED)
+		unregister_console(&early_xdbc_console);
+}
+
+static int xdbc_scrub_function(void *ptr)
+{
+	unsigned long flags;
+
+	while (true) {
+		raw_spin_lock_irqsave(&xdbc.lock, flags);
+		xdbc_handle_events();
+
+		if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED)) {
+			raw_spin_unlock_irqrestore(&xdbc.lock, flags);
+			break;
+		}
+
+		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
+		schedule_timeout_interruptible(1);
+	}
+
+	xdbc_unregister_console();
+	writel(0, &xdbc.xdbc_reg->control);
+	xdbc_trace("dbc scrub function exits\n");
+
+	return 0;
+}
+
+static int __init xdbc_init(void)
+{
+	unsigned long flags;
+	void __iomem *base;
+	int ret = 0;
+	u32 offset;
+
+	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
+		return 0;
+
+	/*
+	 * It's time to shut down the DbC, so that the debug
+	 * port can be reused by the host controller:
+	 */
+	if (early_xdbc_console.index == -1 ||
+	    (early_xdbc_console.flags & CON_BOOT)) {
+		xdbc_trace("hardware not used anymore\n");
+		goto free_and_quit;
+	}
+
+	base = ioremap_nocache(xdbc.xhci_start, xdbc.xhci_length);
+	if (!base) {
+		xdbc_trace("failed to remap the io address\n");
+		ret = -ENOMEM;
+		goto free_and_quit;
+	}
+
+	raw_spin_lock_irqsave(&xdbc.lock, flags);
+	early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
+	xdbc.xhci_base = base;
+	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
+	xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);
+	raw_spin_unlock_irqrestore(&xdbc.lock, flags);
+
+	kthread_run(xdbc_scrub_function, NULL, "%s", "xdbc");
+
+	return 0;
+
+free_and_quit:
+	xdbc_free_ring(&xdbc.evt_ring);
+	xdbc_free_ring(&xdbc.out_ring);
+	xdbc_free_ring(&xdbc.in_ring);
+	free_bootmem(xdbc.table_dma, PAGE_SIZE);
+	free_bootmem(xdbc.out_dma, PAGE_SIZE);
+	writel(0, &xdbc.xdbc_reg->control);
+	early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
+
+	return ret;
+}
+subsys_initcall(xdbc_init);
diff --git a/drivers/usb/early/xhci-dbc.h b/drivers/usb/early/xhci-dbc.h
new file mode 100644
index 0000000..2df0f6e
--- /dev/null
+++ b/drivers/usb/early/xhci-dbc.h
@@ -0,0 +1,211 @@
+/*
+ * xhci-dbc.h - xHCI debug capability early driver
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@...ux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_XHCI_DBC_H
+#define __LINUX_XHCI_DBC_H
+
+#include <linux/types.h>
+#include <linux/usb/ch9.h>
+
+/*
+ * xHCI Debug Capability Register interfaces:
+ */
+struct xdbc_regs {
+	__le32	capability;
+	__le32	doorbell;
+	__le32	ersts;		/* Event Ring Segment Table Size */
+	__le32	__reserved_0;	/* 0c~0f reserved bits */
+	__le64	erstba;		/* Event Ring Segment Table Base Address */
+	__le64	erdp;		/* Event Ring Dequeue Pointer */
+	__le32	control;
+	__le32	status;
+	__le32	portsc;		/* Port status and control */
+	__le32	__reserved_1;	/* 2b~28 reserved bits */
+	__le64	dccp;		/* Debug Capability Context Pointer */
+	__le32	devinfo1;	/* Device Descriptor Info Register 1 */
+	__le32	devinfo2;	/* Device Descriptor Info Register 2 */
+};
+
+#define DEBUG_MAX_BURST(p)	(((p) >> 16) & 0xff)
+
+#define CTRL_DBC_RUN		BIT(0)
+#define CTRL_PORT_ENABLE	BIT(1)
+#define CTRL_HALT_OUT_TR	BIT(2)
+#define CTRL_HALT_IN_TR		BIT(3)
+#define CTRL_DBC_RUN_CHANGE	BIT(4)
+#define CTRL_DBC_ENABLE		BIT(31)
+
+#define DCST_DEBUG_PORT(p)	(((p) >> 24) & 0xff)
+
+#define PORTSC_CONN_STATUS	BIT(0)
+#define PORTSC_CONN_CHANGE	BIT(17)
+#define PORTSC_RESET_CHANGE	BIT(21)
+#define PORTSC_LINK_CHANGE	BIT(22)
+#define PORTSC_CONFIG_CHANGE	BIT(23)
+
+/*
+ * xHCI Debug Capability data structures:
+ */
+struct xdbc_trb {
+	__le32 field[4];
+};
+
+struct xdbc_erst_entry {
+	__le64	seg_addr;
+	__le32	seg_size;
+	__le32	__reserved_0;
+};
+
+struct xdbc_info_context {
+	__le64	string0;
+	__le64	manufacturer;
+	__le64	product;
+	__le64	serial;
+	__le32	length;
+	__le32	__reserved_0[7];
+};
+
+struct xdbc_ep_context {
+	__le32	ep_info1;
+	__le32	ep_info2;
+	__le64	deq;
+	__le32	tx_info;
+	__le32	__reserved_0[11];
+};
+
+struct xdbc_context {
+	struct xdbc_info_context	info;
+	struct xdbc_ep_context		out;
+	struct xdbc_ep_context		in;
+};
+
+#define XDBC_INFO_CONTEXT_SIZE		48
+#define XDBC_MAX_STRING_LENGTH		64
+#define XDBC_STRING_MANUFACTURER	"Linux"
+#define XDBC_STRING_PRODUCT		"Remote GDB"
+#define XDBC_STRING_SERIAL		"0001"
+
+struct xdbc_strings {
+	char	string0[XDBC_MAX_STRING_LENGTH];
+	char	manufacturer[XDBC_MAX_STRING_LENGTH];
+	char	product[XDBC_MAX_STRING_LENGTH];
+	char	serial[XDBC_MAX_STRING_LENGTH];
+};
+
+#define XDBC_PROTOCOL		1	/* GNU Remote Debug Command Set */
+#define XDBC_VENDOR_ID		0x1d6b	/* Linux Foundation 0x1d6b */
+#define XDBC_PRODUCT_ID		0x0004	/* __le16 idProduct; device 0004 */
+#define XDBC_DEVICE_REV		0x0010	/* 0.10 */
+
+/*
+ * xHCI Debug Capability software state structures:
+ */
+struct xdbc_segment {
+	struct xdbc_trb		*trbs;
+	dma_addr_t		dma;
+};
+
+#define XDBC_TRBS_PER_SEGMENT	256
+
+struct xdbc_ring {
+	struct xdbc_segment	*segment;
+	struct xdbc_trb		*enqueue;
+	struct xdbc_trb		*dequeue;
+	u32			cycle_state;
+};
+
+#define XDBC_EPID_OUT		2
+#define XDBC_EPID_IN		3
+
+struct xdbc_state {
+	u16			vendor;
+	u16			device;
+	u32			bus;
+	u32			dev;
+	u32			func;
+	void __iomem		*xhci_base;
+	u64			xhci_start;
+	size_t			xhci_length;
+	int			port_number;
+
+	/* DbC register base */
+	struct xdbc_regs __iomem *xdbc_reg;
+
+	/* DbC table page */
+	dma_addr_t		table_dma;
+	void			*table_base;
+
+	/* event ring segment table */
+	dma_addr_t		erst_dma;
+	size_t			erst_size;
+	void			*erst_base;
+
+	/* event ring segments */
+	struct xdbc_ring	evt_ring;
+	struct xdbc_segment	evt_seg;
+
+	/* debug capability contexts */
+	dma_addr_t		dbcc_dma;
+	size_t			dbcc_size;
+	void			*dbcc_base;
+
+	/* descriptor strings */
+	dma_addr_t		string_dma;
+	size_t			string_size;
+	void			*string_base;
+
+	/* bulk OUT endpoint */
+	struct xdbc_ring	out_ring;
+	struct xdbc_segment	out_seg;
+	void			*out_buf;
+	dma_addr_t		out_dma;
+
+	/* bulk IN endpoint */
+	struct xdbc_ring	in_ring;
+	struct xdbc_segment	in_seg;
+	void			*in_buf;
+	dma_addr_t		in_dma;
+
+	u32			flags;
+
+	/* spinlock for early_xdbc_write() reentrancy */
+	raw_spinlock_t		lock;
+};
+
+#define XDBC_PCI_MAX_BUSES	256
+#define XDBC_PCI_MAX_DEVICES	32
+#define XDBC_PCI_MAX_FUNCTION	8
+
+#define XDBC_TABLE_ENTRY_SIZE	64
+#define XDBC_ERST_ENTRY_NUM	1
+#define XDBC_DBCC_ENTRY_NUM	3
+#define XDBC_STRING_ENTRY_NUM	4
+
+/* Bits definitions for xdbc_state.flags: */
+#define XDBC_FLAGS_INITIALIZED	BIT(0)
+#define XDBC_FLAGS_IN_STALL	BIT(1)
+#define XDBC_FLAGS_OUT_STALL	BIT(2)
+#define XDBC_FLAGS_IN_PROCESS	BIT(3)
+#define XDBC_FLAGS_OUT_PROCESS	BIT(4)
+#define XDBC_FLAGS_CONFIGURED	BIT(5)
+
+#define XDBC_MAX_PACKET		1024
+
+/* Door bell target: */
+#define OUT_EP_DOORBELL		0
+#define IN_EP_DOORBELL		1
+#define DOOR_BELL_TARGET(p)	(((p) & 0xff) << 8)
+
+#define xdbc_read64(regs)	xhci_read_64(NULL, (regs))
+#define xdbc_write64(val, regs)	xhci_write_64(NULL, (val), (regs))
+
+#endif /* __LINUX_XHCI_DBC_H */
diff --git a/include/linux/usb/xhci-dbgp.h b/include/linux/usb/xhci-dbgp.h
new file mode 100644
index 0000000..80c1cca
--- /dev/null
+++ b/include/linux/usb/xhci-dbgp.h
@@ -0,0 +1,29 @@
+/*
+ * Standalone xHCI debug capability driver
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@...ux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_XHCI_DBGP_H
+#define __LINUX_XHCI_DBGP_H
+
+#ifdef CONFIG_EARLY_PRINTK_USB_XDBC
+int __init early_xdbc_parse_parameter(char *s);
+int __init early_xdbc_setup_hardware(void);
+void __init early_xdbc_register_console(void);
+#else
+static inline int __init early_xdbc_setup_hardware(void)
+{
+	return -ENODEV;
+}
+static inline void __init early_xdbc_register_console(void)
+{
+}
+#endif /* CONFIG_EARLY_PRINTK_USB_XDBC */
+#endif /* __LINUX_XHCI_DBGP_H */
