Message-Id: <1465647219-7798-3-git-send-email-srinivas.pandruvada@linux.intel.com>
Date:	Sat, 11 Jun 2016 05:13:35 -0700
From:	Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>
To:	jikos@...nel.org, jic23@...nel.org, benjamin.tissoires@...hat.com
Cc:	linux-input@...r.kernel.org, linux-iio@...r.kernel.org,
	linux-kernel@...r.kernel.org, chaya.golan@...el.com,
	daniel.drubin@...el.com, A.Bhattacharya@....ac.be,
	Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>
Subject: [PATCH 2/6] hid: intel_ish-hid: ISH Transport layer

From: Daniel Drubin <daniel.drubin@...el.com>

The ISH transport layer (ishtp) is a bi-directional protocol implemented
on top of a PCI-based inter-processor communication layer. This layer
offers:
- Connection management
- Flow control with the firmware
- Multiple client sessions
- Client message transfer
- Client message reception
- DMA for RX and TX for fast data transfer

Refer to Documentation/hid/intel-ish-hid.txt for an overview of the
functionality implemented in this layer.
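
For context, here is a minimal sketch of how a client driver would sit on
top of this layer and register with the ISHTP bus, using the
ishtp_cl_driver API added by this patch. The ish_sample names and the
empty probe/remove bodies are illustrative assumptions only (a real
client also allocates, links and connects an ishtp_cl host client):

/* Illustrative only; assumes bus.h below and <linux/init.h> */
static int ish_sample_probe(struct ishtp_cl_device *cl_device)
{
	/* allocate an ishtp_cl, link it and connect to the fw client */
	return 0;
}

static int ish_sample_remove(struct ishtp_cl_device *cl_device)
{
	/* disconnect, unlink and free the ishtp_cl */
	return 0;
}

static struct ishtp_cl_driver ish_sample_driver = {
	.name	= "ish-sample",
	.probe	= ish_sample_probe,
	.remove	= ish_sample_remove,
};

/*
 * ishtp_cl_driver_register() expands to
 * __ishtp_cl_driver_register(driver, THIS_MODULE); drivers are
 * built in, so registration happens from an initcall.
 */
static int __init ish_sample_init(void)
{
	return ishtp_cl_driver_register(&ish_sample_driver);
}
late_initcall(ish_sample_init);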

Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>
---
 drivers/hid/Kconfig                         |    2 +
 drivers/hid/Makefile                        |    2 +
 drivers/hid/intel-ish-hid/Kconfig           |   22 +
 drivers/hid/intel-ish-hid/Makefile          |   10 +
 drivers/hid/intel-ish-hid/ishtp/bus.c       |  774 ++++++++++++++++++
 drivers/hid/intel-ish-hid/ishtp/bus.h       |  105 +++
 drivers/hid/intel-ish-hid/ishtp/client.c    | 1129 +++++++++++++++++++++++++++
 drivers/hid/intel-ish-hid/ishtp/client.h    |  194 +++++
 drivers/hid/intel-ish-hid/ishtp/dma-if.c    |  178 +++++
 drivers/hid/intel-ish-hid/ishtp/hbm.c       |  911 +++++++++++++++++++++
 drivers/hid/intel-ish-hid/ishtp/hbm.h       |  319 ++++++++
 drivers/hid/intel-ish-hid/ishtp/init.c      |   94 +++
 drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h |  280 +++++++
 13 files changed, 4020 insertions(+)
 create mode 100644 drivers/hid/intel-ish-hid/Kconfig
 create mode 100644 drivers/hid/intel-ish-hid/Makefile
 create mode 100644 drivers/hid/intel-ish-hid/ishtp/bus.c
 create mode 100644 drivers/hid/intel-ish-hid/ishtp/bus.h
 create mode 100644 drivers/hid/intel-ish-hid/ishtp/client.c
 create mode 100644 drivers/hid/intel-ish-hid/ishtp/client.h
 create mode 100644 drivers/hid/intel-ish-hid/ishtp/dma-if.c
 create mode 100644 drivers/hid/intel-ish-hid/ishtp/hbm.c
 create mode 100644 drivers/hid/intel-ish-hid/ishtp/hbm.h
 create mode 100644 drivers/hid/intel-ish-hid/ishtp/init.c
 create mode 100644 drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h

diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 5646ca4..56e69b4 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -944,4 +944,6 @@ source "drivers/hid/usbhid/Kconfig"
 
 source "drivers/hid/i2c-hid/Kconfig"
 
+source "drivers/hid/intel-ish-hid/Kconfig"
+
 endmenu
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index a2fb562..404b288 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -112,3 +112,5 @@ obj-$(CONFIG_USB_MOUSE)		+= usbhid/
 obj-$(CONFIG_USB_KBD)		+= usbhid/
 
 obj-$(CONFIG_I2C_HID)		+= i2c-hid/
+
+obj-$(CONFIG_INTEL_ISH_HID)	+= intel-ish-hid/
diff --git a/drivers/hid/intel-ish-hid/Kconfig b/drivers/hid/intel-ish-hid/Kconfig
new file mode 100644
index 0000000..8914f3b
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/Kconfig
@@ -0,0 +1,22 @@
+menu "Intel ISH HID support"
+	depends on X86 && PCI
+
+config INTEL_ISH_HID_TRANSPORT
+	bool
+	default n
+
+config INTEL_ISH_HID
+	bool "Intel Integrated Sensor Hub"
+	default n
+	select INTEL_ISH_HID_TRANSPORT
+	help
+	  The Integrated Sensor Hub (ISH) enables offloading sensor polling
+	  and algorithm processing to a dedicated low-power processor in the
+	  chipset. This allows the core processor to enter low-power modes
+	  more often, resulting in increased battery life. The current
+	  processors that support ISH are: Cherrytrail, Skylake, Broxton and
+	  Kaby Lake.
+
+	  Say Y here if you want to support Intel ISH. If unsure, say N.
+
+endmenu
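
For reference, enabling the above in a kernel configuration yields a
fragment like the following (both symbols are bool, so built-in only;
INTEL_ISH_HID_TRANSPORT is selected automatically by INTEL_ISH_HID):

  CONFIG_INTEL_ISH_HID=y
  CONFIG_INTEL_ISH_HID_TRANSPORT=y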
diff --git a/drivers/hid/intel-ish-hid/Makefile b/drivers/hid/intel-ish-hid/Makefile
new file mode 100644
index 0000000..a5eaa6e
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile - Intel ISH HID drivers
+# Copyright (c) 2014-2016, Intel Corporation.
+#
+obj-$(CONFIG_INTEL_ISH_HID_TRANSPORT) += intel-ishtp.o
+intel-ishtp-objs := ishtp/init.o
+intel-ishtp-objs += ishtp/hbm.o
+intel-ishtp-objs += ishtp/client.o
+intel-ishtp-objs += ishtp/bus.o
+intel-ishtp-objs += ishtp/dma-if.o
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
new file mode 100644
index 0000000..2f5cb5d
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -0,0 +1,774 @@
+/*
+ * ISHTP bus driver
+ *
+ * Copyright (c) 2012-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include "bus.h"
+#include "ishtp-dev.h"
+#include "client.h"
+#include "hbm.h"
+
+#define to_ishtp_cl_driver(d) container_of(d, struct ishtp_cl_driver, driver)
+#define to_ishtp_cl_device(d) container_of(d, struct ishtp_cl_device, dev)
+
+/**
+ * ishtp_recv() - process ishtp message
+ *
+ * @dev: ishtp device
+ *
+ * If a message with a valid header and size is received, this
+ * function calls the appropriate handler. If both the host and
+ * firmware addresses are zero, it is a bus management message; if
+ * only the host address is zero, it is a fixed-client message;
+ * otherwise it is a client message.
+ */
+void	ishtp_recv(struct ishtp_device *dev)
+{
+	uint32_t	msg_hdr;
+	struct ishtp_msg_hdr	*ishtp_hdr;
+
+	/* Read ISHTP header dword */
+	msg_hdr = dev->ops->ishtp_read_hdr(dev);
+	if (!msg_hdr)
+		return;
+
+	dev->ops->sync_fw_clock(dev);
+
+	ishtp_hdr = (struct ishtp_msg_hdr *)&msg_hdr;
+	dev->ishtp_msg_hdr = msg_hdr;
+
+	/* Sanity check: ISHTP frag. length in header */
+	if (ishtp_hdr->length > dev->mtu) {
+		dev_err(dev->devc,
+			"ISHTP hdr - bad length: %u; dropped [%08X]\n",
+			(unsigned int)ishtp_hdr->length, msg_hdr);
+		return;
+	}
+
+	/* ISHTP bus message */
+	if (!ishtp_hdr->host_addr && !ishtp_hdr->fw_addr)
+		recv_hbm(dev, ishtp_hdr);
+	/* ISHTP fixed-client message */
+	else if (!ishtp_hdr->host_addr)
+		recv_fixed_cl_msg(dev, ishtp_hdr);
+	else
+		/* ISHTP client message */
+		recv_ishtp_cl_msg(dev, ishtp_hdr);
+}
+EXPORT_SYMBOL(ishtp_recv);
+
+/**
+ * ishtp_send_msg() - Send ishtp message
+ *
+ * @dev: ishtp device
+ * @hdr: Message header
+ * @msg: Message contents
+ * @ipc_send_compl: completion callback
+ * @ipc_send_compl_prm: completion callback parameter
+ *
+ * Send a multi-fragment message via IPC. After sending the first fragment,
+ * the completion callback is called to schedule transmit of the next
+ * fragment. This returns the IPC send message status.
+ */
+int	ishtp_send_msg(struct ishtp_device *dev, struct ishtp_msg_hdr *hdr,
+		       void *msg, void(*ipc_send_compl)(void *),
+		       void *ipc_send_compl_prm)
+{
+	unsigned char	ipc_msg[IPC_FULL_MSG_SIZE];
+	uint32_t	drbl_val;
+
+	drbl_val = dev->ops->ipc_get_header(dev, hdr->length +
+					    sizeof(struct ishtp_msg_hdr),
+					    1);
+
+	memcpy(ipc_msg, &drbl_val, sizeof(uint32_t));
+	memcpy(ipc_msg + sizeof(uint32_t), hdr, sizeof(uint32_t));
+	memcpy(ipc_msg + 2 * sizeof(uint32_t), msg, hdr->length);
+	return	dev->ops->write(dev, ipc_send_compl, ipc_send_compl_prm,
+				ipc_msg, 2 * sizeof(uint32_t) + hdr->length);
+}
+
+/**
+ * ishtp_write_message() - Send ishtp single fragment message
+ *
+ * @dev: ishtp device
+ * @hdr: Message header
+ * @msg: Message contents
+ *
+ * Send a single fragment message via IPC.  This returns IPC send message
+ * status.
+ */
+int ishtp_write_message(struct ishtp_device *dev, struct ishtp_msg_hdr *hdr,
+			unsigned char *buf)
+{
+	return ishtp_send_msg(dev, hdr, buf, NULL, NULL);
+}
+
+/**
+ * ishtp_fw_cl_by_uuid() - locate index of fw client
+ *
+ * @dev: ishtp device
+ * @uuid: uuid of the client to search
+ * returns fw client index or -ENOENT if not found
+ */
+int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const uuid_le *uuid)
+{
+	int i, res = -ENOENT;
+
+	for (i = 0; i < dev->fw_clients_num; ++i) {
+		if (uuid_le_cmp(*uuid, dev->fw_clients[i].props.protocol_name)
+				== 0) {
+			res = i;
+			break;
+		}
+	}
+	return res;
+}
+EXPORT_SYMBOL(ishtp_fw_cl_by_uuid);
+
+/**
+ * ishtp_fw_cl_by_id() - return index to fw_clients for client_id
+ *
+ * @dev: the ishtp device structure
+ * @client_id: fw client id to search
+ *
+ * returns index on success, -ENOENT on failure.
+ */
+int ishtp_fw_cl_by_id(struct ishtp_device *dev, uint8_t client_id)
+{
+	int i, res = -ENOENT;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&dev->fw_clients_lock, flags);
+	for (i = 0; i < dev->fw_clients_num; i++) {
+		if (dev->fw_clients[i].client_id == client_id) {
+			res = i;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+
+	return res;
+}
+
+/**
+ * ishtp_cl_device_probe() - Bus probe() callback
+ *
+ * @dev: the device structure
+ *
+ * This is a bus probe callback and calls the driver's probe function.
+ */
+static int ishtp_cl_device_probe(struct device *dev)
+{
+	struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+	struct ishtp_cl_driver *driver;
+
+	if (!device)
+		return 0;
+
+	driver = to_ishtp_cl_driver(dev->driver);
+	if (!driver || !driver->probe)
+		return -ENODEV;
+
+	return driver->probe(device);
+}
+
+/**
+ * ishtp_cl_device_remove() - Bus remove() callback
+ *
+ * @dev: the device structure
+ *
+ * This is a bus remove callback and calls the driver's remove function.
+ * Since the ISH driver model supports only built-in drivers, this is
+ * primarily called during a pci driver init failure.
+ */
+static int ishtp_cl_device_remove(struct device *dev)
+{
+	struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+	struct ishtp_cl_driver *driver;
+
+	if (!device || !dev->driver)
+		return 0;
+
+	if (device->event_cb) {
+		device->event_cb = NULL;
+		cancel_work_sync(&device->event_work);
+	}
+
+	driver = to_ishtp_cl_driver(dev->driver);
+	if (!driver->remove) {
+		dev->driver = NULL;
+
+		return 0;
+	}
+
+	return driver->remove(device);
+}
+
+/**
+ * ishtp_cl_device_suspend() - Bus suspend callback
+ *
+ * @dev:	device
+ * @state:	pm notification type
+ *
+ * Called during device suspend process.
+ */
+static int ishtp_cl_device_suspend(struct device *dev, pm_message_t state)
+{
+	struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+	struct ishtp_cl_driver *driver;
+	int ret = 0;
+
+	if (!device)
+		return 0;
+
+	driver = to_ishtp_cl_driver(dev->driver);
+	if (driver && driver->driver.pm) {
+		if (driver->driver.pm->suspend)
+			ret = driver->driver.pm->suspend(dev);
+	}
+
+	return ret;
+}
+
+/**
+ * ishtp_cl_device_resume() - Bus resume callback
+ *
+ * @dev:	device
+ *
+ * Called during device resume process.
+ */
+static int ishtp_cl_device_resume(struct device *dev)
+{
+	struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+	struct ishtp_cl_driver *driver;
+	int ret = 0;
+
+	if (!device)
+		return 0;
+
+	/*
+	 * When ISH needs a hard reset, it is done asynchronously, hence bus
+	 * resume will be called before full ISH resume
+	 */
+	if (device->ishtp_dev->resume_flag)
+		return 0;
+
+	driver = to_ishtp_cl_driver(dev->driver);
+	if (driver && driver->driver.pm) {
+		if (driver->driver.pm->resume)
+			ret = driver->driver.pm->resume(dev);
+	}
+
+	return ret;
+}
+
+/**
+ * ishtp_cl_device_reset() - Reset callback
+ *
+ * @device:	ishtp client device instance
+ *
+ * This is a callback when HW reset is done and the device needs
+ * to be reinitialized.
+ */
+static int ishtp_cl_device_reset(struct ishtp_cl_device *device)
+{
+	struct ishtp_cl_driver *driver;
+	int ret = 0;
+
+	device->event_cb = NULL;
+	cancel_work_sync(&device->event_work);
+
+	driver = to_ishtp_cl_driver(device->dev.driver);
+	if (driver && driver->reset)
+		ret = driver->reset(device);
+
+	return ret;
+}
+
+
+static struct bus_type ishtp_cl_bus_type = {
+	.name		= "ishtp",
+	.probe		= ishtp_cl_device_probe,
+	.remove		= ishtp_cl_device_remove,
+	.suspend	= ishtp_cl_device_suspend,
+	.resume		= ishtp_cl_device_resume,
+};
+
+static void ishtp_cl_dev_release(struct device *dev)
+{
+	kfree(to_ishtp_cl_device(dev));
+}
+
+static struct device_type ishtp_cl_device_type = {
+	.release	= ishtp_cl_dev_release,
+};
+
+/**
+ * ishtp_cl_bus_init() - Function to register bus
+ *
+ * This registers the ISHTP bus during driver init.
+ */
+int  ishtp_cl_bus_init(void)
+{
+	int	rv;
+
+	rv = bus_register(&ishtp_cl_bus_type);
+	return	rv;
+}
+EXPORT_SYMBOL(ishtp_cl_bus_init);
+
+/**
+ * ishtp_cl_bus_exit() - Function to unregister bus
+ *
+ * This unregisters the ISHTP bus during driver exit, via the failure
+ * path. Since only the built-in model is supported, the driver
+ * can't be unloaded.
+ */
+void ishtp_cl_bus_exit(void)
+{
+	bus_unregister(&ishtp_cl_bus_type);
+}
+EXPORT_SYMBOL(ishtp_cl_bus_exit);
+
+/**
+ * ishtp_bus_add_device() - Function to create device on bus
+ *
+ * @dev:	ishtp device
+ * @uuid:	uuid of the client
+ * @name:	Name of the client
+ *
+ * Allocate ISHTP bus client device, attach it to uuid
+ * and register with ISHTP bus.
+ */
+struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
+					     uuid_le uuid, char *name)
+{
+	struct ishtp_cl_device *device;
+	int status;
+	unsigned long flags;
+	struct list_head *pos;
+
+	spin_lock_irqsave(&dev->device_list_lock, flags);
+	list_for_each(pos, &dev->device_list) {
+		device = list_entry(pos, struct ishtp_cl_device, device_link);
+		if (!strcmp(name, dev_name(&device->dev))) {
+			device->fw_client = &dev->fw_clients[
+				dev->fw_client_presentation_num - 1];
+			spin_unlock_irqrestore(&dev->device_list_lock, flags);
+			ishtp_cl_device_reset(device);
+			return device;
+		}
+	}
+	spin_unlock_irqrestore(&dev->device_list_lock, flags);
+
+	device = kzalloc(sizeof(struct ishtp_cl_device), GFP_KERNEL);
+	if (!device)
+		return NULL;
+
+	device->dev.parent = dev->devc;
+	device->dev.bus = &ishtp_cl_bus_type;
+	device->dev.type = &ishtp_cl_device_type;
+	device->ishtp_dev = dev;
+
+	device->fw_client =
+		&dev->fw_clients[dev->fw_client_presentation_num - 1];
+
+	dev_set_name(&device->dev, "%s", name);
+
+	spin_lock_irqsave(&dev->device_list_lock, flags);
+	list_add_tail(&device->device_link, &dev->device_list);
+	spin_unlock_irqrestore(&dev->device_list_lock, flags);
+
+	status = device_register(&device->dev);
+	if (status) {
+		spin_lock_irqsave(&dev->device_list_lock, flags);
+		list_del(&device->device_link);
+		spin_unlock_irqrestore(&dev->device_list_lock, flags);
+		dev_err(dev->devc, "Failed to register ISHTP client device\n");
+		kfree(device);
+		return NULL;
+	}
+	return device;
+}
+
+/**
+ * ishtp_bus_remove_device() - Function to release device on bus
+ *
+ * @device:	client device instance
+ *
+ * This is the counterpart of ishtp_bus_add_device.
+ * The device is unregistered; the device structure is freed
+ * in the 'ishtp_cl_dev_release' function.
+ * Called only on error in the pci driver init path.
+ */
+void ishtp_bus_remove_device(struct ishtp_cl_device *device)
+{
+	device_unregister(&device->dev);
+}
+
+/**
+ * __ishtp_cl_driver_register() - Client driver register
+ *
+ * @driver:	the client driver instance
+ * @owner:	Owner of this driver module
+ *
+ * Once a client driver is probed, it creates a client
+ * instance and registers it with the bus.
+ */
+int __ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
+	struct module *owner)
+{
+	int err;
+
+	driver->driver.name = driver->name;
+	driver->driver.owner = owner;
+	driver->driver.bus = &ishtp_cl_bus_type;
+
+	err = driver_register(&driver->driver);
+	if (err)
+		return err;
+
+	return 0;
+}
+EXPORT_SYMBOL(__ishtp_cl_driver_register);
+
+/**
+ * ishtp_cl_driver_unregister() - Client driver unregister
+ *
+ * @driver:	the client driver instance
+ *
+ * Unregister client during device removal process.
+ */
+void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver)
+{
+	driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL(ishtp_cl_driver_unregister);
+
+/**
+ * ishtp_bus_event_work() - event work function
+ *
+ * @work:	work struct pointer
+ *
+ * Once an event is received for a client this work
+ * function is called. If the device has registered a
+ * callback then the callback is called.
+ */
+static void ishtp_bus_event_work(struct work_struct *work)
+{
+	struct ishtp_cl_device *device;
+
+	device = container_of(work, struct ishtp_cl_device, event_work);
+
+	if (device->event_cb)
+		device->event_cb(device);
+}
+
+/**
+ * ishtp_cl_bus_rx_event() - schedule event work
+ *
+ * @device:	client device instance
+ *
+ * Once an event is received for a client, this schedules
+ * a work function to process it.
+ */
+void ishtp_cl_bus_rx_event(struct ishtp_cl_device *device)
+{
+	if (!device || !device->event_cb)
+		return;
+
+	if (device->event_cb)
+		schedule_work(&device->event_work);
+}
+
+/**
+ * ishtp_register_event_cb() - Register callback
+ *
+ * @device:	client device instance
+ * @event_cb:	Event processor for a client
+ *
+ * Register a callback for events, called from client driver
+ */
+int ishtp_register_event_cb(struct ishtp_cl_device *device,
+	void (*event_cb)(struct ishtp_cl_device *))
+{
+	if (device->event_cb)
+		return -EALREADY;
+
+	device->event_cb = event_cb;
+	INIT_WORK(&device->event_work, ishtp_bus_event_work);
+
+	return 0;
+}
+EXPORT_SYMBOL(ishtp_register_event_cb);
+
+/**
+ * ishtp_get_device() - update usage count for the device
+ *
+ * @cl_device:	client device instance
+ *
+ * Increment the usage count. A device with a nonzero count can't be deleted.
+ */
+void ishtp_get_device(struct ishtp_cl_device *cl_device)
+{
+	cl_device->reference_count++;
+}
+EXPORT_SYMBOL(ishtp_get_device);
+
+/**
+ * ishtp_put_device() - decrement usage count for the device
+ *
+ * @cl_device:	client device instance
+ *
+ * Decrement the usage count. The device can be deleted when the count is 0.
+ */
+void ishtp_put_device(struct ishtp_cl_device *cl_device)
+{
+	cl_device->reference_count--;
+}
+EXPORT_SYMBOL(ishtp_put_device);
+
+/**
+ * ishtp_bus_new_client() - Create a new client
+ *
+ * @dev:	ISHTP device instance
+ *
+ * Once bus protocol enumerates a client, this is called
+ * to add a device for the client.
+ */
+int	ishtp_bus_new_client(struct ishtp_device *dev)
+{
+	int	i;
+	char	*dev_name;
+	struct ishtp_cl_device	*cl_device;
+	uuid_le	device_uuid;
+
+	/*
+	 * For all reported clients, create an unconnected client and add its
+	 * device to ISHTP bus.
+	 * If appropriate driver has loaded, this will trigger its probe().
+	 * Otherwise, probe() will be called when driver is loaded
+	 */
+	i = dev->fw_client_presentation_num - 1;
+	device_uuid = dev->fw_clients[i].props.protocol_name;
+	dev_name = kasprintf(GFP_KERNEL,
+		"{%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X}",
+		device_uuid.b[3], device_uuid.b[2], device_uuid.b[1],
+		device_uuid.b[0], device_uuid.b[5], device_uuid.b[4],
+		device_uuid.b[7], device_uuid.b[6], device_uuid.b[8],
+		device_uuid.b[9], device_uuid.b[10], device_uuid.b[11],
+		device_uuid.b[12], device_uuid.b[13], device_uuid.b[14],
+		device_uuid.b[15]);
+	if (!dev_name)
+		return	-ENOMEM;
+
+	cl_device = ishtp_bus_add_device(dev, device_uuid, dev_name);
+	if (!cl_device) {
+		kfree(dev_name);
+		return	-ENOENT;
+	}
+
+	kfree(dev_name);
+
+	return	0;
+}
+
+/**
+ * does_driver_bind_uuid() - Check if uuid matches
+ *
+ * @dev:	device instance
+ * @id:		uuid
+ *
+ * Check if a driver is bound to this uuid.
+ */
+static int	does_driver_bind_uuid(struct device *dev, void *id)
+{
+	uuid_le	*uuid = id;
+	struct ishtp_cl_device	*device;
+
+	if (!dev->driver)
+		return	0;
+
+	device = to_ishtp_cl_device(dev);
+	if (!uuid_le_cmp(device->fw_client->props.protocol_name, *uuid))
+		return	1;
+
+	return	0;
+}
+
+/**
+ * ishtp_can_client_connect() - Check if a driver exists for an uuid
+ * @ishtp_dev:	ishtp device instance
+ * @uuid:	uuid of the client
+ *
+ * Check if a driver is attached to this uuid.
+ * Check if the driver is attached to an uuid.
+ */
+int	ishtp_can_client_connect(struct ishtp_device *ishtp_dev, uuid_le *uuid)
+{
+	int	rv;
+
+	rv = bus_for_each_dev(&ishtp_cl_bus_type, NULL, uuid,
+		does_driver_bind_uuid);
+
+	return	!rv;
+}
+
+/**
+ * ishtp_cl_device_bind() - bind a device
+ *
+ * @cl:		ishtp client instance
+ *
+ * Binds connected ishtp_cl to ISHTP bus device
+ */
+int	ishtp_cl_device_bind(struct ishtp_cl *cl)
+{
+	struct ishtp_cl_device	*cl_device;
+	unsigned long flags;
+	int	rv;
+
+	if (!cl->fw_client_id || cl->state != ISHTP_CL_CONNECTED)
+		return	-EFAULT;
+
+	rv = -ENOENT;
+	spin_lock_irqsave(&cl->dev->device_list_lock, flags);
+	list_for_each_entry(cl_device, &cl->dev->device_list,
+			device_link) {
+		if (cl_device->fw_client->client_id == cl->fw_client_id) {
+			cl->device = cl_device;
+			rv = 0;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&cl->dev->device_list_lock, flags);
+	return	rv;
+}
+
+/**
+ * ishtp_bus_remove_all_clients() - Remove all clients
+ *
+ * @ishtp_dev:		ishtp device
+ *
+ * This is part of the reset flow. The main processing here targets
+ * error handling: when the FW has forced a reset or hit an error,
+ * the connected clients are removed.
+ * Need to revisit whether this can also happen after a client is
+ * enumerated following a successful FW startup, in which case clients
+ * would have to be removed in our built-in driver model as well!
+ */
+void	ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev)
+{
+	struct ishtp_cl_device	*cl_device, *n;
+	struct ishtp_cl	*cl;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&ishtp_dev->cl_list_lock, flags);
+	list_for_each_entry(cl, &ishtp_dev->cl_list, link) {
+		cl->state = ISHTP_CL_DISCONNECTED;
+
+		/*
+		 * Wake any pending process. The waiter would check dev->state
+		 * and determine that it's not enabled already,
+		 * and will return error to its caller
+		 */
+		wake_up_interruptible(&cl->wait_ctrl_res);
+
+		/* Disband any pending read/write requests and free rb */
+		ishtp_cl_flush_queues(cl);
+
+		/* Remove all free and in_process rings, both Rx and Tx */
+		ishtp_cl_free_rx_ring(cl);
+		ishtp_cl_free_tx_ring(cl);
+
+		/*
+		 * Free client and ISHTP bus client device structures
+		 * don't free host client because it is part of the OS fd
+		 * structure
+		 */
+	}
+	spin_unlock_irqrestore(&ishtp_dev->cl_list_lock, flags);
+
+	/* Release DMA buffers for client messages */
+	ishtp_cl_free_dma_buf(ishtp_dev);
+
+	/* remove bus clients */
+	spin_lock_irqsave(&ishtp_dev->device_list_lock, flags);
+	list_for_each_entry_safe(cl_device, n, &ishtp_dev->device_list,
+				 device_link) {
+		if (cl_device->reference_count)
+			continue;
+
+		list_del(&cl_device->device_link);
+		ishtp_bus_remove_device(cl_device);
+	}
+	spin_unlock_irqrestore(&ishtp_dev->device_list_lock, flags);
+
+	/* Free all client structures */
+	spin_lock_irqsave(&ishtp_dev->fw_clients_lock, flags);
+	kfree(ishtp_dev->fw_clients);
+	ishtp_dev->fw_clients = NULL;
+	ishtp_dev->fw_clients_num = 0;
+	ishtp_dev->fw_client_presentation_num = 0;
+	ishtp_dev->fw_client_index = 0;
+	bitmap_zero(ishtp_dev->fw_clients_map, ISHTP_CLIENTS_MAX);
+	spin_unlock_irqrestore(&ishtp_dev->fw_clients_lock, flags);
+}
+EXPORT_SYMBOL(ishtp_bus_remove_all_clients);
+
+/**
+ * ishtp_reset_handler() - IPC reset handler
+ *
+ * @dev:	ishtp device
+ *
+ * ISHTP Handler for IPC_RESET notification
+ */
+int	ishtp_reset_handler(struct ishtp_device *dev)
+{
+	unsigned long	flags;
+
+	/* Handle FW-initiated reset */
+	dev->dev_state = ISHTP_DEV_RESETTING;
+
+	/* Clear BH processing queue - no further HBMs */
+	spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
+	dev->rd_msg_fifo_head = dev->rd_msg_fifo_tail = 0;
+	spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+
+	/* Handle ISH FW reset against upper layers */
+	ishtp_bus_remove_all_clients(dev);
+
+	return	0;
+}
+EXPORT_SYMBOL(ishtp_reset_handler);
+
+/**
+ * ishtp_reset_compl_handler() - Reset completion handler
+ *
+ * @dev:	ishtp device
+ *
+ * ISHTP handler for IPC_RESET sequence completion; starts the
+ * host bus message (HBM) start protocol sequence.
+ */
+int	ishtp_reset_compl_handler(struct ishtp_device *dev)
+{
+	dev->dev_state = ISHTP_DEV_INIT_CLIENTS;
+	dev->hbm_state = ISHTP_HBM_START;
+	ishtp_hbm_start_req(dev);
+
+	return	0;
+}
+EXPORT_SYMBOL(ishtp_reset_compl_handler);
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.h b/drivers/hid/intel-ish-hid/ishtp/bus.h
new file mode 100644
index 0000000..4c494e6
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.h
@@ -0,0 +1,105 @@
+/*
+ * ISHTP bus definitions
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef _LINUX_ISHTP_CL_BUS_H
+#define _LINUX_ISHTP_CL_BUS_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct ishtp_cl;
+struct ishtp_cl_device;
+struct ishtp_device;
+struct ishtp_msg_hdr;
+
+/**
+ * struct ishtp_cl_device - ISHTP device handle
+ * @dev:	device pointer
+ * @ishtp_dev:	pointer to the ishtp device structure, primarily to access
+ *		hw device operation callbacks and properties
+ * @fw_client:	fw client pointer for fw information such as protocol name,
+ *		max message length, etc.
+ * @device_link: Link to next client in the list on a bus
+ * @event_work:	Used to schedule rx event for client
+ * @driver_data: Storage for driver private data
+ * @reference_count:	Used for get/put device
+ * @event_cb:	Callback to driver to send events
+ *
+ * An ishtp_cl_device pointer is returned from ishtp_bus_add_device()
+ * and links ISHTP bus clients to their actual host client pointer.
+ * Drivers for ISHTP devices will get an ishtp_cl_device pointer
+ * when being probed and shall use it for doing bus I/O.
+ */
+struct ishtp_cl_device {
+	struct device		dev;
+	struct ishtp_device	*ishtp_dev;
+	struct ishtp_fw_client	*fw_client;
+	struct list_head	device_link;
+	struct work_struct	event_work;
+	void			*driver_data;
+	bool			reference_count;
+	void (*event_cb)(struct ishtp_cl_device *device);
+};
+
+/**
+ * struct ishtp_cl_driver - ISHTP client driver
+ * @driver:	driver instance on a bus
+ * @name:	Name of the device for probe
+ * @probe:	driver callback for device probe
+ * @remove:	driver callback on device removal
+ * @reset:	driver callback on device reset
+ * @pm:		device power management operations
+ *
+ * Client drivers define this to get probed/removed for ISHTP client devices.
+ */
+struct ishtp_cl_driver {
+	struct device_driver driver;
+	const char *name;
+	int (*probe)(struct ishtp_cl_device *dev);
+	int (*remove)(struct ishtp_cl_device *dev);
+	int (*reset)(struct ishtp_cl_device *dev);
+	const struct dev_pm_ops *pm;
+};
+
+int	__ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
+	struct module *owner);
+#define ishtp_cl_driver_register(driver)		\
+	__ishtp_cl_driver_register(driver, THIS_MODULE)
+void	ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver);
+
+int	ishtp_register_event_cb(struct ishtp_cl_device *device,
+	void (*read_cb)(struct ishtp_cl_device *));
+void	ishtp_put_device(struct ishtp_cl_device *);
+void	ishtp_get_device(struct ishtp_cl_device *);
+
+int	ishtp_cl_bus_init(void);
+void	ishtp_cl_bus_exit(void);
+int	ishtp_bus_new_client(struct ishtp_device *dev);
+void	ishtp_bus_remove_all_clients(struct ishtp_device *dev);
+int	ishtp_cl_device_bind(struct ishtp_cl *cl);
+void	ishtp_cl_bus_rx_event(struct ishtp_cl_device *device);
+int	ishtp_reset_handler(struct ishtp_device *dev);
+int	ishtp_reset_compl_handler(struct ishtp_device *dev);
+void	ishtp_recv(struct ishtp_device *dev);
+
+/* Write a multi-fragment message */
+int	ishtp_send_msg(struct ishtp_device *dev,
+		       struct ishtp_msg_hdr *hdr, void *msg,
+		       void (*ipc_send_compl)(void *),
+		       void *ipc_send_compl_prm);
+
+/* Write a single-fragment message */
+int	ishtp_write_message(struct ishtp_device *dev,
+			    struct ishtp_msg_hdr *hdr,
+			    unsigned char *buf);
+
+#endif /* _LINUX_ISHTP_CL_BUS_H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
new file mode 100644
index 0000000..990aa88
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -0,0 +1,1129 @@
+/*
+ * ISHTP client logic
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include "hbm.h"
+#include "client.h"
+
+/* ishtp_read_list_flush - removes list entries belonging to cl */
+void ishtp_read_list_flush(struct ishtp_cl *cl)
+{
+	struct ishtp_cl_rb *rb;
+	struct ishtp_cl_rb *next;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
+	list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
+		if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
+			list_del(&rb->list);
+			ishtp_io_rb_free(rb);
+		}
+	spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
+}
+
+/* ishtp_io_rb_free - free ishtp_cl_rb related memory */
+void ishtp_io_rb_free(struct ishtp_cl_rb *rb)
+{
+	if (rb == NULL)
+		return;
+
+	kfree(rb->buffer.data);
+	kfree(rb);
+}
+
+/**
+ * ishtp_io_rb_init - allocate and initialize request block
+ *
+ * returns ishtp_cl_rb pointer or NULL;
+ */
+struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl)
+{
+	struct ishtp_cl_rb *rb;
+
+	rb = kzalloc(sizeof(struct ishtp_cl_rb), GFP_KERNEL);
+	if (!rb)
+		return NULL;
+
+	INIT_LIST_HEAD(&rb->list);
+	rb->cl = cl;
+	rb->buf_idx = 0;
+	return rb;
+}
+
+/* ishtp_io_rb_alloc_buf - allocate response buffer */
+int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length)
+{
+	if (!rb)
+		return -EINVAL;
+
+	if (length == 0)
+		return 0;
+
+	rb->buffer.data = kmalloc(length, GFP_KERNEL);
+	if (!rb->buffer.data)
+		return -ENOMEM;
+
+	rb->buffer.size = length;
+	return 0;
+}
+
+/*
+ * ishtp_io_rb_recycle - re-append rb to its client's free list
+ * and send flow control if needed
+ */
+int ishtp_io_rb_recycle(struct ishtp_cl_rb *rb)
+{
+	struct ishtp_cl *cl;
+	int	rets = 0;
+	unsigned long	flags;
+
+	if (!rb || !rb->cl)
+		return	-EFAULT;
+
+	cl = rb->cl;
+	spin_lock_irqsave(&cl->free_list_spinlock, flags);
+	list_add_tail(&rb->list, &cl->free_rb_list.list);
+	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+
+	/*
+	 * If we returned the first buffer to empty 'free' list,
+	 * send flow control
+	 */
+	if (!cl->out_flow_ctrl_creds)
+		rets = ishtp_cl_read_start(cl);
+
+	return	rets;
+}
+EXPORT_SYMBOL(ishtp_io_rb_recycle);
+
+/* ishtp_cl_flush_queues - flushes queue lists belonging to cl */
+int ishtp_cl_flush_queues(struct ishtp_cl *cl)
+{
+	if (WARN_ON(!cl || !cl->dev))
+		return -EINVAL;
+
+	ishtp_read_list_flush(cl);
+
+	return 0;
+}
+EXPORT_SYMBOL(ishtp_cl_flush_queues);
+
+/* ishtp_cl_init - initializes cl */
+void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
+{
+	memset(cl, 0, sizeof(struct ishtp_cl));
+	init_waitqueue_head(&cl->wait_ctrl_res);
+	spin_lock_init(&cl->free_list_spinlock);
+	spin_lock_init(&cl->in_process_spinlock);
+	spin_lock_init(&cl->tx_list_spinlock);
+	spin_lock_init(&cl->tx_free_list_spinlock);
+	spin_lock_init(&cl->fc_spinlock);
+	INIT_LIST_HEAD(&cl->link);
+	cl->dev = dev;
+
+	INIT_LIST_HEAD(&cl->free_rb_list.list);
+	INIT_LIST_HEAD(&cl->tx_list.list);
+	INIT_LIST_HEAD(&cl->tx_free_list.list);
+	INIT_LIST_HEAD(&cl->in_process_list.list);
+
+	cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
+	cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
+
+	/* dma */
+	cl->last_tx_path = CL_TX_PATH_IPC;
+	cl->last_dma_acked = 1;
+	cl->last_dma_addr = NULL;
+	cl->last_ipc_acked = 1;
+}
+
+int	ishtp_cl_free_rx_ring(struct ishtp_cl *cl)
+{
+	struct ishtp_cl_rb *rb;
+	unsigned long	flags;
+
+	/* release allocated memory - pass over free_rb_list */
+	spin_lock_irqsave(&cl->free_list_spinlock, flags);
+	while (!list_empty(&cl->free_rb_list.list)) {
+		rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
+				list);
+		list_del(&rb->list);
+		kfree(rb->buffer.data);
+		kfree(rb);
+	}
+	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+	/* release allocated memory - pass over in_process_list */
+	spin_lock_irqsave(&cl->in_process_spinlock, flags);
+	while (!list_empty(&cl->in_process_list.list)) {
+		rb = list_entry(cl->in_process_list.list.next,
+				struct ishtp_cl_rb, list);
+		list_del(&rb->list);
+		kfree(rb->buffer.data);
+		kfree(rb);
+	}
+	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
+	return	0;
+}
+
+int	ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
+{
+	struct ishtp_cl_tx_ring	*tx_buf;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
+	/* release allocated memory - pass over tx_free_list */
+	while (!list_empty(&cl->tx_free_list.list)) {
+		tx_buf = list_entry(cl->tx_free_list.list.next,
+				    struct ishtp_cl_tx_ring, list);
+		list_del(&tx_buf->list);
+		kfree(tx_buf->send_buf.data);
+		kfree(tx_buf);
+	}
+	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
+
+	spin_lock_irqsave(&cl->tx_list_spinlock, flags);
+	/* release allocated memory - pass over tx_list */
+	while (!list_empty(&cl->tx_list.list)) {
+		tx_buf = list_entry(cl->tx_list.list.next,
+				    struct ishtp_cl_tx_ring, list);
+		list_del(&tx_buf->list);
+		kfree(tx_buf->send_buf.data);
+		kfree(tx_buf);
+	}
+	spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
+
+	return	0;
+}
+
+int	ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl)
+{
+	size_t	len = cl->device->fw_client->props.max_msg_length;
+	int	j;
+	struct ishtp_cl_rb *rb;
+	int	ret = 0;
+	unsigned long	flags;
+
+	for (j = 0; j < cl->rx_ring_size; ++j) {
+		rb = ishtp_io_rb_init(cl);
+		if (!rb) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		ret = ishtp_io_rb_alloc_buf(rb, len);
+		if (ret)
+			goto out;
+		spin_lock_irqsave(&cl->free_list_spinlock, flags);
+		list_add_tail(&rb->list, &cl->free_rb_list.list);
+		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+	}
+
+	return	0;
+
+out:
+	dev_err(&cl->device->dev, "error in allocating Rx buffers\n");
+	ishtp_cl_free_rx_ring(cl);
+	return	ret;
+}
+
+int	ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
+{
+	size_t	len = cl->device->fw_client->props.max_msg_length;
+	int	j;
+	unsigned long	flags;
+
+	/* Allocate pool to free Tx bufs */
+	for (j = 0; j < cl->tx_ring_size; ++j) {
+		struct ishtp_cl_tx_ring	*tx_buf;
+
+		tx_buf = kmalloc(sizeof(struct ishtp_cl_tx_ring), GFP_KERNEL);
+		if (!tx_buf)
+			goto	out;
+
+		memset(tx_buf, 0, sizeof(struct ishtp_cl_tx_ring));
+		tx_buf->send_buf.data = kmalloc(len, GFP_KERNEL);
+		if (!tx_buf->send_buf.data) {
+			kfree(tx_buf);
+			goto	out;
+		}
+
+		spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
+		list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
+		spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
+	}
+	return	0;
+out:
+	dev_err(&cl->device->dev, "error in allocating Tx pool\n");
+	ishtp_cl_free_rx_ring(cl);
+	return	-ENOMEM;
+}
+
+/**
+ * ishtp_cl_allocate - allocates cl structure and sets it up.
+ * returns the allocated client structure or NULL on failure
+ */
+struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev)
+{
+	struct ishtp_cl *cl;
+
+	cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
+	if (!cl)
+		return NULL;
+
+	ishtp_cl_init(cl, dev);
+	return cl;
+}
+EXPORT_SYMBOL(ishtp_cl_allocate);
+
+void	ishtp_cl_free(struct ishtp_cl *cl)
+{
+	struct ishtp_device *dev;
+	unsigned long flags;
+
+	if (!cl)
+		return;
+
+	dev = cl->dev;
+	if (!dev)
+		return;
+
+	spin_lock_irqsave(&dev->cl_list_lock, flags);
+	ishtp_cl_free_rx_ring(cl);
+	ishtp_cl_free_tx_ring(cl);
+	kfree(cl);
+	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+EXPORT_SYMBOL(ishtp_cl_free);
+
+/**
+ * ishtp_cl_find_read_rb - find this cl's read buffer (rb) in the read list
+ * returns rb on success, NULL on error
+ */
+struct ishtp_cl_rb *ishtp_cl_find_read_rb(struct ishtp_cl *cl)
+{
+	struct ishtp_device *dev = cl->dev;
+	struct ishtp_cl_rb *rb = NULL;
+	unsigned long	dev_flags;
+
+	spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
+	list_for_each_entry(rb, &dev->read_list.list, list)
+		if (ishtp_cl_cmp_id(cl, rb->cl)) {
+			spin_unlock_irqrestore(&dev->read_list_spinlock,
+				dev_flags);
+			return rb;
+		}
+	spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
+	return NULL;
+}
+
+/**
+ * ishtp_cl_link - allocate host id in the host map
+ * @id: fixed host id or ISHTP_HOST_CLIENT_ID_ANY (-1) to generate one
+ */
+int ishtp_cl_link(struct ishtp_cl *cl, int id)
+{
+	struct ishtp_device *dev;
+	unsigned long	flags, flags_cl;
+	int	ret = 0;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -EINVAL;
+
+	dev = cl->dev;
+
+	spin_lock_irqsave(&dev->device_lock, flags);
+
+	if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
+		ret = -EMFILE;
+		goto unlock_dev;
+	}
+
+	/* If Id is not assigned get one*/
+	if (id == ISHTP_HOST_CLIENT_ID_ANY)
+		id = find_first_zero_bit(dev->host_clients_map,
+			ISHTP_CLIENTS_MAX);
+
+	if (id >= ISHTP_CLIENTS_MAX) {
+		spin_unlock_irqrestore(&dev->device_lock, flags);
+		dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
+		return -ENOENT;
+	}
+
+	dev->open_handle_count++;
+	cl->host_client_id = id;
+	spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
+	if (dev->dev_state != ISHTP_DEV_ENABLED) {
+		ret = -ENODEV;
+		goto unlock_cl;
+	}
+	list_add_tail(&cl->link, &dev->cl_list);
+	set_bit(id, dev->host_clients_map);
+	cl->state = ISHTP_CL_INITIALIZING;
+
+unlock_cl:
+	spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
+unlock_dev:
+	spin_unlock_irqrestore(&dev->device_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(ishtp_cl_link);
+
+/* ishtp_cl_unlink - remove fw_cl from the list */
+int ishtp_cl_unlink(struct ishtp_cl *cl)
+{
+	struct ishtp_device *dev;
+	struct ishtp_cl *pos;
+	unsigned long	flags;
+
+	/* don't shout on error exit path */
+	if (!cl || !cl->dev)
+		return 0;
+
+	dev = cl->dev;
+
+	spin_lock_irqsave(&dev->device_lock, flags);
+	if (dev->open_handle_count > 0) {
+		clear_bit(cl->host_client_id, dev->host_clients_map);
+		dev->open_handle_count--;
+	}
+	spin_unlock_irqrestore(&dev->device_lock, flags);
+
+	/*
+	 * This checks that 'cl' is actually linked into device's structure,
+	 * before attempting 'list_del'
+	 */
+	spin_lock_irqsave(&dev->cl_list_lock, flags);
+	list_for_each_entry(pos, &dev->cl_list, link)
+		if (cl->host_client_id == pos->host_client_id) {
+			list_del_init(&pos->link);
+			break;
+		}
+	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(ishtp_cl_unlink);
+
+/* ishtp_cl_disconnect - disconnect host client from the fw one */
+int ishtp_cl_disconnect(struct ishtp_cl *cl)
+{
+	struct ishtp_device *dev;
+	int err;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -ENODEV;
+
+	dev = cl->dev;
+
+	if (cl->state != ISHTP_CL_DISCONNECTING)
+		return 0;
+
+	if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
+		dev_err(&cl->device->dev, "failed to disconnect.\n");
+		return -ENODEV;
+	}
+
+	err = wait_event_interruptible_timeout(cl->wait_ctrl_res,
+			(dev->dev_state != ISHTP_DEV_ENABLED ||
+			cl->state == ISHTP_CL_DISCONNECTED),
+			ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));
+
+	/*
+	 * If a FW reset arrived, this will happen. Don't check cl fields,
+	 * as 'cl' may already be freed
+	 */
+	if (dev->dev_state != ISHTP_DEV_ENABLED)
+		return -ENODEV;
+
+	if (cl->state == ISHTP_CL_DISCONNECTED)
+		return 0;
+
+	return -ENODEV;
+}
+
+/**
+ * ishtp_cl_is_other_connecting - check if another
+ * client with the same fw client id is connecting
+ * returns true if another client is connecting, false otherwise.
+ */
+bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
+{
+	struct ishtp_device *dev;
+	struct ishtp_cl *pos;
+	unsigned long	flags;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return false;
+
+	dev = cl->dev;
+	spin_lock_irqsave(&dev->cl_list_lock, flags);
+	list_for_each_entry(pos, &dev->cl_list, link) {
+		if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
+				cl->fw_client_id == pos->fw_client_id) {
+			spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+			return true;
+		}
+	}
+	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+
+	return false;
+}
+
+/* ishtp_cl_connect - connect host client to the fw one */
+int ishtp_cl_connect(struct ishtp_cl *cl)
+{
+	struct ishtp_device *dev;
+	int rets;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -ENODEV;
+
+	if (ishtp_cl_is_other_connecting(cl))
+		return	-EBUSY;
+
+	dev = cl->dev;
+
+	if (ishtp_hbm_cl_connect_req(dev, cl))
+		return -ENODEV;
+
+	rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
+				(dev->dev_state == ISHTP_DEV_ENABLED &&
+				(cl->state == ISHTP_CL_CONNECTED ||
+				 cl->state == ISHTP_CL_DISCONNECTED)),
+				ishtp_secs_to_jiffies(
+					ISHTP_CL_CONNECT_TIMEOUT));
+	/*
+	 * If a FW reset arrived, this will happen. Don't check cl fields,
+	 * as 'cl' may already be freed
+	 */
+	if (dev->dev_state != ISHTP_DEV_ENABLED)
+		return -EFAULT;
+
+	if (cl->state != ISHTP_CL_CONNECTED)
+		return -EFAULT;
+
+	rets = cl->status;
+	if (rets)
+		return rets;
+
+	rets = ishtp_cl_device_bind(cl);
+	if (rets) {
+		ishtp_cl_disconnect(cl);
+		return rets;
+	}
+
+	rets = ishtp_cl_alloc_rx_ring(cl);
+	if (rets) {
+		/* if failed allocation, disconnect */
+		ishtp_cl_disconnect(cl);
+		return rets;
+	}
+
+	rets = ishtp_cl_alloc_tx_ring(cl);
+	if (rets) {
+		/* if failed allocation, disconnect */
+		ishtp_cl_free_rx_ring(cl);
+		ishtp_cl_disconnect(cl);
+		return rets;
+	}
+
+	/* Upon successful connection and allocation, emit flow-control */
+	rets = ishtp_cl_read_start(cl);
+	return rets;
+}
+EXPORT_SYMBOL(ishtp_cl_connect);
+
+/* ishtp_cl_read_start - start to read client message */
+int ishtp_cl_read_start(struct ishtp_cl *cl)
+{
+	struct ishtp_device *dev;
+	struct ishtp_cl_rb *rb;
+	int rets;
+	int i;
+	unsigned long	flags;
+	unsigned long	dev_flags;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -ENODEV;
+
+	dev = cl->dev;
+
+	if (cl->state != ISHTP_CL_CONNECTED)
+		return -ENODEV;
+
+	if (dev->dev_state != ISHTP_DEV_ENABLED)
+		return -ENODEV;
+
+	i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
+	if (i < 0) {
+		dev_err(&cl->device->dev, "no such fw client %d\n",
+			cl->fw_client_id);
+		return -ENODEV;
+	}
+
+	/* The current rb is the head of the free rb list */
+	spin_lock_irqsave(&cl->free_list_spinlock, flags);
+	if (list_empty(&cl->free_rb_list.list)) {
+		dev_warn(&cl->device->dev,
+			 "[ishtp-ish] Rx buffers pool is empty\n");
+		rets = -ENOMEM;
+		rb = NULL;
+		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+		goto out;
+	}
+	rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
+	list_del_init(&rb->list);
+	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+
+	rb->cl = cl;
+	rb->buf_idx = 0;
+
+	INIT_LIST_HEAD(&rb->list);
+	rets = 0;
+
+	/*
+	 * This must be BEFORE sending flow control -
+	 * response in ISR may come too fast...
+	 */
+	spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
+	list_add_tail(&rb->list, &dev->read_list.list);
+	spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
+	if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
+		rets = -ENODEV;
+		goto out;
+	}
+out:
+	/* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
+	if (rets && rb) {
+		spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
+		list_del(&rb->list);
+		spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
+
+		spin_lock_irqsave(&cl->free_list_spinlock, flags);
+		list_add_tail(&rb->list, &cl->free_rb_list.list);
+		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+	}
+	return rets;
+}
+
+int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
+{
+	struct ishtp_device	*dev;
+	int	id;
+	struct ishtp_cl_tx_ring	*cl_msg;
+	int	have_msg_to_send = 0;
+	unsigned long	tx_flags, tx_free_flags;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -ENODEV;
+
+	dev = cl->dev;
+
+	if (cl->state != ISHTP_CL_CONNECTED) {
+		++cl->err_send_msg;
+		return -EPIPE;
+	}
+
+	if (dev->dev_state != ISHTP_DEV_ENABLED) {
+		++cl->err_send_msg;
+		return -ENODEV;
+	}
+
+	/* Check if we have fw client device */
+	id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
+	if (id < 0) {
+		++cl->err_send_msg;
+		return -ENOENT;
+	}
+
+	if (length > dev->fw_clients[id].props.max_msg_length) {
+		++cl->err_send_msg;
+		return -EMSGSIZE;
+	}
+
+	/* No free bufs */
+	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+	if (list_empty(&cl->tx_free_list.list)) {
+		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+			tx_free_flags);
+		++cl->err_send_msg;
+		return	-ENOMEM;
+	}
+
+	cl_msg = list_first_entry(&cl->tx_free_list.list,
+		struct ishtp_cl_tx_ring, list);
+	if (!cl_msg->send_buf.data) {
+		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+			tx_free_flags);
+		return	-EIO;
+		/* Should not happen, as free list is pre-allocated */
+	}
+	/*
+	 * This is safe, as 'length' is already checked for not exceeding
+	 * max ISHTP message size per client
+	 */
+	list_del_init(&cl_msg->list);
+	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
+	memcpy(cl_msg->send_buf.data, buf, length);
+	cl_msg->send_buf.size = length;
+	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+	have_msg_to_send = !list_empty(&cl->tx_list.list);
+	list_add_tail(&cl_msg->list, &cl->tx_list.list);
+	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+	if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
+		ishtp_cl_send_msg(dev, cl);
+
+	return	0;
+}
+EXPORT_SYMBOL(ishtp_cl_send);
+
+/* ishtp_cl_read_complete - processes completed operation for a client */
+void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
+{
+	unsigned long	flags;
+	int	schedule_work_flag = 0;
+	struct ishtp_cl	*cl = rb->cl;
+
+	spin_lock_irqsave(&cl->in_process_spinlock, flags);
+	/*
+	 * if in-process list is empty, then need to schedule
+	 * the processing thread
+	 */
+	schedule_work_flag = list_empty(&cl->in_process_list.list);
+	list_add_tail(&rb->list, &cl->in_process_list.list);
+	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
+
+	if (schedule_work_flag)
+		ishtp_cl_bus_rx_event(cl->device);
+}
+
+/* ishtp_cl_all_disconnect - disconnect forcefully all connected clients */
+void ishtp_cl_all_disconnect(struct ishtp_device *dev)
+{
+	struct ishtp_cl *cl;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&dev->cl_list_lock, flags);
+	list_for_each_entry(cl, &dev->cl_list, link) {
+		cl->state = ISHTP_CL_DISCONNECTED;
+		cl->ishtp_flow_ctrl_creds = 0;
+	}
+	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+
+static void	ipc_tx_callback(void *prm)
+{
+	struct ishtp_cl	*cl = prm;
+	struct ishtp_cl_tx_ring	*cl_msg;
+	size_t	rem;
+	struct ishtp_device	*dev = (cl ? cl->dev : NULL);
+	struct ishtp_msg_hdr	ishtp_hdr;
+	unsigned long	tx_flags, tx_free_flags;
+	unsigned char	*pmsg;
+
+	if (!dev)
+		return;
+
+	/*
+	 * Bail out if some critical error has
+	 * occurred before this callback is called
+	 */
+	if (dev->dev_state != ISHTP_DEV_ENABLED)
+		return;
+
+	if (cl->state != ISHTP_CL_CONNECTED)
+		return;
+
+	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+	if (list_empty(&cl->tx_list.list)) {
+		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+		return;
+	}
+
+	if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
+		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+		return;
+	}
+
+	if (!cl->sending) {
+		--cl->ishtp_flow_ctrl_creds;
+		cl->last_ipc_acked = 0;
+		cl->last_tx_path = CL_TX_PATH_IPC;
+		cl->sending = 1;
+	}
+
+	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
+			    list);
+	rem = cl_msg->send_buf.size - cl->tx_offs;
+
+	ishtp_hdr.host_addr = cl->host_client_id;
+	ishtp_hdr.fw_addr = cl->fw_client_id;
+	ishtp_hdr.reserved = 0;
+	pmsg = cl_msg->send_buf.data + cl->tx_offs;
+
+	if (rem <= dev->mtu) {
+		ishtp_hdr.length = rem;
+		ishtp_hdr.msg_complete = 1;
+		cl->sending = 0;
+		list_del_init(&cl_msg->list);	/* Must be before write */
+		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+		/* Submit to IPC queue with no callback */
+		ishtp_write_message(dev, &ishtp_hdr, pmsg);
+		spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+		list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+			tx_free_flags);
+	} else {
+		/* Send IPC fragment */
+		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+		cl->tx_offs += dev->mtu;
+		ishtp_hdr.length = dev->mtu;
+		ishtp_hdr.msg_complete = 0;
+		ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
+	}
+}
+
+static void	ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
+	struct ishtp_cl *cl)
+{
+	/* If last DMA message wasn't acked yet, leave this one in Tx queue */
+	if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
+		return;
+
+	cl->tx_offs = 0;
+	ipc_tx_callback(cl);
+	++cl->send_msg_cnt_ipc;
+}
+
+static void	ishtp_cl_send_msg_dma(struct ishtp_device *dev,
+	struct ishtp_cl *cl)
+{
+	struct ishtp_msg_hdr	hdr;
+	struct dma_xfer_hbm	dma_xfer;
+	unsigned char	*msg_addr;
+	int off;
+	struct ishtp_cl_tx_ring	*cl_msg;
+	unsigned long tx_flags, tx_free_flags;
+
+	/* If last IPC message wasn't acked yet, leave this one in Tx queue */
+	if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
+		return;
+
+	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+	if (list_empty(&cl->tx_list.list)) {
+		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+		return;
+	}
+
+	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
+		list);
+
+	msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
+	if (!msg_addr) {
+		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+		if (dev->transfer_path == CL_TX_PATH_DEFAULT)
+			ishtp_cl_send_msg_ipc(dev, cl);
+		return;
+	}
+
+	list_del_init(&cl_msg->list);	/* Must be before write */
+	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+	--cl->ishtp_flow_ctrl_creds;
+	cl->last_dma_acked = 0;
+	cl->last_dma_addr = msg_addr;
+	cl->last_tx_path = CL_TX_PATH_DMA;
+
+	/* write msg to dma buf */
+	memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);
+
+	/* send dma_xfer hbm msg */
+	off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
+	ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
+	dma_xfer.hbm = DMA_XFER;
+	dma_xfer.fw_client_id = cl->fw_client_id;
+	dma_xfer.host_client_id = cl->host_client_id;
+	dma_xfer.reserved = 0;
+	dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
+	dma_xfer.msg_length = cl_msg->send_buf.size;
+	dma_xfer.reserved2 = 0;
+	ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
+	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+	list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
+	++cl->send_msg_cnt_dma;
+}
+
+void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
+{
+	if (dev->transfer_path == CL_TX_PATH_DMA)
+		ishtp_cl_send_msg_dma(dev, cl);
+	else
+		ishtp_cl_send_msg_ipc(dev, cl);
+}
+EXPORT_SYMBOL(ishtp_cl_send_msg);
+
+/*
+ * Receive and dispatch ISHTP client messages
+ *
+ * (!) ISR context
+ */
+void	recv_ishtp_cl_msg(struct ishtp_device *dev,
+		struct ishtp_msg_hdr *ishtp_hdr)
+{
+	struct ishtp_cl *cl;
+	struct ishtp_cl_rb *rb;
+	struct ishtp_cl_rb *new_rb;
+	unsigned char *buffer = NULL;
+	struct ishtp_cl_rb *complete_rb = NULL;
+	unsigned long	dev_flags;
+	unsigned long	flags;
+	int	rb_count;
+
+	if (ishtp_hdr->reserved) {
+		dev_err(dev->devc, "corrupted message header.\n");
+		goto	eoi;
+	}
+
+	if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
+		dev_err(dev->devc,
+			"ISHTP message length in hdr exceeds IPC MTU\n");
+		goto	eoi;
+	}
+
+	spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
+	rb_count = -1;
+	list_for_each_entry(rb, &dev->read_list.list, list) {
+		++rb_count;
+		cl = rb->cl;
+		if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
+				cl->fw_client_id == ishtp_hdr->fw_addr) ||
+				!(cl->state == ISHTP_CL_CONNECTED))
+			continue;
+
+		 /* If no Rx buffer is allocated, disband the rb */
+		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
+			spin_unlock_irqrestore(&dev->read_list_spinlock,
+				dev_flags);
+			dev_err(&cl->device->dev,
+				"Rx buffer is not allocated.\n");
+			list_del(&rb->list);
+			ishtp_io_rb_free(rb);
+			cl->status = -ENOMEM;
+			goto	eoi;
+		}
+
+		/*
+		 * If the message buffer overflows (exceeds max. client msg
+		 * size), drop the message and recycle the buffer.
+		 * Do we need to disconnect such a client? (We don't send
+		 * back FC, so communication will be stuck anyway)
+		 */
+		if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
+			spin_unlock_irqrestore(&dev->read_list_spinlock,
+				dev_flags);
+			dev_err(&cl->device->dev,
+				"message overflow. size %d len %d idx %ld\n",
+				rb->buffer.size, ishtp_hdr->length,
+				rb->buf_idx);
+			list_del(&rb->list);
+			ishtp_io_rb_recycle(rb);
+			cl->status = -EIO;
+			goto	eoi;
+		}
+
+		buffer = rb->buffer.data + rb->buf_idx;
+		dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);
+
+		rb->buf_idx += ishtp_hdr->length;
+		if (ishtp_hdr->msg_complete) {
+			/* Last fragment in message - it's complete */
+			cl->status = 0;
+			list_del(&rb->list);
+			complete_rb = rb;
+
+			--cl->out_flow_ctrl_creds;
+			/*
+			 * the whole msg arrived, send a new FC, and add a new
+			 * rb buffer for the next coming msg
+			 */
+			spin_lock_irqsave(&cl->free_list_spinlock, flags);
+
+			if (!list_empty(&cl->free_rb_list.list)) {
+				new_rb = list_entry(cl->free_rb_list.list.next,
+					struct ishtp_cl_rb, list);
+				list_del_init(&new_rb->list);
+				spin_unlock_irqrestore(&cl->free_list_spinlock,
+					flags);
+				new_rb->cl = cl;
+				new_rb->buf_idx = 0;
+				INIT_LIST_HEAD(&new_rb->list);
+				list_add_tail(&new_rb->list,
+					&dev->read_list.list);
+
+				ishtp_hbm_cl_flow_control_req(dev, cl);
+			} else {
+				spin_unlock_irqrestore(&cl->free_list_spinlock,
+					flags);
+			}
+		}
+		/* One more fragment in message (even if this was last) */
+		++cl->recv_msg_num_frags;
+
+		/*
+		 * We can safely break here (and in BH too),
+		 * a single input message can go only to a single request!
+		 */
+		break;
+	}
+
+	spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
+	/* If it's nobody's message, just read and discard it */
+	if (!buffer) {
+		uint8_t	rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
+
+		dev_err(dev->devc, "Dropped Rx msg - no request\n");
+		dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
+		goto	eoi;
+	}
+
+	if (complete_rb) {
+		getnstimeofday(&cl->ts_rx);
+		++cl->recv_msg_cnt_ipc;
+		ishtp_cl_read_complete(complete_rb);
+	}
+eoi:
+	return;
+}
+
+/*
+ * Receive and dispatch ISHTP client dma message
+ *
+ * (!) ISR context
+ */
+void	recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
+	struct dma_xfer_hbm *hbm)
+{
+	struct ishtp_cl *cl;
+	struct ishtp_cl_rb *rb;
+	struct ishtp_cl_rb *new_rb;
+	unsigned char *buffer = NULL;
+	struct ishtp_cl_rb *complete_rb = NULL;
+	unsigned long	dev_flags;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
+	list_for_each_entry(rb, &dev->read_list.list, list) {
+		cl = rb->cl;
+		if (!cl || !(cl->host_client_id == hbm->host_client_id &&
+				cl->fw_client_id == hbm->fw_client_id) ||
+				!(cl->state == ISHTP_CL_CONNECTED))
+			continue;
+
+		/*
+		 * If no Rx buffer is allocated, disband the rb
+		 */
+		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
+			spin_unlock_irqrestore(&dev->read_list_spinlock,
+				dev_flags);
+			dev_err(&cl->device->dev,
+				"response buffer is not allocated.\n");
+			list_del(&rb->list);
+			ishtp_io_rb_free(rb);
+			cl->status = -ENOMEM;
+			goto	eoi;
+		}
+
+		/*
+		 * If the message buffer overflows (exceeds max. client msg
+		 * size), drop the message and return the buffer to the free list.
+		 * Do we need to disconnect such a client? (We don't send
+		 * back FC, so communication will be stuck anyway)
+		 */
+		if (rb->buffer.size < hbm->msg_length) {
+			spin_unlock_irqrestore(&dev->read_list_spinlock,
+				dev_flags);
+			dev_err(&cl->device->dev,
+				"message overflow. size %d len %d idx %ld\n",
+				rb->buffer.size, hbm->msg_length, rb->buf_idx);
+			list_del(&rb->list);
+			ishtp_io_rb_recycle(rb);
+			cl->status = -EIO;
+			goto	eoi;
+		}
+
+		buffer = rb->buffer.data;
+		memcpy(buffer, msg, hbm->msg_length);
+		rb->buf_idx = hbm->msg_length;
+
+		/* Last fragment in message - it's complete */
+		cl->status = 0;
+		list_del(&rb->list);
+		complete_rb = rb;
+
+		--cl->out_flow_ctrl_creds;
+		/*
+		 * the whole msg arrived, send a new FC, and add a new
+		 * rb buffer for the next coming msg
+		 */
+		spin_lock_irqsave(&cl->free_list_spinlock, flags);
+
+		if (!list_empty(&cl->free_rb_list.list)) {
+			new_rb = list_entry(cl->free_rb_list.list.next,
+				struct ishtp_cl_rb, list);
+			list_del_init(&new_rb->list);
+			spin_unlock_irqrestore(&cl->free_list_spinlock,
+				flags);
+			new_rb->cl = cl;
+			new_rb->buf_idx = 0;
+			INIT_LIST_HEAD(&new_rb->list);
+			list_add_tail(&new_rb->list,
+				&dev->read_list.list);
+
+			ishtp_hbm_cl_flow_control_req(dev, cl);
+		} else {
+			spin_unlock_irqrestore(&cl->free_list_spinlock,
+				flags);
+		}
+
+		/* One more fragment in message (this is always last) */
+		++cl->recv_msg_num_frags;
+
+		/*
+		 * We can safely break here (and in BH too),
+		 * a single input message can go only to a single request!
+		 */
+		break;
+	}
+
+	spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
+	/* If it's nobody's message, just read and discard it */
+	if (!buffer) {
+		dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
+		goto	eoi;
+	}
+
+	if (complete_rb) {
+		getnstimeofday(&cl->ts_rx);
+		++cl->recv_msg_cnt_dma;
+		ishtp_cl_read_complete(complete_rb);
+	}
+eoi:
+	return;
+}
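
A note for readers following the two Rx paths above: the outgoing flow-control
credit behaves as a one-deep permit granted to the firmware. A complete message
consumes it, and recycling a free Rx buffer re-arms it by sending a new FC (see
ishtp_hbm_cl_flow_control_req() in hbm.c further down, which returns early if a
credit is already outstanding). The following is only a self-contained userspace
sketch of that cycle; fc_model and its helpers are illustrative names, not part
of the patch.

#include <stdio.h>
#include <stdbool.h>

/* Toy model of the out_flow_ctrl_creds cycle used in the Rx paths above. */
struct fc_model {
	int out_flow_ctrl_creds;	/* 0 or 1 in single-FC mode */
	int free_rbs;			/* Rx buffers left on the free list */
};

/*
 * Mirrors ishtp_hbm_cl_flow_control_req() (see hbm.c below): an out FC is
 * sent only when no credit is currently outstanding.
 */
static bool send_flow_control(struct fc_model *m)
{
	if (m->out_flow_ctrl_creds)
		return false;		/* credit already granted to FW */
	m->out_flow_ctrl_creds++;
	return true;
}

/*
 * Mirrors the tail of recv_ishtp_cl_msg()/recv_ishtp_cl_msg_dma(): a complete
 * message consumes the credit; if a free Rx buffer exists, it is queued on
 * the device read list and a new FC re-arms the credit.
 */
static void on_message_complete(struct fc_model *m)
{
	m->out_flow_ctrl_creds--;
	if (m->free_rbs > 0) {
		m->free_rbs--;
		send_flow_control(m);
	}
}

int main(void)
{
	struct fc_model m = { .out_flow_ctrl_creds = 1, .free_rbs = 2 };

	on_message_complete(&m);
	printf("creds=%d free_rbs=%d\n", m.out_flow_ctrl_creds, m.free_rbs);
	return 0;
}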
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.h b/drivers/hid/intel-ish-hid/ishtp/client.h
new file mode 100644
index 0000000..b34a282
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/client.h
@@ -0,0 +1,194 @@
+/*
+ * ISHTP client logic
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ISHTP_CLIENT_H_
+#define _ISHTP_CLIENT_H_
+
+#include <linux/types.h>
+#include "ishtp-dev.h"
+
+/* Client state */
+enum cl_state {
+	ISHTP_CL_INITIALIZING = 0,
+	ISHTP_CL_CONNECTING,
+	ISHTP_CL_CONNECTED,
+	ISHTP_CL_DISCONNECTING,
+	ISHTP_CL_DISCONNECTED
+};
+
+/* Tx and Rx ring size */
+#define	CL_DEF_RX_RING_SIZE	2
+#define	CL_DEF_TX_RING_SIZE	2
+#define	CL_MAX_RX_RING_SIZE	32
+#define	CL_MAX_TX_RING_SIZE	32
+
+#define DMA_SLOT_SIZE		4096
+/* Number of IPC fragments after which it's worth sending via DMA */
+#define	DMA_WORTH_THRESHOLD	3
+
+/* DMA/IPC Tx paths. A value other than the default enforces that path */
+#define	CL_TX_PATH_DEFAULT	0
+#define	CL_TX_PATH_IPC		1
+#define	CL_TX_PATH_DMA		2
+
+/* Client Tx buffer list entry */
+struct ishtp_cl_tx_ring {
+	struct list_head	list;
+	struct ishtp_msg_data	send_buf;
+};
+
+/* ISHTP client instance */
+struct ishtp_cl {
+	struct list_head	link;
+	struct ishtp_device	*dev;
+	enum cl_state		state;
+	int			status;
+
+	/* Link to ISHTP bus device */
+	struct ishtp_cl_device	*device;
+
+	/* ID of client connected */
+	uint8_t	host_client_id;
+	uint8_t	fw_client_id;
+	uint8_t	ishtp_flow_ctrl_creds;
+	uint8_t	out_flow_ctrl_creds;
+
+	/* dma */
+	int	last_tx_path;
+	/* 0: ack wasn't received, 1: ack was received */
+	int	last_dma_acked;
+	unsigned char	*last_dma_addr;
+	/* 0: ack wasn't received, 1: ack was received */
+	int	last_ipc_acked;
+
+	/* Rx ring buffer pool */
+	unsigned int	rx_ring_size;
+	struct ishtp_cl_rb	free_rb_list;
+	spinlock_t	free_list_spinlock;
+	/* Rx in-process list */
+	struct ishtp_cl_rb	in_process_list;
+	spinlock_t	in_process_spinlock;
+
+	/* Client Tx buffers list */
+	unsigned int	tx_ring_size;
+	struct ishtp_cl_tx_ring	tx_list, tx_free_list;
+	spinlock_t	tx_list_spinlock;
+	spinlock_t	tx_free_list_spinlock;
+	size_t	tx_offs;	/* Offset in buffer at head of 'tx_list' */
+
+	/**
+	 * If we get an FC and the Tx list is not empty, we must know whether
+	 * we are in the middle of sending: if so, increase the FC counter;
+	 * otherwise, start sending the first msg in the list.
+	 * (!) This is for the counting-FC implementation only. With single-FC
+	 * the other party may NOT send an FC until it receives the complete
+	 * message.
+	 */
+	int	sending;
+
+	/* Send FC spinlock */
+	spinlock_t	fc_spinlock;
+
+	/* wait queue for connect and disconnect response from FW */
+	wait_queue_head_t	wait_ctrl_res;
+
+	/* Error stats */
+	unsigned int	err_send_msg;
+	unsigned int	err_send_fc;
+
+	/* Send/recv stats */
+	unsigned int	send_msg_cnt_ipc;
+	unsigned int	send_msg_cnt_dma;
+	unsigned int	recv_msg_cnt_ipc;
+	unsigned int	recv_msg_cnt_dma;
+	unsigned int	recv_msg_num_frags;
+	unsigned int	ishtp_flow_ctrl_cnt;
+	unsigned int	out_flow_ctrl_cnt;
+
+	/* Rx msg ... out FC timing */
+	struct timespec ts_rx;
+	struct timespec ts_out_fc;
+	struct timespec ts_max_fc_delay;
+	void *client_data;
+};
+
+int ishtp_can_client_connect(struct ishtp_device *ishtp_dev, uuid_le *uuid);
+int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const uuid_le *cuuid);
+int ishtp_fw_cl_by_id(struct ishtp_device *dev, uint8_t client_id);
+
+/*
+ * ISHTP IO Functions
+ */
+struct ishtp_cl_rb	*ishtp_io_rb_init(struct ishtp_cl *cl);
+void	ishtp_io_rb_free(struct ishtp_cl_rb *priv_rb);
+int	ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length);
+int	ishtp_io_rb_recycle(struct ishtp_cl_rb *rb);
+
+/*
+ * ISHTP Host Client Functions
+ */
+struct ishtp_cl	*ishtp_cl_allocate(struct ishtp_device *dev);
+void	ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev);
+void	ishtp_cl_free(struct ishtp_cl *cl);
+
+int	ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl);
+int	ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl);
+int	ishtp_cl_free_rx_ring(struct ishtp_cl *cl);
+int	ishtp_cl_free_tx_ring(struct ishtp_cl *cl);
+
+int	ishtp_cl_link(struct ishtp_cl *cl, int id);
+int	ishtp_cl_unlink(struct ishtp_cl *cl);
+
+int	ishtp_cl_flush_queues(struct ishtp_cl *cl);
+struct ishtp_cl_rb	*ishtp_cl_find_read_rb(struct ishtp_cl *cl);
+void	ishtp_read_list_flush(struct ishtp_cl *cl);
+
+/*
+ *  ISHTP input output function prototype
+ */
+bool	ishtp_cl_is_other_connecting(struct ishtp_cl *cl);
+int	ishtp_cl_disconnect(struct ishtp_cl *cl);
+int	ishtp_cl_connect(struct ishtp_cl *cl);
+int	ishtp_cl_read_start(struct ishtp_cl *cl);
+int	ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length);
+void	ishtp_cl_read_complete(struct ishtp_cl_rb *rb);
+void	ishtp_cl_all_disconnect(struct ishtp_device *dev);
+void	ishtp_cl_all_read_wakeup(struct ishtp_device *dev);
+void	ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl);
+void	recv_ishtp_cl_msg(struct ishtp_device *dev,
+			  struct ishtp_msg_hdr *ishtp_hdr);
+void	recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
+			      struct dma_xfer_hbm *hbm);
+void	ishtp_cl_alloc_dma_buf(struct ishtp_device *dev);
+void	ishtp_cl_free_dma_buf(struct ishtp_device *dev);
+void	*ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
+				   uint32_t size);
+void	ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
+				       void *msg_addr,
+				       uint8_t size);
+
+/**
+ * ishtp_cl_cmp_id - tells if two client structures have the same ID
+ * returns true - if the IDs are the same and both clients are not NULL
+ */
+static inline bool ishtp_cl_cmp_id(const struct ishtp_cl *cl1,
+				   const struct ishtp_cl *cl2)
+{
+	return cl1 && cl2 &&
+		(cl1->host_client_id == cl2->host_client_id) &&
+		(cl1->fw_client_id == cl2->fw_client_id);
+}
+
+#endif /* _ISHTP_CLIENT_H_ */
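
DMA_WORTH_THRESHOLD above hints at how a Tx path is chosen: a message that would
need several IPC fragments is cheaper to move over DMA. The actual policy lives
in the client Tx path, which is not in this hunk, so the sketch below is only an
assumed illustration of how such a threshold could be applied; pick_tx_path(),
ipc_mtu and dma_enabled are hypothetical names, while the macros are taken from
client.h.

#include <stdio.h>
#include <stddef.h>

#define DMA_WORTH_THRESHOLD	3	/* from client.h above */
#define CL_TX_PATH_IPC		1
#define CL_TX_PATH_DMA		2

/*
 * Hypothetical helper: choose IPC vs. DMA for a message of 'len' bytes,
 * given the IPC payload MTU and whether host DMA has been negotiated.
 */
static int pick_tx_path(size_t len, size_t ipc_mtu, int dma_enabled)
{
	/* number of IPC fragments this message would need */
	size_t frags = (len + ipc_mtu - 1) / ipc_mtu;

	if (dma_enabled && frags >= DMA_WORTH_THRESHOLD)
		return CL_TX_PATH_DMA;
	return CL_TX_PATH_IPC;
}

int main(void)
{
	printf("120 bytes  -> %s\n",
	       pick_tx_path(120, 128, 1) == CL_TX_PATH_DMA ? "DMA" : "IPC");
	printf("1000 bytes -> %s\n",
	       pick_tx_path(1000, 128, 1) == CL_TX_PATH_DMA ? "DMA" : "IPC");
	return 0;
}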
diff --git a/drivers/hid/intel-ish-hid/ishtp/dma-if.c b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
new file mode 100644
index 0000000..0fed05d
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
@@ -0,0 +1,178 @@
+/*
+ * ISHTP DMA I/F functions
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include "ishtp-dev.h"
+#include "client.h"
+
+/**
+ * ishtp_cl_alloc_dma_buf() - Allocate DMA RX and TX buffer
+ *
+ * @dev: ishtp device
+ *
+ * Allocate the Rx and Tx DMA buffers once during bus setup.
+ * It allocates 1 MB for each of the Rx and Tx buffers, which
+ * are divided into slots.
+ */
+void	ishtp_cl_alloc_dma_buf(struct ishtp_device *dev)
+{
+	dma_addr_t	h;
+
+	if (dev->ishtp_host_dma_tx_buf)
+		return;
+
+	dev->ishtp_host_dma_tx_buf_size = 1024*1024;
+	dev->ishtp_host_dma_rx_buf_size = 1024*1024;
+
+	/* Allocate Tx buffer and init usage bitmap */
+	dev->ishtp_host_dma_tx_buf = dma_alloc_coherent(dev->devc,
+					dev->ishtp_host_dma_tx_buf_size,
+					&h, GFP_KERNEL);
+	if (dev->ishtp_host_dma_tx_buf)
+		dev->ishtp_host_dma_tx_buf_phys = h;
+
+	dev->ishtp_dma_num_slots = dev->ishtp_host_dma_tx_buf_size /
+						DMA_SLOT_SIZE;
+
+	dev->ishtp_dma_tx_map = kcalloc(dev->ishtp_dma_num_slots,
+					sizeof(uint8_t),
+					GFP_KERNEL);
+	spin_lock_init(&dev->ishtp_dma_tx_lock);
+
+	/* Allocate Rx buffer */
+	dev->ishtp_host_dma_rx_buf = dma_alloc_coherent(dev->devc,
+					dev->ishtp_host_dma_rx_buf_size,
+					 &h, GFP_KERNEL);
+
+	if (dev->ishtp_host_dma_rx_buf)
+		dev->ishtp_host_dma_rx_buf_phys = h;
+}
+
+/**
+ * ishtp_cl_free_dma_buf() - Free DMA RX and TX buffer
+ *
+ * @dev: ishtp device
+ *
+ * Free the DMA buffers when all clients are released. This only
+ * happens in the error path of the ISH built-in driver model.
+ */
+void	ishtp_cl_free_dma_buf(struct ishtp_device *dev)
+{
+	dma_addr_t	h;
+
+	if (dev->ishtp_host_dma_tx_buf) {
+		h = dev->ishtp_host_dma_tx_buf_phys;
+		dma_free_coherent(dev->devc, dev->ishtp_host_dma_tx_buf_size,
+				  dev->ishtp_host_dma_tx_buf, h);
+	}
+
+	if (dev->ishtp_host_dma_rx_buf) {
+		h = dev->ishtp_host_dma_rx_buf_phys;
+		dma_free_coherent(dev->devc, dev->ishtp_host_dma_rx_buf_size,
+				  dev->ishtp_host_dma_rx_buf, h);
+	}
+
+	kfree(dev->ishtp_dma_tx_map);
+	dev->ishtp_host_dma_tx_buf = NULL;
+	dev->ishtp_host_dma_rx_buf = NULL;
+	dev->ishtp_dma_tx_map = NULL;
+}
+
+/*
+ * ishtp_cl_get_dma_send_buf() - Get a DMA memory slot
+ *
+ * @dev:	ishtp device
+ * @size:	Size of memory to get
+ *
+ * Find and return a free address of "size" bytes in the DMA Tx buffer.
+ * The function marks this address as in-use.
+ * Returns NULL when there is no free buffer.
+ */
+void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
+				uint32_t size)
+{
+	unsigned long	flags;
+	int i, j, free;
+	/* an additional slot is needed if there is a remainder */
+	int required_slots = (size / DMA_SLOT_SIZE)
+		+ 1 * (size % DMA_SLOT_SIZE != 0);
+
+	spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
+	for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
+		free = 1;
+		for (j = 0; j < required_slots; j++)
+			if (dev->ishtp_dma_tx_map[i+j]) {
+				free = 0;
+				i += j;
+				break;
+			}
+		if (free) {
+			/* mark the slots as used */
+			for (j = 0; j < required_slots; j++)
+				dev->ishtp_dma_tx_map[i+j] = 1;
+			spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+			return (i * DMA_SLOT_SIZE) +
+				(unsigned char *)dev->ishtp_host_dma_tx_buf;
+		}
+	}
+	spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+	dev_err(dev->devc, "No free DMA buffer to send msg\n");
+	return NULL;
+}
+
+/*
+ * ishtp_cl_release_dma_acked_mem() - Release DMA memory slot
+ *
+ * @dev:	ishtp device
+ * @msg_addr:	message address of slot
+ * @size:	Size of memory to get
+ *
+ * Returns the acked memory (size bytes starting at msg_addr) to the
+ * free list.
+ */
+void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
+				    void *msg_addr,
+				    uint8_t size)
+{
+	unsigned long	flags;
+	int acked_slots = (size / DMA_SLOT_SIZE)
+		+ 1 * (size % DMA_SLOT_SIZE != 0);
+	int i, j;
+
+	if ((msg_addr - dev->ishtp_host_dma_tx_buf) % DMA_SLOT_SIZE) {
+		dev_err(dev->devc, "Bad DMA Tx ack address\n");
+		return;
+	}
+
+	i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
+	spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
+	for (j = 0; j < acked_slots; j++) {
+		if ((i + j) >= dev->ishtp_dma_num_slots ||
+					!dev->ishtp_dma_tx_map[i+j]) {
+			/* no such slot, or memory is already free */
+			spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+			dev_err(dev->devc, "Bad DMA Tx ack address\n");
+			return;
+		}
+		dev->ishtp_dma_tx_map[i+j] = 0;
+	}
+	spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+}
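
To make the slot accounting above concrete: the Tx DMA buffer is carved into
4 KB slots tracked by a byte map; a send grabs ceil(size / DMA_SLOT_SIZE)
consecutive free slots, and the ack path clears the same slots. Below is a
standalone userspace rendering of the first-fit search in
ishtp_cl_get_dma_send_buf(); it is illustrative only, with NUM_SLOTS shrunk
from the driver's 256 for the demo.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define DMA_SLOT_SIZE	4096
#define NUM_SLOTS	8	/* driver uses 1 MB / 4 KB = 256 slots */

static uint8_t tx_map[NUM_SLOTS];	/* 0 - free, 1 - used */

/* First-fit search over the slot map, as in ishtp_cl_get_dma_send_buf() */
static int get_slots(uint32_t size)
{
	int required = size / DMA_SLOT_SIZE + (size % DMA_SLOT_SIZE != 0);
	int i, j;

	for (i = 0; i <= NUM_SLOTS - required; i++) {
		int free = 1;

		for (j = 0; j < required; j++)
			if (tx_map[i + j]) {
				free = 0;
				i += j;
				break;
			}
		if (free) {
			for (j = 0; j < required; j++)
				tx_map[i + j] = 1;	/* mark as used */
			return i;			/* first slot index */
		}
	}
	return -1;	/* no contiguous run of free slots */
}

int main(void)
{
	memset(tx_map, 0, sizeof(tx_map));
	printf("9000 bytes -> slot %d\n", get_slots(9000));	/* 3 slots */
	printf("4096 bytes -> slot %d\n", get_slots(4096));	/* 1 slot */
	return 0;
}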
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
new file mode 100644
index 0000000..662bfaa
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -0,0 +1,911 @@
+/*
+ * ISHTP bus layer messages handling
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/miscdevice.h>
+#include "ishtp-dev.h"
+#include "hbm.h"
+#include "client.h"
+
+/* ishtp_hbm_fw_cl_allocate - allocates storage for fw clients */
+static void ishtp_hbm_fw_cl_allocate(struct ishtp_device *dev)
+{
+	struct ishtp_fw_client *clients;
+	int b;
+
+	/* count how many ISH clients we have */
+	for_each_set_bit(b, dev->fw_clients_map, ISHTP_CLIENTS_MAX)
+		dev->fw_clients_num++;
+
+	if (dev->fw_clients_num <= 0)
+		return;
+
+	/* allocate storage for fw clients representation */
+	clients = kcalloc(dev->fw_clients_num, sizeof(struct ishtp_fw_client),
+			  GFP_KERNEL);
+	if (!clients) {
+		dev->dev_state = ISHTP_DEV_RESETTING;
+		ish_hw_reset(dev);
+		return;
+	}
+	dev->fw_clients = clients;
+}
+
+/**
+ * ishtp_hbm_cl_hdr - construct client hbm header
+ * @cl: client
+ * @hbm_cmd: host bus message command
+ * @buf: buffer for cl header
+ * @len: buffer length
+ */
+static inline void ishtp_hbm_cl_hdr(struct ishtp_cl *cl, uint8_t hbm_cmd,
+	void *buf, size_t len)
+{
+	struct ishtp_hbm_cl_cmd *cmd = buf;
+
+	memset(cmd, 0, len);
+
+	cmd->hbm_cmd = hbm_cmd;
+	cmd->host_addr = cl->host_client_id;
+	cmd->fw_addr = cl->fw_client_id;
+}
+
+/* ishtp_hbm_cl_addr_equal - tells if they have the same address */
+static inline bool ishtp_hbm_cl_addr_equal(struct ishtp_cl *cl, void *buf)
+{
+	struct ishtp_hbm_cl_cmd *cmd = buf;
+
+	return cl->host_client_id == cmd->host_addr &&
+		cl->fw_client_id == cmd->fw_addr;
+}
+
+int ishtp_hbm_start_wait(struct ishtp_device *dev)
+{
+	int ret;
+
+	if (dev->hbm_state > ISHTP_HBM_START)
+		return 0;
+
+	dev_dbg(dev->devc, "Going to wait for ishtp start. hbm_state=%08X\n",
+		dev->hbm_state);
+	ret = wait_event_interruptible_timeout(dev->wait_hbm_recvd_msg,
+					dev->hbm_state >= ISHTP_HBM_STARTED,
+					(ISHTP_INTEROP_TIMEOUT * HZ));
+
+	dev_dbg(dev->devc,
+		"Woke up from waiting for ishtp start. hbm_state=%08X\n",
+		dev->hbm_state);
+
+	if (ret <= 0 && (dev->hbm_state <= ISHTP_HBM_START)) {
+		dev->hbm_state = ISHTP_HBM_IDLE;
+		dev_err(dev->devc,
+		"waiting for ishtp start failed. ret=%d hbm_state=%08X\n",
+			ret, dev->hbm_state);
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+/* ishtp_hbm_start_req - sends start request message */
+int ishtp_hbm_start_req(struct ishtp_device *dev)
+{
+	struct ishtp_msg_hdr hdr;
+	unsigned char data[128];
+	struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+	struct hbm_host_version_request *start_req;
+	const size_t len = sizeof(struct hbm_host_version_request);
+
+	ishtp_hbm_hdr(ishtp_hdr, len);
+
+	/* host start message */
+	start_req = (struct hbm_host_version_request *)data;
+	memset(start_req, 0, len);
+	start_req->hbm_cmd = HOST_START_REQ_CMD;
+	start_req->host_version.major_version = HBM_MAJOR_VERSION;
+	start_req->host_version.minor_version = HBM_MINOR_VERSION;
+
+	/*
+	 * (!) Response to HBM start may be so quick that this thread would get
+	 * preempted BEFORE managing to set hbm_state = ISHTP_HBM_START.
+	 * So set it at first, change back to ISHTP_HBM_IDLE upon failure
+	 */
+	dev->hbm_state = ISHTP_HBM_START;
+	if (ishtp_write_message(dev, ishtp_hdr, data)) {
+		dev_err(dev->devc, "version message send failed\n");
+		dev->dev_state = ISHTP_DEV_RESETTING;
+		dev->hbm_state = ISHTP_HBM_IDLE;
+		ish_hw_reset(dev);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ishtp_hbm_start_req);
+
+/* ishtp_hbm_enum_clients_req - sends enumeration client request message */
+void ishtp_hbm_enum_clients_req(struct ishtp_device *dev)
+{
+	struct ishtp_msg_hdr hdr;
+	unsigned char data[128];
+	struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+	struct hbm_host_enum_request *enum_req;
+	const size_t len = sizeof(struct hbm_host_enum_request);
+
+	/* enumerate clients */
+	ishtp_hbm_hdr(ishtp_hdr, len);
+
+	enum_req = (struct hbm_host_enum_request *)data;
+	memset(enum_req, 0, len);
+	enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
+
+	if (ishtp_write_message(dev, ishtp_hdr, data)) {
+		dev->dev_state = ISHTP_DEV_RESETTING;
+		dev_err(dev->devc, "enumeration request send failed\n");
+		ish_hw_reset(dev);
+	}
+	dev->hbm_state = ISHTP_HBM_ENUM_CLIENTS;
+}
+
+/* ishtp_hbm_prop_req - request properties for a single client */
+static int ishtp_hbm_prop_req(struct ishtp_device *dev)
+{
+
+	struct ishtp_msg_hdr hdr;
+	unsigned char data[128];
+	struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+	struct hbm_props_request *prop_req;
+	const size_t len = sizeof(struct hbm_props_request);
+	unsigned long next_client_index;
+	uint8_t client_num;
+
+	client_num = dev->fw_client_presentation_num;
+
+	next_client_index = find_next_bit(dev->fw_clients_map,
+		ISHTP_CLIENTS_MAX, dev->fw_client_index);
+
+	/* We got all client properties */
+	if (next_client_index == ISHTP_CLIENTS_MAX) {
+		dev->hbm_state = ISHTP_HBM_WORKING;
+		dev->dev_state = ISHTP_DEV_ENABLED;
+
+		for (dev->fw_client_presentation_num = 1;
+			dev->fw_client_presentation_num < client_num + 1;
+				++dev->fw_client_presentation_num)
+			/* Add new client device */
+			ishtp_bus_new_client(dev);
+		return 0;
+	}
+
+	dev->fw_clients[client_num].client_id = next_client_index;
+
+	ishtp_hbm_hdr(ishtp_hdr, len);
+	prop_req = (struct hbm_props_request *)data;
+
+	memset(prop_req, 0, sizeof(struct hbm_props_request));
+
+	prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+	prop_req->address = next_client_index;
+
+	if (ishtp_write_message(dev, ishtp_hdr, data)) {
+		dev->dev_state = ISHTP_DEV_RESETTING;
+		dev_err(dev->devc, "properties request send failed\n");
+		ish_hw_reset(dev);
+		return -EIO;
+	}
+
+	dev->fw_client_index = next_client_index;
+
+	return 0;
+}
+
+/* ishtp_hbm_stop_req - send stop request message */
+static void ishtp_hbm_stop_req(struct ishtp_device *dev)
+{
+	struct ishtp_msg_hdr hdr;
+	unsigned char data[128];
+	struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+	struct hbm_host_stop_request *req;
+	const size_t len = sizeof(struct hbm_host_stop_request);
+
+	ishtp_hbm_hdr(ishtp_hdr, len);
+	req = (struct hbm_host_stop_request *)data;
+
+	memset(req, 0, sizeof(struct hbm_host_stop_request));
+	req->hbm_cmd = HOST_STOP_REQ_CMD;
+	req->reason = DRIVER_STOP_REQUEST;
+
+	ishtp_write_message(dev, ishtp_hdr, data);
+}
+
+/* ishtp_hbm_cl_flow_control_req - sends flow control request */
+int ishtp_hbm_cl_flow_control_req(struct ishtp_device *dev,
+				  struct ishtp_cl *cl)
+{
+	struct ishtp_msg_hdr hdr;
+	unsigned char data[128];
+	struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+	const size_t len = sizeof(struct hbm_flow_control);
+	int	rv;
+	unsigned int	num_frags;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&cl->fc_spinlock, flags);
+	ishtp_hbm_hdr(ishtp_hdr, len);
+	ishtp_hbm_cl_hdr(cl, ISHTP_FLOW_CONTROL_CMD, data, len);
+
+	/*
+	 * Sync possible race when RB recycle and packet receive paths
+	 * both try to send an out FC
+	 */
+	if (cl->out_flow_ctrl_creds) {
+		spin_unlock_irqrestore(&cl->fc_spinlock, flags);
+		return	0;
+	}
+
+	num_frags = cl->recv_msg_num_frags;
+	cl->recv_msg_num_frags = 0;
+
+	rv = ishtp_write_message(dev, ishtp_hdr, data);
+	if (!rv) {
+		++cl->out_flow_ctrl_creds;
+		++cl->out_flow_ctrl_cnt;
+		getnstimeofday(&cl->ts_out_fc);
+		if (cl->ts_rx.tv_sec && cl->ts_rx.tv_nsec) {
+			struct timespec ts_diff;
+
+			ts_diff = timespec_sub(cl->ts_out_fc, cl->ts_rx);
+			if (timespec_compare(&ts_diff, &cl->ts_max_fc_delay)
+					> 0)
+				cl->ts_max_fc_delay = ts_diff;
+		}
+	} else {
+		++cl->err_send_fc;
+	}
+
+	spin_unlock_irqrestore(&cl->fc_spinlock, flags);
+	return	rv;
+}
+EXPORT_SYMBOL(ishtp_hbm_cl_flow_control_req);
+
+/* ishtp_hbm_cl_disconnect_req - sends disconnect message to fw */
+int ishtp_hbm_cl_disconnect_req(struct ishtp_device *dev, struct ishtp_cl *cl)
+{
+	struct ishtp_msg_hdr hdr;
+	unsigned char data[128];
+	struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+	const size_t len = sizeof(struct hbm_client_connect_request);
+
+	ishtp_hbm_hdr(ishtp_hdr, len);
+	ishtp_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, data, len);
+
+	return ishtp_write_message(dev, ishtp_hdr, data);
+}
+
+/*
+ * ishtp_hbm_cl_disconnect_res - disconnect response from FW
+ *
+ * @rs: disconnect response bus message
+ */
+static void ishtp_hbm_cl_disconnect_res(struct ishtp_device *dev,
+	struct hbm_client_connect_response *rs)
+{
+	struct ishtp_cl *cl = NULL;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&dev->cl_list_lock, flags);
+	list_for_each_entry(cl, &dev->cl_list, link) {
+		if (!rs->status && ishtp_hbm_cl_addr_equal(cl, rs)) {
+			cl->state = ISHTP_CL_DISCONNECTED;
+			break;
+		}
+	}
+	if (cl)
+		wake_up_interruptible(&cl->wait_ctrl_res);
+	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+
+/* ishtp_hbm_cl_connect_req - send connection request to specific fw client */
+int ishtp_hbm_cl_connect_req(struct ishtp_device *dev, struct ishtp_cl *cl)
+{
+	struct ishtp_msg_hdr hdr;
+	unsigned char data[128];
+	struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+	const size_t len = sizeof(struct hbm_client_connect_request);
+
+	ishtp_hbm_hdr(ishtp_hdr, len);
+	ishtp_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, data, len);
+
+	return ishtp_write_message(dev, ishtp_hdr, data);
+}
+EXPORT_SYMBOL(ishtp_hbm_cl_connect_req);
+
+/**
+ * ishtp_hbm_cl_connect_res - connect response from the FW
+ *
+ * @rs: connect response bus message
+ */
+static void ishtp_hbm_cl_connect_res(struct ishtp_device *dev,
+	struct hbm_client_connect_response *rs)
+{
+	struct ishtp_cl *cl = NULL;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&dev->cl_list_lock, flags);
+	list_for_each_entry(cl, &dev->cl_list, link) {
+		if (ishtp_hbm_cl_addr_equal(cl, rs)) {
+			if (!rs->status) {
+				cl->state = ISHTP_CL_CONNECTED;
+				cl->status = 0;
+			} else {
+				cl->state = ISHTP_CL_DISCONNECTED;
+				cl->status = -ENODEV;
+			}
+			break;
+		}
+	}
+	if (cl)
+		wake_up_interruptible(&cl->wait_ctrl_res);
+	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+
+/**
+ * ishtp_hbm_fw_disconnect_req - disconnect request initiated by the fw;
+ * the host sends the disconnect response
+ *
+ * @dev: the device structure.
+ * @disconnect_req: disconnect request bus message from the fw
+ */
+static void ishtp_hbm_fw_disconnect_req(struct ishtp_device *dev,
+	struct hbm_client_connect_request *disconnect_req)
+{
+	struct ishtp_cl *cl;
+	const size_t len = sizeof(struct hbm_client_connect_response);
+	unsigned long	flags;
+	struct ishtp_msg_hdr hdr;
+	unsigned char data[4];	/* All HBM messages are 4 bytes */
+
+	spin_lock_irqsave(&dev->cl_list_lock, flags);
+	list_for_each_entry(cl, &dev->cl_list, link) {
+		if (ishtp_hbm_cl_addr_equal(cl, disconnect_req)) {
+			cl->state = ISHTP_CL_DISCONNECTED;
+
+			/* send disconnect response */
+			ishtp_hbm_hdr(&hdr, len);
+			ishtp_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, data,
+				len);
+			ishtp_write_message(dev, &hdr, data);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+
+/*
+ * ishtp_hbm_dma_xfer_ack - receive ack for ISHTP-over-DMA client message
+ *
+ * Constraint:
+ * First implementation is one ISHTP message per DMA transfer
+ */
+void ishtp_hbm_dma_xfer_ack(struct ishtp_device *dev,
+	struct dma_xfer_hbm *dma_xfer)
+{
+	void	*msg;
+	uint64_t	offs;
+	struct ishtp_msg_hdr	*ishtp_hdr =
+		(struct ishtp_msg_hdr *)&dev->ishtp_msg_hdr;
+	unsigned int	msg_offs;
+	struct ishtp_cl *cl;
+
+	for (msg_offs = 0; msg_offs < ishtp_hdr->length;
+		msg_offs += sizeof(struct dma_xfer_hbm)) {
+		offs = dma_xfer->msg_addr - dev->ishtp_host_dma_tx_buf_phys;
+		if (offs > dev->ishtp_host_dma_tx_buf_size) {
+			dev_err(dev->devc, "Bad DMA Tx ack message address\n");
+			return;
+		}
+		if (dma_xfer->msg_length >
+				dev->ishtp_host_dma_tx_buf_size - offs) {
+			dev_err(dev->devc, "Bad DMA Tx ack message size\n");
+			return;
+		}
+
+		/* logical address of the acked mem */
+		msg = (unsigned char *)dev->ishtp_host_dma_tx_buf + offs;
+		ishtp_cl_release_dma_acked_mem(dev, msg, dma_xfer->msg_length);
+
+		list_for_each_entry(cl, &dev->cl_list, link) {
+			if (cl->fw_client_id == dma_xfer->fw_client_id &&
+			    cl->host_client_id == dma_xfer->host_client_id)
+				/*
+				 * A single ack may cover several dma
+				 * transfers; the last msg addr may be
+				 * inside the acked memory, but not at
+				 * its start.
+				 */
+				if (cl->last_dma_addr >=
+							(unsigned char *)msg &&
+						cl->last_dma_addr <
+						(unsigned char *)msg +
+						dma_xfer->msg_length) {
+					cl->last_dma_acked = 1;
+
+					if (!list_empty(&cl->tx_list.list) &&
+						cl->ishtp_flow_ctrl_creds) {
+						/*
+						 * start sending the first msg
+						 */
+						ishtp_cl_send_msg(dev, cl);
+					}
+				}
+		}
+		++dma_xfer;
+	}
+}
+
+/* ishtp_hbm_dma_xfer - receive ISHTP-over-DMA client message */
+void ishtp_hbm_dma_xfer(struct ishtp_device *dev,
+			struct dma_xfer_hbm *dma_xfer)
+{
+	void	*msg;
+	uint64_t	offs;
+	struct ishtp_msg_hdr	hdr;
+	struct ishtp_msg_hdr	*ishtp_hdr =
+		(struct ishtp_msg_hdr *) &dev->ishtp_msg_hdr;
+	struct dma_xfer_hbm	*prm = dma_xfer;
+	unsigned int	msg_offs;
+
+	for (msg_offs = 0; msg_offs < ishtp_hdr->length;
+		msg_offs += sizeof(struct dma_xfer_hbm)) {
+
+		offs = dma_xfer->msg_addr - dev->ishtp_host_dma_rx_buf_phys;
+		if (offs > dev->ishtp_host_dma_rx_buf_size) {
+			dev_err(dev->devc, "Bad DMA Rx message address\n");
+			return;
+		}
+		if (dma_xfer->msg_length >
+				dev->ishtp_host_dma_rx_buf_size - offs) {
+			dev_err(dev->devc, "Bad DMA Rx message size\n");
+			return;
+		}
+		msg = dev->ishtp_host_dma_rx_buf + offs;
+		recv_ishtp_cl_msg_dma(dev, msg, dma_xfer);
+		dma_xfer->hbm = DMA_XFER_ACK;	/* Prepare for response */
+		++dma_xfer;
+	}
+
+	/* Send DMA_XFER_ACK [...] */
+	ishtp_hbm_hdr(&hdr, ishtp_hdr->length);
+	ishtp_write_message(dev, &hdr, (unsigned char *)prm);
+}
+
+/*
+ * ishtp_hbm_dispatch - bottom-half read routine, called after the ISR,
+ * to handle read bus message cmd processing.
+ */
+void ishtp_hbm_dispatch(struct ishtp_device *dev,
+			struct ishtp_bus_message *hdr)
+{
+	struct ishtp_bus_message *ishtp_msg;
+	struct ishtp_fw_client *fw_client;
+	struct hbm_host_version_response *version_res;
+	struct hbm_client_connect_response *connect_res;
+	struct hbm_client_connect_response *disconnect_res;
+	struct hbm_client_connect_request *disconnect_req;
+	struct hbm_props_response *props_res;
+	struct hbm_host_enum_response *enum_res;
+	struct ishtp_msg_hdr ishtp_hdr;
+	struct dma_alloc_notify	dma_alloc_notify;
+	struct dma_xfer_hbm	*dma_xfer;
+
+	ishtp_msg = hdr;
+
+	switch (ishtp_msg->hbm_cmd) {
+	case HOST_START_RES_CMD:
+		version_res = (struct hbm_host_version_response *)ishtp_msg;
+		if (!version_res->host_version_supported) {
+			dev->version = version_res->fw_max_version;
+
+			dev->hbm_state = ISHTP_HBM_STOPPED;
+			ishtp_hbm_stop_req(dev);
+			return;
+		}
+
+		dev->version.major_version = HBM_MAJOR_VERSION;
+		dev->version.minor_version = HBM_MINOR_VERSION;
+		if (dev->dev_state == ISHTP_DEV_INIT_CLIENTS &&
+				dev->hbm_state == ISHTP_HBM_START) {
+			dev->hbm_state = ISHTP_HBM_STARTED;
+			ishtp_hbm_enum_clients_req(dev);
+		} else {
+			dev_err(dev->devc,
+				"reset: wrong host start response\n");
+			/* BUG: why do we arrive here? */
+			ish_hw_reset(dev);
+			return;
+		}
+
+		wake_up_interruptible(&dev->wait_hbm_recvd_msg);
+		break;
+
+	case CLIENT_CONNECT_RES_CMD:
+		connect_res = (struct hbm_client_connect_response *)ishtp_msg;
+		ishtp_hbm_cl_connect_res(dev, connect_res);
+		break;
+
+	case CLIENT_DISCONNECT_RES_CMD:
+		disconnect_res =
+			(struct hbm_client_connect_response *)ishtp_msg;
+		ishtp_hbm_cl_disconnect_res(dev, disconnect_res);
+		break;
+
+	case HOST_CLIENT_PROPERTIES_RES_CMD:
+		props_res = (struct hbm_props_response *)ishtp_msg;
+		fw_client = &dev->fw_clients[dev->fw_client_presentation_num];
+
+		if (props_res->status || !dev->fw_clients) {
+			dev_err(dev->devc,
+			"reset: properties response hbm wrong status\n");
+			ish_hw_reset(dev);
+			return;
+		}
+
+		if (fw_client->client_id != props_res->address) {
+			dev_err(dev->devc,
+				"reset: host properties response address "
+				"mismatch [%02X %02X]\n",
+				fw_client->client_id, props_res->address);
+			ish_hw_reset(dev);
+			return;
+		}
+
+		if (dev->dev_state != ISHTP_DEV_INIT_CLIENTS ||
+			dev->hbm_state != ISHTP_HBM_CLIENT_PROPERTIES) {
+			dev_err(dev->devc,
+				"reset: unexpected properties response\n");
+			ish_hw_reset(dev);
+			return;
+		}
+
+		fw_client->props = props_res->client_properties;
+		dev->fw_client_index++;
+		dev->fw_client_presentation_num++;
+
+		/* request property for the next client */
+		ishtp_hbm_prop_req(dev);
+
+		if (dev->dev_state != ISHTP_DEV_ENABLED)
+			break;
+
+		ishtp_cl_alloc_dma_buf(dev);
+		if (dev->ishtp_host_dma_rx_buf) {
+			const size_t len = sizeof(dma_alloc_notify);
+
+			memset(&dma_alloc_notify, 0, sizeof(dma_alloc_notify));
+			dma_alloc_notify.hbm = DMA_BUFFER_ALLOC_NOTIFY;
+			dma_alloc_notify.buf_size =
+					dev->ishtp_host_dma_rx_buf_size;
+			dma_alloc_notify.buf_address =
+					dev->ishtp_host_dma_rx_buf_phys;
+			ishtp_hbm_hdr(&ishtp_hdr, len);
+			ishtp_write_message(dev, &ishtp_hdr,
+				(unsigned char *)&dma_alloc_notify);
+		}
+
+		break;
+
+	case HOST_ENUM_RES_CMD:
+		enum_res = (struct hbm_host_enum_response *) ishtp_msg;
+		memcpy(dev->fw_clients_map, enum_res->valid_addresses, 32);
+		if (dev->dev_state == ISHTP_DEV_INIT_CLIENTS &&
+			dev->hbm_state == ISHTP_HBM_ENUM_CLIENTS) {
+			dev->fw_client_presentation_num = 0;
+			dev->fw_client_index = 0;
+
+			ishtp_hbm_fw_cl_allocate(dev);
+			dev->hbm_state = ISHTP_HBM_CLIENT_PROPERTIES;
+
+			/* first property request */
+			ishtp_hbm_prop_req(dev);
+		} else {
+			dev_err(dev->devc,
+			      "reset: unexpected enumeration response hbm\n");
+			ish_hw_reset(dev);
+			return;
+		}
+		break;
+
+	case HOST_STOP_RES_CMD:
+		if (dev->hbm_state != ISHTP_HBM_STOPPED)
+			dev_err(dev->devc, "unexpected stop response\n");
+
+		dev->dev_state = ISHTP_DEV_DISABLED;
+		dev_info(dev->devc, "reset: FW stop response\n");
+		ish_hw_reset(dev);
+		break;
+
+	case CLIENT_DISCONNECT_REQ_CMD:
+		/* search for client */
+		disconnect_req =
+			(struct hbm_client_connect_request *)ishtp_msg;
+		ishtp_hbm_fw_disconnect_req(dev, disconnect_req);
+		break;
+
+	case FW_STOP_REQ_CMD:
+		dev->hbm_state = ISHTP_HBM_STOPPED;
+		break;
+
+	case DMA_BUFFER_ALLOC_RESPONSE:
+		dev->ishtp_host_dma_enabled = 1;
+		break;
+
+	case DMA_XFER:
+		dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
+		if (!dev->ishtp_host_dma_enabled) {
+			dev_err(dev->devc,
+				"DMA XFER requested but DMA is not enabled\n");
+			break;
+		}
+		ishtp_hbm_dma_xfer(dev, dma_xfer);
+		break;
+
+	case DMA_XFER_ACK:
+		dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
+		if (!dev->ishtp_host_dma_enabled ||
+		    !dev->ishtp_host_dma_tx_buf) {
+			dev_err(dev->devc,
+				"DMA XFER acked but DMA Tx is not enabled\n");
+			break;
+		}
+		ishtp_hbm_dma_xfer_ack(dev, dma_xfer);
+		break;
+
+	default:
+		dev_err(dev->devc, "unknown HBM: %u\n",
+			(unsigned int)ishtp_msg->hbm_cmd);
+
+		break;
+	}
+}
+
+/*
+ * BH processing work function (instead of thread handler)
+ * for processing hbm messages
+ */
+void	bh_hbm_work_fn(struct work_struct *work)
+{
+	unsigned long	flags;
+	struct ishtp_device	*dev;
+	unsigned char	hbm[IPC_PAYLOAD_SIZE];
+
+	dev = container_of(work, struct ishtp_device, bh_hbm_work);
+	spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
+	if (dev->rd_msg_fifo_head != dev->rd_msg_fifo_tail) {
+		memcpy(hbm, dev->rd_msg_fifo + dev->rd_msg_fifo_head,
+			IPC_PAYLOAD_SIZE);
+		dev->rd_msg_fifo_head =
+			(dev->rd_msg_fifo_head + IPC_PAYLOAD_SIZE) %
+			(RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
+		spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+		ishtp_hbm_dispatch(dev, (struct ishtp_bus_message *)hbm);
+	} else {
+		spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+	}
+}
+
+/*
+ *	Receive and process ISHTP bus messages
+ *
+ *	(!) ISR context
+ */
+void	recv_hbm(struct ishtp_device *dev, struct ishtp_msg_hdr *ishtp_hdr)
+{
+	uint8_t	rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
+	struct ishtp_bus_message	*ishtp_msg =
+		(struct ishtp_bus_message *)rd_msg_buf;
+	unsigned long	flags;
+
+	dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
+
+	/* Flow control - handle in place */
+	if (ishtp_msg->hbm_cmd == ISHTP_FLOW_CONTROL_CMD) {
+		struct hbm_flow_control *flow_control =
+			(struct hbm_flow_control *)ishtp_msg;
+		struct ishtp_cl *cl = NULL;
+		unsigned long	flags, tx_flags;
+
+		spin_lock_irqsave(&dev->cl_list_lock, flags);
+		list_for_each_entry(cl, &dev->cl_list, link) {
+			if (cl->host_client_id == flow_control->host_addr &&
+					cl->fw_client_id ==
+					flow_control->fw_addr) {
+				/*
+				 * NOTE: Receiving an FC in the middle of
+				 * sending is valid only for a counting
+				 * flow-control implementation, which is
+				 * not supported for now.
+				 */
+				if (cl->ishtp_flow_ctrl_creds)
+					dev_err(dev->devc,
+					 "recv extra FC from FW client "
+					 "%u (host client %u) "
+					 "(FC count was %d)\n",
+					 (unsigned int)cl->fw_client_id,
+					 (unsigned int)cl->host_client_id,
+					 cl->ishtp_flow_ctrl_creds);
+				else {
+					++cl->ishtp_flow_ctrl_creds;
+					++cl->ishtp_flow_ctrl_cnt;
+					cl->last_ipc_acked = 1;
+					spin_lock_irqsave(
+							&cl->tx_list_spinlock,
+							tx_flags);
+					if (!list_empty(&cl->tx_list.list)) {
+						/*
+						 * start sending the first
+						 * queued msg
+						 */
+						spin_unlock_irqrestore(
+							&cl->tx_list_spinlock,
+							tx_flags);
+						ishtp_cl_send_msg(dev, cl);
+					} else {
+						spin_unlock_irqrestore(
+							&cl->tx_list_spinlock,
+							tx_flags);
+					}
+				}
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+		goto	eoi;
+	}
+
+	/*
+	 * Some messages that are safe for ISR processing and important
+	 * to be done "quickly" and in-order, go here
+	 */
+	if (ishtp_msg->hbm_cmd == CLIENT_CONNECT_RES_CMD ||
+			ishtp_msg->hbm_cmd == CLIENT_DISCONNECT_RES_CMD ||
+			ishtp_msg->hbm_cmd == CLIENT_DISCONNECT_REQ_CMD ||
+			ishtp_msg->hbm_cmd == DMA_XFER) {
+		ishtp_hbm_dispatch(dev, ishtp_msg);
+		goto	eoi;
+	}
+
+	/*
+	 * All other HBMs go here.
+	 * We schedule HBMs for serial processing on the system workqueue;
+	 * several HBMs may be scheduled at the same time.
+	 */
+	spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
+	if ((dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
+			(RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE) ==
+			dev->rd_msg_fifo_head) {
+		spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+		dev_err(dev->devc, "BH buffer overflow, dropping HBM %u\n",
+			(unsigned int)ishtp_msg->hbm_cmd);
+		goto	eoi;
+	}
+	memcpy(dev->rd_msg_fifo + dev->rd_msg_fifo_tail, ishtp_msg,
+		ishtp_hdr->length);
+	dev->rd_msg_fifo_tail = (dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
+		(RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
+	spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+	schedule_work(&dev->bh_hbm_work);
+eoi:
+	return;
+}
+
+/*
+ * Receive and process ISHTP fixed client messages
+ *
+ * (!) ISR context
+ */
+void recv_fixed_cl_msg(struct ishtp_device *dev,
+	struct ishtp_msg_hdr *ishtp_hdr)
+{
+	uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
+
+	dev->print_log(dev,
+		"%s() got fixed client msg from client #%d\n",
+		__func__, ishtp_hdr->fw_addr);
+	dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
+	if (ishtp_hdr->fw_addr == ISHTP_SYSTEM_STATE_CLIENT_ADDR) {
+		struct ish_system_states_header *msg_hdr =
+			(struct ish_system_states_header *)rd_msg_buf;
+		if (msg_hdr->cmd == SYSTEM_STATE_SUBSCRIBE)
+			ishtp_send_resume(dev);
+		/* if FW request arrived here, the system is not suspended */
+		else
+			dev_err(dev->devc, "unknown fixed client msg [%02X]\n",
+				msg_hdr->cmd);
+	}
+}
+
+static inline void fix_cl_hdr(struct ishtp_msg_hdr *hdr, size_t length,
+	uint8_t cl_addr)
+{
+	hdr->host_addr = 0;
+	hdr->fw_addr = cl_addr;
+	hdr->length = length;
+	hdr->msg_complete = 1;
+	hdr->reserved = 0;
+}
+
+/*** Suspend and resume notification ***/
+
+static uint32_t current_state;
+static uint32_t supported_states = 0 | SUSPEND_STATE_BIT;
+
+void ishtp_send_suspend(struct ishtp_device *dev)
+{
+	struct ishtp_msg_hdr	ishtp_hdr;
+	struct ish_system_states_status state_status_msg;
+	const size_t len = sizeof(struct ish_system_states_status);
+
+	fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);
+
+	memset(&state_status_msg, 0, len);
+	state_status_msg.hdr.cmd = SYSTEM_STATE_STATUS;
+	state_status_msg.supported_states = supported_states;
+	current_state |= SUSPEND_STATE_BIT;
+	dev->print_log(dev, "%s() sends SUSPEND notification\n", __func__);
+	state_status_msg.states_status = current_state;
+
+	ishtp_write_message(dev, &ishtp_hdr,
+		(unsigned char *)&state_status_msg);
+}
+EXPORT_SYMBOL(ishtp_send_suspend);
+
+void ishtp_send_resume(struct ishtp_device *dev)
+{
+	struct ishtp_msg_hdr	ishtp_hdr;
+	struct ish_system_states_status state_status_msg;
+	const size_t len = sizeof(struct ish_system_states_status);
+
+	fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);
+
+	memset(&state_status_msg, 0, len);
+	state_status_msg.hdr.cmd = SYSTEM_STATE_STATUS;
+	state_status_msg.supported_states = supported_states;
+	current_state &= ~SUSPEND_STATE_BIT;
+	dev->print_log(dev, "%s() sends RESUME notification\n", __func__);
+	state_status_msg.states_status = current_state;
+
+	ishtp_write_message(dev, &ishtp_hdr,
+		(unsigned char *)&state_status_msg);
+}
+EXPORT_SYMBOL(ishtp_send_resume);
+
+void ishtp_query_subscribers(struct ishtp_device *dev)
+{
+	struct ishtp_msg_hdr	ishtp_hdr;
+	struct ish_system_states_query_subscribers query_subscribers_msg;
+	const size_t len = sizeof(struct ish_system_states_query_subscribers);
+
+	fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);
+
+	memset(&query_subscribers_msg, 0, len);
+	query_subscribers_msg.hdr.cmd = SYSTEM_STATE_QUERY_SUBSCRIBERS;
+
+	ishtp_write_message(dev, &ishtp_hdr,
+		(unsigned char *)&query_subscribers_msg);
+}
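
The suspend/resume helpers just above both emit a SYSTEM_STATE_STATUS message to
the fixed system-state client (address 13), differing only in whether
SUSPEND_STATE_BIT is set in states_status. A standalone sketch of that payload
follows; the structs are redeclared locally (and make_status() invented) purely
so the example compiles on its own.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define SYSTEM_STATE_STATUS	0x2
#define SUSPEND_STATE_BIT	(1 << 1)

/* Local copies of the structs declared in hbm.h, so the sketch stands alone */
struct ish_system_states_header {
	uint32_t cmd;
	uint32_t cmd_status;
};

struct ish_system_states_status {
	struct ish_system_states_header hdr;
	uint32_t supported_states;
	uint32_t states_status;
};

/* Build the payload the way ishtp_send_suspend()/ishtp_send_resume() do */
static struct ish_system_states_status make_status(int suspend)
{
	struct ish_system_states_status msg;

	memset(&msg, 0, sizeof(msg));
	msg.hdr.cmd = SYSTEM_STATE_STATUS;
	msg.supported_states = SUSPEND_STATE_BIT;
	msg.states_status = suspend ? SUSPEND_STATE_BIT : 0;
	return msg;
}

int main(void)
{
	printf("suspend: states_status=0x%x\n",
	       (unsigned int)make_status(1).states_status);
	printf("resume:  states_status=0x%x\n",
	       (unsigned int)make_status(0).states_status);
	return 0;
}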
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.h b/drivers/hid/intel-ish-hid/ishtp/hbm.h
new file mode 100644
index 0000000..d8ed76b
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.h
@@ -0,0 +1,319 @@
+/*
+ * ISHTP bus layer messages handling
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ISHTP_HBM_H_
+#define _ISHTP_HBM_H_
+
+#include <linux/uuid.h>
+
+struct ishtp_device;
+struct ishtp_msg_hdr;
+struct ishtp_cl;
+
+/*
+ * Timeouts in Seconds
+ */
+#define ISHTP_INTEROP_TIMEOUT		7 /* Timeout on ready message */
+
+#define ISHTP_CL_CONNECT_TIMEOUT	15 /* HPS: Client Connect Timeout */
+
+/*
+ * ISHTP Version
+ */
+#define HBM_MINOR_VERSION		0
+#define HBM_MAJOR_VERSION		1
+
+/* Host bus message command opcode */
+#define ISHTP_HBM_CMD_OP_MSK		0x7f
+/* Host bus message command RESPONSE */
+#define ISHTP_HBM_CMD_RES_MSK		0x80
+
+/*
+ * ISHTP Bus Message Command IDs
+ */
+#define HOST_START_REQ_CMD		0x01
+#define HOST_START_RES_CMD		0x81
+
+#define HOST_STOP_REQ_CMD		0x02
+#define HOST_STOP_RES_CMD		0x82
+
+#define FW_STOP_REQ_CMD			0x03
+
+#define HOST_ENUM_REQ_CMD		0x04
+#define HOST_ENUM_RES_CMD		0x84
+
+#define HOST_CLIENT_PROPERTIES_REQ_CMD	0x05
+#define HOST_CLIENT_PROPERTIES_RES_CMD	0x85
+
+#define CLIENT_CONNECT_REQ_CMD		0x06
+#define CLIENT_CONNECT_RES_CMD		0x86
+
+#define CLIENT_DISCONNECT_REQ_CMD	0x07
+#define CLIENT_DISCONNECT_RES_CMD	0x87
+
+#define ISHTP_FLOW_CONTROL_CMD		0x08
+
+#define DMA_BUFFER_ALLOC_NOTIFY		0x11
+#define DMA_BUFFER_ALLOC_RESPONSE	0x91
+
+#define DMA_XFER			0x12
+#define DMA_XFER_ACK			0x92
+
+/*
+ * ISHTP Stop Reason
+ * used by hbm_host_stop_request.reason
+ */
+#define	DRIVER_STOP_REQUEST		0x00
+
+/*
+ * ISHTP BUS Interface Section
+ */
+struct ishtp_msg_hdr {
+	uint32_t fw_addr:8;
+	uint32_t host_addr:8;
+	uint32_t length:9;
+	uint32_t reserved:6;
+	uint32_t msg_complete:1;
+} __packed;
+
+struct ishtp_bus_message {
+	uint8_t hbm_cmd;
+	uint8_t data[0];
+} __packed;
+
+/**
+ * struct hbm_cl_cmd - client specific host bus command
+ *	CONNECT, DISCONNECT, and FLOW CONTROL
+ *
+ * @hbm_cmd - bus message command header
+ * @fw_addr - address of the fw client
+ * @host_addr - address of the client in the driver
+ * @data
+ */
+struct ishtp_hbm_cl_cmd {
+	uint8_t hbm_cmd;
+	uint8_t fw_addr;
+	uint8_t host_addr;
+	uint8_t data;
+};
+
+struct hbm_version {
+	uint8_t minor_version;
+	uint8_t major_version;
+} __packed;
+
+struct hbm_host_version_request {
+	uint8_t hbm_cmd;
+	uint8_t reserved;
+	struct hbm_version host_version;
+} __packed;
+
+struct hbm_host_version_response {
+	uint8_t hbm_cmd;
+	uint8_t host_version_supported;
+	struct hbm_version fw_max_version;
+} __packed;
+
+struct hbm_host_stop_request {
+	uint8_t hbm_cmd;
+	uint8_t reason;
+	uint8_t reserved[2];
+} __packed;
+
+struct hbm_host_stop_response {
+	uint8_t hbm_cmd;
+	uint8_t reserved[3];
+} __packed;
+
+struct hbm_host_enum_request {
+	uint8_t hbm_cmd;
+	uint8_t reserved[3];
+} __packed;
+
+struct hbm_host_enum_response {
+	uint8_t hbm_cmd;
+	uint8_t reserved[3];
+	uint8_t valid_addresses[32];
+} __packed;
+
+struct ishtp_client_properties {
+	uuid_le protocol_name;
+	uint8_t protocol_version;
+	uint8_t max_number_of_connections;
+	uint8_t fixed_address;
+	uint8_t single_recv_buf;
+	uint32_t max_msg_length;
+	uint8_t dma_hdr_len;
+#define	ISHTP_CLIENT_DMA_ENABLED	0x80
+	uint8_t reserved4;
+	uint8_t reserved5;
+	uint8_t reserved6;
+} __packed;
+
+struct hbm_props_request {
+	uint8_t hbm_cmd;
+	uint8_t address;
+	uint8_t reserved[2];
+} __packed;
+
+struct hbm_props_response {
+	uint8_t hbm_cmd;
+	uint8_t address;
+	uint8_t status;
+	uint8_t reserved[1];
+	struct ishtp_client_properties client_properties;
+} __packed;
+
+/**
+ * struct hbm_client_connect_request - connect/disconnect request
+ *
+ * @hbm_cmd - bus message command header
+ * @fw_addr - address of the fw client
+ * @host_addr - address of the client in the driver
+ * @reserved
+ */
+struct hbm_client_connect_request {
+	uint8_t hbm_cmd;
+	uint8_t fw_addr;
+	uint8_t host_addr;
+	uint8_t reserved;
+} __packed;
+
+/**
+ * struct hbm_client_connect_response - connect/disconnect response
+ *
+ * @hbm_cmd - bus message command header
+ * @fw_addr - address of the fw client
+ * @host_addr - address of the client in the driver
+ * @status - status of the request
+ */
+struct hbm_client_connect_response {
+	uint8_t hbm_cmd;
+	uint8_t fw_addr;
+	uint8_t host_addr;
+	uint8_t status;
+} __packed;
+
+
+#define ISHTP_FC_MESSAGE_RESERVED_LENGTH		5
+
+struct hbm_flow_control {
+	uint8_t hbm_cmd;
+	uint8_t fw_addr;
+	uint8_t host_addr;
+	uint8_t reserved[ISHTP_FC_MESSAGE_RESERVED_LENGTH];
+} __packed;
+
+struct dma_alloc_notify {
+	uint8_t hbm;
+	uint8_t status;
+	uint8_t reserved[2];
+	uint32_t buf_size;
+	uint64_t buf_address;
+	/* [...] May come more size/address pairs */
+} __packed;
+
+struct dma_xfer_hbm {
+	uint8_t hbm;
+	uint8_t fw_client_id;
+	uint8_t host_client_id;
+	uint8_t reserved;
+	uint64_t msg_addr;
+	uint32_t msg_length;
+	uint32_t reserved2;
+} __packed;
+
+/* System state */
+#define ISHTP_SYSTEM_STATE_CLIENT_ADDR		13
+
+#define SYSTEM_STATE_SUBSCRIBE			0x1
+#define SYSTEM_STATE_STATUS			0x2
+#define SYSTEM_STATE_QUERY_SUBSCRIBERS		0x3
+#define SYSTEM_STATE_STATE_CHANGE_REQ		0x4
+/* indicates suspend and resume states */
+#define SUSPEND_STATE_BIT			(1<<1)
+
+struct ish_system_states_header {
+	uint32_t cmd;
+	uint32_t cmd_status;	/* responses will have this set */
+} __packed;
+
+struct ish_system_states_subscribe {
+	struct ish_system_states_header hdr;
+	uint32_t states;
+} __packed;
+
+struct ish_system_states_status {
+	struct ish_system_states_header hdr;
+	uint32_t supported_states;
+	uint32_t states_status;
+} __packed;
+
+struct ish_system_states_query_subscribers {
+	struct ish_system_states_header hdr;
+} __packed;
+
+struct ish_system_states_state_change_req {
+	struct ish_system_states_header hdr;
+	uint32_t requested_states;
+	uint32_t states_status;
+} __packed;
+
+/**
+ * enum ishtp_hbm_state - host bus message protocol state
+ *
+ * @ISHTP_HBM_IDLE : protocol not started
+ * @ISHTP_HBM_START : start request message was sent
+ * @ISHTP_HBM_STARTED : start response was received
+ * @ISHTP_HBM_ENUM_CLIENTS : enumeration request was sent
+ * @ISHTP_HBM_CLIENT_PROPERTIES : acquiring clients properties
+ * @ISHTP_HBM_WORKING : enumeration done, protocol is in normal operation
+ * @ISHTP_HBM_STOPPED : protocol is being stopped
+ */
+enum ishtp_hbm_state {
+	ISHTP_HBM_IDLE = 0,
+	ISHTP_HBM_START,
+	ISHTP_HBM_STARTED,
+	ISHTP_HBM_ENUM_CLIENTS,
+	ISHTP_HBM_CLIENT_PROPERTIES,
+	ISHTP_HBM_WORKING,
+	ISHTP_HBM_STOPPED,
+};
+
+static inline void ishtp_hbm_hdr(struct ishtp_msg_hdr *hdr, size_t length)
+{
+	hdr->host_addr = 0;
+	hdr->fw_addr = 0;
+	hdr->length = length;
+	hdr->msg_complete = 1;
+	hdr->reserved = 0;
+}
+
+int ishtp_hbm_start_req(struct ishtp_device *dev);
+int ishtp_hbm_start_wait(struct ishtp_device *dev);
+int ishtp_hbm_cl_flow_control_req(struct ishtp_device *dev,
+	struct ishtp_cl *cl);
+int ishtp_hbm_cl_disconnect_req(struct ishtp_device *dev, struct ishtp_cl *cl);
+int ishtp_hbm_cl_connect_req(struct ishtp_device *dev, struct ishtp_cl *cl);
+void ishtp_hbm_enum_clients_req(struct ishtp_device *dev);
+void bh_hbm_work_fn(struct work_struct *work);
+void recv_hbm(struct ishtp_device *dev, struct ishtp_msg_hdr *ishtp_hdr);
+void recv_fixed_cl_msg(struct ishtp_device *dev,
+	struct ishtp_msg_hdr *ishtp_hdr);
+void ishtp_hbm_dispatch(struct ishtp_device *dev,
+	struct ishtp_bus_message *hdr);
+
+void ishtp_send_suspend(struct ishtp_device *dev);
+void ishtp_send_resume(struct ishtp_device *dev);
+void ishtp_query_subscribers(struct ishtp_device *dev);
+
+#endif /* _ISHTP_HBM_H_ */
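
As a small worked example of the wire format declared above: every
client-directed HBM (connect, disconnect, flow control) starts with a one-byte
opcode plus the fw/host addresses, and the matching response opcode is the
request opcode with ISHTP_HBM_CMD_RES_MSK set (0x06 -> 0x86 for connect). The
sketch below re-declares just enough of the header to build a connect request
the way ishtp_hbm_cl_hdr() does; it is a standalone illustration, not code from
the patch, and __attribute__((packed)) stands in for the kernel's __packed.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define CLIENT_CONNECT_REQ_CMD	0x06
#define ISHTP_HBM_CMD_RES_MSK	0x80

/* Mirrors struct hbm_client_connect_request from hbm.h */
struct hbm_client_connect_request {
	uint8_t hbm_cmd;
	uint8_t fw_addr;
	uint8_t host_addr;
	uint8_t reserved;
} __attribute__((packed));

int main(void)
{
	struct hbm_client_connect_request req;

	memset(&req, 0, sizeof(req));
	req.hbm_cmd = CLIENT_CONNECT_REQ_CMD;
	req.fw_addr = 5;	/* example fw client id */
	req.host_addr = 3;	/* example host client id */

	printf("request bytes: %02x %02x %02x %02x\n",
	       req.hbm_cmd, req.fw_addr, req.host_addr, req.reserved);
	printf("expected response opcode: 0x%02x\n",
	       CLIENT_CONNECT_REQ_CMD | ISHTP_HBM_CMD_RES_MSK);
	return 0;
}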
diff --git a/drivers/hid/intel-ish-hid/ishtp/init.c b/drivers/hid/intel-ish-hid/ishtp/init.c
new file mode 100644
index 0000000..14e0a88
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/init.c
@@ -0,0 +1,94 @@
+/*
+ * Initialization protocol for ISHTP driver
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/miscdevice.h>
+#include "ishtp-dev.h"
+#include "hbm.h"
+#include "client.h"
+
+const char *ishtp_dev_state_str(int state)
+{
+	switch (state) {
+	case ISHTP_DEV_INITIALIZING:
+		return	"INITIALIZING";
+	case ISHTP_DEV_INIT_CLIENTS:
+		return	"INIT_CLIENTS";
+	case ISHTP_DEV_ENABLED:
+		return	"ENABLED";
+	case ISHTP_DEV_RESETTING:
+		return	"RESETTING";
+	case ISHTP_DEV_DISABLED:
+		return	"DISABLED";
+	case ISHTP_DEV_POWER_DOWN:
+		return	"POWER_DOWN";
+	case ISHTP_DEV_POWER_UP:
+		return	"POWER_UP";
+	default:
+		return "unknown";
+	}
+}
+EXPORT_SYMBOL(ishtp_dev_state_str);
+
+void ishtp_device_init(struct ishtp_device *dev)
+{
+	dev->dev_state = ISHTP_DEV_INITIALIZING;
+	INIT_LIST_HEAD(&dev->cl_list);
+	INIT_LIST_HEAD(&dev->device_list);
+	dev->rd_msg_fifo_head = 0;
+	dev->rd_msg_fifo_tail = 0;
+	spin_lock_init(&dev->rd_msg_spinlock);
+
+	init_waitqueue_head(&dev->wait_hbm_recvd_msg);
+	spin_lock_init(&dev->read_list_spinlock);
+	spin_lock_init(&dev->device_lock);
+	spin_lock_init(&dev->device_list_lock);
+	spin_lock_init(&dev->cl_list_lock);
+	spin_lock_init(&dev->fw_clients_lock);
+	INIT_WORK(&dev->bh_hbm_work, bh_hbm_work_fn);
+
+	bitmap_zero(dev->host_clients_map, ISHTP_CLIENTS_MAX);
+	dev->open_handle_count = 0;
+
+	/*
+	 * Reserving client ID 0 for ISHTP Bus Message communications
+	 */
+	bitmap_set(dev->host_clients_map, 0, 1);
+
+	INIT_LIST_HEAD(&dev->read_list.list);
+
+}
+EXPORT_SYMBOL(ishtp_device_init);
+
+/* ishtp_start - initializes host and fw to start work */
+int ishtp_start(struct ishtp_device *dev)
+{
+	if (ishtp_hbm_start_wait(dev)) {
+		dev_err(dev->devc, "HBM hasn't started\n");
+		goto err;
+	}
+
+	/* suspend & resume notification - send QUERY_SUBSCRIBERS msg */
+	ishtp_query_subscribers(dev);
+
+	return 0;
+err:
+	dev_err(dev->devc, "link layer initialization failed.\n");
+	dev->dev_state = ISHTP_DEV_DISABLED;
+	return -ENODEV;
+}
+EXPORT_SYMBOL(ishtp_start);
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
new file mode 100644
index 0000000..1f30a81
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -0,0 +1,280 @@
+/*
+ * Most of the ISHTP provider device and ISHTP logic declarations
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ISHTP_DEV_H_
+#define _ISHTP_DEV_H_
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include "bus.h"
+#include "hbm.h"
+
+#define	IPC_PAYLOAD_SIZE	128
+#define ISHTP_RD_MSG_BUF_SIZE	IPC_PAYLOAD_SIZE
+#define	IPC_FULL_MSG_SIZE	132
+
+/* Number of messages to be held in ISR->BH FIFO */
+#define	RD_INT_FIFO_SIZE	64
+
+/*
+ * Number of IPC messages to be held in Tx FIFO, to be sent by ISR -
+ * Tx complete interrupt or RX_COMPLETE handler
+ */
+#define	IPC_TX_FIFO_SIZE	512
+
+/*
+ * Number of Maximum ISHTP Clients
+ */
+#define ISHTP_CLIENTS_MAX 256
+
+/*
+ * Number of File descriptors/handles
+ * that can be opened to the driver.
+ *
+ * Limit to 255: 256 Total Clients
+ * minus internal client for ISHTP Bus Messages
+ */
+#define ISHTP_MAX_OPEN_HANDLE_COUNT (ISHTP_CLIENTS_MAX - 1)
+
+/* Internal Clients Number */
+#define ISHTP_HOST_CLIENT_ID_ANY		(-1)
+#define ISHTP_HBM_HOST_CLIENT_ID		0
+
+#define	MAX_DMA_DELAY	20
+
+/* ISHTP device states */
+enum ishtp_dev_state {
+	ISHTP_DEV_INITIALIZING = 0,
+	ISHTP_DEV_INIT_CLIENTS,
+	ISHTP_DEV_ENABLED,
+	ISHTP_DEV_RESETTING,
+	ISHTP_DEV_DISABLED,
+	ISHTP_DEV_POWER_DOWN,
+	ISHTP_DEV_POWER_UP
+};
+const char *ishtp_dev_state_str(int state);
+
+struct ishtp_cl;
+
+/**
+ * struct ishtp_fw_client - representation of fw client
+ *
+ * @props - client properties
+ * @client_id - fw client id
+ */
+struct ishtp_fw_client {
+	struct ishtp_client_properties props;
+	uint8_t client_id;
+};
+
+/**
+ * struct ishtp_msg_data - ISHTP message data struct
+ * @size:	Size of data in the *data
+ * @data:	Pointer to data
+ */
+struct ishtp_msg_data {
+	uint32_t size;
+	unsigned char *data;
+};
+
+/*
+ * struct ishtp_cl_rb - request block structure
+ * @list:	Link to list members
+ * @cl:		ISHTP client instance
+ * @buffer:	message header
+ * @buf_idx:	Index into buffer
+ * @read_time:	 unused at this time
+ */
+struct ishtp_cl_rb {
+	struct list_head list;
+	struct ishtp_cl *cl;
+	struct ishtp_msg_data buffer;
+	unsigned long buf_idx;
+	unsigned long read_time;
+};
+
+/*
+ * Control info for the ISHTP/IPC sending FIFO of IPC messages -
+ * a list with an inline data buffer.
+ * This structure is filled with parameters submitted by the calling
+ * glue layer.
+ * 'buf' may point to an external buffer or to 'inline_data';
+ * 'offset' is initialized to 0 on submission.
+ *
+ * 'ipc_send_compl' is intended for clients that send fragmented
+ * messages. It is called whenever a fragment has been written to the
+ * IPC msg registers. If more fragments remain, it sends the next one;
+ * with the last fragment it sets the appropriate ISHTP
+ * "message-complete" flag, then removes the outstanding message
+ * (marks the outstanding buffer as available). If counting flow
+ * control is in use and flow-control credits remain, it may queue the
+ * next client message from the cl structure for IPC processing.
+ *
+ */
+struct wr_msg_ctl_info {
+	/* Will be called with 'ipc_send_compl_prm' as parameter */
+	void (*ipc_send_compl)(void *);
+
+	void *ipc_send_compl_prm;
+	size_t length;
+	struct list_head	link;
+	unsigned char	inline_data[IPC_FULL_MSG_SIZE];
+};
+
+/*
+ * The ISHTP layer talks to hardware IPC message using the following
+ * callbacks
+ */
+struct ishtp_hw_ops {
+	int	(*hw_reset)(struct ishtp_device *dev);
+	int	(*ipc_reset)(struct ishtp_device *dev);
+	uint32_t (*ipc_get_header)(struct ishtp_device *dev, int length,
+				   int busy);
+	int	(*write)(struct ishtp_device *dev,
+		void (*ipc_send_compl)(void *), void *ipc_send_compl_prm,
+		unsigned char *msg, int length);
+	uint32_t	(*ishtp_read_hdr)(const struct ishtp_device *dev);
+	int	(*ishtp_read)(struct ishtp_device *dev, unsigned char *buffer,
+			unsigned long buffer_length);
+	uint32_t	(*get_fw_status)(struct ishtp_device *dev);
+	void	(*sync_fw_clock)(struct ishtp_device *dev);
+};
+
+/**
+ * struct ishtp_device - ISHTP private device struct
+ */
+struct ishtp_device {
+	struct device *devc;	/* pointer to lowest device */
+	struct pci_dev *pdev;	/* PCI device to get device ids */
+
+	/* waitq for waiting for suspend response */
+	wait_queue_head_t suspend_wait;
+	bool suspend_flag;	/* Suspend is active */
+
+	/* waitq for waiting for resume response */
+	wait_queue_head_t resume_wait;
+	bool resume_flag;	/* Resume is active */
+
+	/*
+	 * lock for the device, for everything that doesn't have
+	 * a dedicated spinlock
+	 */
+	spinlock_t device_lock;
+
+	bool recvd_hw_ready;
+	struct hbm_version version;
+	int transfer_path; /* Choice of transfer path: IPC or DMA */
+
+	/* ishtp device states */
+	enum ishtp_dev_state dev_state;
+	enum ishtp_hbm_state hbm_state;
+
+	/* driver read queue */
+	struct ishtp_cl_rb read_list;
+	spinlock_t read_list_spinlock;
+
+	/* list of ishtp_cl's */
+	struct list_head cl_list;
+	spinlock_t cl_list_lock;
+	long open_handle_count;
+
+	/* List of bus devices */
+	struct list_head device_list;
+	spinlock_t device_list_lock;
+
+	/* waiting queues for receive message from FW */
+	wait_queue_head_t wait_hw_ready;
+	wait_queue_head_t wait_hbm_recvd_msg;
+
+	/* FIFO for input messages for BH processing */
+	unsigned char rd_msg_fifo[RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE];
+	unsigned int rd_msg_fifo_head, rd_msg_fifo_tail;
+	spinlock_t rd_msg_spinlock;
+	struct work_struct bh_hbm_work;
+
+	/* IPC write queue */
+	struct wr_msg_ctl_info wr_processing_list_head, wr_free_list_head;
+	/* For both the processing list and the free list */
+	spinlock_t wr_processing_spinlock;
+
+	spinlock_t out_ipc_spinlock;
+
+	struct ishtp_fw_client *fw_clients; /*Note:memory has to be allocated*/
+	DECLARE_BITMAP(fw_clients_map, ISHTP_CLIENTS_MAX);
+	DECLARE_BITMAP(host_clients_map, ISHTP_CLIENTS_MAX);
+	uint8_t fw_clients_num;
+	uint8_t fw_client_presentation_num;
+	uint8_t fw_client_index;
+	spinlock_t fw_clients_lock;
+
+	/* TX DMA buffers and slots */
+	int ishtp_host_dma_enabled;
+	void *ishtp_host_dma_tx_buf;
+	unsigned int ishtp_host_dma_tx_buf_size;
+	uint64_t ishtp_host_dma_tx_buf_phys;
+	int ishtp_dma_num_slots;
+
+	/* map of 4k blocks in Tx dma buf: 0-free, 1-used */
+	uint8_t *ishtp_dma_tx_map;
+	spinlock_t ishtp_dma_tx_lock;
+
+	/* RX DMA buffers and slots */
+	void *ishtp_host_dma_rx_buf;
+	unsigned int ishtp_host_dma_rx_buf_size;
+	uint64_t ishtp_host_dma_rx_buf_phys;
+
+	/* Dump to trace buffers if enabled */
+	void (*print_log)(struct ishtp_device *dev, char *format, ...);
+
+	/* Debug stats */
+	unsigned int	ipc_rx_cnt;
+	unsigned long long	ipc_rx_bytes_cnt;
+	unsigned int	ipc_tx_cnt;
+	unsigned long long	ipc_tx_bytes_cnt;
+
+	const struct ishtp_hw_ops *ops;
+	size_t	mtu;
+	uint32_t	ishtp_msg_hdr;
+	char hw[0] __aligned(sizeof(void *));
+};
+
+void	ishtp_device_init(struct ishtp_device *dev);
+int	ishtp_start(struct ishtp_device *dev);
+void	ishtp_device_disable(struct ishtp_device *dev);
+void	ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev);
+int	ishtp_register(struct ishtp_device *dev);
+void	ishtp_deregister(struct ishtp_device *dev);
+
+static inline unsigned long ishtp_secs_to_jiffies(unsigned long sec)
+{
+	return msecs_to_jiffies(sec * MSEC_PER_SEC);
+}
+
+/*
+ * Register Access Function
+ */
+static inline int ish_ipc_reset(struct ishtp_device *dev)
+{
+	return dev->ops->ipc_reset(dev);
+}
+
+static inline int ish_hw_reset(struct ishtp_device *dev)
+{
+	return dev->ops->hw_reset(dev);
+}
+
+#endif /*_ISHTP_DEV_H_*/
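
One detail from the declarations above that is easy to miss: the
ISR-to-bottom-half FIFO (rd_msg_fifo) advances head and tail in fixed
IPC_PAYLOAD_SIZE strides, and "full" is detected when advancing the tail would
land on the head, exactly the check in recv_hbm() and bh_hbm_work_fn() in
hbm.c. A small standalone model of that ring arithmetic, for illustration only:

#include <stdio.h>

#define IPC_PAYLOAD_SIZE	128
#define RD_INT_FIFO_SIZE	64
#define FIFO_BYTES		(RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE)

static unsigned int head, tail;

/* Mirrors the overflow check in recv_hbm(): full when the next tail
 * position would collide with head, so one slot is always kept unused. */
static int fifo_push(void)
{
	if ((tail + IPC_PAYLOAD_SIZE) % FIFO_BYTES == head)
		return -1;	/* BH buffer overflow, message dropped */
	tail = (tail + IPC_PAYLOAD_SIZE) % FIFO_BYTES;
	return 0;
}

/* Mirrors bh_hbm_work_fn(): consume one message if the ring is non-empty */
static int fifo_pop(void)
{
	if (head == tail)
		return -1;	/* empty */
	head = (head + IPC_PAYLOAD_SIZE) % FIFO_BYTES;
	return 0;
}

int main(void)
{
	int pushed = 0;

	while (fifo_push() == 0)
		pushed++;
	printf("capacity before overflow: %d messages\n", pushed); /* 63 */
	fifo_pop();
	printf("after one pop, push %s\n",
	       fifo_push() == 0 ? "succeeds" : "fails");
	return 0;
}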
-- 
2.5.5
