Date:   Fri, 16 Sep 2016 15:23:57 +0200
From:   Greg KH <gregkh@...uxfoundation.org>
To:     Arnd Bergmann <arnd@...db.de>, linux-kernel@...r.kernel.org
Cc:     Johan Hovold <johan@...oldconsulting.com>,
        Rui Miguel Silva <rmfrfs@...il.com>,
        Laurent Pinchart <laurent.pinchart@...asonboard.com>,
        Sandeep Patil <sspatil@...gle.com>,
        Matt Porter <mporter@...nel.crashing.org>,
        John Stultz <john.stultz@...aro.org>,
        Rob Herring <robh@...nel.org>,
        Viresh Kumar <viresh.kumar@...aro.org>,
        Alex Elder <elder@...aro.org>, David Lin <dtwlin@...gle.com>,
        Bryan O'Donoghue <pure.logic@...us-software.ie>,
        Vaibhav Agarwal <vaibhav.agarwal@...aro.org>,
        Mark Greer <mgreer@...malcreek.com>
Subject: [patch 07/32] greybus: core code

This is the core Greybus code.  It implements the greybus protocol and
is the base that all other greybus drivers tie into.
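
For reference, a bundle driver ties into this core by registering a
struct greybus_driver with the bus.  A minimal sketch (the driver and
class value here are made up; the declarations are as in greybus.h in
this series):

	static int gb_example_probe(struct gb_bundle *bundle,
				    const struct greybus_bundle_id *id)
	{
		dev_info(&bundle->dev, "example bundle bound\n");
		return 0;
	}

	static void gb_example_disconnect(struct gb_bundle *bundle)
	{
	}

	static const struct greybus_bundle_id gb_example_id_table[] = {
		{ GREYBUS_DEVICE_CLASS(0x5a) },	/* hypothetical class */
		{ }
	};

	static struct greybus_driver gb_example_driver = {
		.name		= "example",
		.probe		= gb_example_probe,
		.disconnect	= gb_example_disconnect,
		.id_table	= gb_example_id_table,
	};
	module_greybus_driver(gb_example_driver);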

Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
 drivers/greybus/arpc.h                   |  109 +++
 drivers/greybus/authentication.c         |  429 ++++++++++++++
 drivers/greybus/bundle.c                 |  253 ++++++++
 drivers/greybus/bundle.h                 |   90 ++
 drivers/greybus/connection.c             |  938 +++++++++++++++++++++++++++++++
 drivers/greybus/connection.h             |  129 ++++
 drivers/greybus/control.c                |  635 ++++++++++++++++++++
 drivers/greybus/control.h                |   65 ++
 drivers/greybus/core.c                   |  361 +++++++++++
 drivers/greybus/debugfs.c                |   31 +
 drivers/greybus/greybus.h                |  154 +++++
 drivers/greybus/greybus_authentication.h |  120 +++
 drivers/greybus/greybus_id.h             |   26 
 drivers/greybus/greybus_manifest.h       |  177 +++++
 drivers/greybus/manifest.c               |  535 +++++++++++++++++
 drivers/greybus/manifest.h               |   16 
 drivers/greybus/module.c                 |  238 +++++++
 drivers/greybus/module.h                 |   34 +
 18 files changed, 4340 insertions(+)

--- /dev/null
+++ b/drivers/greybus/arpc.h
@@ -0,0 +1,109 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of Google Inc. or Linaro Ltd. nor the names of
+ *    its contributors may be used to endorse or promote products
+ *    derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
+ * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __ARPC_H
+#define __ARPC_H
+
+/* APBridgeA RPC (ARPC) */
+
+enum arpc_result {
+	ARPC_SUCCESS		= 0x00,
+	ARPC_NO_MEMORY		= 0x01,
+	ARPC_INVALID		= 0x02,
+	ARPC_TIMEOUT		= 0x03,
+	ARPC_UNKNOWN_ERROR	= 0xff,
+};
+
+struct arpc_request_message {
+	__le16	id;		/* RPC unique id */
+	__le16	size;		/* Size in bytes of header + payload */
+	__u8	type;		/* RPC type */
+	__u8	data[0];	/* ARPC data */
+} __packed;
+
+struct arpc_response_message {
+	__le16	id;		/* RPC unique id */
+	__u8	result;		/* Result of RPC */
+} __packed;
+
+/* ARPC requests */
+#define ARPC_TYPE_CPORT_CONNECTED		0x01
+#define ARPC_TYPE_CPORT_QUIESCE			0x02
+#define ARPC_TYPE_CPORT_CLEAR			0x03
+#define ARPC_TYPE_CPORT_FLUSH			0x04
+#define ARPC_TYPE_CPORT_SHUTDOWN		0x05
+
+struct arpc_cport_connected_req {
+	__le16 cport_id;
+} __packed;
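+
+/*
+ * On the wire a request is thus the 5-byte packed header followed by
+ * the type-specific payload.  For example, an ARPC_TYPE_CPORT_CONNECTED
+ * request for cport 2, sent with id 1, is seven little-endian bytes:
+ *
+ *	01 00   07 00   01     02 00
+ *	 (id)   (size)  (type) (cport_id)
+ */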
+
+struct arpc_cport_quiesce_req {
+	__le16 cport_id;
+	__le16 peer_space;
+	__le16 timeout;
+} __packed;
+
+struct arpc_cport_clear_req {
+	__le16 cport_id;
+} __packed;
+
+struct arpc_cport_flush_req {
+	__le16 cport_id;
+} __packed;
+
+struct arpc_cport_shutdown_req {
+	__le16 cport_id;
+	__le16 timeout;
+	__u8 phase;
+} __packed;
+
+#endif	/* __ARPC_H */
--- /dev/null
+++ b/drivers/greybus/authentication.c
@@ -0,0 +1,429 @@
+/*
+ * Greybus Component Authentication Protocol (CAP) Driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include "greybus.h"
+
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+
+#include "greybus_authentication.h"
+#include "firmware.h"
+
+#define CAP_TIMEOUT_MS		1000
+
+/*
+ * Number of minor devices this driver supports.
+ * Exactly one minor is required per interface.
+ */
+#define NUM_MINORS		U8_MAX
+
+struct gb_cap {
+	struct device		*parent;
+	struct gb_connection	*connection;
+	struct kref		kref;
+	struct list_head	node;
+	bool			disabled; /* connection getting disabled */
+
+	struct mutex		mutex;
+	struct cdev		cdev;
+	struct device		*class_device;
+	dev_t			dev_num;
+};
+
+static struct class *cap_class;
+static dev_t cap_dev_num;
+static DEFINE_IDA(cap_minors_map);
+static LIST_HEAD(cap_list);
+static DEFINE_MUTEX(list_mutex);
+
+static void cap_kref_release(struct kref *kref)
+{
+	struct gb_cap *cap = container_of(kref, struct gb_cap, kref);
+
+	kfree(cap);
+}
+
+/*
+ * All users of cap take a reference (while holding list_mutex) before
+ * they get a pointer to play with.  The structure is freed only after
+ * the last user has dropped its reference.
+ */
+static void put_cap(struct gb_cap *cap)
+{
+	kref_put(&cap->kref, cap_kref_release);
+}
+
+/* Caller must call put_cap() after using struct gb_cap */
+static struct gb_cap *get_cap(struct cdev *cdev)
+{
+	struct gb_cap *cap;
+
+	mutex_lock(&list_mutex);
+
+	list_for_each_entry(cap, &cap_list, node) {
+		if (&cap->cdev == cdev) {
+			kref_get(&cap->kref);
+			goto unlock;
+		}
+	}
+
+	cap = NULL;
+
+unlock:
+	mutex_unlock(&list_mutex);
+
+	return cap;
+}
+
+static int cap_get_endpoint_uid(struct gb_cap *cap, u8 *euid)
+{
+	struct gb_connection *connection = cap->connection;
+	struct gb_cap_get_endpoint_uid_response response;
+	int ret;
+
+	ret = gb_operation_sync(connection, GB_CAP_TYPE_GET_ENDPOINT_UID, NULL,
+				0, &response, sizeof(response));
+	if (ret) {
+		dev_err(cap->parent, "failed to get endpoint uid (%d)\n", ret);
+		return ret;
+	}
+
+	memcpy(euid, response.uid, sizeof(response.uid));
+
+	return 0;
+}
+
+static int cap_get_ims_certificate(struct gb_cap *cap, u32 class, u32 id,
+				   u8 *certificate, u32 *size, u8 *result)
+{
+	struct gb_connection *connection = cap->connection;
+	struct gb_cap_get_ims_certificate_request *request;
+	struct gb_cap_get_ims_certificate_response *response;
+	size_t max_size = gb_operation_get_payload_size_max(connection);
+	struct gb_operation *op;
+	int ret;
+
+	op = gb_operation_create_flags(connection,
+				       GB_CAP_TYPE_GET_IMS_CERTIFICATE,
+				       sizeof(*request), max_size,
+				       GB_OPERATION_FLAG_SHORT_RESPONSE,
+				       GFP_KERNEL);
+	if (!op)
+		return -ENOMEM;
+
+	request = op->request->payload;
+	request->certificate_class = cpu_to_le32(class);
+	request->certificate_id = cpu_to_le32(id);
+
+	ret = gb_operation_request_send_sync(op);
+	if (ret) {
+		dev_err(cap->parent, "failed to get certificate (%d)\n", ret);
+		goto done;
+	}
+
+	response = op->response->payload;
+	*result = response->result_code;
+	*size = op->response->payload_size - sizeof(*response);
+	memcpy(certificate, response->certificate, *size);
+
+done:
+	gb_operation_put(op);
+	return ret;
+}
+
+static int cap_authenticate(struct gb_cap *cap, u32 auth_type, u8 *uid,
+			    u8 *challenge, u8 *result, u8 *auth_response,
+			    u32 *signature_size, u8 *signature)
+{
+	struct gb_connection *connection = cap->connection;
+	struct gb_cap_authenticate_request *request;
+	struct gb_cap_authenticate_response *response;
+	size_t max_size = gb_operation_get_payload_size_max(connection);
+	struct gb_operation *op;
+	int ret;
+
+	op = gb_operation_create_flags(connection, GB_CAP_TYPE_AUTHENTICATE,
+				       sizeof(*request), max_size,
+				       GB_OPERATION_FLAG_SHORT_RESPONSE,
+				       GFP_KERNEL);
+	if (!op)
+		return -ENOMEM;
+
+	request = op->request->payload;
+	request->auth_type = cpu_to_le32(auth_type);
+	memcpy(request->uid, uid, sizeof(request->uid));
+	memcpy(request->challenge, challenge, sizeof(request->challenge));
+
+	ret = gb_operation_request_send_sync(op);
+	if (ret) {
+		dev_err(cap->parent, "failed to authenticate (%d)\n", ret);
+		goto done;
+	}
+
+	response = op->response->payload;
+	*result = response->result_code;
+	*signature_size = op->response->payload_size - sizeof(*response);
+	memcpy(auth_response, response->response, sizeof(response->response));
+	memcpy(signature, response->signature, *signature_size);
+
+done:
+	gb_operation_put(op);
+	return ret;
+}
+
+/* Char device fops */
+
+static int cap_open(struct inode *inode, struct file *file)
+{
+	struct gb_cap *cap = get_cap(inode->i_cdev);
+
+	/* The cap structure can't be freed until the file descriptor is closed */
+	if (cap) {
+		file->private_data = cap;
+		return 0;
+	}
+
+	return -ENODEV;
+}
+
+static int cap_release(struct inode *inode, struct file *file)
+{
+	struct gb_cap *cap = file->private_data;
+
+	put_cap(cap);
+	return 0;
+}
+
+static int cap_ioctl(struct gb_cap *cap, unsigned int cmd,
+			 void __user *buf)
+{
+	struct cap_ioc_get_endpoint_uid endpoint_uid;
+	struct cap_ioc_get_ims_certificate *ims_cert;
+	struct cap_ioc_authenticate *authenticate;
+	size_t size;
+	int ret;
+
+	switch (cmd) {
+	case CAP_IOC_GET_ENDPOINT_UID:
+		ret = cap_get_endpoint_uid(cap, endpoint_uid.uid);
+		if (ret)
+			return ret;
+
+		if (copy_to_user(buf, &endpoint_uid, sizeof(endpoint_uid)))
+			return -EFAULT;
+
+		return 0;
+	case CAP_IOC_GET_IMS_CERTIFICATE:
+		size = sizeof(*ims_cert);
+		ims_cert = memdup_user(buf, size);
+		if (IS_ERR(ims_cert))
+			return PTR_ERR(ims_cert);
+
+		ret = cap_get_ims_certificate(cap, ims_cert->certificate_class,
+					      ims_cert->certificate_id,
+					      ims_cert->certificate,
+					      &ims_cert->cert_size,
+					      &ims_cert->result_code);
+		if (!ret && copy_to_user(buf, ims_cert, size))
+			ret = -EFAULT;
+		kfree(ims_cert);
+
+		return ret;
+	case CAP_IOC_AUTHENTICATE:
+		size = sizeof(*authenticate);
+		authenticate = memdup_user(buf, size);
+		if (IS_ERR(authenticate))
+			return PTR_ERR(authenticate);
+
+		ret = cap_authenticate(cap, authenticate->auth_type,
+				       authenticate->uid,
+				       authenticate->challenge,
+				       &authenticate->result_code,
+				       authenticate->response,
+				       &authenticate->signature_size,
+				       authenticate->signature);
+		if (!ret && copy_to_user(buf, authenticate, size))
+			ret = -EFAULT;
+		kfree(authenticate);
+
+		return ret;
+	default:
+		return -ENOTTY;
+	}
+}
+
+static long cap_ioctl_unlocked(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	struct gb_cap *cap = file->private_data;
+	struct gb_bundle *bundle = cap->connection->bundle;
+	int ret = -ENODEV;
+
+	/*
+	 * Serialize ioctls.
+	 *
+	 * We don't want the user to do multiple authentication operations in
+	 * parallel.
+	 *
+	 * This is also used to protect ->disabled, which is used to check if
+	 * the connection is getting disconnected, so that we don't start any
+	 * new operations.
+	 */
+	mutex_lock(&cap->mutex);
+	if (!cap->disabled) {
+		ret = gb_pm_runtime_get_sync(bundle);
+		if (!ret) {
+			ret = cap_ioctl(cap, cmd, (void __user *)arg);
+			gb_pm_runtime_put_autosuspend(bundle);
+		}
+	}
+	mutex_unlock(&cap->mutex);
+
+	return ret;
+}
+
+static const struct file_operations cap_fops = {
+	.owner		= THIS_MODULE,
+	.open		= cap_open,
+	.release	= cap_release,
+	.unlocked_ioctl	= cap_ioctl_unlocked,
+};
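+
+/*
+ * Userspace drives this protocol through the /dev/gb-authenticate-<N>
+ * character device registered below.  A minimal (hypothetical) caller
+ * of the endpoint-UID ioctl:
+ *
+ *	struct cap_ioc_get_endpoint_uid euid;
+ *	int fd = open("/dev/gb-authenticate-0", O_RDWR);
+ *
+ *	if (fd >= 0 && ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &euid) == 0)
+ *		euid.uid now holds the endpoint UID bytes
+ */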
+
+int gb_cap_connection_init(struct gb_connection *connection)
+{
+	struct gb_cap *cap;
+	int ret, minor;
+
+	if (!connection)
+		return 0;
+
+	cap = kzalloc(sizeof(*cap), GFP_KERNEL);
+	if (!cap)
+		return -ENOMEM;
+
+	cap->parent = &connection->bundle->dev;
+	cap->connection = connection;
+	mutex_init(&cap->mutex);
+	gb_connection_set_data(connection, cap);
+	kref_init(&cap->kref);
+
+	mutex_lock(&list_mutex);
+	list_add(&cap->node, &cap_list);
+	mutex_unlock(&list_mutex);
+
+	ret = gb_connection_enable(connection);
+	if (ret)
+		goto err_list_del;
+
+	minor = ida_simple_get(&cap_minors_map, 0, NUM_MINORS, GFP_KERNEL);
+	if (minor < 0) {
+		ret = minor;
+		goto err_connection_disable;
+	}
+
+	/* Add a char device to allow userspace to interact with cap */
+	cap->dev_num = MKDEV(MAJOR(cap_dev_num), minor);
+	cdev_init(&cap->cdev, &cap_fops);
+
+	ret = cdev_add(&cap->cdev, cap->dev_num, 1);
+	if (ret)
+		goto err_remove_ida;
+
+	/* Add a soft link to the previously added char-dev within the bundle */
+	cap->class_device = device_create(cap_class, cap->parent, cap->dev_num,
+					  NULL, "gb-authenticate-%d", minor);
+	if (IS_ERR(cap->class_device)) {
+		ret = PTR_ERR(cap->class_device);
+		goto err_del_cdev;
+	}
+
+	return 0;
+
+err_del_cdev:
+	cdev_del(&cap->cdev);
+err_remove_ida:
+	ida_simple_remove(&cap_minors_map, minor);
+err_connection_disable:
+	gb_connection_disable(connection);
+err_list_del:
+	mutex_lock(&list_mutex);
+	list_del(&cap->node);
+	mutex_unlock(&list_mutex);
+
+	put_cap(cap);
+
+	return ret;
+}
+
+void gb_cap_connection_exit(struct gb_connection *connection)
+{
+	struct gb_cap *cap;
+
+	if (!connection)
+		return;
+
+	cap = gb_connection_get_data(connection);
+
+	device_destroy(cap_class, cap->dev_num);
+	cdev_del(&cap->cdev);
+	ida_simple_remove(&cap_minors_map, MINOR(cap->dev_num));
+
+	/*
+	 * Disallow any new ioctl operations on the char device and wait for
+	 * existing ones to finish.
+	 */
+	mutex_lock(&cap->mutex);
+	cap->disabled = true;
+	mutex_unlock(&cap->mutex);
+
+	/* All pending greybus operations should have finished by now */
+	gb_connection_disable(cap->connection);
+
+	/* Keep new users from getting access to the cap structure */
+	mutex_lock(&list_mutex);
+	list_del(&cap->node);
+	mutex_unlock(&list_mutex);
+
+	/*
+	 * All current users of cap will have taken a reference to it by
+	 * now.  Drop our own reference; the structure is freed once the
+	 * last user drops theirs.
+	 */
+	put_cap(cap);
+}
+
+int cap_init(void)
+{
+	int ret;
+
+	cap_class = class_create(THIS_MODULE, "gb_authenticate");
+	if (IS_ERR(cap_class))
+		return PTR_ERR(cap_class);
+
+	ret = alloc_chrdev_region(&cap_dev_num, 0, NUM_MINORS,
+				  "gb_authenticate");
+	if (ret)
+		goto err_remove_class;
+
+	return 0;
+
+err_remove_class:
+	class_destroy(cap_class);
+	return ret;
+}
+
+void cap_exit(void)
+{
+	unregister_chrdev_region(cap_dev_num, NUM_MINORS);
+	class_destroy(cap_class);
+	ida_destroy(&cap_minors_map);
+}
--- /dev/null
+++ b/drivers/greybus/bundle.c
@@ -0,0 +1,253 @@
+/*
+ * Greybus bundles
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include "greybus.h"
+#include "greybus_trace.h"
+
+static ssize_t bundle_class_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct gb_bundle *bundle = to_gb_bundle(dev);
+
+	return sprintf(buf, "0x%02x\n", bundle->class);
+}
+static DEVICE_ATTR_RO(bundle_class);
+
+static ssize_t bundle_id_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct gb_bundle *bundle = to_gb_bundle(dev);
+
+	return sprintf(buf, "%u\n", bundle->id);
+}
+static DEVICE_ATTR_RO(bundle_id);
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct gb_bundle *bundle = to_gb_bundle(dev);
+
+	if (bundle->state == NULL)
+		return sprintf(buf, "\n");
+
+	return sprintf(buf, "%s\n", bundle->state);
+}
+
+static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t size)
+{
+	struct gb_bundle *bundle = to_gb_bundle(dev);
+
+	kfree(bundle->state);
+	bundle->state = kstrdup(buf, GFP_KERNEL);
+	if (!bundle->state)
+		return -ENOMEM;
+
+	/* Tell userspace that the file contents changed */
+	sysfs_notify(&bundle->dev.kobj, NULL, "state");
+
+	return size;
+}
+static DEVICE_ATTR_RW(state);
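+
+/*
+ * A write to the "state" attribute replaces the stored string and calls
+ * sysfs_notify(), so a process poll()ing the attribute wakes up when
+ * another writer (path below is illustrative) does e.g.:
+ *
+ *	echo active > /sys/bus/greybus/devices/<bundle>/state
+ */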
+
+static struct attribute *bundle_attrs[] = {
+	&dev_attr_bundle_class.attr,
+	&dev_attr_bundle_id.attr,
+	&dev_attr_state.attr,
+	NULL,
+};
+
+ATTRIBUTE_GROUPS(bundle);
+
+static struct gb_bundle *gb_bundle_find(struct gb_interface *intf,
+							u8 bundle_id)
+{
+	struct gb_bundle *bundle;
+
+	list_for_each_entry(bundle, &intf->bundles, links) {
+		if (bundle->id == bundle_id)
+			return bundle;
+	}
+
+	return NULL;
+}
+
+static void gb_bundle_release(struct device *dev)
+{
+	struct gb_bundle *bundle = to_gb_bundle(dev);
+
+	trace_gb_bundle_release(bundle);
+
+	kfree(bundle->state);
+	kfree(bundle->cport_desc);
+	kfree(bundle);
+}
+
+#ifdef CONFIG_PM
+static void gb_bundle_disable_all_connections(struct gb_bundle *bundle)
+{
+	struct gb_connection *connection;
+
+	list_for_each_entry(connection, &bundle->connections, bundle_links)
+		gb_connection_disable(connection);
+}
+
+static void gb_bundle_enable_all_connections(struct gb_bundle *bundle)
+{
+	struct gb_connection *connection;
+
+	list_for_each_entry(connection, &bundle->connections, bundle_links)
+		gb_connection_enable(connection);
+}
+
+static int gb_bundle_suspend(struct device *dev)
+{
+	struct gb_bundle *bundle = to_gb_bundle(dev);
+	const struct dev_pm_ops *pm = dev->driver->pm;
+	int ret;
+
+	if (pm && pm->runtime_suspend) {
+		ret = pm->runtime_suspend(&bundle->dev);
+		if (ret)
+			return ret;
+	} else {
+		gb_bundle_disable_all_connections(bundle);
+	}
+
+	ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id);
+	if (ret) {
+		if (pm && pm->runtime_resume)
+			ret = pm->runtime_resume(dev);
+		else
+			gb_bundle_enable_all_connections(bundle);
+
+		return ret;
+	}
+
+	return 0;
+}
+
+static int gb_bundle_resume(struct device *dev)
+{
+	struct gb_bundle *bundle = to_gb_bundle(dev);
+	const struct dev_pm_ops *pm = dev->driver->pm;
+	int ret;
+
+	ret = gb_control_bundle_resume(bundle->intf->control, bundle->id);
+	if (ret)
+		return ret;
+
+	if (pm && pm->runtime_resume) {
+		ret = pm->runtime_resume(dev);
+		if (ret)
+			return ret;
+	} else {
+		gb_bundle_enable_all_connections(bundle);
+	}
+
+	return 0;
+}
+
+static int gb_bundle_idle(struct device *dev)
+{
+	pm_runtime_mark_last_busy(dev);
+	pm_request_autosuspend(dev);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops gb_bundle_pm_ops = {
+	SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle)
+};
+
+struct device_type greybus_bundle_type = {
+	.name =		"greybus_bundle",
+	.release =	gb_bundle_release,
+	.pm =		&gb_bundle_pm_ops,
+};
+
+/*
+ * Create a gb_bundle structure to represent a discovered
+ * bundle.  Returns a pointer to the new bundle, or NULL on failure
+ * (invalid or duplicate bundle id, or memory exhaustion).
+ */
+struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
+				   u8 class)
+{
+	struct gb_bundle *bundle;
+
+	if (bundle_id == BUNDLE_ID_NONE) {
+		dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id);
+		return NULL;
+	}
+
+	/*
+	 * Reject any attempt to reuse a bundle id.  We initialize
+	 * these serially, so there's no need to worry about keeping
+	 * the interface bundle list locked here.
+	 */
+	if (gb_bundle_find(intf, bundle_id)) {
+		dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id);
+		return NULL;
+	}
+
+	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+	if (!bundle)
+		return NULL;
+
+	bundle->intf = intf;
+	bundle->id = bundle_id;
+	bundle->class = class;
+	INIT_LIST_HEAD(&bundle->connections);
+
+	bundle->dev.parent = &intf->dev;
+	bundle->dev.bus = &greybus_bus_type;
+	bundle->dev.type = &greybus_bundle_type;
+	bundle->dev.groups = bundle_groups;
+	bundle->dev.dma_mask = intf->dev.dma_mask;
+	device_initialize(&bundle->dev);
+	dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id);
+
+	list_add(&bundle->links, &intf->bundles);
+
+	trace_gb_bundle_create(bundle);
+
+	return bundle;
+}
+
+int gb_bundle_add(struct gb_bundle *bundle)
+{
+	int ret;
+
+	ret = device_add(&bundle->dev);
+	if (ret) {
+		dev_err(&bundle->dev, "failed to register bundle: %d\n", ret);
+		return ret;
+	}
+
+	trace_gb_bundle_add(bundle);
+
+	return 0;
+}
+
+/*
+ * Tear down a previously set up bundle.
+ */
+void gb_bundle_destroy(struct gb_bundle *bundle)
+{
+	trace_gb_bundle_destroy(bundle);
+
+	if (device_is_registered(&bundle->dev))
+		device_del(&bundle->dev);
+
+	list_del(&bundle->links);
+
+	put_device(&bundle->dev);
+}
--- /dev/null
+++ b/drivers/greybus/bundle.h
@@ -0,0 +1,90 @@
+/*
+ * Greybus bundles
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __BUNDLE_H
+#define __BUNDLE_H
+
+#include <linux/list.h>
+
+#define	BUNDLE_ID_NONE	U8_MAX
+
+/* Greybus "public" definitions */
+struct gb_bundle {
+	struct device		dev;
+	struct gb_interface	*intf;
+
+	u8			id;
+	u8			class;
+	u8			class_major;
+	u8			class_minor;
+
+	size_t			num_cports;
+	struct greybus_descriptor_cport *cport_desc;
+
+	struct list_head	connections;
+	u8			*state;
+
+	struct list_head	links;	/* interface->bundles */
+};
+#define to_gb_bundle(d) container_of(d, struct gb_bundle, dev)
+
+/* Greybus "private" definitions */
+struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
+				   u8 class);
+int gb_bundle_add(struct gb_bundle *bundle);
+void gb_bundle_destroy(struct gb_bundle *bundle);
+
+/* Bundle Runtime PM wrappers */
+#ifdef CONFIG_PM
+static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
+{
+	int retval;
+
+	retval = pm_runtime_get_sync(&bundle->dev);
+	if (retval < 0) {
+		dev_err(&bundle->dev,
+			"pm_runtime_get_sync failed: %d\n", retval);
+		pm_runtime_put_noidle(&bundle->dev);
+		return retval;
+	}
+
+	return 0;
+}
+
+static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
+{
+	int retval;
+
+	pm_runtime_mark_last_busy(&bundle->dev);
+	retval = pm_runtime_put_autosuspend(&bundle->dev);
+
+	return retval;
+}
+
+static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle)
+{
+	pm_runtime_get_noresume(&bundle->dev);
+}
+
+static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle)
+{
+	pm_runtime_put_noidle(&bundle->dev);
+}
+
+#else
+static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
+{ return 0; }
+static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
+{ return 0; }
+
+static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) {}
+static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) {}
+#endif
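+
+/*
+ * Typical use in a bundle driver (see cap_ioctl_unlocked() in
+ * authentication.c for an example): bracket outgoing operations with
+ *
+ *	ret = gb_pm_runtime_get_sync(bundle);
+ *	if (ret)
+ *		return ret;
+ *	... greybus I/O ...
+ *	gb_pm_runtime_put_autosuspend(bundle);
+ */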
+
+#endif /* __BUNDLE_H */
--- /dev/null
+++ b/drivers/greybus/connection.c
@@ -0,0 +1,938 @@
+/*
+ * Greybus connections
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/workqueue.h>
+
+#include "greybus.h"
+#include "greybus_trace.h"
+
+#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT	1000
+
+static void gb_connection_kref_release(struct kref *kref);
+
+static DEFINE_SPINLOCK(gb_connections_lock);
+static DEFINE_MUTEX(gb_connection_mutex);
+
+/* Caller holds gb_connection_mutex. */
+static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
+{
+	struct gb_host_device *hd = intf->hd;
+	struct gb_connection *connection;
+
+	list_for_each_entry(connection, &hd->connections, hd_links) {
+		if (connection->intf == intf &&
+				connection->intf_cport_id == cport_id)
+			return true;
+	}
+
+	return false;
+}
+
+static void gb_connection_get(struct gb_connection *connection)
+{
+	kref_get(&connection->kref);
+
+	trace_gb_connection_get(connection);
+}
+
+static void gb_connection_put(struct gb_connection *connection)
+{
+	trace_gb_connection_put(connection);
+
+	kref_put(&connection->kref, gb_connection_kref_release);
+}
+
+/*
+ * Returns a reference-counted pointer to the connection if found.
+ */
+static struct gb_connection *
+gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
+{
+	struct gb_connection *connection;
+	unsigned long flags;
+
+	spin_lock_irqsave(&gb_connections_lock, flags);
+	list_for_each_entry(connection, &hd->connections, hd_links)
+		if (connection->hd_cport_id == cport_id) {
+			gb_connection_get(connection);
+			goto found;
+		}
+	connection = NULL;
+found:
+	spin_unlock_irqrestore(&gb_connections_lock, flags);
+
+	return connection;
+}
+
+/*
+ * Callback from the host driver to let us know that data has been
+ * received on a CPort.
+ */
+void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
+			u8 *data, size_t length)
+{
+	struct gb_connection *connection;
+
+	trace_gb_hd_in(hd);
+
+	connection = gb_connection_hd_find(hd, cport_id);
+	if (!connection) {
+		dev_err(&hd->dev,
+			"nonexistent connection (%zu bytes dropped)\n", length);
+		return;
+	}
+	gb_connection_recv(connection, data, length);
+	gb_connection_put(connection);
+}
+EXPORT_SYMBOL_GPL(greybus_data_rcvd);
+
+static void gb_connection_kref_release(struct kref *kref)
+{
+	struct gb_connection *connection;
+
+	connection = container_of(kref, struct gb_connection, kref);
+
+	trace_gb_connection_release(connection);
+
+	kfree(connection);
+}
+
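+/*
+ * The connection name is "<hd_cport>/<intf>:<cport>": a connection from
+ * host cport 4 to cport 2 of interface 1 is named "4/1:2"; static
+ * connections (no interface) come out as "4/0:0".
+ */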
+static void gb_connection_init_name(struct gb_connection *connection)
+{
+	u16 hd_cport_id = connection->hd_cport_id;
+	u16 cport_id = 0;
+	u8 intf_id = 0;
+
+	if (connection->intf) {
+		intf_id = connection->intf->interface_id;
+		cport_id = connection->intf_cport_id;
+	}
+
+	snprintf(connection->name, sizeof(connection->name),
+			"%u/%u:%u", hd_cport_id, intf_id, cport_id);
+}
+
+/*
+ * _gb_connection_create() - create a Greybus connection
+ * @hd:			host device of the connection
+ * @hd_cport_id:	host-device cport id, or -1 for dynamic allocation
+ * @intf:		remote interface, or NULL for static connections
+ * @bundle:		remote-interface bundle (may be NULL)
+ * @cport_id:		remote-interface cport id, or 0 for static connections
+ * @handler:		request handler (may be NULL)
+ * @flags:		connection flags
+ *
+ * Create a Greybus connection, representing the bidirectional link
+ * between a CPort on a (local) Greybus host device and a CPort on
+ * another Greybus interface.
+ *
+ * A connection also maintains the state of operations sent over the
+ * connection.
+ *
+ * Serialised against concurrent create and destroy using the
+ * gb_connection_mutex.
+ *
+ * Return: A pointer to the new connection if successful, or an ERR_PTR
+ * otherwise.
+ */
+static struct gb_connection *
+_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
+				struct gb_interface *intf,
+				struct gb_bundle *bundle, int cport_id,
+				gb_request_handler_t handler,
+				unsigned long flags)
+{
+	struct gb_connection *connection;
+	int ret;
+
+	mutex_lock(&gb_connection_mutex);
+
+	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
+		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
+		ret = -EBUSY;
+		goto err_unlock;
+	}
+
+	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
+	if (ret < 0) {
+		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
+		goto err_unlock;
+	}
+	hd_cport_id = ret;
+
+	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
+	if (!connection) {
+		ret = -ENOMEM;
+		goto err_hd_cport_release;
+	}
+
+	connection->hd_cport_id = hd_cport_id;
+	connection->intf_cport_id = cport_id;
+	connection->hd = hd;
+	connection->intf = intf;
+	connection->bundle = bundle;
+	connection->handler = handler;
+	connection->flags = flags;
+	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
+		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
+	connection->state = GB_CONNECTION_STATE_DISABLED;
+
+	atomic_set(&connection->op_cycle, 0);
+	mutex_init(&connection->mutex);
+	spin_lock_init(&connection->lock);
+	INIT_LIST_HEAD(&connection->operations);
+
+	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
+					 dev_name(&hd->dev), hd_cport_id);
+	if (!connection->wq) {
+		ret = -ENOMEM;
+		goto err_free_connection;
+	}
+
+	kref_init(&connection->kref);
+
+	gb_connection_init_name(connection);
+
+	spin_lock_irq(&gb_connections_lock);
+	list_add(&connection->hd_links, &hd->connections);
+
+	if (bundle)
+		list_add(&connection->bundle_links, &bundle->connections);
+	else
+		INIT_LIST_HEAD(&connection->bundle_links);
+
+	spin_unlock_irq(&gb_connections_lock);
+
+	mutex_unlock(&gb_connection_mutex);
+
+	trace_gb_connection_create(connection);
+
+	return connection;
+
+err_free_connection:
+	kfree(connection);
+err_hd_cport_release:
+	gb_hd_cport_release(hd, hd_cport_id);
+err_unlock:
+	mutex_unlock(&gb_connection_mutex);
+
+	return ERR_PTR(ret);
+}
+
+struct gb_connection *
+gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
+					gb_request_handler_t handler)
+{
+	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
+					GB_CONNECTION_FLAG_HIGH_PRIO);
+}
+
+struct gb_connection *
+gb_connection_create_control(struct gb_interface *intf)
+{
+	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
+					GB_CONNECTION_FLAG_CONTROL |
+					GB_CONNECTION_FLAG_HIGH_PRIO);
+}
+
+struct gb_connection *
+gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
+					gb_request_handler_t handler)
+{
+	struct gb_interface *intf = bundle->intf;
+
+	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
+					handler, 0);
+}
+EXPORT_SYMBOL_GPL(gb_connection_create);
+
+struct gb_connection *
+gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
+					gb_request_handler_t handler,
+					unsigned long flags)
+{
+	struct gb_interface *intf = bundle->intf;
+
+	if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
+		flags &= ~GB_CONNECTION_FLAG_CORE_MASK;
+
+	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
+					handler, flags);
+}
+EXPORT_SYMBOL_GPL(gb_connection_create_flags);
+
+struct gb_connection *
+gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
+					unsigned long flags)
+{
+	flags |= GB_CONNECTION_FLAG_OFFLOADED;
+
+	return gb_connection_create_flags(bundle, cport_id, NULL, flags);
+}
+EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
+
+static int gb_connection_hd_cport_enable(struct gb_connection *connection)
+{
+	struct gb_host_device *hd = connection->hd;
+	int ret;
+
+	if (!hd->driver->cport_enable)
+		return 0;
+
+	ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
+					connection->flags);
+	if (ret) {
+		dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
+				connection->name, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void gb_connection_hd_cport_disable(struct gb_connection *connection)
+{
+	struct gb_host_device *hd = connection->hd;
+	int ret;
+
+	if (!hd->driver->cport_disable)
+		return;
+
+	ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
+	if (ret) {
+		dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
+				connection->name, ret);
+	}
+}
+
+static int gb_connection_hd_cport_connected(struct gb_connection *connection)
+{
+	struct gb_host_device *hd = connection->hd;
+	int ret;
+
+	if (!hd->driver->cport_connected)
+		return 0;
+
+	ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
+	if (ret) {
+		dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
+				connection->name, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int gb_connection_hd_cport_flush(struct gb_connection *connection)
+{
+	struct gb_host_device *hd = connection->hd;
+	int ret;
+
+	if (!hd->driver->cport_flush)
+		return 0;
+
+	ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
+	if (ret) {
+		dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
+				connection->name, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
+{
+	struct gb_host_device *hd = connection->hd;
+	size_t peer_space;
+	int ret;
+
+	peer_space = sizeof(struct gb_operation_msg_hdr) +
+			sizeof(struct gb_cport_shutdown_request);
+
+	if (connection->mode_switch)
+		peer_space += sizeof(struct gb_operation_msg_hdr);
+
+	ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
+					peer_space,
+					GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
+	if (ret) {
+		dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
+				connection->name, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int gb_connection_hd_cport_clear(struct gb_connection *connection)
+{
+	struct gb_host_device *hd = connection->hd;
+	int ret;
+
+	ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
+	if (ret) {
+		dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
+				connection->name, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Request the SVC to create a connection from AP's cport to interface's
+ * cport.
+ */
+static int
+gb_connection_svc_connection_create(struct gb_connection *connection)
+{
+	struct gb_host_device *hd = connection->hd;
+	struct gb_interface *intf;
+	u8 cport_flags;
+	int ret;
+
+	if (gb_connection_is_static(connection))
+		return 0;
+
+	intf = connection->intf;
+
+	/*
+	 * Enable either E2EFC or CSD, unless no flow control is requested.
+	 */
+	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
+	if (gb_connection_flow_control_disabled(connection)) {
+		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
+	} else if (gb_connection_e2efc_enabled(connection)) {
+		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
+				GB_SVC_CPORT_FLAG_E2EFC;
+	}
+
+	ret = gb_svc_connection_create(hd->svc,
+			hd->svc->ap_intf_id,
+			connection->hd_cport_id,
+			intf->interface_id,
+			connection->intf_cport_id,
+			cport_flags);
+	if (ret) {
+		dev_err(&connection->hd->dev,
+			"%s: failed to create svc connection: %d\n",
+			connection->name, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void
+gb_connection_svc_connection_destroy(struct gb_connection *connection)
+{
+	if (gb_connection_is_static(connection))
+		return;
+
+	gb_svc_connection_destroy(connection->hd->svc,
+				  connection->hd->svc->ap_intf_id,
+				  connection->hd_cport_id,
+				  connection->intf->interface_id,
+				  connection->intf_cport_id);
+}
+
+/* Inform Interface about active CPorts */
+static int gb_connection_control_connected(struct gb_connection *connection)
+{
+	struct gb_control *control;
+	u16 cport_id = connection->intf_cport_id;
+	int ret;
+
+	if (gb_connection_is_static(connection))
+		return 0;
+
+	if (gb_connection_is_control(connection))
+		return 0;
+
+	control = connection->intf->control;
+
+	ret = gb_control_connected_operation(control, cport_id);
+	if (ret) {
+		dev_err(&connection->bundle->dev,
+			"failed to connect cport: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void
+gb_connection_control_disconnecting(struct gb_connection *connection)
+{
+	struct gb_control *control;
+	u16 cport_id = connection->intf_cport_id;
+	int ret;
+
+	if (gb_connection_is_static(connection))
+		return;
+
+	control = connection->intf->control;
+
+	ret = gb_control_disconnecting_operation(control, cport_id);
+	if (ret) {
+		dev_err(&connection->hd->dev,
+				"%s: failed to send disconnecting: %d\n",
+				connection->name, ret);
+	}
+}
+
+static void
+gb_connection_control_disconnected(struct gb_connection *connection)
+{
+	struct gb_control *control;
+	u16 cport_id = connection->intf_cport_id;
+	int ret;
+
+	if (gb_connection_is_static(connection))
+		return;
+
+	control = connection->intf->control;
+
+	if (gb_connection_is_control(connection)) {
+		if (connection->mode_switch) {
+			ret = gb_control_mode_switch_operation(control);
+			if (ret) {
+				/*
+				 * Allow mode switch to time out waiting for
+				 * mailbox event.
+				 */
+				return;
+			}
+		}
+
+		return;
+	}
+
+	ret = gb_control_disconnected_operation(control, cport_id);
+	if (ret) {
+		dev_warn(&connection->bundle->dev,
+			 "failed to disconnect cport: %d\n", ret);
+	}
+}
+
+static int gb_connection_shutdown_operation(struct gb_connection *connection,
+						u8 phase)
+{
+	struct gb_cport_shutdown_request *req;
+	struct gb_operation *operation;
+	int ret;
+
+	operation = gb_operation_create_core(connection,
+						GB_REQUEST_TYPE_CPORT_SHUTDOWN,
+						sizeof(*req), 0, 0,
+						GFP_KERNEL);
+	if (!operation)
+		return -ENOMEM;
+
+	req = operation->request->payload;
+	req->phase = phase;
+
+	ret = gb_operation_request_send_sync(operation);
+
+	gb_operation_put(operation);
+
+	return ret;
+}
+
+static int gb_connection_cport_shutdown(struct gb_connection *connection,
+					u8 phase)
+{
+	struct gb_host_device *hd = connection->hd;
+	const struct gb_hd_driver *drv = hd->driver;
+	int ret;
+
+	if (gb_connection_is_static(connection))
+		return 0;
+
+	if (gb_connection_is_offloaded(connection)) {
+		if (!drv->cport_shutdown)
+			return 0;
+
+		ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
+						GB_OPERATION_TIMEOUT_DEFAULT);
+	} else {
+		ret = gb_connection_shutdown_operation(connection, phase);
+	}
+
+	if (ret) {
+		dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
+				connection->name, phase, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
+{
+	return gb_connection_cport_shutdown(connection, 1);
+}
+
+static int
+gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
+{
+	return gb_connection_cport_shutdown(connection, 2);
+}
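+
+/*
+ * CPort shutdown is a two-phase handshake: phase 1 is sent before the
+ * host cport is quiesced and phase 2 after it; see
+ * gb_connection_disable() below for where these fit in the full
+ * teardown sequence.
+ */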
+
+/*
+ * Cancel all active operations on a connection.
+ *
+ * Locking: Called with connection lock held and state set to DISABLED or
+ * DISCONNECTING.
+ */
+static void gb_connection_cancel_operations(struct gb_connection *connection,
+						int errno)
+	__must_hold(&connection->lock)
+{
+	struct gb_operation *operation;
+
+	while (!list_empty(&connection->operations)) {
+		operation = list_last_entry(&connection->operations,
+						struct gb_operation, links);
+		gb_operation_get(operation);
+		spin_unlock_irq(&connection->lock);
+
+		if (gb_operation_is_incoming(operation))
+			gb_operation_cancel_incoming(operation, errno);
+		else
+			gb_operation_cancel(operation, errno);
+
+		gb_operation_put(operation);
+
+		spin_lock_irq(&connection->lock);
+	}
+}
+
+/*
+ * Cancel all active incoming operations on a connection.
+ *
+ * Locking: Called with connection lock held and state set to ENABLED_TX.
+ */
+static void
+gb_connection_flush_incoming_operations(struct gb_connection *connection,
+						int errno)
+	__must_hold(&connection->lock)
+{
+	struct gb_operation *operation;
+	bool incoming;
+
+	while (!list_empty(&connection->operations)) {
+		incoming = false;
+		list_for_each_entry(operation, &connection->operations,
+								links) {
+			if (gb_operation_is_incoming(operation)) {
+				gb_operation_get(operation);
+				incoming = true;
+				break;
+			}
+		}
+
+		if (!incoming)
+			break;
+
+		spin_unlock_irq(&connection->lock);
+
+		/* FIXME: flush, not cancel? */
+		gb_operation_cancel_incoming(operation, errno);
+		gb_operation_put(operation);
+
+		spin_lock_irq(&connection->lock);
+	}
+}
+
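+/*
+ * Summary of the connection state machine implemented by the helpers
+ * below:
+ *
+ *	DISABLED   -> ENABLED      gb_connection_enable()
+ *	DISABLED   -> ENABLED_TX   gb_connection_enable_tx()
+ *	ENABLED_TX -> ENABLED      gb_connection_enable()
+ *	ENABLED    -> ENABLED_TX   gb_connection_disable_rx()
+ *	any        -> DISABLED     gb_connection_disable() (via
+ *				   DISCONNECTING)
+ */
+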
+/*
+ * _gb_connection_enable() - enable a connection
+ * @connection:		connection to enable
+ * @rx:			whether to enable incoming requests
+ *
+ * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
+ * ENABLED_TX->ENABLED state transitions.
+ *
+ * Locking: Caller holds connection->mutex.
+ */
+static int _gb_connection_enable(struct gb_connection *connection, bool rx)
+{
+	int ret;
+
+	/* Handle ENABLED_TX -> ENABLED transitions. */
+	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
+		if (!(connection->handler && rx))
+			return 0;
+
+		spin_lock_irq(&connection->lock);
+		connection->state = GB_CONNECTION_STATE_ENABLED;
+		spin_unlock_irq(&connection->lock);
+
+		return 0;
+	}
+
+	ret = gb_connection_hd_cport_enable(connection);
+	if (ret)
+		return ret;
+
+	ret = gb_connection_svc_connection_create(connection);
+	if (ret)
+		goto err_hd_cport_clear;
+
+	ret = gb_connection_hd_cport_connected(connection);
+	if (ret)
+		goto err_svc_connection_destroy;
+
+	spin_lock_irq(&connection->lock);
+	if (connection->handler && rx)
+		connection->state = GB_CONNECTION_STATE_ENABLED;
+	else
+		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
+	spin_unlock_irq(&connection->lock);
+
+	ret = gb_connection_control_connected(connection);
+	if (ret)
+		goto err_control_disconnecting;
+
+	return 0;
+
+err_control_disconnecting:
+	spin_lock_irq(&connection->lock);
+	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
+	gb_connection_cancel_operations(connection, -ESHUTDOWN);
+	spin_unlock_irq(&connection->lock);
+
+	/* Transmit queue should already be empty. */
+	gb_connection_hd_cport_flush(connection);
+
+	gb_connection_control_disconnecting(connection);
+	gb_connection_cport_shutdown_phase_1(connection);
+	gb_connection_hd_cport_quiesce(connection);
+	gb_connection_cport_shutdown_phase_2(connection);
+	gb_connection_control_disconnected(connection);
+	connection->state = GB_CONNECTION_STATE_DISABLED;
+err_svc_connection_destroy:
+	gb_connection_svc_connection_destroy(connection);
+err_hd_cport_clear:
+	gb_connection_hd_cport_clear(connection);
+
+	gb_connection_hd_cport_disable(connection);
+
+	return ret;
+}
+
+int gb_connection_enable(struct gb_connection *connection)
+{
+	int ret = 0;
+
+	mutex_lock(&connection->mutex);
+
+	if (connection->state == GB_CONNECTION_STATE_ENABLED)
+		goto out_unlock;
+
+	ret = _gb_connection_enable(connection, true);
+	if (!ret)
+		trace_gb_connection_enable(connection);
+
+out_unlock:
+	mutex_unlock(&connection->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(gb_connection_enable);
+
+int gb_connection_enable_tx(struct gb_connection *connection)
+{
+	int ret = 0;
+
+	mutex_lock(&connection->mutex);
+
+	if (connection->state == GB_CONNECTION_STATE_ENABLED) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
+		goto out_unlock;
+
+	ret = _gb_connection_enable(connection, false);
+	if (!ret)
+		trace_gb_connection_enable(connection);
+
+out_unlock:
+	mutex_unlock(&connection->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
+
+void gb_connection_disable_rx(struct gb_connection *connection)
+{
+	mutex_lock(&connection->mutex);
+
+	spin_lock_irq(&connection->lock);
+	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
+		spin_unlock_irq(&connection->lock);
+		goto out_unlock;
+	}
+	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
+	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
+	spin_unlock_irq(&connection->lock);
+
+	trace_gb_connection_disable(connection);
+
+out_unlock:
+	mutex_unlock(&connection->mutex);
+}
+EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
+
+void gb_connection_mode_switch_prepare(struct gb_connection *connection)
+{
+	connection->mode_switch = true;
+}
+
+void gb_connection_mode_switch_complete(struct gb_connection *connection)
+{
+	gb_connection_svc_connection_destroy(connection);
+	gb_connection_hd_cport_clear(connection);
+
+	gb_connection_hd_cport_disable(connection);
+
+	connection->mode_switch = false;
+}
+
+void gb_connection_disable(struct gb_connection *connection)
+{
+	mutex_lock(&connection->mutex);
+
+	if (connection->state == GB_CONNECTION_STATE_DISABLED)
+		goto out_unlock;
+
+	trace_gb_connection_disable(connection);
+
+	spin_lock_irq(&connection->lock);
+	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
+	gb_connection_cancel_operations(connection, -ESHUTDOWN);
+	spin_unlock_irq(&connection->lock);
+
+	gb_connection_hd_cport_flush(connection);
+
+	gb_connection_control_disconnecting(connection);
+	gb_connection_cport_shutdown_phase_1(connection);
+	gb_connection_hd_cport_quiesce(connection);
+	gb_connection_cport_shutdown_phase_2(connection);
+	gb_connection_control_disconnected(connection);
+
+	connection->state = GB_CONNECTION_STATE_DISABLED;
+
+	/* control-connection tear down is deferred when mode switching */
+	if (!connection->mode_switch) {
+		gb_connection_svc_connection_destroy(connection);
+		gb_connection_hd_cport_clear(connection);
+
+		gb_connection_hd_cport_disable(connection);
+	}
+
+out_unlock:
+	mutex_unlock(&connection->mutex);
+}
+EXPORT_SYMBOL_GPL(gb_connection_disable);
+
+/* Disable a connection without communicating with the remote end. */
+void gb_connection_disable_forced(struct gb_connection *connection)
+{
+	mutex_lock(&connection->mutex);
+
+	if (connection->state == GB_CONNECTION_STATE_DISABLED)
+		goto out_unlock;
+
+	trace_gb_connection_disable(connection);
+
+	spin_lock_irq(&connection->lock);
+	connection->state = GB_CONNECTION_STATE_DISABLED;
+	gb_connection_cancel_operations(connection, -ESHUTDOWN);
+	spin_unlock_irq(&connection->lock);
+
+	gb_connection_hd_cport_flush(connection);
+
+	gb_connection_svc_connection_destroy(connection);
+	gb_connection_hd_cport_clear(connection);
+
+	gb_connection_hd_cport_disable(connection);
+out_unlock:
+	mutex_unlock(&connection->mutex);
+}
+EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
+
+/* Caller must have disabled the connection before destroying it. */
+void gb_connection_destroy(struct gb_connection *connection)
+{
+	if (!connection)
+		return;
+
+	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
+		gb_connection_disable(connection);
+
+	mutex_lock(&gb_connection_mutex);
+
+	spin_lock_irq(&gb_connections_lock);
+	list_del(&connection->bundle_links);
+	list_del(&connection->hd_links);
+	spin_unlock_irq(&gb_connections_lock);
+
+	destroy_workqueue(connection->wq);
+
+	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
+	connection->hd_cport_id = CPORT_ID_BAD;
+
+	mutex_unlock(&gb_connection_mutex);
+
+	gb_connection_put(connection);
+}
+EXPORT_SYMBOL_GPL(gb_connection_destroy);
+
+void gb_connection_latency_tag_enable(struct gb_connection *connection)
+{
+	struct gb_host_device *hd = connection->hd;
+	int ret;
+
+	if (!hd->driver->latency_tag_enable)
+		return;
+
+	ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
+	if (ret) {
+		dev_err(&connection->hd->dev,
+			"%s: failed to enable latency tag: %d\n",
+			connection->name, ret);
+	}
+}
+EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
+
+void gb_connection_latency_tag_disable(struct gb_connection *connection)
+{
+	struct gb_host_device *hd = connection->hd;
+	int ret;
+
+	if (!hd->driver->latency_tag_disable)
+		return;
+
+	ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
+	if (ret) {
+		dev_err(&connection->hd->dev,
+			"%s: failed to disable latency tag: %d\n",
+			connection->name, ret);
+	}
+}
+EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);
--- /dev/null
+++ b/drivers/greybus/connection.h
@@ -0,0 +1,129 @@
+/*
+ * Greybus connections
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __CONNECTION_H
+#define __CONNECTION_H
+
+#include <linux/list.h>
+#include <linux/kfifo.h>
+
+#define GB_CONNECTION_FLAG_CSD		BIT(0)
+#define GB_CONNECTION_FLAG_NO_FLOWCTRL	BIT(1)
+#define GB_CONNECTION_FLAG_OFFLOADED	BIT(2)
+#define GB_CONNECTION_FLAG_CDSI1	BIT(3)
+#define GB_CONNECTION_FLAG_CONTROL	BIT(4)
+#define GB_CONNECTION_FLAG_HIGH_PRIO	BIT(5)
+
+#define GB_CONNECTION_FLAG_CORE_MASK	GB_CONNECTION_FLAG_CONTROL
+
+enum gb_connection_state {
+	GB_CONNECTION_STATE_DISABLED		= 0,
+	GB_CONNECTION_STATE_ENABLED_TX		= 1,
+	GB_CONNECTION_STATE_ENABLED		= 2,
+	GB_CONNECTION_STATE_DISCONNECTING	= 3,
+};
+
+struct gb_operation;
+
+typedef int (*gb_request_handler_t)(struct gb_operation *);
+
+struct gb_connection {
+	struct gb_host_device		*hd;
+	struct gb_interface		*intf;
+	struct gb_bundle		*bundle;
+	struct kref			kref;
+	u16				hd_cport_id;
+	u16				intf_cport_id;
+
+	struct list_head		hd_links;
+	struct list_head		bundle_links;
+
+	gb_request_handler_t		handler;
+	unsigned long			flags;
+
+	struct mutex			mutex;
+	spinlock_t			lock;
+	enum gb_connection_state	state;
+	struct list_head		operations;
+
+	char				name[16];
+	struct workqueue_struct		*wq;
+
+	atomic_t			op_cycle;
+
+	void				*private;
+
+	bool				mode_switch;
+};
+
+struct gb_connection *gb_connection_create_static(struct gb_host_device *hd,
+				u16 hd_cport_id, gb_request_handler_t handler);
+struct gb_connection *gb_connection_create_control(struct gb_interface *intf);
+struct gb_connection *gb_connection_create(struct gb_bundle *bundle,
+				u16 cport_id, gb_request_handler_t handler);
+struct gb_connection *gb_connection_create_flags(struct gb_bundle *bundle,
+				u16 cport_id, gb_request_handler_t handler,
+				unsigned long flags);
+struct gb_connection *gb_connection_create_offloaded(struct gb_bundle *bundle,
+				u16 cport_id, unsigned long flags);
+void gb_connection_destroy(struct gb_connection *connection);
+
+static inline bool gb_connection_is_static(struct gb_connection *connection)
+{
+	return !connection->intf;
+}
+
+int gb_connection_enable(struct gb_connection *connection);
+int gb_connection_enable_tx(struct gb_connection *connection);
+void gb_connection_disable_rx(struct gb_connection *connection);
+void gb_connection_disable(struct gb_connection *connection);
+void gb_connection_disable_forced(struct gb_connection *connection);
+
+void gb_connection_mode_switch_prepare(struct gb_connection *connection);
+void gb_connection_mode_switch_complete(struct gb_connection *connection);
+
+void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
+			u8 *data, size_t length);
+
+void gb_connection_latency_tag_enable(struct gb_connection *connection);
+void gb_connection_latency_tag_disable(struct gb_connection *connection);
+
+static inline bool gb_connection_e2efc_enabled(struct gb_connection *connection)
+{
+	return !(connection->flags & GB_CONNECTION_FLAG_CSD);
+}
+
+static inline bool
+gb_connection_flow_control_disabled(struct gb_connection *connection)
+{
+	return connection->flags & GB_CONNECTION_FLAG_NO_FLOWCTRL;
+}
+
+static inline bool gb_connection_is_offloaded(struct gb_connection *connection)
+{
+	return connection->flags & GB_CONNECTION_FLAG_OFFLOADED;
+}
+
+static inline bool gb_connection_is_control(struct gb_connection *connection)
+{
+	return connection->flags & GB_CONNECTION_FLAG_CONTROL;
+}
+
+static inline void *gb_connection_get_data(struct gb_connection *connection)
+{
+	return connection->private;
+}
+
+static inline void gb_connection_set_data(struct gb_connection *connection,
+					  void *data)
+{
+	connection->private = data;
+}
+
+#endif /* __CONNECTION_H */
--- /dev/null
+++ b/drivers/greybus/control.c
@@ -0,0 +1,635 @@
+/*
+ * Greybus CPort control protocol.
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "greybus.h"
+
+/* Highest control-protocol version supported */
+#define GB_CONTROL_VERSION_MAJOR	0
+#define GB_CONTROL_VERSION_MINOR	1
+
+static int gb_control_get_version(struct gb_control *control)
+{
+	struct gb_interface *intf = control->connection->intf;
+	struct gb_control_version_request request;
+	struct gb_control_version_response response;
+	int ret;
+
+	request.major = GB_CONTROL_VERSION_MAJOR;
+	request.minor = GB_CONTROL_VERSION_MINOR;
+
+	ret = gb_operation_sync(control->connection,
+				GB_CONTROL_TYPE_VERSION,
+				&request, sizeof(request), &response,
+				sizeof(response));
+	if (ret) {
+		dev_err(&intf->dev,
+				"failed to get control-protocol version: %d\n",
+				ret);
+		return ret;
+	}
+
+	if (response.major > request.major) {
+		dev_err(&intf->dev,
+				"unsupported major control-protocol version (%u > %u)\n",
+				response.major, request.major);
+		return -ENOTSUPP;
+	}
+
+	control->protocol_major = response.major;
+	control->protocol_minor = response.minor;
+
+	dev_dbg(&intf->dev, "%s - %u.%u\n", __func__, response.major,
+			response.minor);
+
+	return 0;
+}
+
+static int gb_control_get_bundle_version(struct gb_control *control,
+						struct gb_bundle *bundle)
+{
+	struct gb_interface *intf = control->connection->intf;
+	struct gb_control_bundle_version_request request;
+	struct gb_control_bundle_version_response response;
+	int ret;
+
+	request.bundle_id = bundle->id;
+
+	ret = gb_operation_sync(control->connection,
+				GB_CONTROL_TYPE_BUNDLE_VERSION,
+				&request, sizeof(request),
+				&response, sizeof(response));
+	if (ret) {
+		dev_err(&intf->dev,
+				"failed to get bundle %u class version: %d\n",
+				bundle->id, ret);
+		return ret;
+	}
+
+	bundle->class_major = response.major;
+	bundle->class_minor = response.minor;
+
+	dev_dbg(&intf->dev, "%s - %u: %u.%u\n", __func__, bundle->id,
+			response.major, response.minor);
+
+	return 0;
+}
+
+int gb_control_get_bundle_versions(struct gb_control *control)
+{
+	struct gb_interface *intf = control->connection->intf;
+	struct gb_bundle *bundle;
+	int ret;
+
+	if (!control->has_bundle_version)
+		return 0;
+
+	list_for_each_entry(bundle, &intf->bundles, links) {
+		ret = gb_control_get_bundle_version(control, bundle);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/* Get the manifest size from the interface */
+int gb_control_get_manifest_size_operation(struct gb_interface *intf)
+{
+	struct gb_control_get_manifest_size_response response;
+	struct gb_connection *connection = intf->control->connection;
+	int ret;
+
+	ret = gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST_SIZE,
+				NULL, 0, &response, sizeof(response));
+	if (ret) {
+		dev_err(&connection->intf->dev,
+				"failed to get manifest size: %d\n", ret);
+		return ret;
+	}
+
+	return le16_to_cpu(response.size);
+}
+
+/* Read the manifest from the interface */
+int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
+				      size_t size)
+{
+	struct gb_connection *connection = intf->control->connection;
+
+	return gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST,
+				NULL, 0, manifest, size);
+}
+
+int gb_control_connected_operation(struct gb_control *control, u16 cport_id)
+{
+	struct gb_control_connected_request request;
+
+	request.cport_id = cpu_to_le16(cport_id);
+	return gb_operation_sync(control->connection, GB_CONTROL_TYPE_CONNECTED,
+				 &request, sizeof(request), NULL, 0);
+}
+
+int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id)
+{
+	struct gb_control_disconnected_request request;
+
+	request.cport_id = cpu_to_le16(cport_id);
+	return gb_operation_sync(control->connection,
+				 GB_CONTROL_TYPE_DISCONNECTED, &request,
+				 sizeof(request), NULL, 0);
+}
+
+int gb_control_disconnecting_operation(struct gb_control *control,
+					u16 cport_id)
+{
+	struct gb_control_disconnecting_request *request;
+	struct gb_operation *operation;
+	int ret;
+
+	operation = gb_operation_create_core(control->connection,
+					GB_CONTROL_TYPE_DISCONNECTING,
+					sizeof(*request), 0, 0,
+					GFP_KERNEL);
+	if (!operation)
+		return -ENOMEM;
+
+	request = operation->request->payload;
+	request->cport_id = cpu_to_le16(cport_id);
+
+	ret = gb_operation_request_send_sync(operation);
+	if (ret) {
+		dev_err(&control->dev, "failed to send disconnecting: %d\n",
+				ret);
+	}
+
+	gb_operation_put(operation);
+
+	return ret;
+}
+
+int gb_control_mode_switch_operation(struct gb_control *control)
+{
+	struct gb_operation *operation;
+	int ret;
+
+	operation = gb_operation_create_core(control->connection,
+					GB_CONTROL_TYPE_MODE_SWITCH,
+					0, 0, GB_OPERATION_FLAG_UNIDIRECTIONAL,
+					GFP_KERNEL);
+	if (!operation)
+		return -ENOMEM;
+
+	ret = gb_operation_request_send_sync(operation);
+	if (ret)
+		dev_err(&control->dev, "failed to send mode switch: %d\n", ret);
+
+	gb_operation_put(operation);
+
+	return ret;
+}
+
+int gb_control_timesync_enable(struct gb_control *control, u8 count,
+			       u64 frame_time, u32 strobe_delay, u32 refclk)
+{
+	struct gb_control_timesync_enable_request request;
+
+	request.count = count;
+	request.frame_time = cpu_to_le64(frame_time);
+	request.strobe_delay = cpu_to_le32(strobe_delay);
+	request.refclk = cpu_to_le32(refclk);
+	return gb_operation_sync(control->connection,
+				 GB_CONTROL_TYPE_TIMESYNC_ENABLE, &request,
+				 sizeof(request), NULL, 0);
+}
+
+int gb_control_timesync_disable(struct gb_control *control)
+{
+	return gb_operation_sync(control->connection,
+				 GB_CONTROL_TYPE_TIMESYNC_DISABLE, NULL, 0,
+				 NULL, 0);
+}
+
+int gb_control_timesync_get_last_event(struct gb_control *control,
+				       u64 *frame_time)
+{
+	struct gb_control_timesync_get_last_event_response response;
+	int ret;
+
+	ret = gb_operation_sync(control->connection,
+				GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT,
+				NULL, 0, &response, sizeof(response));
+	if (!ret)
+		*frame_time = le64_to_cpu(response.frame_time);
+	return ret;
+}
+
+int gb_control_timesync_authoritative(struct gb_control *control,
+				      u64 *frame_time)
+{
+	struct gb_control_timesync_authoritative_request request;
+	int i;
+
+	for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
+		request.frame_time[i] = cpu_to_le64(frame_time[i]);
+
+	return gb_operation_sync(control->connection,
+				 GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE,
+				 &request, sizeof(request),
+				 NULL, 0);
+}
+
+static int gb_control_bundle_pm_status_map(u8 status)
+{
+	switch (status) {
+	case GB_CONTROL_BUNDLE_PM_INVAL:
+		return -EINVAL;
+	case GB_CONTROL_BUNDLE_PM_BUSY:
+		return -EBUSY;
+	case GB_CONTROL_BUNDLE_PM_NA:
+		return -ENOMSG;
+	case GB_CONTROL_BUNDLE_PM_FAIL:
+	default:
+		return -EREMOTEIO;
+	}
+}
+
+int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id)
+{
+	struct gb_control_bundle_pm_request request;
+	struct gb_control_bundle_pm_response response;
+	int ret;
+
+	request.bundle_id = bundle_id;
+	ret = gb_operation_sync(control->connection,
+				GB_CONTROL_TYPE_BUNDLE_SUSPEND, &request,
+				sizeof(request), &response, sizeof(response));
+	if (ret) {
+		dev_err(&control->dev, "failed to send bundle %u suspend: %d\n",
+			bundle_id, ret);
+		return ret;
+	}
+
+	if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
+		dev_err(&control->dev, "failed to suspend bundle %u: %d\n",
+			bundle_id, response.status);
+		return gb_control_bundle_pm_status_map(response.status);
+	}
+
+	return 0;
+}
+
+int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id)
+{
+	struct gb_control_bundle_pm_request request;
+	struct gb_control_bundle_pm_response response;
+	int ret;
+
+	request.bundle_id = bundle_id;
+	ret = gb_operation_sync(control->connection,
+				GB_CONTROL_TYPE_BUNDLE_RESUME, &request,
+				sizeof(request), &response, sizeof(response));
+	if (ret) {
+		dev_err(&control->dev, "failed to send bundle %u resume: %d\n",
+			bundle_id, ret);
+		return ret;
+	}
+
+	if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
+		dev_err(&control->dev, "failed to resume bundle %u: %d\n",
+			bundle_id, response.status);
+		return gb_control_bundle_pm_status_map(response.status);
+	}
+
+	return 0;
+}
+
+int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id)
+{
+	struct gb_control_bundle_pm_request request;
+	struct gb_control_bundle_pm_response response;
+	int ret;
+
+	request.bundle_id = bundle_id;
+	ret = gb_operation_sync(control->connection,
+				GB_CONTROL_TYPE_BUNDLE_DEACTIVATE, &request,
+				sizeof(request), &response, sizeof(response));
+	if (ret) {
+		dev_err(&control->dev,
+			"failed to send bundle %u deactivate: %d\n", bundle_id,
+			ret);
+		return ret;
+	}
+
+	if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
+		dev_err(&control->dev, "failed to deactivate bundle %u: %d\n",
+			bundle_id, response.status);
+		return gb_control_bundle_pm_status_map(response.status);
+	}
+
+	return 0;
+}
+
+int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id)
+{
+	struct gb_control_bundle_pm_request request;
+	struct gb_control_bundle_pm_response response;
+	int ret;
+
+	if (!control->has_bundle_activate)
+		return 0;
+
+	request.bundle_id = bundle_id;
+	ret = gb_operation_sync(control->connection,
+				GB_CONTROL_TYPE_BUNDLE_ACTIVATE, &request,
+				sizeof(request), &response, sizeof(response));
+	if (ret) {
+		dev_err(&control->dev,
+			"failed to send bundle %u activate: %d\n", bundle_id,
+			ret);
+		return ret;
+	}
+
+	if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
+		dev_err(&control->dev, "failed to activate bundle %u: %d\n",
+			bundle_id, response.status);
+		return gb_control_bundle_pm_status_map(response.status);
+	}
+
+	return 0;
+}
+
+static int gb_control_interface_pm_status_map(u8 status)
+{
+	switch (status) {
+	case GB_CONTROL_INTF_PM_BUSY:
+		return -EBUSY;
+	case GB_CONTROL_INTF_PM_NA:
+		return -ENOMSG;
+	default:
+		return -EREMOTEIO;
+	}
+}
+
+int gb_control_interface_suspend_prepare(struct gb_control *control)
+{
+	struct gb_control_intf_pm_response response;
+	int ret;
+
+	ret = gb_operation_sync(control->connection,
+				GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE, NULL, 0,
+				&response, sizeof(response));
+	if (ret) {
+		dev_err(&control->dev,
+			"failed to send interface suspend prepare: %d\n", ret);
+		return ret;
+	}
+
+	if (response.status != GB_CONTROL_INTF_PM_OK) {
+		dev_err(&control->dev, "interface error while preparing suspend: %d\n",
+			response.status);
+		return gb_control_interface_pm_status_map(response.status);
+	}
+
+	return 0;
+}
+
+int gb_control_interface_deactivate_prepare(struct gb_control *control)
+{
+	struct gb_control_intf_pm_response response;
+	int ret;
+
+	ret = gb_operation_sync(control->connection,
+				GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE, NULL,
+				0, &response, sizeof(response));
+	if (ret) {
+		dev_err(&control->dev, "failed to send interface deactivate prepare: %d\n",
+			ret);
+		return ret;
+	}
+
+	if (response.status != GB_CONTROL_INTF_PM_OK) {
+		dev_err(&control->dev, "interface error while preparing deactivate: %d\n",
+			response.status);
+		return gb_control_interface_pm_status_map(response.status);
+	}
+
+	return 0;
+}
+
+int gb_control_interface_hibernate_abort(struct gb_control *control)
+{
+	struct gb_control_intf_pm_response response;
+	int ret;
+
+	ret = gb_operation_sync(control->connection,
+				GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT, NULL, 0,
+				&response, sizeof(response));
+	if (ret) {
+		dev_err(&control->dev,
+			"failed to send interface aborting hibernate: %d\n",
+			ret);
+		return ret;
+	}
+
+	if (response.status != GB_CONTROL_INTF_PM_OK) {
+		dev_err(&control->dev, "interface error while aborting hibernate: %d\n",
+			response.status);
+		return gb_control_interface_pm_status_map(response.status);
+	}
+
+	return 0;
+}
+
+static ssize_t vendor_string_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct gb_control *control = to_gb_control(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", control->vendor_string);
+}
+static DEVICE_ATTR_RO(vendor_string);
+
+static ssize_t product_string_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct gb_control *control = to_gb_control(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", control->product_string);
+}
+static DEVICE_ATTR_RO(product_string);
+
+static struct attribute *control_attrs[] = {
+	&dev_attr_vendor_string.attr,
+	&dev_attr_product_string.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(control);
+
+static void gb_control_release(struct device *dev)
+{
+	struct gb_control *control = to_gb_control(dev);
+
+	gb_connection_destroy(control->connection);
+
+	kfree(control->vendor_string);
+	kfree(control->product_string);
+
+	kfree(control);
+}
+
+struct device_type greybus_control_type = {
+	.name =		"greybus_control",
+	.release =	gb_control_release,
+};
+
+struct gb_control *gb_control_create(struct gb_interface *intf)
+{
+	struct gb_connection *connection;
+	struct gb_control *control;
+
+	control = kzalloc(sizeof(*control), GFP_KERNEL);
+	if (!control)
+		return ERR_PTR(-ENOMEM);
+
+	control->intf = intf;
+
+	connection = gb_connection_create_control(intf);
+	if (IS_ERR(connection)) {
+		dev_err(&intf->dev,
+				"failed to create control connection: %ld\n",
+				PTR_ERR(connection));
+		kfree(control);
+		return ERR_CAST(connection);
+	}
+
+	control->connection = connection;
+
+	control->dev.parent = &intf->dev;
+	control->dev.bus = &greybus_bus_type;
+	control->dev.type = &greybus_control_type;
+	control->dev.groups = control_groups;
+	control->dev.dma_mask = intf->dev.dma_mask;
+	device_initialize(&control->dev);
+	dev_set_name(&control->dev, "%s.ctrl", dev_name(&intf->dev));
+
+	gb_connection_set_data(control->connection, control);
+
+	return control;
+}
+
+int gb_control_enable(struct gb_control *control)
+{
+	int ret;
+
+	dev_dbg(&control->connection->intf->dev, "%s\n", __func__);
+
+	ret = gb_connection_enable_tx(control->connection);
+	if (ret) {
+		dev_err(&control->connection->intf->dev,
+				"failed to enable control connection: %d\n",
+				ret);
+		return ret;
+	}
+
+	ret = gb_control_get_version(control);
+	if (ret)
+		goto err_disable_connection;
+
+	if (control->protocol_major > 0 || control->protocol_minor > 1)
+		control->has_bundle_version = true;
+
+	/* FIXME: use protocol version instead */
+	if (!(control->intf->quirks & GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE))
+		control->has_bundle_activate = true;
+
+	return 0;
+
+err_disable_connection:
+	gb_connection_disable(control->connection);
+
+	return ret;
+}
+
+void gb_control_disable(struct gb_control *control)
+{
+	dev_dbg(&control->connection->intf->dev, "%s\n", __func__);
+
+	if (control->intf->disconnected)
+		gb_connection_disable_forced(control->connection);
+	else
+		gb_connection_disable(control->connection);
+}
+
+int gb_control_suspend(struct gb_control *control)
+{
+	gb_connection_disable(control->connection);
+
+	return 0;
+}
+
+int gb_control_resume(struct gb_control *control)
+{
+	int ret;
+
+	ret = gb_connection_enable_tx(control->connection);
+	if (ret) {
+		dev_err(&control->connection->intf->dev,
+			"failed to enable control connection: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int gb_control_add(struct gb_control *control)
+{
+	int ret;
+
+	ret = device_add(&control->dev);
+	if (ret) {
+		dev_err(&control->dev,
+				"failed to register control device: %d\n",
+				ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+void gb_control_del(struct gb_control *control)
+{
+	if (device_is_registered(&control->dev))
+		device_del(&control->dev);
+}
+
+struct gb_control *gb_control_get(struct gb_control *control)
+{
+	get_device(&control->dev);
+
+	return control;
+}
+
+void gb_control_put(struct gb_control *control)
+{
+	put_device(&control->dev);
+}
+
+void gb_control_mode_switch_prepare(struct gb_control *control)
+{
+	gb_connection_mode_switch_prepare(control->connection);
+}
+
+void gb_control_mode_switch_complete(struct gb_control *control)
+{
+	gb_connection_mode_switch_complete(control->connection);
+}
--- /dev/null
+++ b/drivers/greybus/control.h
@@ -0,0 +1,65 @@
+/*
+ * Greybus CPort control protocol
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __CONTROL_H
+#define __CONTROL_H
+
+struct gb_control {
+	struct device dev;
+	struct gb_interface *intf;
+
+	struct gb_connection *connection;
+
+	u8 protocol_major;
+	u8 protocol_minor;
+
+	bool has_bundle_activate;
+	bool has_bundle_version;
+
+	char *vendor_string;
+	char *product_string;
+};
+#define to_gb_control(d) container_of(d, struct gb_control, dev)
+
+struct gb_control *gb_control_create(struct gb_interface *intf);
+int gb_control_enable(struct gb_control *control);
+void gb_control_disable(struct gb_control *control);
+int gb_control_suspend(struct gb_control *control);
+int gb_control_resume(struct gb_control *control);
+int gb_control_add(struct gb_control *control);
+void gb_control_del(struct gb_control *control);
+struct gb_control *gb_control_get(struct gb_control *control);
+void gb_control_put(struct gb_control *control);
+
+int gb_control_get_bundle_versions(struct gb_control *control);
+int gb_control_connected_operation(struct gb_control *control, u16 cport_id);
+int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id);
+int gb_control_disconnecting_operation(struct gb_control *control,
+					u16 cport_id);
+int gb_control_mode_switch_operation(struct gb_control *control);
+void gb_control_mode_switch_prepare(struct gb_control *control);
+void gb_control_mode_switch_complete(struct gb_control *control);
+int gb_control_get_manifest_size_operation(struct gb_interface *intf);
+int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
+				      size_t size);
+int gb_control_timesync_enable(struct gb_control *control, u8 count,
+			       u64 frame_time, u32 strobe_delay, u32 refclk);
+int gb_control_timesync_disable(struct gb_control *control);
+int gb_control_timesync_get_last_event(struct gb_control *control,
+				       u64 *frame_time);
+int gb_control_timesync_authoritative(struct gb_control *control,
+				      u64 *frame_time);
+int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id);
+int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id);
+int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id);
+int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id);
+int gb_control_interface_suspend_prepare(struct gb_control *control);
+int gb_control_interface_deactivate_prepare(struct gb_control *control);
+int gb_control_interface_hibernate_abort(struct gb_control *control);
+#endif /* __CONTROL_H */
--- /dev/null
+++ b/drivers/greybus/core.c
@@ -0,0 +1,361 @@
+/*
+ * Greybus "Core"
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define CREATE_TRACE_POINTS
+#include "greybus.h"
+#include "greybus_trace.h"
+
+#define GB_BUNDLE_AUTOSUSPEND_MS	3000
+
+/* Allow greybus to be disabled at boot if needed */
+static bool nogreybus;
+#ifdef MODULE
+module_param(nogreybus, bool, 0444);
+#else
+core_param(nogreybus, nogreybus, bool, 0444);
+#endif
+int greybus_disabled(void)
+{
+	return nogreybus;
+}
+EXPORT_SYMBOL_GPL(greybus_disabled);
+
+static bool greybus_match_one_id(struct gb_bundle *bundle,
+				     const struct greybus_bundle_id *id)
+{
+	if ((id->match_flags & GREYBUS_ID_MATCH_VENDOR) &&
+	    (id->vendor != bundle->intf->vendor_id))
+		return false;
+
+	if ((id->match_flags & GREYBUS_ID_MATCH_PRODUCT) &&
+	    (id->product != bundle->intf->product_id))
+		return false;
+
+	if ((id->match_flags & GREYBUS_ID_MATCH_CLASS) &&
+	    (id->class != bundle->class))
+		return false;
+
+	return true;
+}
+
+static const struct greybus_bundle_id *
+greybus_match_id(struct gb_bundle *bundle, const struct greybus_bundle_id *id)
+{
+	if (id == NULL)
+		return NULL;
+
+	for (; id->vendor || id->product || id->class || id->driver_info;
+									id++) {
+		if (greybus_match_one_id(bundle, id))
+			return id;
+	}
+
+	return NULL;
+}
+
+static int greybus_match_device(struct device *dev, struct device_driver *drv)
+{
+	struct greybus_driver *driver = to_greybus_driver(drv);
+	struct gb_bundle *bundle;
+	const struct greybus_bundle_id *id;
+
+	if (!is_gb_bundle(dev))
+		return 0;
+
+	bundle = to_gb_bundle(dev);
+
+	id = greybus_match_id(bundle, driver->id_table);
+	if (id)
+		return 1;
+	/* FIXME - Dynamic ids? */
+	return 0;
+}
+
+static int greybus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct gb_host_device *hd;
+	struct gb_module *module = NULL;
+	struct gb_interface *intf = NULL;
+	struct gb_control *control = NULL;
+	struct gb_bundle *bundle = NULL;
+	struct gb_svc *svc = NULL;
+
+	if (is_gb_host_device(dev)) {
+		hd = to_gb_host_device(dev);
+	} else if (is_gb_module(dev)) {
+		module = to_gb_module(dev);
+		hd = module->hd;
+	} else if (is_gb_interface(dev)) {
+		intf = to_gb_interface(dev);
+		module = intf->module;
+		hd = intf->hd;
+	} else if (is_gb_control(dev)) {
+		control = to_gb_control(dev);
+		intf = control->intf;
+		module = intf->module;
+		hd = intf->hd;
+	} else if (is_gb_bundle(dev)) {
+		bundle = to_gb_bundle(dev);
+		intf = bundle->intf;
+		module = intf->module;
+		hd = intf->hd;
+	} else if (is_gb_svc(dev)) {
+		svc = to_gb_svc(dev);
+		hd = svc->hd;
+	} else {
+		dev_WARN(dev, "uevent for unknown greybus device \"type\"!\n");
+		return -EINVAL;
+	}
+
+	if (add_uevent_var(env, "BUS=%u", hd->bus_id))
+		return -ENOMEM;
+
+	if (module) {
+		if (add_uevent_var(env, "MODULE=%u", module->module_id))
+			return -ENOMEM;
+	}
+
+	if (intf) {
+		if (add_uevent_var(env, "INTERFACE=%u", intf->interface_id))
+			return -ENOMEM;
+		if (add_uevent_var(env, "GREYBUS_ID=%08x/%08x",
+				   intf->vendor_id, intf->product_id))
+			return -ENOMEM;
+	}
+
+	if (bundle) {
+		/*
+		 * FIXME: add a uevent that can "load" a bundle type.  This
+		 * is what we need to bind a driver to, so use the info in
+		 * gmod here as well.
+		 */
+
+		if (add_uevent_var(env, "BUNDLE=%u", bundle->id))
+			return -ENOMEM;
+		if (add_uevent_var(env, "BUNDLE_CLASS=%02x", bundle->class))
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void greybus_shutdown(struct device *dev)
+{
+	if (is_gb_host_device(dev)) {
+		struct gb_host_device *hd;
+
+		hd = to_gb_host_device(dev);
+		gb_hd_shutdown(hd);
+	}
+}
+
+struct bus_type greybus_bus_type = {
+	.name =		"greybus",
+	.match =	greybus_match_device,
+	.uevent =	greybus_uevent,
+	.shutdown =	greybus_shutdown,
+};
+
+static int greybus_probe(struct device *dev)
+{
+	struct greybus_driver *driver = to_greybus_driver(dev->driver);
+	struct gb_bundle *bundle = to_gb_bundle(dev);
+	const struct greybus_bundle_id *id;
+	int retval;
+
+	/* match id */
+	id = greybus_match_id(bundle, driver->id_table);
+	if (!id)
+		return -ENODEV;
+
+	retval = pm_runtime_get_sync(&bundle->intf->dev);
+	if (retval < 0) {
+		pm_runtime_put_noidle(&bundle->intf->dev);
+		return retval;
+	}
+
+	retval = gb_control_bundle_activate(bundle->intf->control, bundle->id);
+	if (retval) {
+		pm_runtime_put(&bundle->intf->dev);
+		return retval;
+	}
+
+	/*
+	 * Unbound bundle devices are always deactivated. During probe, the
+	 * Runtime PM is set to enabled and active and the usage count is
+	 * incremented. If the driver supports runtime PM, it should call
+	 * pm_runtime_put() in its probe routine and pm_runtime_get_sync()
+	 * in remove routine.
+	 */
+	pm_runtime_set_autosuspend_delay(dev, GB_BUNDLE_AUTOSUSPEND_MS);
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_get_noresume(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	retval = driver->probe(bundle, id);
+	if (retval) {
+		/*
+		 * Catch buggy drivers that fail to destroy their connections.
+		 */
+		WARN_ON(!list_empty(&bundle->connections));
+
+		gb_control_bundle_deactivate(bundle->intf->control, bundle->id);
+
+		pm_runtime_disable(dev);
+		pm_runtime_set_suspended(dev);
+		pm_runtime_put_noidle(dev);
+		pm_runtime_dont_use_autosuspend(dev);
+		pm_runtime_put(&bundle->intf->dev);
+
+		return retval;
+	}
+
+	gb_timesync_schedule_synchronous(bundle->intf);
+
+	pm_runtime_put(&bundle->intf->dev);
+
+	return 0;
+}
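For a bundle driver, the runtime PM contract above means that probe ends
with a put and disconnect starts with a matching get. A minimal sketch,
assuming a hypothetical driver (example_device, example_probe and
example_disconnect are invented names, not part of this patch):

	struct example_device {
		struct gb_connection *connection;
	};

	static int example_probe(struct gb_bundle *bundle,
				 const struct greybus_bundle_id *id)
	{
		struct example_device *ed;

		ed = kzalloc(sizeof(*ed), GFP_KERNEL);
		if (!ed)
			return -ENOMEM;
		greybus_set_drvdata(bundle, ed);

		/* ... create and enable connections here ... */

		/* Let the core runtime-suspend the bundle when idle. */
		pm_runtime_put(&bundle->dev);

		return 0;
	}

	static void example_disconnect(struct gb_bundle *bundle)
	{
		struct example_device *ed = greybus_get_drvdata(bundle);

		/* Balance the put in probe before tearing down. */
		pm_runtime_get_sync(&bundle->dev);

		/* ... disable and destroy connections here ... */

		kfree(ed);
	}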
+
+static int greybus_remove(struct device *dev)
+{
+	struct greybus_driver *driver = to_greybus_driver(dev->driver);
+	struct gb_bundle *bundle = to_gb_bundle(dev);
+	struct gb_connection *connection;
+	int retval;
+
+	retval = pm_runtime_get_sync(dev);
+	if (retval < 0)
+		dev_err(dev, "failed to resume bundle: %d\n", retval);
+
+	/*
+	 * Disable (non-offloaded) connections early in case the interface is
+	 * already gone to avoid unnecessary operation timeouts during
+	 * driver disconnect. Otherwise, only disable incoming requests.
+	 */
+	list_for_each_entry(connection, &bundle->connections, bundle_links) {
+		if (gb_connection_is_offloaded(connection))
+			continue;
+
+		if (bundle->intf->disconnected)
+			gb_connection_disable_forced(connection);
+		else
+			gb_connection_disable_rx(connection);
+	}
+
+	driver->disconnect(bundle);
+
+	/* Catch buggy drivers that fail to destroy their connections. */
+	WARN_ON(!list_empty(&bundle->connections));
+
+	if (!bundle->intf->disconnected)
+		gb_control_bundle_deactivate(bundle->intf->control, bundle->id);
+
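+	/*
+	 * Two puts: the first balances the pm_runtime_get_sync() above,
+	 * the second drops the reference taken with
+	 * pm_runtime_get_noresume() at probe time.
+	 */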
+	pm_runtime_put_noidle(dev);
+	pm_runtime_disable(dev);
+	pm_runtime_set_suspended(dev);
+	pm_runtime_dont_use_autosuspend(dev);
+	pm_runtime_put_noidle(dev);
+
+	return 0;
+}
+
+int greybus_register_driver(struct greybus_driver *driver, struct module *owner,
+		const char *mod_name)
+{
+	int retval;
+
+	if (greybus_disabled())
+		return -ENODEV;
+
+	driver->driver.bus = &greybus_bus_type;
+	driver->driver.name = driver->name;
+	driver->driver.probe = greybus_probe;
+	driver->driver.remove = greybus_remove;
+	driver->driver.owner = owner;
+	driver->driver.mod_name = mod_name;
+
+	retval = driver_register(&driver->driver);
+	if (retval)
+		return retval;
+
+	pr_info("registered new driver %s\n", driver->name);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(greybus_register_driver);
+
+void greybus_deregister_driver(struct greybus_driver *driver)
+{
+	driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(greybus_deregister_driver);
+
+static int __init gb_init(void)
+{
+	int retval;
+
+	if (greybus_disabled())
+		return -ENODEV;
+
+	BUILD_BUG_ON(CPORT_ID_MAX >= (long)CPORT_ID_BAD);
+
+	gb_debugfs_init();
+
+	retval = bus_register(&greybus_bus_type);
+	if (retval) {
+		pr_err("bus_register failed (%d)\n", retval);
+		goto error_bus;
+	}
+
+	retval = gb_hd_init();
+	if (retval) {
+		pr_err("gb_hd_init failed (%d)\n", retval);
+		goto error_hd;
+	}
+
+	retval = gb_operation_init();
+	if (retval) {
+		pr_err("gb_operation_init failed (%d)\n", retval);
+		goto error_operation;
+	}
+
+	retval = gb_timesync_init();
+	if (retval) {
+		pr_err("gb_timesync_init failed\n");
+		goto error_timesync;
+	}
+	return 0;	/* Success */
+
+error_timesync:
+	gb_operation_exit();
+error_operation:
+	gb_hd_exit();
+error_hd:
+	bus_unregister(&greybus_bus_type);
+error_bus:
+	gb_debugfs_cleanup();
+
+	return retval;
+}
+module_init(gb_init);
+
+static void __exit gb_exit(void)
+{
+	gb_timesync_exit();
+	gb_operation_exit();
+	gb_hd_exit();
+	bus_unregister(&greybus_bus_type);
+	gb_debugfs_cleanup();
+	tracepoint_synchronize_unregister();
+}
+module_exit(gb_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@...uxfoundation.org>");
--- /dev/null
+++ b/drivers/greybus/debugfs.c
@@ -0,0 +1,31 @@
+/*
+ * Greybus debugfs code
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/debugfs.h>
+
+#include "greybus.h"
+
+static struct dentry *gb_debug_root;
+
+void __init gb_debugfs_init(void)
+{
+	gb_debug_root = debugfs_create_dir("greybus", NULL);
+}
+
+void gb_debugfs_cleanup(void)
+{
+	debugfs_remove_recursive(gb_debug_root);
+	gb_debug_root = NULL;
+}
+
+struct dentry *gb_debugfs_get(void)
+{
+	return gb_debug_root;
+}
+EXPORT_SYMBOL_GPL(gb_debugfs_get);
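Other greybus code can anchor its own entries under this directory via
the exported helper. A hedged sketch, assuming a hypothetical counter
and file name:

	static u32 example_error_count;

	static void example_debugfs_setup(void)
	{
		/* Creates greybus/example_errors under debugfs. */
		debugfs_create_u32("example_errors", 0444,
				   gb_debugfs_get(), &example_error_count);
	}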
--- /dev/null
+++ b/drivers/greybus/greybus.h
@@ -0,0 +1,154 @@
+/*
+ * Greybus driver and device API
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __LINUX_GREYBUS_H
+#define __LINUX_GREYBUS_H
+
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/idr.h>
+
+#include "greybus_id.h"
+#include "greybus_manifest.h"
+#include "greybus_protocols.h"
+#include "manifest.h"
+#include "hd.h"
+#include "svc.h"
+#include "control.h"
+#include "module.h"
+#include "interface.h"
+#include "bundle.h"
+#include "connection.h"
+#include "operation.h"
+#include "timesync.h"
+
+/* Matches up with the Greybus Protocol specification document */
+#define GREYBUS_VERSION_MAJOR	0x00
+#define GREYBUS_VERSION_MINOR	0x01
+
+#define GREYBUS_ID_MATCH_DEVICE \
+	(GREYBUS_ID_MATCH_VENDOR | GREYBUS_ID_MATCH_PRODUCT)
+
+#define GREYBUS_DEVICE(v, p)					\
+	.match_flags	= GREYBUS_ID_MATCH_DEVICE,		\
+	.vendor		= (v),					\
+	.product	= (p),
+
+#define GREYBUS_DEVICE_CLASS(c)					\
+	.match_flags	= GREYBUS_ID_MATCH_CLASS,		\
+	.class		= (c),
+
+/* Maximum number of CPorts */
+#define CPORT_ID_MAX	4095		/* UniPro max id is 4095 */
+#define CPORT_ID_BAD	U16_MAX
+
+struct greybus_driver {
+	const char *name;
+
+	int (*probe)(struct gb_bundle *bundle,
+		     const struct greybus_bundle_id *id);
+	void (*disconnect)(struct gb_bundle *bundle);
+
+	const struct greybus_bundle_id *id_table;
+
+	struct device_driver driver;
+};
+#define to_greybus_driver(d) container_of(d, struct greybus_driver, driver)
+
+static inline void greybus_set_drvdata(struct gb_bundle *bundle, void *data)
+{
+	dev_set_drvdata(&bundle->dev, data);
+}
+
+static inline void *greybus_get_drvdata(struct gb_bundle *bundle)
+{
+	return dev_get_drvdata(&bundle->dev);
+}
+
+/* Don't call these directly, use the module_greybus_driver() macro instead */
+int greybus_register_driver(struct greybus_driver *driver,
+			    struct module *module, const char *mod_name);
+void greybus_deregister_driver(struct greybus_driver *driver);
+
+/* define to get proper THIS_MODULE and KBUILD_MODNAME values */
+#define greybus_register(driver) \
+	greybus_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
+#define greybus_deregister(driver) \
+	greybus_deregister_driver(driver)
+
+/**
+ * module_greybus_driver() - Helper macro for registering a Greybus driver
+ * @__greybus_driver: greybus_driver structure
+ *
+ * Helper macro for Greybus drivers to set up proper module init / exit
+ * functions.  Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_greybus_driver(__greybus_driver)	\
+	module_driver(__greybus_driver, greybus_register, greybus_deregister)
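As a usage illustration, a complete driver built on these helpers
reduces to an id table, two callbacks, and the macro. This sketch is
hypothetical (the exvib_* names are invented); it matches on the
vibrator class defined in greybus_manifest.h:

	static int exvib_probe(struct gb_bundle *bundle,
			       const struct greybus_bundle_id *id)
	{
		/* Set up connections, allocate driver state, etc. */
		return 0;
	}

	static void exvib_disconnect(struct gb_bundle *bundle)
	{
		/* Tear down whatever probe set up. */
	}

	static const struct greybus_bundle_id exvib_id_table[] = {
		{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_VIBRATOR) },
		{ }
	};

	static struct greybus_driver exvib_driver = {
		.name		= "exvib",
		.probe		= exvib_probe,
		.disconnect	= exvib_disconnect,
		.id_table	= exvib_id_table,
	};
	module_greybus_driver(exvib_driver);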
+
+int greybus_disabled(void);
+
+void gb_debugfs_init(void);
+void gb_debugfs_cleanup(void);
+struct dentry *gb_debugfs_get(void);
+
+extern struct bus_type greybus_bus_type;
+
+extern struct device_type greybus_hd_type;
+extern struct device_type greybus_module_type;
+extern struct device_type greybus_interface_type;
+extern struct device_type greybus_control_type;
+extern struct device_type greybus_bundle_type;
+extern struct device_type greybus_svc_type;
+
+static inline int is_gb_host_device(const struct device *dev)
+{
+	return dev->type == &greybus_hd_type;
+}
+
+static inline int is_gb_module(const struct device *dev)
+{
+	return dev->type == &greybus_module_type;
+}
+
+static inline int is_gb_interface(const struct device *dev)
+{
+	return dev->type == &greybus_interface_type;
+}
+
+static inline int is_gb_control(const struct device *dev)
+{
+	return dev->type == &greybus_control_type;
+}
+
+static inline int is_gb_bundle(const struct device *dev)
+{
+	return dev->type == &greybus_bundle_type;
+}
+
+static inline int is_gb_svc(const struct device *dev)
+{
+	return dev->type == &greybus_svc_type;
+}
+
+static inline bool cport_id_valid(struct gb_host_device *hd, u16 cport_id)
+{
+	return cport_id != CPORT_ID_BAD && cport_id < hd->num_cports;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_GREYBUS_H */
--- /dev/null
+++ b/drivers/greybus/greybus_authentication.h
@@ -0,0 +1,120 @@
+/*
+ * Greybus Component Authentication User Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of Google Inc. or Linaro Ltd. nor the names of
+ *    its contributors may be used to endorse or promote products
+ *    derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
+ * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __GREYBUS_AUTHENTICATION_USER_H
+#define __GREYBUS_AUTHENTICATION_USER_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define CAP_CERTIFICATE_MAX_SIZE	1600
+#define CAP_SIGNATURE_MAX_SIZE		320
+
+/* Certificate class types */
+#define CAP_CERT_IMS_EAPC		0x00000001
+#define CAP_CERT_IMS_EASC		0x00000002
+#define CAP_CERT_IMS_EARC		0x00000003
+#define CAP_CERT_IMS_IAPC		0x00000004
+#define CAP_CERT_IMS_IASC		0x00000005
+#define CAP_CERT_IMS_IARC		0x00000006
+
+/* IMS Certificate response result codes */
+#define CAP_IMS_RESULT_CERT_FOUND	0x00
+#define CAP_IMS_RESULT_CERT_CLASS_INVAL	0x01
+#define CAP_IMS_RESULT_CERT_CORRUPT	0x02
+#define CAP_IMS_RESULT_CERT_NOT_FOUND	0x03
+
+/* Authentication types */
+#define CAP_AUTH_IMS_PRI		0x00000001
+#define CAP_AUTH_IMS_SEC		0x00000002
+#define CAP_AUTH_IMS_RSA		0x00000003
+
+/* Authenticate response result codes */
+#define CAP_AUTH_RESULT_CR_SUCCESS	0x00
+#define CAP_AUTH_RESULT_CR_BAD_TYPE	0x01
+#define CAP_AUTH_RESULT_CR_WRONG_EP	0x02
+#define CAP_AUTH_RESULT_CR_NO_KEY	0x03
+#define CAP_AUTH_RESULT_CR_SIG_FAIL	0x04
+
+
+/* IOCTL support */
+struct cap_ioc_get_endpoint_uid {
+	__u8			uid[8];
+} __attribute__ ((__packed__));
+
+struct cap_ioc_get_ims_certificate {
+	__u32			certificate_class;
+	__u32			certificate_id;
+
+	__u8			result_code;
+	__u32			cert_size;
+	__u8			certificate[CAP_CERTIFICATE_MAX_SIZE];
+} __attribute__ ((__packed__));
+
+struct cap_ioc_authenticate {
+	__u32			auth_type;
+	__u8			uid[8];
+	__u8			challenge[32];
+
+	__u8			result_code;
+	__u8			response[64];
+	__u32			signature_size;
+	__u8			signature[CAP_SIGNATURE_MAX_SIZE];
+} __attribute__ ((__packed__));
+
+#define CAP_IOCTL_BASE			'C'
+#define CAP_IOC_GET_ENDPOINT_UID	_IOR(CAP_IOCTL_BASE, 0, struct cap_ioc_get_endpoint_uid)
+#define CAP_IOC_GET_IMS_CERTIFICATE	_IOWR(CAP_IOCTL_BASE, 1, struct cap_ioc_get_ims_certificate)
+#define CAP_IOC_AUTHENTICATE		_IOWR(CAP_IOCTL_BASE, 2, struct cap_ioc_authenticate)
+
+#endif /* __GREYBUS_AUTHENTICATION_USER_H */
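To show the intended calling convention from userspace, here is a
hedged sketch that queries the endpoint UID. The device node path is an
assumption (it depends on how the authentication code registers its
character device), as is the header path:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#include "greybus_authentication.h"

	int main(void)
	{
		struct cap_ioc_get_endpoint_uid uid_ioc;
		int fd, i;

		/* Hypothetical node name for the authentication cdev. */
		fd = open("/dev/gb-authenticate-0", O_RDWR);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		memset(&uid_ioc, 0, sizeof(uid_ioc));
		if (ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &uid_ioc) < 0) {
			perror("CAP_IOC_GET_ENDPOINT_UID");
			close(fd);
			return 1;
		}

		for (i = 0; i < 8; i++)
			printf("%02x", uid_ioc.uid[i]);
		printf("\n");

		close(fd);
		return 0;
	}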
--- /dev/null
+++ b/drivers/greybus/greybus_id.h
@@ -0,0 +1,26 @@
+/* FIXME
+ * move this to include/linux/mod_devicetable.h when merging
+ */
+
+#ifndef __LINUX_GREYBUS_ID_H
+#define __LINUX_GREYBUS_ID_H
+
+#include <linux/types.h>
+#include <linux/mod_devicetable.h>
+
+
+struct greybus_bundle_id {
+	__u16	match_flags;
+	__u32	vendor;
+	__u32	product;
+	__u8	class;
+
+	kernel_ulong_t	driver_info __aligned(sizeof(kernel_ulong_t));
+};
+
+/* Used to match the greybus_bundle_id */
+#define GREYBUS_ID_MATCH_VENDOR		BIT(0)
+#define GREYBUS_ID_MATCH_PRODUCT	BIT(1)
+#define GREYBUS_ID_MATCH_CLASS		BIT(2)
+
+#endif /* __LINUX_GREYBUS_ID_H */
--- /dev/null
+++ b/drivers/greybus/greybus_manifest.h
@@ -0,0 +1,177 @@
+/*
+ * Greybus manifest definition
+ *
+ * See "Greybus Application Protocol" document (version 0.1) for
+ * details on these values and structures.
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 and BSD licenses.
+ */
+
+#ifndef __GREYBUS_MANIFEST_H
+#define __GREYBUS_MANIFEST_H
+
+enum greybus_descriptor_type {
+	GREYBUS_TYPE_INVALID		= 0x00,
+	GREYBUS_TYPE_INTERFACE		= 0x01,
+	GREYBUS_TYPE_STRING		= 0x02,
+	GREYBUS_TYPE_BUNDLE		= 0x03,
+	GREYBUS_TYPE_CPORT		= 0x04,
+};
+
+enum greybus_protocol {
+	GREYBUS_PROTOCOL_CONTROL	= 0x00,
+	/* 0x01 is unused */
+	GREYBUS_PROTOCOL_GPIO		= 0x02,
+	GREYBUS_PROTOCOL_I2C		= 0x03,
+	GREYBUS_PROTOCOL_UART		= 0x04,
+	GREYBUS_PROTOCOL_HID		= 0x05,
+	GREYBUS_PROTOCOL_USB		= 0x06,
+	GREYBUS_PROTOCOL_SDIO		= 0x07,
+	GREYBUS_PROTOCOL_POWER_SUPPLY	= 0x08,
+	GREYBUS_PROTOCOL_PWM		= 0x09,
+	/* 0x0a is unused */
+	GREYBUS_PROTOCOL_SPI		= 0x0b,
+	GREYBUS_PROTOCOL_DISPLAY	= 0x0c,
+	GREYBUS_PROTOCOL_CAMERA_MGMT	= 0x0d,
+	GREYBUS_PROTOCOL_SENSOR		= 0x0e,
+	GREYBUS_PROTOCOL_LIGHTS		= 0x0f,
+	GREYBUS_PROTOCOL_VIBRATOR	= 0x10,
+	GREYBUS_PROTOCOL_LOOPBACK	= 0x11,
+	GREYBUS_PROTOCOL_AUDIO_MGMT	= 0x12,
+	GREYBUS_PROTOCOL_AUDIO_DATA	= 0x13,
+	GREYBUS_PROTOCOL_SVC            = 0x14,
+	GREYBUS_PROTOCOL_BOOTROM	= 0x15,
+	GREYBUS_PROTOCOL_CAMERA_DATA	= 0x16,
+	GREYBUS_PROTOCOL_FW_DOWNLOAD	= 0x17,
+	GREYBUS_PROTOCOL_FW_MANAGEMENT	= 0x18,
+	GREYBUS_PROTOCOL_AUTHENTICATION	= 0x19,
+	GREYBUS_PROTOCOL_LOG		= 0x1a,
+		/* ... */
+	GREYBUS_PROTOCOL_RAW		= 0xfe,
+	GREYBUS_PROTOCOL_VENDOR		= 0xff,
+};
+
+enum greybus_class_type {
+	GREYBUS_CLASS_CONTROL		= 0x00,
+	/* 0x01 is unused */
+	/* 0x02 is unused */
+	/* 0x03 is unused */
+	/* 0x04 is unused */
+	GREYBUS_CLASS_HID		= 0x05,
+	/* 0x06 is unused */
+	/* 0x07 is unused */
+	GREYBUS_CLASS_POWER_SUPPLY	= 0x08,
+	/* 0x09 is unused */
+	GREYBUS_CLASS_BRIDGED_PHY	= 0x0a,
+	/* 0x0b is unused */
+	GREYBUS_CLASS_DISPLAY		= 0x0c,
+	GREYBUS_CLASS_CAMERA		= 0x0d,
+	GREYBUS_CLASS_SENSOR		= 0x0e,
+	GREYBUS_CLASS_LIGHTS		= 0x0f,
+	GREYBUS_CLASS_VIBRATOR		= 0x10,
+	GREYBUS_CLASS_LOOPBACK		= 0x11,
+	GREYBUS_CLASS_AUDIO		= 0x12,
+	/* 0x13 is unused */
+	/* 0x14 is unused */
+	GREYBUS_CLASS_BOOTROM		= 0x15,
+	GREYBUS_CLASS_FW_MANAGEMENT	= 0x16,
+	GREYBUS_CLASS_LOG		= 0x17,
+		/* ... */
+	GREYBUS_CLASS_RAW		= 0xfe,
+	GREYBUS_CLASS_VENDOR		= 0xff,
+};
+
+enum {
+	GREYBUS_INTERFACE_FEATURE_TIMESYNC = BIT(0),
+};
+
+/*
+ * The string in a string descriptor is not NUL-terminated.  The
+ * size of the descriptor will be rounded up to a multiple of 4
+ * bytes, by padding the string with 0x00 bytes if necessary.
+ */
+struct greybus_descriptor_string {
+	__u8	length;
+	__u8	id;
+	__u8	string[0];
+} __packed;
+
+/*
+ * An interface descriptor describes information about an interface as a whole,
+ * *not* the functions within it.
+ */
+struct greybus_descriptor_interface {
+	__u8	vendor_stringid;
+	__u8	product_stringid;
+	__u8	features;
+	__u8	pad;
+} __packed;
+
+/*
+ * A bundle descriptor defines an identification number and a class for
+ * each bundle.
+ *
+ * @id: Uniquely identifies a bundle within an interface; its sole purpose
+ * is to allow CPort descriptors to specify which bundle they are
+ * associated with.  The first bundle will have id 0, the second id 1,
+ * and so on.
+ *
+ * The largest CPort id associated with a bundle (defined by a
+ * CPort descriptor in the manifest) is used to determine how to
+ * encode the device id and module number in UniPro packets
+ * that use the bundle.
+ *
+ * @class: Used by the kernel to identify the functionality provided by
+ * the bundle; it is matched against the functionality that greybus
+ * drivers declare support for when probing.  It should contain one of
+ * the values defined in 'enum greybus_class_type'.
+ *
+ */
+struct greybus_descriptor_bundle {
+	__u8	id;	/* interface-relative id (0..) */
+	__u8	class;
+	__u8	pad[2];
+} __packed;
+
+/*
+ * A CPort descriptor indicates the id of the bundle within the
+ * module it's associated with, along with the CPort id used to
+ * address the CPort.  The protocol id defines the format of messages
+ * exchanged using the CPort.
+ */
+struct greybus_descriptor_cport {
+	__le16	id;
+	__u8	bundle;
+	__u8	protocol_id;	/* enum greybus_protocol */
+} __packed;
+
+struct greybus_descriptor_header {
+	__le16	size;
+	__u8	type;		/* enum greybus_descriptor_type */
+	__u8	pad;
+} __packed;
+
+struct greybus_descriptor {
+	struct greybus_descriptor_header		header;
+	union {
+		struct greybus_descriptor_string	string;
+		struct greybus_descriptor_interface	interface;
+		struct greybus_descriptor_bundle	bundle;
+		struct greybus_descriptor_cport		cport;
+	};
+} __packed;
+
+struct greybus_manifest_header {
+	__le16	size;
+	__u8	version_major;
+	__u8	version_minor;
+} __packed;
+
+struct greybus_manifest {
+	struct greybus_manifest_header		header;
+	struct greybus_descriptor		descriptors[0];
+} __packed;
+
+#endif /* __GREYBUS_MANIFEST_H */
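For concreteness, here is a hedged, hand-assembled example of how these
structures pack into a manifest blob: a single interface with vendor
string 1, product string 2, one bridged-PHY bundle and one GPIO CPort.
The strings and ids are invented for illustration; all multi-byte fields
are little-endian:

	static const unsigned char example_manifest[] = {
		0x34, 0x00, 0x00, 0x01,	/* header: size 52, version 0.1 */

		0x08, 0x00, 0x01, 0x00,	/* interface desc., size 8 */
		0x01, 0x02, 0x00, 0x00,	/* vendor str 1, product str 2 */

		0x0c, 0x00, 0x02, 0x00,	/* string desc., size 12 */
		0x04, 0x01, 'A', 'C',	/* length 4, id 1, "ACME"... */
		'M', 'E', 0x00, 0x00,	/* ...padded to 4-byte boundary */

		0x0c, 0x00, 0x02, 0x00,	/* string desc., size 12 */
		0x04, 0x02, 'G', 'P',	/* length 4, id 2, "GPIO"... */
		'I', 'O', 0x00, 0x00,	/* ...padded to 4-byte boundary */

		0x08, 0x00, 0x03, 0x00,	/* bundle desc., size 8 */
		0x01, 0x0a, 0x00, 0x00,	/* bundle 1, bridged-PHY class */

		0x08, 0x00, 0x04, 0x00,	/* cport desc., size 8 */
		0x01, 0x00, 0x01, 0x02,	/* cport 1, bundle 1, GPIO */
	};

Note how each string descriptor's size (12) is the 4-byte descriptor
header plus the 2-byte fixed part plus the string, rounded up to a
multiple of 4, which is exactly what identify_descriptor() in manifest.c
(later in this patch) checks for.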
--- /dev/null
+++ b/drivers/greybus/manifest.c
@@ -0,0 +1,535 @@
+/*
+ * Greybus manifest parsing
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include "greybus.h"
+
+static const char *get_descriptor_type_string(u8 type)
+{
+	switch (type) {
+	case GREYBUS_TYPE_INVALID:
+		return "invalid";
+	case GREYBUS_TYPE_STRING:
+		return "string";
+	case GREYBUS_TYPE_INTERFACE:
+		return "interface";
+	case GREYBUS_TYPE_CPORT:
+		return "cport";
+	case GREYBUS_TYPE_BUNDLE:
+		return "bundle";
+	default:
+		WARN_ON(1);
+		return "unknown";
+	}
+}
+
+/*
+ * We scan the manifest once to identify where all the descriptors
+ * are.  The result is a list of these manifest_desc structures.  We
+ * then pick through them for what we're looking for (starting with
+ * the interface descriptor).  As each is processed we remove it from
+ * the list.  When we're done the list should (probably) be empty.
+ */
+struct manifest_desc {
+	struct list_head		links;
+
+	size_t				size;
+	void				*data;
+	enum greybus_descriptor_type	type;
+};
+
+static void release_manifest_descriptor(struct manifest_desc *descriptor)
+{
+	list_del(&descriptor->links);
+	kfree(descriptor);
+}
+
+static void release_manifest_descriptors(struct gb_interface *intf)
+{
+	struct manifest_desc *descriptor;
+	struct manifest_desc *next;
+
+	list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links)
+		release_manifest_descriptor(descriptor);
+}
+
+static void release_cport_descriptors(struct list_head *head, u8 bundle_id)
+{
+	struct manifest_desc *desc, *tmp;
+	struct greybus_descriptor_cport *desc_cport;
+
+	list_for_each_entry_safe(desc, tmp, head, links) {
+		desc_cport = desc->data;
+
+		if (desc->type != GREYBUS_TYPE_CPORT)
+			continue;
+
+		if (desc_cport->bundle == bundle_id)
+			release_manifest_descriptor(desc);
+	}
+}
+
+static struct manifest_desc *get_next_bundle_desc(struct gb_interface *intf)
+{
+	struct manifest_desc *descriptor;
+	struct manifest_desc *next;
+
+	list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links)
+		if (descriptor->type == GREYBUS_TYPE_BUNDLE)
+			return descriptor;
+
+	return NULL;
+}
+
+/*
+ * Validate the given descriptor.  Its reported size must fit within
+ * the number of bytes remaining, and it must have a recognized
+ * type.  Check that the reported size is at least as big as what
+ * we expect to see.  (It could be bigger, perhaps for a new version
+ * of the format.)
+ *
+ * Returns the (non-zero) number of bytes consumed by the descriptor,
+ * or a negative errno.
+ */
+static int identify_descriptor(struct gb_interface *intf,
+			       struct greybus_descriptor *desc, size_t size)
+{
+	struct greybus_descriptor_header *desc_header = &desc->header;
+	struct manifest_desc *descriptor;
+	size_t desc_size;
+	size_t expected_size;
+
+	if (size < sizeof(*desc_header)) {
+		dev_err(&intf->dev, "manifest too small (%zu < %zu)\n",
+				size, sizeof(*desc_header));
+		return -EINVAL;		/* Must at least have header */
+	}
+
+	desc_size = le16_to_cpu(desc_header->size);
+	if (desc_size > size) {
+		dev_err(&intf->dev, "descriptor too big (%zu > %zu)\n",
+				desc_size, size);
+		return -EINVAL;
+	}
+
+	/* Descriptor needs to at least have a header */
+	expected_size = sizeof(*desc_header);
+
+	switch (desc_header->type) {
+	case GREYBUS_TYPE_STRING:
+		expected_size += sizeof(struct greybus_descriptor_string);
+		expected_size += desc->string.length;
+
+		/* String descriptors are padded to 4 byte boundaries */
+		expected_size = ALIGN(expected_size, 4);
+		break;
+	case GREYBUS_TYPE_INTERFACE:
+		expected_size += sizeof(struct greybus_descriptor_interface);
+		break;
+	case GREYBUS_TYPE_BUNDLE:
+		expected_size += sizeof(struct greybus_descriptor_bundle);
+		break;
+	case GREYBUS_TYPE_CPORT:
+		expected_size += sizeof(struct greybus_descriptor_cport);
+		break;
+	case GREYBUS_TYPE_INVALID:
+	default:
+		dev_err(&intf->dev, "invalid descriptor type (%u)\n",
+				desc_header->type);
+		return -EINVAL;
+	}
+
+	if (desc_size < expected_size) {
+		dev_err(&intf->dev, "%s descriptor too small (%zu < %zu)\n",
+				get_descriptor_type_string(desc_header->type),
+				desc_size, expected_size);
+		return -EINVAL;
+	}
+
+	/* Descriptor bigger than what we expect */
+	if (desc_size > expected_size) {
+		dev_warn(&intf->dev, "%s descriptor size mismatch (want %zu got %zu)\n",
+				get_descriptor_type_string(desc_header->type),
+				expected_size, desc_size);
+	}
+
+	descriptor = kzalloc(sizeof(*descriptor), GFP_KERNEL);
+	if (!descriptor)
+		return -ENOMEM;
+
+	descriptor->size = desc_size;
+	descriptor->data = (char *)desc + sizeof(*desc_header);
+	descriptor->type = desc_header->type;
+	list_add_tail(&descriptor->links, &intf->manifest_descs);
+
+	/* desc_size is positive and is known to fit in a signed int */
+
+	return desc_size;
+}
+
+/*
+ * Find the string descriptor having the given id, validate it, and
+ * allocate a duplicate copy of it.  The duplicate has an extra byte
+ * which guarantees the returned string is NUL-terminated.
+ *
+ * String index 0 is valid (it represents "no string"), and for
+ * that a null pointer is returned.
+ *
+ * Otherwise returns a pointer to a newly-allocated copy of the
+ * descriptor string, or an error-coded pointer on failure.
+ */
+static char *gb_string_get(struct gb_interface *intf, u8 string_id)
+{
+	struct greybus_descriptor_string *desc_string;
+	struct manifest_desc *descriptor;
+	bool found = false;
+	char *string;
+
+	/* A zero string id means no string (but no error) */
+	if (!string_id)
+		return NULL;
+
+	list_for_each_entry(descriptor, &intf->manifest_descs, links) {
+		if (descriptor->type != GREYBUS_TYPE_STRING)
+			continue;
+
+		desc_string = descriptor->data;
+		if (desc_string->id == string_id) {
+			found = true;
+			break;
+		}
+	}
+	if (!found)
+		return ERR_PTR(-ENOENT);
+
+	/* Allocate an extra byte so we can guarantee it's NUL-terminated */
+	string = kmemdup(&desc_string->string, desc_string->length + 1,
+				GFP_KERNEL);
+	if (!string)
+		return ERR_PTR(-ENOMEM);
+	string[desc_string->length] = '\0';
+
+	/* Ok we've used this string, so we're done with it */
+	release_manifest_descriptor(descriptor);
+
+	return string;
+}
+
+/*
+ * Find cport descriptors in the manifest associated with the given
+ * bundle, and set up data structures for the functions that use
+ * them.  Returns the number of cports set up for the bundle, or 0
+ * if there is an error.
+ */
+static u32 gb_manifest_parse_cports(struct gb_bundle *bundle)
+{
+	struct gb_interface *intf = bundle->intf;
+	struct greybus_descriptor_cport *desc_cport;
+	struct manifest_desc *desc, *next, *tmp;
+	LIST_HEAD(list);
+	u8 bundle_id = bundle->id;
+	u16 cport_id;
+	u32 count = 0;
+	int i;
+
+	/* Set up all cport descriptors associated with this bundle */
+	list_for_each_entry_safe(desc, next, &intf->manifest_descs, links) {
+		if (desc->type != GREYBUS_TYPE_CPORT)
+			continue;
+
+		desc_cport = desc->data;
+		if (desc_cport->bundle != bundle_id)
+			continue;
+
+		cport_id = le16_to_cpu(desc_cport->id);
+		if (cport_id > CPORT_ID_MAX)
+			goto exit;
+
+		/* Nothing else should have its cport_id as control cport id */
+		if (cport_id == GB_CONTROL_CPORT_ID) {
+			dev_err(&bundle->dev, "invalid cport id found (%02u)\n",
+				cport_id);
+			goto exit;
+		}
+
+		/*
+		 * Found one, move it to our temporary list after checking for
+		 * duplicates.
+		 */
+		list_for_each_entry(tmp, &list, links) {
+			desc_cport = tmp->data;
+			if (cport_id == le16_to_cpu(desc_cport->id)) {
+				dev_err(&bundle->dev,
+						"duplicate CPort %u found\n",
+						cport_id);
+				goto exit;
+			}
+		}
+		list_move_tail(&desc->links, &list);
+		count++;
+	}
+
+	if (!count)
+		return 0;
+
+	bundle->cport_desc = kcalloc(count, sizeof(*bundle->cport_desc),
+					GFP_KERNEL);
+	if (!bundle->cport_desc)
+		goto exit;
+
+	bundle->num_cports = count;
+
+	i = 0;
+	list_for_each_entry_safe(desc, next, &list, links) {
+		desc_cport = desc->data;
+		memcpy(&bundle->cport_desc[i++], desc_cport,
+				sizeof(*desc_cport));
+
+		/* Release the cport descriptor */
+		release_manifest_descriptor(desc);
+	}
+
+	return count;
+exit:
+	release_cport_descriptors(&list, bundle_id);
+	/*
+	 * Free all cports for this bundle to avoid 'excess descriptors'
+	 * warnings.
+	 */
+	release_cport_descriptors(&intf->manifest_descs, bundle_id);
+
+	return 0;	/* Error; count should also be 0 */
+}
+
+/*
+ * Find bundle descriptors in the manifest and set up their data
+ * structures.  Returns the number of bundles set up for the
+ * given interface.
+ */
+static u32 gb_manifest_parse_bundles(struct gb_interface *intf)
+{
+	struct manifest_desc *desc;
+	struct gb_bundle *bundle;
+	struct gb_bundle *bundle_next;
+	u32 count = 0;
+	u8 bundle_id;
+	u8 class;
+
+	while ((desc = get_next_bundle_desc(intf))) {
+		struct greybus_descriptor_bundle *desc_bundle;
+
+		/* Found one.  Set up its bundle structure */
+		desc_bundle = desc->data;
+		bundle_id = desc_bundle->id;
+		class = desc_bundle->class;
+
+		/* Done with this bundle descriptor */
+		release_manifest_descriptor(desc);
+
+		/* Ignore any legacy control bundles */
+		if (bundle_id == GB_CONTROL_BUNDLE_ID) {
+			dev_dbg(&intf->dev, "%s - ignoring control bundle\n",
+					__func__);
+			release_cport_descriptors(&intf->manifest_descs,
+								bundle_id);
+			continue;
+		}
+
+		/* Nothing else should have its class set to control class */
+		if (class == GREYBUS_CLASS_CONTROL) {
+			dev_err(&intf->dev,
+				"bundle %u cannot use control class\n",
+				bundle_id);
+			goto cleanup;
+		}
+
+		bundle = gb_bundle_create(intf, bundle_id, class);
+		if (!bundle)
+			goto cleanup;
+
+		/*
+		 * Now go set up this bundle's functions and cports.
+		 *
+		 * A 'bundle' represents a device in greybus. It may require
+		 * multiple cports for its functioning. If we fail to setup any
+		 * cport of a bundle, we better reject the complete bundle as
+		 * the device may not be able to function properly then.
+		 *
+		 * But, failing to setup a cport of bundle X doesn't mean that
+		 * the device corresponding to bundle Y will not work properly.
+		 * Bundles should be treated as separate independent devices.
+		 *
+		 * While parsing manifest for an interface, treat bundles as
+		 * separate entities and don't reject entire interface and its
+		 * bundles on failing to initialize a cport. But make sure the
+		 * bundle which needs the cport, gets destroyed properly.
+		 */
+		if (!gb_manifest_parse_cports(bundle)) {
+			gb_bundle_destroy(bundle);
+			continue;
+		}
+
+		count++;
+	}
+
+	return count;
+cleanup:
+	/* An error occurred; undo any changes we've made */
+	list_for_each_entry_safe(bundle, bundle_next, &intf->bundles, links) {
+		gb_bundle_destroy(bundle);
+		count--;
+	}
+	return 0;	/* Error; count should also be 0 */
+}
+
+static bool gb_manifest_parse_interface(struct gb_interface *intf,
+					struct manifest_desc *interface_desc)
+{
+	struct greybus_descriptor_interface *desc_intf = interface_desc->data;
+	struct gb_control *control = intf->control;
+	char *str;
+
+	/* Handle the strings first--they can fail */
+	str = gb_string_get(intf, desc_intf->vendor_stringid);
+	if (IS_ERR(str))
+		return false;
+	control->vendor_string = str;
+
+	str = gb_string_get(intf, desc_intf->product_stringid);
+	if (IS_ERR(str))
+		goto out_free_vendor_string;
+	control->product_string = str;
+
+	/* Assign feature flags communicated via manifest */
+	intf->features = desc_intf->features;
+
+	/* Release the interface descriptor, now that we're done with it */
+	release_manifest_descriptor(interface_desc);
+
+	/* An interface must have at least one bundle descriptor */
+	if (!gb_manifest_parse_bundles(intf)) {
+		dev_err(&intf->dev, "manifest bundle descriptors not valid\n");
+		goto out_err;
+	}
+
+	return true;
+out_err:
+	kfree(control->product_string);
+	control->product_string = NULL;
+out_free_vendor_string:
+	kfree(control->vendor_string);
+	control->vendor_string = NULL;
+
+	return false;
+}
+
+/*
+ * Parse a buffer containing an interface manifest.
+ *
+ * If we find anything wrong with the content/format of the buffer
+ * we reject it.
+ *
+ * The first requirement is that the manifest's version is
+ * one we can parse.
+ *
+ * We make an initial pass through the buffer and identify all of
+ * the descriptors it contains, keeping track of each one's type
+ * and of the location and size of its data in the buffer.
+ *
+ * Next we scan the descriptors, looking for an interface descriptor;
+ * there must be exactly one of those.  When found, we record the
+ * information it contains, and then remove that descriptor (and any
+ * string descriptors it refers to) from further consideration.
+ *
+ * After that we look for the interface's bundles--there must be at
+ * least one of those.
+ *
+ * Returns true if parsing was successful, false otherwise.
+ */
+bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size)
+{
+	struct greybus_manifest *manifest;
+	struct greybus_manifest_header *header;
+	struct greybus_descriptor *desc;
+	struct manifest_desc *descriptor;
+	struct manifest_desc *interface_desc = NULL;
+	u16 manifest_size;
+	u32 found = 0;
+	bool result;
+
+	/* Manifest descriptor list should be empty here */
+	if (WARN_ON(!list_empty(&intf->manifest_descs)))
+		return false;
+
+	/* we have to have at _least_ the manifest header */
+	if (size < sizeof(*header)) {
+		dev_err(&intf->dev, "short manifest (%zu < %zu)\n",
+				size, sizeof(*header));
+		return false;
+	}
+
+	/* Make sure the size is right */
+	manifest = data;
+	header = &manifest->header;
+	manifest_size = le16_to_cpu(header->size);
+	if (manifest_size != size) {
+		dev_err(&intf->dev, "manifest size mismatch (%zu != %u)\n",
+				size, manifest_size);
+		return false;
+	}
+
+	/* Validate major/minor number */
+	if (header->version_major > GREYBUS_VERSION_MAJOR) {
+		dev_err(&intf->dev, "manifest version too new (%u.%u > %u.%u)\n",
+				header->version_major, header->version_minor,
+				GREYBUS_VERSION_MAJOR, GREYBUS_VERSION_MINOR);
+		return false;
+	}
+
+	/* OK, find all the descriptors */
+	desc = manifest->descriptors;
+	size -= sizeof(*header);
+	while (size) {
+		int desc_size;
+
+		desc_size = identify_descriptor(intf, desc, size);
+		if (desc_size < 0) {
+			result = false;
+			goto out;
+		}
+		desc = (struct greybus_descriptor *)((char *)desc + desc_size);
+		size -= desc_size;
+	}
+
+	/* There must be a single interface descriptor */
+	list_for_each_entry(descriptor, &intf->manifest_descs, links) {
+		if (descriptor->type == GREYBUS_TYPE_INTERFACE)
+			if (!found++)
+				interface_desc = descriptor;
+	}
+	if (found != 1) {
+		dev_err(&intf->dev, "manifest must have 1 interface descriptor (%u found)\n",
+				found);
+		result = false;
+		goto out;
+	}
+
+	/* Parse the manifest, starting with the interface descriptor */
+	result = gb_manifest_parse_interface(intf, interface_desc);
+
+	/*
+	 * We really should have no remaining descriptors, but we
+	 * don't know what newer format manifests might leave.
+	 */
+	if (result && !list_empty(&intf->manifest_descs))
+		dev_info(&intf->dev, "excess descriptors in interface manifest\n");
+out:
+	release_manifest_descriptors(intf);
+
+	return result;
+}
--- /dev/null
+++ b/drivers/greybus/manifest.h
@@ -0,0 +1,16 @@
+/*
+ * Greybus manifest parsing
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __MANIFEST_H
+#define __MANIFEST_H
+
+struct gb_interface;
+bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size);
+
+#endif /* __MANIFEST_H */
--- /dev/null
+++ b/drivers/greybus/module.c
@@ -0,0 +1,238 @@
+/*
+ * Greybus Module code
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include "greybus.h"
+#include "greybus_trace.h"
+
+
+static ssize_t eject_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t len)
+{
+	struct gb_module *module = to_gb_module(dev);
+	struct gb_interface *intf;
+	size_t i;
+	long val;
+	int ret;
+
+	ret = kstrtol(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	if (!val)
+		return len;
+
+	for (i = 0; i < module->num_interfaces; ++i) {
+		intf = module->interfaces[i];
+
+		mutex_lock(&intf->mutex);
+		/* Set flag to prevent concurrent activation. */
+		intf->ejected = true;
+		gb_interface_disable(intf);
+		gb_interface_deactivate(intf);
+		mutex_unlock(&intf->mutex);
+	}
+
+	/* Tell the SVC to eject the primary interface. */
+	ret = gb_svc_intf_eject(module->hd->svc, module->module_id);
+	if (ret)
+		return ret;
+
+	return len;
+}
+static DEVICE_ATTR_WO(eject);
+
+static ssize_t module_id_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct gb_module *module = to_gb_module(dev);
+
+	return sprintf(buf, "%u\n", module->module_id);
+}
+static DEVICE_ATTR_RO(module_id);
+
+static ssize_t num_interfaces_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct gb_module *module = to_gb_module(dev);
+
+	return sprintf(buf, "%zu\n", module->num_interfaces);
+}
+static DEVICE_ATTR_RO(num_interfaces);
+
+static struct attribute *module_attrs[] = {
+	&dev_attr_eject.attr,
+	&dev_attr_module_id.attr,
+	&dev_attr_num_interfaces.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(module);
+
+static void gb_module_release(struct device *dev)
+{
+	struct gb_module *module = to_gb_module(dev);
+
+	trace_gb_module_release(module);
+
+	kfree(module);
+}
+
+struct device_type greybus_module_type = {
+	.name		= "greybus_module",
+	.release	= gb_module_release,
+};
+
+struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
+					size_t num_interfaces)
+{
+	struct gb_interface *intf;
+	struct gb_module *module;
+	int i;
+
+	module = kzalloc(sizeof(*module) + num_interfaces * sizeof(intf),
+				GFP_KERNEL);
+	if (!module)
+		return NULL;
+
+	module->hd = hd;
+	module->module_id = module_id;
+	module->num_interfaces = num_interfaces;
+
+	module->dev.parent = &hd->dev;
+	module->dev.bus = &greybus_bus_type;
+	module->dev.type = &greybus_module_type;
+	module->dev.groups = module_groups;
+	module->dev.dma_mask = hd->dev.dma_mask;
+	device_initialize(&module->dev);
+	dev_set_name(&module->dev, "%d-%u", hd->bus_id, module_id);
+
+	trace_gb_module_create(module);
+
+	for (i = 0; i < num_interfaces; ++i) {
+		intf = gb_interface_create(module, module_id + i);
+		if (!intf) {
+			dev_err(&module->dev, "failed to create interface %u\n",
+					module_id + i);
+			goto err_put_interfaces;
+		}
+		module->interfaces[i] = intf;
+	}
+
+	return module;
+
+err_put_interfaces:
+	for (--i; i >= 0; --i)
+		gb_interface_put(module->interfaces[i]);
+
+	put_device(&module->dev);
+
+	return NULL;
+}
+
+/*
+ * Register and enable an interface after first attempting to activate it.
+ */
+static void gb_module_register_interface(struct gb_interface *intf)
+{
+	struct gb_module *module = intf->module;
+	u8 intf_id = intf->interface_id;
+	int ret;
+
+	mutex_lock(&intf->mutex);
+
+	ret = gb_interface_activate(intf);
+	if (ret) {
+		if (intf->type != GB_INTERFACE_TYPE_DUMMY) {
+			dev_err(&module->dev,
+					"failed to activate interface %u: %d\n",
+					intf_id, ret);
+		}
+
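+		/*
+		 * Add the interface anyway so that it is at least
+		 * registered with the driver core.
+		 */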
+		gb_interface_add(intf);
+		goto err_unlock;
+	}
+
+	ret = gb_interface_add(intf);
+	if (ret)
+		goto err_interface_deactivate;
+
+	ret = gb_interface_enable(intf);
+	if (ret) {
+		dev_err(&module->dev, "failed to enable interface %u: %d\n",
+				intf_id, ret);
+		goto err_interface_deactivate;
+	}
+
+	mutex_unlock(&intf->mutex);
+
+	return;
+
+err_interface_deactivate:
+	gb_interface_deactivate(intf);
+err_unlock:
+	mutex_unlock(&intf->mutex);
+}
+
+static void gb_module_deregister_interface(struct gb_interface *intf)
+{
+	/*
+	 * If the module itself has been disconnected, mark the interface
+	 * as disconnected too, to prevent I/O during disable.
+	 */
+	if (intf->module->disconnected)
+		intf->disconnected = true;
+
+	mutex_lock(&intf->mutex);
+	intf->removed = true;
+	gb_interface_disable(intf);
+	gb_interface_deactivate(intf);
+	mutex_unlock(&intf->mutex);
+
+	gb_interface_del(intf);
+}
+
+/* Register a module and its interfaces. */
+int gb_module_add(struct gb_module *module)
+{
+	size_t i;
+	int ret;
+
+	ret = device_add(&module->dev);
+	if (ret) {
+		dev_err(&module->dev, "failed to register module: %d\n", ret);
+		return ret;
+	}
+
+	trace_gb_module_add(module);
+
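+	/*
+	 * Interface registration failures are handled per interface and
+	 * do not fail the module registration itself.
+	 */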
+	for (i = 0; i < module->num_interfaces; ++i)
+		gb_module_register_interface(module->interfaces[i]);
+
+	return 0;
+}
+
+/* Deregister a module and its interfaces. */
+void gb_module_del(struct gb_module *module)
+{
+	size_t i;
+
+	for (i = 0; i < module->num_interfaces; ++i)
+		gb_module_deregister_interface(module->interfaces[i]);
+
+	trace_gb_module_del(module);
+
+	device_del(&module->dev);
+}
+
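+/* Drop the reference to each interface and then to the module itself. */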
+void gb_module_put(struct gb_module *module)
+{
+	size_t i;
+
+	for (i = 0; i < module->num_interfaces; ++i)
+		gb_interface_put(module->interfaces[i]);
+
+	put_device(&module->dev);
+}
--- /dev/null
+++ b/drivers/greybus/module.h
@@ -0,0 +1,34 @@
+/*
+ * Greybus Module code
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __MODULE_H
+#define __MODULE_H
+
+struct gb_module {
+	struct device dev;
+	struct gb_host_device *hd;
+
+	struct list_head hd_node;
+
+	u8 module_id;
+	size_t num_interfaces;
+
+	bool disconnected;
+
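+	/* Allocated together with the module; must remain the last member. */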
+	struct gb_interface *interfaces[0];
+};
+#define to_gb_module(d) container_of(d, struct gb_module, dev)
+
+struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
+				   size_t num_interfaces);
+int gb_module_add(struct gb_module *module);
+void gb_module_del(struct gb_module *module);
+void gb_module_put(struct gb_module *module);
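+
+/*
+ * Typical life cycle (a sketch only, not taken verbatim from a caller):
+ *
+ *	module = gb_module_create(hd, module_id, num_interfaces);
+ *	if (!module)
+ *		return -ENOMEM;
+ *
+ *	ret = gb_module_add(module);
+ *	if (ret) {
+ *		gb_module_put(module);
+ *		return ret;
+ *	}
+ *
+ *	...
+ *
+ *	gb_module_del(module);
+ *	gb_module_put(module);
+ */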
+
+#endif /* __MODULE_H */

