Message-ID: <20160831175007-mutt-send-email-mst@kernel.org>
Date:   Wed, 31 Aug 2016 17:54:04 +0300
From:   "Michael S. Tsirkin" <mst@...hat.com>
To:     Namhyung Kim <namhyung@...nel.org>
Cc:     virtio-dev@...ts.oasis-open.org,
        virtualization@...ts.linux-foundation.org, kvm@...r.kernel.org,
        qemu-devel@...gnu.org, LKML <linux-kernel@...r.kernel.org>,
        Paolo Bonzini <pbonzini@...hat.com>,
        Radim Krčmář <rkrcmar@...hat.com>,
        Anthony Liguori <aliguori@...zon.com>,
        Anton Vorontsov <anton@...msg.org>,
        Colin Cross <ccross@...roid.com>,
        Kees Cook <keescook@...omium.org>,
        Tony Luck <tony.luck@...el.com>,
        Steven Rostedt <rostedt@...dmis.org>,
        Ingo Molnar <mingo@...nel.org>,
        Minchan Kim <minchan@...nel.org>,
        Will Deacon <will.deacon@....com>
Subject: Re: [PATCH 1/3] virtio: Basic implementation of virtio pstore driver

On Wed, Aug 31, 2016 at 05:08:00PM +0900, Namhyung Kim wrote:
> The virtio pstore driver provides interface to the pstore subsystem so
> that the guest kernel's log/dump message can be saved on the host
> machine.  Users can access the log file directly on the host, or on the
> guest at the next boot using pstore filesystem.  It currently deals with
> kernel log (printk) buffer only, but we can extend it to have other
> information (like ftrace dump) later.
> 
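
Side note for readers following along: once a record reaches pstore, the
guest sees it as a regular file after mounting the pstore filesystem.  A
minimal sketch of reading it back - /sys/fs/pstore is the usual mount
point, but the "dmesg-virtio-0" name is only an assumption based on the
common <type>-<backend>-<id> record naming, not something taken from
this patch:

	/* Sketch: read back a crash log saved by a virtio pstore backend. */
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		/* Assumed record name; the real id depends on the saved record. */
		FILE *f = fopen("/sys/fs/pstore/dmesg-virtio-0", "r");
		char line[256];

		if (!f) {
			perror("fopen");
			return EXIT_FAILURE;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* dump the saved kernel log */
		fclose(f);
		return EXIT_SUCCESS;
	}
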
> It supports legacy PCI device using single order-2 page buffer.  It uses
> two virtqueues - one for (sync) read and another for (async) write.
> Since it cannot wait for write finished, it supports up to 128
> concurrent IO.  The buffer size is configurable now.
> 
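
The "up to 128 concurrent IO" bookkeeping described above amounts to
cycling through a fixed pool of request/response slots.  A condensed,
self-contained sketch of that scheme (stand-in struct and function names,
not the patch's types):

	#include <string.h>

	#define NR_REQ 128

	struct req { unsigned short cmd, type; };		/* stand-ins for the */
	struct res { unsigned short cmd, type; int ret; };	/* virtio structs    */

	struct pool {
		struct req req[NR_REQ];
		struct res res[NR_REQ];
		unsigned int next;	/* monotonically increasing request id */
	};

	/* Hand out the next slot, wrapping after 128, so at most 128
	 * asynchronous writes are ever outstanding at once. */
	static void get_slot(struct pool *p, struct req **rq, struct res **rs)
	{
		unsigned int idx = p->next++ % NR_REQ;

		*rq = &p->req[idx];
		*rs = &p->res[idx];
		memset(*rq, 0, sizeof(**rq));
		memset(*rs, 0, sizeof(**rs));
	}
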
> Cc: Paolo Bonzini <pbonzini@...hat.com>
> Cc: Radim Krčmář <rkrcmar@...hat.com>
> Cc: "Michael S. Tsirkin" <mst@...hat.com>
> Cc: Anthony Liguori <aliguori@...zon.com>
> Cc: Anton Vorontsov <anton@...msg.org>
> Cc: Colin Cross <ccross@...roid.com>
> Cc: Kees Cook <keescook@...omium.org>
> Cc: Tony Luck <tony.luck@...el.com>
> Cc: Steven Rostedt <rostedt@...dmis.org>
> Cc: Ingo Molnar <mingo@...nel.org>
> Cc: Minchan Kim <minchan@...nel.org>
> Cc: Will Deacon <will.deacon@....com>
> Cc: kvm@...r.kernel.org
> Cc: qemu-devel@...gnu.org
> Cc: virtualization@...ts.linux-foundation.org
> Cc: virtio-dev@...ts.oasis-open.org
> Signed-off-by: Namhyung Kim <namhyung@...nel.org>
> ---
>  drivers/virtio/Kconfig             |  10 +
>  drivers/virtio/Makefile            |   1 +
>  drivers/virtio/virtio_pstore.c     | 417 +++++++++++++++++++++++++++++++++++++
>  include/uapi/linux/Kbuild          |   1 +
>  include/uapi/linux/virtio_ids.h    |   1 +
>  include/uapi/linux/virtio_pstore.h |  74 +++++++
>  6 files changed, 504 insertions(+)
>  create mode 100644 drivers/virtio/virtio_pstore.c
>  create mode 100644 include/uapi/linux/virtio_pstore.h
> 
> diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
> index 77590320d44c..8f0e6c796c12 100644
> --- a/drivers/virtio/Kconfig
> +++ b/drivers/virtio/Kconfig
> @@ -58,6 +58,16 @@ config VIRTIO_INPUT
>  
>  	 If unsure, say M.
>  
> +config VIRTIO_PSTORE
> +	tristate "Virtio pstore driver"
> +	depends on VIRTIO
> +	depends on PSTORE
> +	---help---
> +	 This driver supports virtio pstore devices to save/restore
> +	 panic and oops messages on the host.
> +
> +	 If unsure, say M.
> +
>   config VIRTIO_MMIO
>  	tristate "Platform bus driver for memory mapped virtio devices"
>  	depends on HAS_IOMEM && HAS_DMA
> diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
> index 41e30e3dc842..bee68cb26d48 100644
> --- a/drivers/virtio/Makefile
> +++ b/drivers/virtio/Makefile
> @@ -5,3 +5,4 @@ virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
>  virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
>  obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
>  obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
> +obj-$(CONFIG_VIRTIO_PSTORE) += virtio_pstore.o
> diff --git a/drivers/virtio/virtio_pstore.c b/drivers/virtio/virtio_pstore.c
> new file mode 100644
> index 000000000000..ec41f0d2f0b7
> --- /dev/null
> +++ b/drivers/virtio/virtio_pstore.c
> @@ -0,0 +1,417 @@
> +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
> +
> +#include <linux/kernel.h>
> +#include <linux/module.h>
> +#include <linux/pstore.h>
> +#include <linux/virtio.h>
> +#include <linux/virtio_config.h>
> +#include <uapi/linux/virtio_ids.h>
> +#include <uapi/linux/virtio_pstore.h>
> +
> +#define VIRT_PSTORE_ORDER    2
> +#define VIRT_PSTORE_BUFSIZE  (4096 << VIRT_PSTORE_ORDER)
> +#define VIRT_PSTORE_NR_REQ   128
> +
> +struct virtio_pstore {
> +	struct virtio_device	*vdev;
> +	struct virtqueue	*vq[2];
> +	struct pstore_info	 pstore;
> +	struct virtio_pstore_req req[VIRT_PSTORE_NR_REQ];
> +	struct virtio_pstore_res res[VIRT_PSTORE_NR_REQ];
> +	unsigned int		 req_id;
> +
> +	/* Waiting for host to ack */
> +	wait_queue_head_t	acked;
> +	int			failed;
> +};
> +
> +#define TYPE_TABLE_ENTRY(_entry)				\
> +	{ PSTORE_TYPE_##_entry, VIRTIO_PSTORE_TYPE_##_entry }
> +
> +struct type_table {
> +	int pstore;
> +	u16 virtio;
> +} type_table[] = {
> +	TYPE_TABLE_ENTRY(DMESG),
> +};
> +
> +#undef TYPE_TABLE_ENTRY
> +
> +
> +static u16 to_virtio_type(struct virtio_pstore *vps, enum pstore_type_id type)
> +{
> +	unsigned int i;
> +
> +	for (i = 0; i < ARRAY_SIZE(type_table); i++) {
> +		if (type == type_table[i].pstore)
> +			return cpu_to_virtio16(vps->vdev, type_table[i].virtio);

Does this pass sparse checks? If yes, I'm surprised - the function is
declared to return a plain u16, but it clearly returns a __virtio16.

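Something along these lines would keep the endian annotations consistent
for sparse - a sketch only, the point is the __virtio16 return type (the
value ends up in the __virtio16 req->type field anyway):

static __virtio16 to_virtio_type(struct virtio_pstore *vps,
				 enum pstore_type_id type)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(type_table); i++) {
		if (type == type_table[i].pstore)
			return cpu_to_virtio16(vps->vdev, type_table[i].virtio);
	}

	return cpu_to_virtio16(vps->vdev, VIRTIO_PSTORE_TYPE_UNKNOWN);
}
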

> +	}
> +
> +	return cpu_to_virtio16(vps->vdev, VIRTIO_PSTORE_TYPE_UNKNOWN);
> +}
> +
> +static enum pstore_type_id from_virtio_type(struct virtio_pstore *vps, u16 type)
> +{
> +	unsigned int i;
> +
> +	for (i = 0; i < ARRAY_SIZE(type_table); i++) {
> +		if (virtio16_to_cpu(vps->vdev, type) == type_table[i].virtio)
> +			return type_table[i].pstore;
> +	}
> +
> +	return PSTORE_TYPE_UNKNOWN;
> +}
> +
> +static void virtpstore_ack(struct virtqueue *vq)
> +{
> +	struct virtio_pstore *vps = vq->vdev->priv;
> +
> +	wake_up(&vps->acked);
> +}
> +
> +static void virtpstore_check(struct virtqueue *vq)
> +{
> +	struct virtio_pstore *vps = vq->vdev->priv;
> +	struct virtio_pstore_res *res;
> +	unsigned int len;
> +
> +	res = virtqueue_get_buf(vq, &len);
> +	if (res == NULL)
> +		return;
> +
> +	if (virtio32_to_cpu(vq->vdev, res->ret) < 0)
> +		vps->failed = 1;
> +}
> +
> +static void virt_pstore_get_reqs(struct virtio_pstore *vps,
> +				 struct virtio_pstore_req **preq,
> +				 struct virtio_pstore_res **pres)
> +{
> +	unsigned int idx = vps->req_id++ % VIRT_PSTORE_NR_REQ;
> +
> +	*preq = &vps->req[idx];
> +	*pres = &vps->res[idx];
> +
> +	memset(*preq, 0, sizeof(**preq));
> +	memset(*pres, 0, sizeof(**pres));
> +}
> +
> +static int virt_pstore_open(struct pstore_info *psi)
> +{
> +	struct virtio_pstore *vps = psi->data;
> +	struct virtio_pstore_req *req;
> +	struct virtio_pstore_res *res;
> +	struct scatterlist sgo[1], sgi[1];
> +	struct scatterlist *sgs[2] = { sgo, sgi };
> +	unsigned int len;
> +
> +	virt_pstore_get_reqs(vps, &req, &res);
> +
> +	req->cmd = cpu_to_virtio16(vps->vdev, VIRTIO_PSTORE_CMD_OPEN);
> +
> +	sg_init_one(sgo, req, sizeof(*req));
> +	sg_init_one(sgi, res, sizeof(*res));
> +	virtqueue_add_sgs(vps->vq[0], sgs, 1, 1, vps, GFP_KERNEL);
> +	virtqueue_kick(vps->vq[0]);
> +
> +	wait_event(vps->acked, virtqueue_get_buf(vps->vq[0], &len));
> +	return virtio32_to_cpu(vps->vdev, res->ret);
> +}
> +
> +static int virt_pstore_close(struct pstore_info *psi)
> +{
> +	struct virtio_pstore *vps = psi->data;
> +	struct virtio_pstore_req *req = &vps->req[vps->req_id];
> +	struct virtio_pstore_res *res = &vps->res[vps->req_id];
> +	struct scatterlist sgo[1], sgi[1];
> +	struct scatterlist *sgs[2] = { sgo, sgi };
> +	unsigned int len;
> +
> +	virt_pstore_get_reqs(vps, &req, &res);
> +
> +	req->cmd = cpu_to_virtio16(vps->vdev, VIRTIO_PSTORE_CMD_CLOSE);
> +
> +	sg_init_one(sgo, req, sizeof(*req));
> +	sg_init_one(sgi, res, sizeof(*res));
> +	virtqueue_add_sgs(vps->vq[0], sgs, 1, 1, vps, GFP_KERNEL);
> +	virtqueue_kick(vps->vq[0]);
> +
> +	wait_event(vps->acked, virtqueue_get_buf(vps->vq[0], &len));
> +	return virtio32_to_cpu(vps->vdev, res->ret);
> +}
> +
> +static ssize_t virt_pstore_read(u64 *id, enum pstore_type_id *type,
> +				int *count, struct timespec *time,
> +				char **buf, bool *compressed,
> +				ssize_t *ecc_notice_size,
> +				struct pstore_info *psi)
> +{
> +	struct virtio_pstore *vps = psi->data;
> +	struct virtio_pstore_req *req;
> +	struct virtio_pstore_res *res;
> +	struct virtio_pstore_fileinfo info;
> +	struct scatterlist sgo[1], sgi[3];
> +	struct scatterlist *sgs[2] = { sgo, sgi };
> +	unsigned int len;
> +	unsigned int flags;
> +	int ret;
> +	void *bf;
> +
> +	virt_pstore_get_reqs(vps, &req, &res);
> +
> +	req->cmd = cpu_to_virtio16(vps->vdev, VIRTIO_PSTORE_CMD_READ);
> +
> +	sg_init_one(sgo, req, sizeof(*req));
> +	sg_init_table(sgi, 3);
> +	sg_set_buf(&sgi[0], res, sizeof(*res));
> +	sg_set_buf(&sgi[1], &info, sizeof(info));
> +	sg_set_buf(&sgi[2], psi->buf, psi->bufsize);
> +	virtqueue_add_sgs(vps->vq[0], sgs, 1, 1, vps, GFP_KERNEL);
> +	virtqueue_kick(vps->vq[0]);
> +
> +	wait_event(vps->acked, virtqueue_get_buf(vps->vq[0], &len));
> +	if (len < sizeof(*res) + sizeof(info))
> +		return -1;
> +
> +	ret = virtio32_to_cpu(vps->vdev, res->ret);
> +	if (ret < 0)
> +		return ret;
> +
> +	len = virtio32_to_cpu(vps->vdev, info.len);
> +
> +	bf = kmalloc(len, GFP_KERNEL);
> +	if (bf == NULL)
> +		return -ENOMEM;
> +
> +	*id    = virtio64_to_cpu(vps->vdev, info.id);
> +	*type  = from_virtio_type(vps, info.type);
> +	*count = virtio32_to_cpu(vps->vdev, info.count);
> +
> +	flags = virtio32_to_cpu(vps->vdev, info.flags);
> +	*compressed = flags & VIRTIO_PSTORE_FL_COMPRESSED;
> +
> +	time->tv_sec  = virtio64_to_cpu(vps->vdev, info.time_sec);
> +	time->tv_nsec = virtio32_to_cpu(vps->vdev, info.time_nsec);
> +
> +	memcpy(bf, psi->buf, len);
> +	*buf = bf;
> +
> +	return len;
> +}
> +
> +static int notrace virt_pstore_write(enum pstore_type_id type,
> +				     enum kmsg_dump_reason reason,
> +				     u64 *id, unsigned int part, int count,
> +				     bool compressed, size_t size,
> +				     struct pstore_info *psi)
> +{
> +	struct virtio_pstore *vps = psi->data;
> +	struct virtio_pstore_req *req;
> +	struct virtio_pstore_res *res;
> +	struct scatterlist sgo[2], sgi[1];
> +	struct scatterlist *sgs[2] = { sgo, sgi };
> +	unsigned int flags = compressed ? VIRTIO_PSTORE_FL_COMPRESSED : 0;
> +
> +	if (vps->failed)
> +		return -1;
> +
> +	*id = vps->req_id;
> +	virt_pstore_get_reqs(vps, &req, &res);
> +
> +	req->cmd   = cpu_to_virtio16(vps->vdev, VIRTIO_PSTORE_CMD_WRITE);
> +	req->type  = to_virtio_type(vps, type);
> +	req->flags = cpu_to_virtio32(vps->vdev, flags);
> +
> +	sg_init_table(sgo, 2);
> +	sg_set_buf(&sgo[0], req, sizeof(*req));
> +	sg_set_buf(&sgo[1], psi->buf, size);
> +	sg_init_one(sgi, res, sizeof(*res));
> +	virtqueue_add_sgs(vps->vq[1], sgs, 1, 1, vps, GFP_ATOMIC);
> +	virtqueue_kick(vps->vq[1]);
> +
> +	return 0;
> +}
> +
> +static int virt_pstore_erase(enum pstore_type_id type, u64 id, int count,
> +			     struct timespec time, struct pstore_info *psi)
> +{
> +	struct virtio_pstore *vps = psi->data;
> +	struct virtio_pstore_req *req;
> +	struct virtio_pstore_res *res;
> +	struct scatterlist sgo[1], sgi[1];
> +	struct scatterlist *sgs[2] = { sgo, sgi };
> +	unsigned int len;
> +
> +	virt_pstore_get_reqs(vps, &req, &res);
> +
> +	req->cmd   = cpu_to_virtio16(vps->vdev, VIRTIO_PSTORE_CMD_ERASE);
> +	req->type  = to_virtio_type(vps, type);
> +	req->id	   = cpu_to_virtio64(vps->vdev, id);
> +	req->count = cpu_to_virtio32(vps->vdev, count);
> +
> +	sg_init_one(sgo, req, sizeof(*req));
> +	sg_init_one(sgi, res, sizeof(*res));
> +	virtqueue_add_sgs(vps->vq[0], sgs, 1, 1, vps, GFP_KERNEL);
> +	virtqueue_kick(vps->vq[0]);
> +
> +	wait_event(vps->acked, virtqueue_get_buf(vps->vq[0], &len));
> +	return virtio32_to_cpu(vps->vdev, res->ret);
> +}
> +
> +static int virt_pstore_init(struct virtio_pstore *vps)
> +{
> +	struct pstore_info *psinfo = &vps->pstore;
> +	int err;
> +
> +	if (!psinfo->bufsize)
> +		psinfo->bufsize = VIRT_PSTORE_BUFSIZE;
> +
> +	psinfo->buf = alloc_pages_exact(psinfo->bufsize, GFP_KERNEL);
> +	if (!psinfo->buf) {
> +		pr_err("cannot allocate pstore buffer\n");
> +		return -ENOMEM;
> +	}
> +
> +	psinfo->owner = THIS_MODULE;
> +	psinfo->name  = "virtio";
> +	psinfo->open  = virt_pstore_open;
> +	psinfo->close = virt_pstore_close;
> +	psinfo->read  = virt_pstore_read;
> +	psinfo->erase = virt_pstore_erase;
> +	psinfo->write = virt_pstore_write;
> +	psinfo->flags = PSTORE_FLAGS_FRAGILE;
> +
> +	psinfo->data  = vps;
> +	spin_lock_init(&psinfo->buf_lock);
> +
> +	err = pstore_register(psinfo);
> +	if (err)
> +		kfree(psinfo->buf);
> +
> +	return err;
> +}
> +
> +static int virt_pstore_exit(struct virtio_pstore *vps)
> +{
> +	struct pstore_info *psinfo = &vps->pstore;
> +
> +	pstore_unregister(psinfo);
> +
> +	free_pages_exact(psinfo->buf, psinfo->bufsize);
> +	psinfo->buf = NULL;
> +	psinfo->bufsize = 0;
> +
> +	return 0;
> +}
> +
> +static int virtpstore_init_vqs(struct virtio_pstore *vps)
> +{
> +	vq_callback_t *callbacks[] = { virtpstore_ack, virtpstore_check };
> +	const char *names[] = { "pstore_read", "pstore_write" };
> +
> +	return vps->vdev->config->find_vqs(vps->vdev, 2, vps->vq,
> +					   callbacks, names);
> +}
> +
> +static void virtpstore_init_config(struct virtio_pstore *vps)
> +{
> +	u32 bufsize;
> +
> +	virtio_cread(vps->vdev, struct virtio_pstore_config, bufsize, &bufsize);
> +
> +	vps->pstore.bufsize = PAGE_ALIGN(bufsize);
> +}
> +
> +static void virtpstore_confirm_config(struct virtio_pstore *vps)
> +{
> +	u32 bufsize = vps->pstore.bufsize;
> +
> +	virtio_cwrite(vps->vdev, struct virtio_pstore_config, bufsize,
> +		     &bufsize);
> +}
> +
> +static int virtpstore_probe(struct virtio_device *vdev)
> +{
> +	struct virtio_pstore *vps;
> +	int err;
> +
> +	if (!vdev->config->get) {
> +		dev_err(&vdev->dev, "driver init: config access disabled\n");
> +		return -EINVAL;
> +	}
> +
> +	vdev->priv = vps = kzalloc(sizeof(*vps), GFP_KERNEL);
> +	if (!vps) {
> +		err = -ENOMEM;
> +		goto out;
> +	}
> +	vps->vdev = vdev;
> +
> +	err = virtpstore_init_vqs(vps);
> +	if (err < 0)
> +		goto out_free;
> +
> +	virtpstore_init_config(vps);
> +
> +	err = virt_pstore_init(vps);
> +	if (err)
> +		goto out_del_vq;
> +
> +	virtpstore_confirm_config(vps);
> +
> +	init_waitqueue_head(&vps->acked);
> +
> +	virtio_device_ready(vdev);
> +
> +	dev_info(&vdev->dev, "driver init: ok (bufsize = %luK, flags = %x)\n",
> +		 vps->pstore.bufsize >> 10, vps->pstore.flags);
> +
> +	return 0;
> +
> +out_del_vq:
> +	vdev->config->del_vqs(vdev);
> +out_free:
> +	kfree(vps);
> +out:
> +	dev_err(&vdev->dev, "driver init: failed with %d\n", err);
> +	return err;
> +}
> +
> +static void virtpstore_remove(struct virtio_device *vdev)
> +{
> +	struct virtio_pstore *vps = vdev->priv;
> +
> +	virt_pstore_exit(vps);
> +
> +	/* Now we reset the device so we can clean up the queues. */
> +	vdev->config->reset(vdev);
> +
> +	vdev->config->del_vqs(vdev);
> +
> +	kfree(vps);
> +}
> +
> +static unsigned int features[] = {
> +};
> +
> +static struct virtio_device_id id_table[] = {
> +	{ VIRTIO_ID_PSTORE, VIRTIO_DEV_ANY_ID },
> +	{ 0 },
> +};
> +
> +static struct virtio_driver virtio_pstore_driver = {
> +	.driver.name         = KBUILD_MODNAME,
> +	.driver.owner        = THIS_MODULE,
> +	.feature_table       = features,
> +	.feature_table_size  = ARRAY_SIZE(features),
> +	.id_table            = id_table,
> +	.probe               = virtpstore_probe,
> +	.remove              = virtpstore_remove,
> +};
> +
> +module_virtio_driver(virtio_pstore_driver);
> +MODULE_DEVICE_TABLE(virtio, id_table);
> +
> +MODULE_LICENSE("GPL");
> +MODULE_AUTHOR("Namhyung Kim <namhyung@...nel.org>");
> +MODULE_DESCRIPTION("Virtio pstore driver");
> diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
> index 6d4e92ccdc91..9bbb1554d8b2 100644
> --- a/include/uapi/linux/Kbuild
> +++ b/include/uapi/linux/Kbuild
> @@ -449,6 +449,7 @@ header-y += virtio_ids.h
>  header-y += virtio_input.h
>  header-y += virtio_net.h
>  header-y += virtio_pci.h
> +header-y += virtio_pstore.h
>  header-y += virtio_ring.h
>  header-y += virtio_rng.h
>  header-y += virtio_scsi.h
> diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
> index 77925f587b15..c72a9ab588c0 100644
> --- a/include/uapi/linux/virtio_ids.h
> +++ b/include/uapi/linux/virtio_ids.h
> @@ -41,5 +41,6 @@
>  #define VIRTIO_ID_CAIF	       12 /* Virtio caif */
>  #define VIRTIO_ID_GPU          16 /* virtio GPU */
>  #define VIRTIO_ID_INPUT        18 /* virtio input */
> +#define VIRTIO_ID_PSTORE       22 /* virtio pstore */
>  
>  #endif /* _LINUX_VIRTIO_IDS_H */
> diff --git a/include/uapi/linux/virtio_pstore.h b/include/uapi/linux/virtio_pstore.h
> new file mode 100644
> index 000000000000..f4b0d204d8ae
> --- /dev/null
> +++ b/include/uapi/linux/virtio_pstore.h
> @@ -0,0 +1,74 @@
> +#ifndef _LINUX_VIRTIO_PSTORE_H
> +#define _LINUX_VIRTIO_PSTORE_H
> +/* This header is BSD licensed so anyone can use the definitions to implement
> + * compatible drivers/servers.
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + * 1. Redistributions of source code must retain the above copyright
> + *    notice, this list of conditions and the following disclaimer.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + *    notice, this list of conditions and the following disclaimer in the
> + *    documentation and/or other materials provided with the distribution.
> + * 3. Neither the name of IBM nor the names of its contributors
> + *    may be used to endorse or promote products derived from this software
> + *    without specific prior written permission.
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
> + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
> + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
> + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
> + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
> + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
> + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> + * SUCH DAMAGE. */
> +#include <linux/types.h>
> +#include <linux/virtio_types.h>
> +
> +#define VIRTIO_PSTORE_CMD_NULL   0
> +#define VIRTIO_PSTORE_CMD_OPEN   1
> +#define VIRTIO_PSTORE_CMD_READ   2
> +#define VIRTIO_PSTORE_CMD_WRITE  3
> +#define VIRTIO_PSTORE_CMD_ERASE  4
> +#define VIRTIO_PSTORE_CMD_CLOSE  5
> +
> +#define VIRTIO_PSTORE_TYPE_UNKNOWN  0
> +#define VIRTIO_PSTORE_TYPE_DMESG    1
> +
> +#define VIRTIO_PSTORE_FL_COMPRESSED  1
> +
> +struct virtio_pstore_req {
> +	__virtio16		cmd;
> +	__virtio16		type;
> +	__virtio32		flags;
> +	__virtio64		id;
> +	__virtio32		count;
> +	__virtio32		reserved;
> +};
> +
> +struct virtio_pstore_res {
> +	__virtio16		cmd;
> +	__virtio16		type;
> +	__virtio32		ret;
> +};

Is there a reason to support legacy endianness?
If not, you can just use the fixed __le formats.

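For illustration only (not part of the patch): if the device is declared
VIRTIO 1.0-only, the same layout with fixed little-endian types would
look like this:

struct virtio_pstore_req {
	__le16		cmd;
	__le16		type;
	__le32		flags;
	__le64		id;
	__le32		count;
	__le32		reserved;
};

struct virtio_pstore_res {
	__le16		cmd;
	__le16		type;
	__le32		ret;
};
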

> +struct virtio_pstore_fileinfo {
> +	__virtio64		id;
> +	__virtio32		count;
> +	__virtio16		type;
> +	__virtio16		unused;
> +	__virtio32		flags;
> +	__virtio32		len;
> +	__virtio64		time_sec;
> +	__virtio32		time_nsec;
> +	__virtio32		reserved;
> +};
> +
> +struct virtio_pstore_config {
> +	__virtio32		bufsize;
> +};
> +
> +#endif /* _LINUX_VIRTIO_PSTORE_H */
> -- 
> 2.9.3
