Message-ID: <20180209070511.GD2783@xz-mi>
Date: Fri, 9 Feb 2018 15:05:11 +0800
From: Peter Xu <peterx@...hat.com>
To: Alex Williamson <alex.williamson@...hat.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
qemu-devel@...gnu.org
Subject: Re: [Qemu-devel] [RFC PATCH] vfio/pci: Add ioeventfd support
On Tue, Feb 06, 2018 at 05:08:14PM -0700, Alex Williamson wrote:
[...]
> +long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
> +                        uint64_t data, int count, int fd)
> +{
> +        struct pci_dev *pdev = vdev->pdev;
> +        loff_t pos = offset & VFIO_PCI_OFFSET_MASK;
> +        int ret, bar = VFIO_PCI_OFFSET_TO_INDEX(offset);
> +        struct vfio_pci_ioeventfd *ioeventfd;
> +        int (*handler)(void *, void *);
> +        unsigned long val;
> +
> +        /* Only support ioeventfds into BARs */
> +        if (bar > VFIO_PCI_BAR5_REGION_INDEX)
> +                return -EINVAL;
> +
> +        if (pos + count > pci_resource_len(pdev, bar))
> +                return -EINVAL;
> +
> +        /* Disallow ioeventfds working around MSI-X table writes */
> +        if (bar == vdev->msix_bar &&
> +            !(pos + count <= vdev->msix_offset ||
> +              pos >= vdev->msix_offset + vdev->msix_size))
> +                return -EINVAL;
> +
> +        switch (count) {
> +        case 1:
> +                handler = &vfio_pci_ioeventfd_handler8;
> +                val = data;
> +                break;
> +        case 2:
> +                handler = &vfio_pci_ioeventfd_handler16;
> +                val = le16_to_cpu(data);
> +                break;
> +        case 4:
> +                handler = &vfio_pci_ioeventfd_handler32;
> +                val = le32_to_cpu(data);
> +                break;
> +#ifdef iowrite64
> +        case 8:
> +                handler = &vfio_pci_ioeventfd_handler64;
> +                val = le64_to_cpu(data);
> +                break;
> +#endif
> +        default:
> +                return -EINVAL;
> +        }
> +
> +        ret = vfio_pci_setup_barmap(vdev, bar);
> +        if (ret)
> +                return ret;
> +
> +        mutex_lock(&vdev->ioeventfds_lock);
> +
> +        list_for_each_entry(ioeventfd, &vdev->ioeventfds_list, next) {
> +                if (ioeventfd->pos == pos && ioeventfd->bar == bar &&
> +                    ioeventfd->data == data && ioeventfd->count == count) {
> +                        if (fd == -1) {
> +                                vfio_virqfd_disable(&ioeventfd->virqfd);
> +                                list_del(&ioeventfd->next);
> +                                kfree(ioeventfd);
> +                                ret = 0;
> +                        } else
> +                                ret = -EEXIST;
> +
> +                        goto out_unlock;
> +                }
> +        }
> +
> +        if (fd < 0) {
> +                ret = -ENODEV;
> +                goto out_unlock;
> +        }
> +
> +        ioeventfd = kzalloc(sizeof(*ioeventfd), GFP_KERNEL);
> +        if (!ioeventfd) {
> +                ret = -ENOMEM;
> +                goto out_unlock;
> +        }
> +
> +        ioeventfd->pos = pos;
> +        ioeventfd->bar = bar;
> +        ioeventfd->data = data;
> +        ioeventfd->count = count;
> +
> +        ret = vfio_virqfd_enable(vdev->barmap[ioeventfd->bar] + ioeventfd->pos,
> +                                 handler, NULL, (void *)val,
> +                                 &ioeventfd->virqfd, fd);
> +        if (ret) {
> +                kfree(ioeventfd);
> +                goto out_unlock;
> +        }
> +
> +        list_add(&ioeventfd->next, &vdev->ioeventfds_list);
Is there a limit on how many ioeventfds can be created?

IIUC QEMU will create such an eventfd "automatically" once the same MMIO
addr/data pair has been written N=10 times in a row, so would it be safer
to put a cap on the maximum number of ioeventfds per device?  Otherwise
I'm not sure whether a malicious guest could consume host memory by
writing:

- addr1/data1, 10 times
- addr2/data2, 10 times
- ...

to create an unlimited number of ioeventfds.  Thanks,
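
(Just to illustrate what I mean -- a rough sketch only; VFIO_PCI_IOEVENTFD_MAX
and the vdev->ioeventfds_nr counter below are names I made up, they are not
part of this patch:)

        /*
         * Hypothetical cap on ioeventfds per device; checked under
         * vdev->ioeventfds_lock before the allocation in
         * vfio_pci_ioeventfd().
         */
        #define VFIO_PCI_IOEVENTFD_MAX          1000

                if (vdev->ioeventfds_nr >= VFIO_PCI_IOEVENTFD_MAX) {
                        ret = -ENOSPC;
                        goto out_unlock;
                }

                ioeventfd = kzalloc(sizeof(*ioeventfd), GFP_KERNEL);
                if (!ioeventfd) {
                        ret = -ENOMEM;
                        goto out_unlock;
                }

                vdev->ioeventfds_nr++;  /* decremented on the fd == -1 removal path */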
--
Peter Xu