Message-ID: <20191121173825.1527c3a5@suzdal.zaitcev.lan>
Date: Thu, 21 Nov 2019 17:38:25 -0600
From: Pete Zaitcev <zaitcev@...hat.com>
To: Alan Stern <stern@...land.harvard.edu>
Cc: syzbot <syzbot+56f9673bb4cdcbeb0e92@...kaller.appspotmail.com>,
<arnd@...db.de>, <gregkh@...uxfoundation.org>,
<jrdr.linux@...il.com>, <keescook@...omium.org>,
<kstewart@...uxfoundation.org>,
Kernel development list <linux-kernel@...r.kernel.org>,
USB list <linux-usb@...r.kernel.org>,
<syzkaller-bugs@...glegroups.com>, <tglx@...utronix.de>,
<viro@...iv.linux.org.uk>, zaitcev@...hat.com
Subject: Re: possible deadlock in mon_bin_vma_fault
On Thu, 21 Nov 2019 11:20:20 -0500 (EST)
Alan Stern <stern@...land.harvard.edu> wrote:
> On Thu, 21 Nov 2019, Pete Zaitcev wrote:
>
> > Anyway... If you are looking at it too, what do you think about not using
> > any locks in mon_bin_vma_fault() at all? Isn't it valid? I think I tried
> > to be "safe", but it only uses things that are constants unless we're
> > opening and closing; a process cannot make page faults unless it has
> > something mapped; and that is only possible if the device is open and stays
> > open. Can you find a hole in this reasoning?
>
> I think you're right. [...]
How about the appended patch, then? You like?
Do you happen to know how to refer to a syzbot report in a commit message?
-- Pete
commit 628f3bbf37eee21cce4cfbfaa6a796b129d7736d
Author: Pete Zaitcev <zaitcev@...ori.zaitcev.us>
Date: Thu Nov 21 17:24:00 2019 -0600
usb: Fix a deadlock in usbmon between mmap and read
Signed-off-by: Pete Zaitcev <zaitcev@...hat.com>
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index ac2b4fcc265f..fb7df9810bad 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1039,12 +1039,18 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg
mutex_lock(&rp->fetch_lock);
spin_lock_irqsave(&rp->b_lock, flags);
- mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
- kfree(rp->b_vec);
- rp->b_vec = vec;
- rp->b_size = size;
- rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
- rp->cnt_lost = 0;
+ if (rp->mmap_active) {
+ mon_free_buff(vec, size/CHUNK_SIZE);
+ kfree(vec);
+ ret = -EBUSY;
+ } else {
+ mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
+ kfree(rp->b_vec);
+ rp->b_vec = vec;
+ rp->b_size = size;
+ rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
+ rp->cnt_lost = 0;
+ }
spin_unlock_irqrestore(&rp->b_lock, flags);
mutex_unlock(&rp->fetch_lock);
}
@@ -1093,11 +1099,11 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg
return ret;
if (put_user(ret, &uptr->nfetch))
return -EFAULT;
- ret = 0;
}
break;
- case MON_IOCG_STATS: {
+ case MON_IOCG_STATS:
+ {
struct mon_bin_stats __user *sp;
unsigned int nevents;
unsigned int ndropped;
@@ -1113,7 +1119,6 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg
return -EFAULT;
if (put_user(nevents, &sp->queued))
return -EFAULT;
-
}
break;
@@ -1216,13 +1221,21 @@ mon_bin_poll(struct file *file, struct poll_table_struct *wait)
static void mon_bin_vma_open(struct vm_area_struct *vma)
{
struct mon_reader_bin *rp = vma->vm_private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rp->b_lock, flags);
rp->mmap_active++;
+ spin_unlock_irqrestore(&rp->b_lock, flags);
}
static void mon_bin_vma_close(struct vm_area_struct *vma)
{
+ unsigned long flags;
+
struct mon_reader_bin *rp = vma->vm_private_data;
+ spin_lock_irqsave(&rp->b_lock, flags);
rp->mmap_active--;
+ spin_unlock_irqrestore(&rp->b_lock, flags);
}
/*
@@ -1234,16 +1247,12 @@ static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf)
unsigned long offset, chunk_idx;
struct page *pageptr;
- mutex_lock(&rp->fetch_lock);
offset = vmf->pgoff << PAGE_SHIFT;
- if (offset >= rp->b_size) {
- mutex_unlock(&rp->fetch_lock);
+ if (offset >= rp->b_size)
return VM_FAULT_SIGBUS;
- }
chunk_idx = offset / CHUNK_SIZE;
pageptr = rp->b_vec[chunk_idx].pg;
get_page(pageptr);
- mutex_unlock(&rp->fetch_lock);
vmf->page = pageptr;
return 0;
}
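
For illustration only (not part of the patch): with the mmap_active check above,
trying to resize the ring while a mapping exists should now fail with -EBUSY
instead of racing against the fault handler. A minimal userspace sketch of that
check is below. The ioctl number and the /dev/usbmon0 node name are copied from
drivers/usb/mon/mon_bin.c and the usbmon documentation as of this writing; there
is no exported uapi header, so verify them against your tree, and note that
opening usbmon normally requires root.

	/* sketch.c - hedged example, assumes /dev/usbmon0 and the ioctl
	 * numbers below match the running kernel's mon_bin.c */
	#include <stdio.h>
	#include <string.h>
	#include <errno.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>

	#define MON_IOC_MAGIC		0x92
	#define MON_IOCT_RING_SIZE	_IO(MON_IOC_MAGIC, 4)	/* set ring size */

	int main(void)
	{
		/* bus 0 means "all buses" in the binary usbmon API */
		int fd = open("/dev/usbmon0", O_RDONLY);
		if (fd < 0) {
			perror("open /dev/usbmon0");
			return 1;
		}

		size_t len = 64 * 1024;
		void *buf = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
		if (buf == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/* While the mapping is alive, a resize must now be refused. */
		if (ioctl(fd, MON_IOCT_RING_SIZE, 128 * 1024) < 0 && errno == EBUSY)
			printf("resize refused with EBUSY while mmapped, as expected\n");
		else
			printf("unexpected result: %s\n", strerror(errno));

		munmap(buf, len);
		close(fd);
		return 0;
	}

After munmap() drops the last mapping, mon_bin_vma_close() decrements
mmap_active and the same ioctl should succeed again.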