Message-ID: <CACT4Y+b72eard_VXaFLa9xh-OZWj7Y5LHsoFP0jvxVyLVxjoZQ@mail.gmail.com>
Date:	Mon, 18 Jan 2016 20:28:04 +0100
From:	Dmitry Vyukov <dvyukov@...gle.com>
To:	"Kirill A. Shutemov" <kirill@...temov.name>
Cc:	Andrew Morton <akpm@...ux-foundation.org>,
	David Drysdale <drysdale@...gle.com>,
	Kees Cook <keescook@...gle.com>,
	Quentin Casasnovas <quentin.casasnovas@...cle.com>,
	Sasha Levin <sasha.levin@...cle.com>,
	Vegard Nossum <vegard.nossum@...cle.com>,
	LKML <linux-kernel@...r.kernel.org>,
	Eric Dumazet <edumazet@...gle.com>,
	Tavis Ormandy <taviso@...gle.com>,
	Bjorn Helgaas <bhelgaas@...gle.com>,
	syzkaller <syzkaller@...glegroups.com>,
	Kostya Serebryany <kcc@...gle.com>,
	Alexander Potapenko <glider@...gle.com>,
	Andrey Ryabinin <ryabinin.a.a@...il.com>
Subject: Re: [PATCH v3] kernel: add kcov code coverage

On Mon, Jan 18, 2016 at 1:42 PM, Kirill A. Shutemov
<kirill@...temov.name> wrote:
> On Thu, Jan 14, 2016 at 03:22:21PM +0100, Dmitry Vyukov wrote:
>> +/*
>> + * Entry point from instrumented code.
>> + * This is called once per basic-block/edge.
>> + */
>> +void __sanitizer_cov_trace_pc(void)
>> +{
>> +     struct task_struct *t;
>> +     enum kcov_mode mode;
>> +
>> +     t = current;
>> +     /*
>> +      * We are interested in code coverage as a function of syscall inputs,
>> +      * so we ignore code executed in interrupts.
>> +      */
>> +     if (!t || in_interrupt())
>> +             return;
>> +     mode = READ_ONCE(t->kcov_mode);
>> +     if (mode == kcov_mode_trace) {
>> +             u32 *area;
>> +             u32 pos;
>> +
>> +             /*
>> +              * There is some code that runs in interrupts but for which
>> +              * in_interrupt() returns false (e.g. preempt_schedule_irq()).
>> +              * READ_ONCE()/barrier() effectively provides load-acquire wrt
>> +              * interrupts, there are paired barrier()/WRITE_ONCE() in
>> +              * kcov_ioctl_locked().
>> +              */
>> +             barrier();
>> +             area = t->kcov_area;
>> +             /* The first u32 is the number of subsequent PCs. */
>> +             pos = READ_ONCE(area[0]) + 1;
>> +             if (likely(pos < t->kcov_size)) {
>> +                     area[pos] = (u32)_RET_IP_;
>
> If area[0] is UINT32_MAX, pos wraps around to 0, the condition will be
> true, and you'll temporarily set area[0] to (u32)_RET_IP_. That may be
> fun. :)

Well, if the user wants to corrupt this region, he is free to do so.
But it won't corrupt the kernel, right?
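
For concreteness, a userspace consumer of this interface looks roughly
like the sketch below; since the kernel only ever writes inside the
mapped buffer, a clobbered counter can garble the coverage data but not
kernel memory. The ioctl numbers and the debugfs path here are
assumptions for illustration, not quoted from the patch:

/*
 * Minimal userspace consumer sketch. The ioctl definitions and the
 * /sys/kernel/debug/kcov path are assumptions based on this patch,
 * not a settled API.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#define KCOV_INIT_TRACE	_IOR('c', 1, unsigned long)	/* assumed */
#define KCOV_ENABLE	_IO('c', 100)			/* assumed */
#define KCOV_DISABLE	_IO('c', 101)			/* assumed */
#define COVER_SIZE	(16 << 10)	/* number of u32 slots */

int main(void)
{
	uint32_t *cover, n, i;
	int fd;

	fd = open("/sys/kernel/debug/kcov", O_RDWR);
	if (fd == -1)
		return 1;
	/* Tell the kernel how large a trace buffer to allocate. */
	if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
		return 1;
	cover = mmap(NULL, COVER_SIZE * sizeof(uint32_t),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (cover == MAP_FAILED)
		return 1;
	if (ioctl(fd, KCOV_ENABLE, 0))
		return 1;
	cover[0] = 0;		/* reset the PC counter */
	read(-1, NULL, 0);	/* the syscall under test */
	n = cover[0];		/* first u32 is the number of PCs */
	for (i = 0; i < n && i < COVER_SIZE - 1; i++)
		printf("0x%x\n", cover[i + 1]);
	ioctl(fd, KCOV_DISABLE, 0);
	return 0;
}
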


>> +                     WRITE_ONCE(area[0], pos);
>> +             }
>> +     }
>> +}
>> +EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
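
For context, the store side in kcov_ioctl_locked() that the comment
above pairs with would have roughly this shape (a sketch;
kcov_enable_trace() is a hypothetical helper name, not the verbatim
patch code):

/*
 * Sketch of the enable path: publish the buffer before the mode, so
 * that __sanitizer_cov_trace_pc() can never observe kcov_mode_trace
 * without also seeing a valid kcov_area/kcov_size.
 */
static void kcov_enable_trace(struct task_struct *t, struct kcov *kcov)
{
	t->kcov_size = kcov->size;
	t->kcov_area = kcov->area;
	/* Pairs with barrier() in __sanitizer_cov_trace_pc(). */
	barrier();
	WRITE_ONCE(t->kcov_mode, kcov_mode_trace);
}
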
>
> ...
>
>> +static int kcov_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
>> +{
>> +     struct kcov *kcov;
>> +     unsigned long off;
>> +     struct page *page;
>> +
>> +     /* Map the preallocated kcov->area. */
>> +     kcov = vma->vm_file->private_data;
>> +     off = vmf->pgoff << PAGE_SHIFT;
>> +     if (off >= kcov->size * sizeof(u32))
>> +             return VM_FAULT_SIGSEGV;
>
> SIGBUS (VM_FAULT_SIGBUS).
>
>> +
>> +     page = vmalloc_to_page(kcov->area + off);
>> +     get_page(page);
>> +     vmf->page = page;
>> +     return 0;
>> +}
>
> BTW, since all pages are pre-allocated, you don't really need a fault
> handler -- just set up all page table entries during mmap. This would
> shorten the warm-up period for userspace. See vm_insert_page().

Done in v4.
I don't need to get_page() before vm_insert_page(), right?
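
For reference, vm_insert_page() takes its own reference on the page, so
no prior get_page() should be needed. The v4 change might look roughly
like this (a sketch; kcov_mmap_populate() is a hypothetical helper
name, not the actual v4 code):

/*
 * Sketch of eagerly mapping the preallocated vmalloc area at mmap
 * time instead of faulting pages in lazily. vm_insert_page() takes
 * its own page reference, so no explicit get_page() is needed.
 */
static int kcov_mmap_populate(struct vm_area_struct *vma, void *area,
			      unsigned long size)
{
	unsigned long off;
	struct page *page;
	int res;

	for (off = 0; off < size; off += PAGE_SIZE) {
		page = vmalloc_to_page(area + off);
		res = vm_insert_page(vma, vma->vm_start + off, page);
		if (res)
			return res;
	}
	return 0;
}

kcov_mmap() could then call this with kcov->area and
vma->vm_end - vma->vm_start instead of installing a .fault handler.
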


>> +
>> +static void kcov_unmap(struct vm_area_struct *vma)
>> +{
>> +     kcov_put(vma->vm_file->private_data);
>> +}
>> +
>> +static void kcov_map_copied(struct vm_area_struct *vma)
>> +{
>> +     struct kcov *kcov;
>> +
>> +     kcov = vma->vm_file->private_data;
>> +     kcov_get(kcov);
>> +}
>> +
>> +static const struct vm_operations_struct kcov_vm_ops = {
>> +     .fault = kcov_vm_fault,
>> +     .close = kcov_unmap,
>> +     /* Called on fork()/clone() when the mapping is copied. */
>> +     .open  = kcov_map_copied,
>> +};
>> +
>> +static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
>> +{
>> +     int res = 0;
>> +     void *area;
>> +     struct kcov *kcov = vma->vm_file->private_data;
>> +
>> +     area = vmalloc_user(vma->vm_end - vma->vm_start);
>> +     if (!area)
>> +             return -ENOMEM;
>> +
>> +     spin_lock(&kcov->lock);
>> +     if (kcov->mode == 0 || vma->vm_pgoff != 0 ||
>> +         vma->vm_end - vma->vm_start != kcov->size * sizeof(u32)) {
>
> You need VM_DONTEXPAND to keep the invariant. Not sure about VM_DONTDUMP.

Done in v4.
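
The flag change presumably looks like this inside kcov_mmap() (a
sketch):

	/*
	 * Sketch: pin the mapping size so mremap() cannot grow it past
	 * the kcov->size * sizeof(u32) buffer; VM_DONTDUMP additionally
	 * keeps the coverage buffer out of core dumps.
	 */
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
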


>
>> +             res = -EINVAL;
>> +             goto exit;
>> +     }
>> +     if (!kcov->area) {
>> +             kcov->area = area;
>> +             area = NULL;
>> +     }
>> +     /*
>> +      * The file drops a reference on close, but the file
>> +      * descriptor can be closed with the mapping still alive, so we
>> +      * keep a reference for the mapping. This reference is put in
>> +      * kcov_unmap().
>> +      */
>> +     kcov_get(kcov);
>> +     vma->vm_ops = &kcov_vm_ops;
>> +exit:
>> +     spin_unlock(&kcov->lock);
>> +     vfree(area);
>> +     return res;
>> +}
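
For completeness, the kcov_get()/kcov_put() pair referenced by the
lifetime comment above would have roughly this shape (a sketch based on
this thread, not the verbatim patch; it assumes struct kcov carries an
atomic_t refcount):

/*
 * Sketch of the refcount helpers: the open file and every live
 * mapping each hold one reference; the last kcov_put() frees the
 * coverage buffer.
 */
static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}
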
> --
>  Kirill A. Shutemov
