Message-ID: <CAEbi=3d46tXacnnU+RJGMw+KR1O=NaVjO+NW3cxUjAN==V51iw@mail.gmail.com>
Date:   Tue, 23 Jan 2018 19:52:55 +0800
From:   Greentime Hu <green.hu@...il.com>
To:     Arnd Bergmann <arnd@...db.de>
Cc:     Greentime <greentime@...estech.com>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
        linux-arch <linux-arch@...r.kernel.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Jason Cooper <jason@...edaemon.net>,
        Marc Zyngier <marc.zyngier@....com>,
        Rob Herring <robh+dt@...nel.org>,
        Networking <netdev@...r.kernel.org>,
        Vincent Chen <deanbo422@...il.com>,
        DTML <devicetree@...r.kernel.org>,
        Al Viro <viro@...iv.linux.org.uk>,
        David Howells <dhowells@...hat.com>,
        Will Deacon <will.deacon@....com>,
        Daniel Lezcano <daniel.lezcano@...aro.org>,
        linux-serial@...r.kernel.org,
        Geert Uytterhoeven <geert.uytterhoeven@...il.com>,
        Linus Walleij <linus.walleij@...aro.org>,
        Mark Rutland <mark.rutland@....com>, Greg KH <greg@...ah.com>,
        Guo Ren <ren_guo@...ky.com>,
        Randy Dunlap <rdunlap@...radead.org>,
        David Miller <davem@...emloft.net>,
        Jonas Bonn <jonas@...thpole.se>,
        Stefan Kristiansson <stefan.kristiansson@...nalahti.fi>,
        Stafford Horne <shorne@...il.com>,
        Vincent Chen <vincentc@...estech.com>
Subject: Re: [PATCH v6 16/36] nds32: DMA mapping API

Hi, Arnd:

2018-01-23 16:23 GMT+08:00 Greentime Hu <green.hu@...il.com>:
> Hi, Arnd:
>
> 2018-01-18 18:26 GMT+08:00 Arnd Bergmann <arnd@...db.de>:
>> On Mon, Jan 15, 2018 at 6:53 AM, Greentime Hu <green.hu@...il.com> wrote:
>>> From: Greentime Hu <greentime@...estech.com>
>>>
>>> This patch adds support for the DMA mapping API. It uses dma_map_ops for
>>> flexibility.
>>>
>>> Signed-off-by: Vincent Chen <vincentc@...estech.com>
>>> Signed-off-by: Greentime Hu <greentime@...estech.com>
>>
>> I'm still unhappy about the way the cache flushes are done here, as discussed
>> before. It's not a show-stopper, but no Ack from me.
>
> How about this implementation?
>
> static void
> nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
>                               size_t size, enum dma_data_direction dir)
> {
>         unsigned long start = (unsigned long)phys_to_virt(handle);
>         unsigned long end = start + size;
>
>         switch (dir) {
>         case DMA_TO_DEVICE:     /* writeback only */
>                 break;
>         case DMA_FROM_DEVICE:   /* invalidate only */
>         case DMA_BIDIRECTIONAL: /* writeback and invalidate */
>                 cpu_dma_inval_range(start, end);
>                 break;
>         default:
>                 BUG();
>         }
> }
>
> static void
> nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
>                                  size_t size, enum dma_data_direction dir)
> {
>         unsigned long start = (unsigned long)phys_to_virt(handle);
>         unsigned long end = start + size;
>
>         switch (dir) {
>         case DMA_FROM_DEVICE:   /* invalidate only */
>                 break;
>         case DMA_TO_DEVICE:     /* writeback only */
>         case DMA_BIDIRECTIONAL: /* writeback and invalidate */
>                 cpu_dma_wb_range(start, end);
>                 break;
>         default:
>                 BUG();
>         }
> }

I am not sure if I understand it correctly, so let me list all the combinations:

RAM to DEVICE
    before DMA => writeback cache
    after DMA => nop

DEVICE to RAM
    before DMA => nop
    after DMA => invalidate cache
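
To make the call flow concrete, here is a hypothetical driver path showing
when each hook fires (sketch only: dma_map_single()/dma_unmap_single() are
the generic streaming API, while example_xfer/buf/len are made-up names):

#include <linux/dma-mapping.h>

static int example_xfer(struct device *dev, void *buf, size_t len)
{
        dma_addr_t dma;

        /* RAM to DEVICE: writeback at map time, nop at unmap */
        dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma))
                return -ENOMEM;
        /* ... device reads buf via DMA ... */
        dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);

        /* DEVICE to RAM: nop at map time, invalidate at unmap */
        dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, dma))
                return -ENOMEM;
        /* ... device writes buf via DMA ... */
        dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
        return 0;
}

A single consistent_sync() helper can then implement both halves: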

/* FOR_CPU/FOR_DEVICE tell consistent_sync() whose view of memory is being
 * brought up to date (definitions added here so the snippet stands alone). */
enum { FOR_CPU, FOR_DEVICE };

static void consistent_sync(void *vaddr, size_t size,
                            enum dma_data_direction direction, int master)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;

        if (master == FOR_CPU) {
                switch (direction) {
                case DMA_TO_DEVICE:     /* writeback only */
                        break;
                case DMA_FROM_DEVICE:   /* invalidate only */
                case DMA_BIDIRECTIONAL: /* writeback and invalidate */
                        cpu_dma_inval_range(start, end);
                        break;
                default:
                        BUG();
                }
        } else {
                /* FOR_DEVICE */
                switch (direction) {
                case DMA_FROM_DEVICE:   /* invalidate only */
                        break;
                case DMA_TO_DEVICE:     /* writeback only */
                case DMA_BIDIRECTIONAL: /* writeback and invalidate */
                        cpu_dma_wb_range(start, end);
                        break;
                default:
                        BUG();
                }
        }
}

static void
nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
                              size_t size, enum dma_data_direction dir)
{
        /* Streaming buffers are assumed to live in the kernel linear
         * mapping, so phys_to_virt() yields a usable virtual address. */
        consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_CPU);
}

static void
nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
                                 size_t size, enum dma_data_direction dir)
{
        consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_DEVICE);
}

static dma_addr_t nds32_dma_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
                                     unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                consistent_sync((void *)(page_address(page) + offset),
                                size, dir, FOR_DEVICE);
        /* No IOMMU: the DMA address is just the physical address. */
        return page_to_phys(page) + offset;
}

static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle,
                                 size_t size, enum dma_data_direction dir,
                                 unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                consistent_sync(phys_to_virt(handle), size, dir, FOR_CPU);
}
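
For completeness, these callbacks would be wired up through a dma_map_ops
structure, roughly like this (a sketch against the 4.15-era ops layout; the
real patch would also need .alloc/.free and the scatterlist variants):

struct dma_map_ops nds32_dma_ops = {
        .map_page               = nds32_dma_map_page,
        .unmap_page             = nds32_dma_unmap_page,
        .sync_single_for_cpu    = nds32_dma_sync_single_for_cpu,
        .sync_single_for_device = nds32_dma_sync_single_for_device,
};
EXPORT_SYMBOL(nds32_dma_ops);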
