Message-ID: <20241107145034.GO5006@unreal>
Date: Thu, 7 Nov 2024 16:50:34 +0200
From: Leon Romanovsky <leon@...nel.org>
To: Christoph Hellwig <hch@....de>
Cc: Jason Gunthorpe <jgg@...pe.ca>, Robin Murphy <robin.murphy@....com>,
Jens Axboe <axboe@...nel.dk>, Joerg Roedel <joro@...tes.org>,
Will Deacon <will@...nel.org>, Sagi Grimberg <sagi@...mberg.me>,
Keith Busch <kbusch@...nel.org>,
Bjorn Helgaas <bhelgaas@...gle.com>,
Logan Gunthorpe <logang@...tatee.com>,
Yishai Hadas <yishaih@...dia.com>,
Shameer Kolothum <shameerali.kolothum.thodi@...wei.com>,
Kevin Tian <kevin.tian@...el.com>,
Alex Williamson <alex.williamson@...hat.com>,
Marek Szyprowski <m.szyprowski@...sung.com>,
Jérôme Glisse <jglisse@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Jonathan Corbet <corbet@....net>, linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-block@...r.kernel.org,
linux-rdma@...r.kernel.org, iommu@...ts.linux.dev,
linux-nvme@...ts.infradead.org, linux-pci@...r.kernel.org,
kvm@...r.kernel.org, linux-mm@...ck.org
Subject: Re: [PATCH v1 07/17] dma-mapping: Implement link/unlink ranges API
On Mon, Nov 04, 2024 at 01:53:02PM +0100, Christoph Hellwig wrote:
> On Mon, Nov 04, 2024 at 08:19:24AM -0400, Jason Gunthorpe wrote:
> > > That's a good point. Only P2P mapped through the host bridge can
> > > even end up here, so the address is a perfectly valid physical
> > > address in the host. But I'm not sure if all arch_sync_dma_for_device
> > > implementations handle IOMMU memory fine.
> >
> > I was told that on x86, a cache flush operation on MMIO has a chance
> > of triggering an MCE. We recently had similar discussions about ARM,
> > where it was asserted that some platforms may have the same problem.
>
> On x86 we never flush caches for DMA operations anyway, so x86 isn't
> really the concern here; the concern is architectures that do
> cache-incoherent DMA to PCIe devices. That isn't a whole lot, as most
> SoCs try to avoid it for PCIe even if they lack DMA coherence for
> lesser peripherals, but I bet there are some on arm/arm64 and maybe
> riscv or mips.
>
> > It would be safest to only issue the arch flushing calls on memory that
> > is mapped cacheable. We can assume that a P2P target is never CPU
> > mapped cacheable, regardless of how the DMA is routed.
>
> Yes. I.e. force DMA_ATTR_SKIP_CPU_SYNC for P2P.
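For reference, the reason DMA_ATTR_SKIP_CPU_SYNC is enough here is that
the non-coherent streaming paths gate the arch cache maintenance on that
attribute, roughly like this (simplified sketch, helper name made up, not
the exact dma-direct/dma-iommu code):

	/* illustrative only: how the attr short-circuits cache flushes */
	static void link_one(struct device *dev, phys_addr_t phys, size_t size,
			     enum dma_data_direction dir, unsigned long attrs)
	{
		if (!dev_is_dma_coherent(dev) &&
		    !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			arch_sync_dma_for_device(phys, size, dir);

		/* ... actual IOMMU linking of phys would happen here ... */
	}

With the attribute set for host bridge P2P we never issue CPU cache
operations against MMIO, which avoids the MCE concern above.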
What do you think?
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 38bcb3ecceeb..065bdace3344 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -559,14 +559,19 @@ static bool blk_rq_dma_map_iova(struct request *req, struct device *dma_dev,
{
enum dma_data_direction dir = rq_dma_dir(req);
unsigned int mapped = 0;
+ unsigned long attrs = 0;
int error = 0;
iter->addr = state->addr;
iter->len = dma_iova_size(state);
+ if (req->cmd_flags & REQ_P2PDMA) {
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+ req->cmd_flags &= ~REQ_P2PDMA;
+ }
do {
error = dma_iova_link(dma_dev, state, vec->paddr, mapped,
- vec->len, dir, 0);
+ vec->len, dir, attrs);
if (error)
goto error_unmap;
mapped += vec->len;
@@ -578,7 +583,7 @@ static bool blk_rq_dma_map_iova(struct request *req, struct device *dma_dev,
return true;
error_unmap:
- dma_iova_destroy(dma_dev, state, mapped, rq_dma_dir(req), 0);
+ dma_iova_destroy(dma_dev, state, mapped, rq_dma_dir(req), attrs);
iter->status = errno_to_blk_status(error);
return false;
}
@@ -633,7 +638,6 @@ bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
* P2P transfers through the host bridge are treated the
* same as non-P2P transfers below and during unmap.
*/
- req->cmd_flags &= ~REQ_P2PDMA;
break;
default:
iter->status = BLK_STS_INVAL;
@@ -644,6 +648,8 @@ bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
if (blk_can_dma_map_iova(req, dma_dev) &&
dma_iova_try_alloc(dma_dev, state, vec.paddr, total_len))
return blk_rq_dma_map_iova(req, dma_dev, state, iter, &vec);
+
+ req->cmd_flags &= ~REQ_P2PDMA;
return blk_dma_map_direct(req, dma_dev, iter, &vec);
}
EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_start);
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 62980ca8f3c5..5fe30fbc42b0 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -23,6 +23,7 @@ struct mmu_interval_notifier;
* HMM_PFN_WRITE - if the page memory can be written to (requires HMM_PFN_VALID)
* HMM_PFN_ERROR - accessing the pfn is impossible and the device should
* fail. ie poisoned memory, special pages, no vma, etc
+ * HMM_PFN_P2PDMA - P2P page, not bus mapped
* HMM_PFN_P2PDMA_BUS - Bus mapped P2P transfer
* HMM_PFN_DMA_MAPPED - Flag preserved on input-to-output transformation
* to mark that page is already DMA mapped
@@ -41,6 +42,7 @@ enum hmm_pfn_flags {
HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3),
/* Sticky flag, carried from Input to Output */
+ HMM_PFN_P2PDMA = 1UL << (BITS_PER_LONG - 5),
HMM_PFN_P2PDMA_BUS = 1UL << (BITS_PER_LONG - 6),
HMM_PFN_DMA_MAPPED = 1UL << (BITS_PER_LONG - 7),
diff --git a/mm/hmm.c b/mm/hmm.c
index 4ef2b3815212..b2ec199c2ea8 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -710,6 +710,7 @@ dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
struct page *page = hmm_pfn_to_page(pfns[idx]);
phys_addr_t paddr = hmm_pfn_to_phys(pfns[idx]);
size_t offset = idx * map->dma_entry_size;
+ unsigned long attrs = 0;
dma_addr_t dma_addr;
int ret;
@@ -740,6 +741,9 @@ dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
switch (pci_p2pdma_state(p2pdma_state, dev, page)) {
case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+ pfns[idx] |= HMM_PFN_P2PDMA;
+ fallthrough;
case PCI_P2PDMA_MAP_NONE:
break;
case PCI_P2PDMA_MAP_BUS_ADDR:
@@ -752,7 +756,8 @@ dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
if (dma_use_iova(state)) {
ret = dma_iova_link(dev, state, paddr, offset,
- map->dma_entry_size, DMA_BIDIRECTIONAL, 0);
+ map->dma_entry_size, DMA_BIDIRECTIONAL,
+ attrs);
if (ret)
return DMA_MAPPING_ERROR;
@@ -793,6 +798,7 @@ bool hmm_dma_unmap_pfn(struct device *dev, struct hmm_dma_map *map, size_t idx)
struct dma_iova_state *state = &map->state;
dma_addr_t *dma_addrs = map->dma_list;
unsigned long *pfns = map->pfn_list;
+ unsigned long attrs = 0;
#define HMM_PFN_VALID_DMA (HMM_PFN_VALID | HMM_PFN_DMA_MAPPED)
if ((pfns[idx] & HMM_PFN_VALID_DMA) != HMM_PFN_VALID_DMA)
@@ -801,14 +807,16 @@ bool hmm_dma_unmap_pfn(struct device *dev, struct hmm_dma_map *map, size_t idx)
if (pfns[idx] & HMM_PFN_P2PDMA_BUS)
; /* no need to unmap bus address P2P mappings */
- else if (dma_use_iova(state))
+ else if (dma_use_iova(state)) {
+ if (pfns[idx] & HMM_PFN_P2PDMA)
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
dma_iova_unlink(dev, state, idx * map->dma_entry_size,
- map->dma_entry_size, DMA_BIDIRECTIONAL, 0);
- else if (dma_need_unmap(dev))
+ map->dma_entry_size, DMA_BIDIRECTIONAL, attrs);
+ } else if (dma_need_unmap(dev))
dma_unmap_page(dev, dma_addrs[idx], map->dma_entry_size,
DMA_BIDIRECTIONAL);
- pfns[idx] &= ~(HMM_PFN_DMA_MAPPED | HMM_PFN_P2PDMA_BUS);
+ pfns[idx] &= ~(HMM_PFN_DMA_MAPPED | HMM_PFN_P2PDMA | HMM_PFN_P2PDMA_BUS);
return true;
}
EXPORT_SYMBOL_GPL(hmm_dma_unmap_pfn);
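Note that HMM_PFN_P2PDMA sits in the sticky-flag group, so the P2P state
recorded at map time is still there at unmap time and callers don't need
to track anything extra. A hypothetical consumer tearing down a range
would still just loop over its entries (names made up for illustration):

	/* hypothetical driver helper, illustrative only */
	static void drv_unmap_range(struct device *dev, struct hmm_dma_map *map,
				    size_t nr_entries)
	{
		size_t i;

		for (i = 0; i < nr_entries; i++)
			/* CPU sync is skipped internally for P2P entries */
			hmm_dma_unmap_pfn(dev, map, i);
	}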