Message-ID: <20191018092750.GK21344@kadam>
Date: Fri, 18 Oct 2019 12:27:50 +0300
From: Dan Carpenter <dan.carpenter@...cle.com>
To: Joerg Roedel <joro@...tes.org>
Cc: Qian Cai <cai@....pw>, iommu@...ts.linux-foundation.org,
linux-kernel@...r.kernel.org, Joerg Roedel <jroedel@...e.de>
Subject: Re: [PATCH] iommu/amd: Pass gfp flags to iommu_map_page() in
amd_iommu_map()

Did you get a chance to look at iommu_dma_alloc_remap() as well?

drivers/iommu/dma-iommu.c
   584  static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
   585                  dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
                                                ^^^^^^^^^
   586  {
   587          struct iommu_domain *domain = iommu_get_dma_domain(dev);
   588          struct iommu_dma_cookie *cookie = domain->iova_cookie;
   589          struct iova_domain *iovad = &cookie->iovad;
   590          bool coherent = dev_is_dma_coherent(dev);
   591          int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
   592          pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
   593          unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
   594          struct page **pages;
   595          struct sg_table sgt;
   596          dma_addr_t iova;
   597          void *vaddr;
   598  
   599          *dma_handle = DMA_MAPPING_ERROR;
   600  
   601          if (unlikely(iommu_dma_deferred_attach(dev, domain)))
   602                  return NULL;
   603  
   604          min_size = alloc_sizes & -alloc_sizes;
   605          if (min_size < PAGE_SIZE) {
   606                  min_size = PAGE_SIZE;
   607                  alloc_sizes |= PAGE_SIZE;
   608          } else {
   609                  size = ALIGN(size, min_size);
   610          }
   611          if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
   612                  alloc_sizes = min_size;
   613  
   614          count = PAGE_ALIGN(size) >> PAGE_SHIFT;
   615          pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
   616                                          gfp);
   617          if (!pages)
   618                  return NULL;
   619  
   620          size = iova_align(iovad, size);
   621          iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
   622          if (!iova)
   623                  goto out_free_pages;
   624  
   625          if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
                                                                           ^^^^^^^^^^
gfp here instead of GFP_KERNEL?

   626                  goto out_free_iova;
   627  
   628          if (!(ioprot & IOMMU_CACHE)) {
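
The caller's gfp is already passed down to __iommu_dma_alloc_pages() on
line 615, so the hard coded GFP_KERNEL on line 625 looks like it might be
an oversight.  If the caller's flags are supposed to cover the whole
allocation then presumably the fix is something like the following
(totally untested, and the line numbers are approximate):

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -620,7 +620,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	size = iova_align(iovad, size);
 	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
 	if (!iova)
 		goto out_free_pages;
 
-	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
+	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, gfp))
 		goto out_free_iova;
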
regards,
dan carpenter