From: Leon Romanovsky <leon@kernel.org>
To: Christoph Hellwig <hch@lst.de>,
Robin Murphy <robin.murphy@arm.com>,
Marek Szyprowski <m.szyprowski@samsung.com>,
Joerg Roedel <joro@8bytes.org>, Will Deacon <will@kernel.org>,
Jason Gunthorpe <jgg@ziepe.ca>,
Chaitanya Kulkarni <chaitanyak@nvidia.com>
Cc: "Leon Romanovsky" <leonro@nvidia.com>,
"Jonathan Corbet" <corbet@lwn.net>,
"Jens Axboe" <axboe@kernel.dk>, "Keith Busch" <kbusch@kernel.org>,
"Sagi Grimberg" <sagi@grimberg.me>,
"Yishai Hadas" <yishaih@nvidia.com>,
"Shameer Kolothum" <shameerali.kolothum.thodi@huawei.com>,
"Kevin Tian" <kevin.tian@intel.com>,
"Alex Williamson" <alex.williamson@redhat.com>,
"Jérôme Glisse" <jglisse@redhat.com>,
"Andrew Morton" <akpm@linux-foundation.org>,
linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-block@vger.kernel.org, linux-rdma@vger.kernel.org,
iommu@lists.linux.dev, linux-nvme@lists.infradead.org,
kvm@vger.kernel.org, linux-mm@kvack.org,
"Bart Van Assche" <bvanassche@acm.org>,
"Damien Le Moal" <damien.lemoal@opensource.wdc.com>,
"Amir Goldstein" <amir73il@gmail.com>,
"josef@toxicpanda.com" <josef@toxicpanda.com>,
"Martin K. Petersen" <martin.petersen@oracle.com>,
"daniel@iogearbox.net" <daniel@iogearbox.net>,
"Dan Williams" <dan.j.williams@intel.com>,
"jack@suse.com" <jack@suse.com>,
"Zhu Yanjun" <zyjzyj2000@gmail.com>
Subject: [RFC RESEND 05/16] iommu/dma: Prepare map/unmap page functions to receive IOVA
Date: Tue, 5 Mar 2024 13:18:36 +0200
Message-ID: <13187a8682ab4f8708ca88cc4363f90e64e14ccc.1709635535.git.leon@kernel.org>
In-Reply-To: <cover.1709635535.git.leon@kernel.org>

From: Leon Romanovsky <leonro@nvidia.com>

Extend the existing map_page/unmap_page function implementations to
accept a preallocated IOVA. In that case the IOVA allocation is skipped,
while the rest of the code stays the same.

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
drivers/iommu/dma-iommu.c | 68 ++++++++++++++++++++++++++-------------
1 file changed, 45 insertions(+), 23 deletions(-)
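
For reviewers, here is a minimal sketch (not part of this patch) of how
an in-file caller could drive the new parameters once an IOVA range has
been preallocated, e.g. via the interface added in patch 04. The helper
name, its signature and the error handling are illustrative assumptions
only:

static int example_link_pages(struct device *dev, struct page **pages,
			      int npages, dma_addr_t iova,
			      enum dma_data_direction dir,
			      unsigned long attrs)
{
	int i;

	for (i = 0; i < npages; i++) {
		dma_addr_t addr;

		/* A non-zero IOVA makes __iommu_dma_map() skip allocation. */
		addr = __iommu_dma_map_pages(dev, pages[i], 0,
					     iova + i * PAGE_SIZE, PAGE_SIZE,
					     dir, attrs);
		if (addr == DMA_MAPPING_ERROR)
			goto err_unmap;
	}
	return 0;

err_unmap:
	while (--i >= 0)
		/* free_iova == false: unmap the page, keep the IOVA range. */
		__iommu_dma_unmap_pages(dev, iova + i * PAGE_SIZE, PAGE_SIZE,
					dir, attrs, false);
	return -ENOMEM;
}

The existing entry points keep today's behaviour: iommu_dma_map_page()
passes iova == 0 (allocate as before) and iommu_dma_unmap_page() passes
free_iova == true (free as before).
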
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index e55726783501..dbdd373a609a 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -824,7 +824,7 @@ static void __iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
- size_t size)
+ size_t size, bool free_iova)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -843,17 +843,19 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
if (!iotlb_gather.queued)
iommu_iotlb_sync(domain, &iotlb_gather);

- __iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
+ if (free_iova)
+ __iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
- size_t size, int prot, u64 dma_mask)
+ dma_addr_t iova, size_t size, int prot,
+ u64 dma_mask)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
size_t iova_off = iova_offset(iovad, phys);
- dma_addr_t iova;
bool no_iova = !iova;

if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
iommu_deferred_attach(dev, domain))
@@ -861,12 +863,14 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,

size = iova_align(iovad, size + iova_off);

- iova = __iommu_dma_alloc_iova(domain, size, dma_mask, dev);
+ if (no_iova)
+ iova = __iommu_dma_alloc_iova(domain, size, dma_mask, dev);
if (!iova)
return DMA_MAPPING_ERROR;

if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
- __iommu_dma_free_iova(cookie, iova, size, NULL);
+ if (no_iova)
+ __iommu_dma_free_iova(cookie, iova, size, NULL);
return DMA_MAPPING_ERROR;
}
return iova + iova_off;
@@ -1031,7 +1035,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
return vaddr;

out_unmap:
- __iommu_dma_unmap(dev, *dma_handle, size);
+ __iommu_dma_unmap(dev, *dma_handle, size, true);
__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
return NULL;
}
@@ -1060,7 +1064,7 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
{
struct dma_sgt_handle *sh = sgt_handle(sgt);

- __iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
+ __iommu_dma_unmap(dev, sgt->sgl->dma_address, size, true);
__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
sg_free_table(&sh->sgt);
kfree(sh);
@@ -1131,9 +1135,11 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}

-static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
+static dma_addr_t __iommu_dma_map_pages(struct device *dev, struct page *page,
+ unsigned long offset, dma_addr_t iova,
+ size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
{
phys_addr_t phys = page_to_phys(page) + offset;
bool coherent = dev_is_dma_coherent(dev);
@@ -1141,7 +1147,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
- dma_addr_t iova, dma_mask = dma_get_mask(dev);
+ dma_addr_t addr, dma_mask = dma_get_mask(dev);

/*
* If both the physical buffer start address and size are
@@ -1182,14 +1188,23 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
arch_sync_dma_for_device(phys, size, dir);

- iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
- if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
+ addr = __iommu_dma_map(dev, phys, iova, size, prot, dma_mask);
+ if (addr == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
- return iova;
+ return addr;
}

-static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
+static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ return __iommu_dma_map_pages(dev, page, offset, 0, size, dir, attrs);
+}
+
+static void __iommu_dma_unmap_pages(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs, bool free_iova)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
phys_addr_t phys;
@@ -1201,12 +1216,19 @@ static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(phys, size, dir);

- __iommu_dma_unmap(dev, dma_handle, size);
+ __iommu_dma_unmap(dev, dma_handle, size, free_iova);

if (unlikely(is_swiotlb_buffer(dev, phys)))
swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}

+static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ __iommu_dma_unmap_pages(dev, dma_handle, size, dir, attrs, true);
+}
+
/*
* Prepare a successfully-mapped scatterlist to give back to the caller.
*
@@ -1509,13 +1531,13 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
}

if (end)
- __iommu_dma_unmap(dev, start, end - start);
+ __iommu_dma_unmap(dev, start, end - start, true);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- return __iommu_dma_map(dev, phys, size,
+ return __iommu_dma_map(dev, phys, 0, size,
dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
dma_get_mask(dev));
}
@@ -1523,7 +1545,7 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(dev, handle, size);
+ __iommu_dma_unmap(dev, handle, size, true);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
@@ -1560,7 +1582,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, unsigned long attrs)
{
- __iommu_dma_unmap(dev, handle, size);
+ __iommu_dma_unmap(dev, handle, size, true);
__iommu_dma_free(dev, size, cpu_addr);
}

@@ -1626,7 +1648,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
if (!cpu_addr)
return NULL;

- *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
+ *handle = __iommu_dma_map(dev, page_to_phys(page), 0, size, ioprot,
dev->coherent_dma_mask);
if (*handle == DMA_MAPPING_ERROR) {
__iommu_dma_free(dev, size, cpu_addr);
--
2.44.0