Date: Mon, 18 Oct 2021 06:40:51 +0200
From: Christoph Hellwig <hch@....de>
To: unlisted-recipients:; (no To-header on input)
Cc: Dan Williams <dan.j.williams@...el.com>,
	Mike Snitzer <snitzer@...hat.com>,
	Ira Weiny <ira.weiny@...el.com>,
	dm-devel@...hat.com, linux-xfs@...r.kernel.org,
	nvdimm@...ts.linux.dev, linux-s390@...r.kernel.org,
	linux-fsdevel@...r.kernel.org, linux-erofs@...ts.ozlabs.org,
	linux-ext4@...r.kernel.org,
	virtualization@...ts.linux-foundation.org
Subject: [PATCH 08/11] dm-linear: add a linear_dax_pgoff helper

Add a helper to perform the entire remapping for DAX accesses.  This
helper open codes bdev_dax_pgoff given that the alignment checks have
already been done by the submitting file system and don't need to be
repeated.

Signed-off-by: Christoph Hellwig <hch@....de>
---
 drivers/md/dm-linear.c | 49 +++++++++++++-----------------------------
 1 file changed, 15 insertions(+), 34 deletions(-)

diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 32fbab11bf90c..bf03f73fd0f36 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -164,63 +164,44 @@ static int linear_iterate_devices(struct dm_target *ti,
 }
 
 #if IS_ENABLED(CONFIG_FS_DAX)
+static struct dax_device *linear_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
+{
+	struct linear_c *lc = ti->private;
+	sector_t sector = linear_map_sector(ti, *pgoff << PAGE_SECTORS_SHIFT);
+
+	*pgoff = (get_start_sect(lc->dev->bdev) + sector) >> PAGE_SECTORS_SHIFT;
+	return lc->dev->dax_dev;
+}
+
 static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
 		long nr_pages, void **kaddr, pfn_t *pfn)
 {
-	long ret;
-	struct linear_c *lc = ti->private;
-	struct block_device *bdev = lc->dev->bdev;
-	struct dax_device *dax_dev = lc->dev->dax_dev;
-	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-
-	dev_sector = linear_map_sector(ti, sector);
-	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
-	if (ret)
-		return ret;
+	struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
+
 	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
 }
 
 static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
 		void *addr, size_t bytes, struct iov_iter *i)
 {
-	struct linear_c *lc = ti->private;
-	struct block_device *bdev = lc->dev->bdev;
-	struct dax_device *dax_dev = lc->dev->dax_dev;
-	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+	struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
 
-	dev_sector = linear_map_sector(ti, sector);
-	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
-		return 0;
 	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
 }
 
 static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
 		void *addr, size_t bytes, struct iov_iter *i)
 {
-	struct linear_c *lc = ti->private;
-	struct block_device *bdev = lc->dev->bdev;
-	struct dax_device *dax_dev = lc->dev->dax_dev;
-	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+	struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
 
-	dev_sector = linear_map_sector(ti, sector);
-	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
-		return 0;
 	return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
 }
 
 static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
 		size_t nr_pages)
 {
-	int ret;
-	struct linear_c *lc = ti->private;
-	struct block_device *bdev = lc->dev->bdev;
-	struct dax_device *dax_dev = lc->dev->dax_dev;
-	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-
-	dev_sector = linear_map_sector(ti, sector);
-	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
-	if (ret)
-		return ret;
+	struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
+
 	return dax_zero_page_range(dax_dev, pgoff, nr_pages);
 }
 
-- 
2.30.2
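
For context on what is being open coded: a minimal sketch of
bdev_dax_pgoff(), assuming the implementation as it roughly looked in
drivers/dax/super.c around the time of this series.  The generic helper
both translated a partition-relative sector into a page offset on the
whole device and rejected unaligned requests; this sketch is
illustrative only, not part of the patch.

	/*
	 * Sketch of the generic helper that linear_dax_pgoff() open codes,
	 * roughly as it existed when this patch was posted.  Illustrative
	 * only; see drivers/dax/super.c for the real implementation.
	 */
	int bdev_dax_pgoff(struct block_device *bdev, sector_t sector,
			size_t size, pgoff_t *pgoff)
	{
		/* Byte offset from the start of the whole device. */
		phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;

		if (pgoff)
			*pgoff = PHYS_PFN(phys_off);

		/* The alignment checks that linear_dax_pgoff() skips. */
		if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
			return -EINVAL;
		return 0;
	}

With alignment already guaranteed by the submitting file system,
linear_dax_pgoff() keeps only the arithmetic: *pgoff <<
PAGE_SECTORS_SHIFT turns the page offset into a sector count for
linear_map_sector(), and shifting back down by PAGE_SECTORS_SHIFT after
adding get_start_sect() yields the same device-wide page offset that
PHYS_PFN(phys_off) computes above.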