Message-ID: <45726756.eMkXEF23lS@nvdebian>
Date:   Tue, 1 Feb 2022 10:37:47 +1100
From:   Alistair Popple <apopple@...dia.com>
To:     <akpm@...ux-foundation.org>, <Felix.Kuehling@....com>,
        <linux-mm@...ck.org>, <rcampbell@...dia.com>,
        <linux-ext4@...r.kernel.org>, <linux-xfs@...r.kernel.org>,
        "Sierra Guiza, Alejandro (Alex)" <alex.sierra@....com>
CC:     <willy@...radead.org>, <dri-devel@...ts.freedesktop.org>,
        <jglisse@...hat.com>, <amd-gfx@...ts.freedesktop.org>,
        <jgg@...dia.com>, <hch@....de>
Subject: Re: [PATCH v5 09/10] tools: update hmm-test to support device coherent type

Oh sorry, I had looked at this but forgotten to add my Reviewed-by:

Reviewed-by: Alistair Popple <apopple@...dia.com>

On Tuesday, 1 February 2022 10:27:25 AM AEDT Sierra Guiza, Alejandro (Alex) wrote:
> Hi Alistair,
> This is the last patch to be reviewed from this series. It already has
> the changes from your last feedback (V3). Would you mind taking a look?
> Thanks a lot for reviewing the rest!
> 
> Regards,
> Alex Sierra
> 
> On 1/28/2022 2:08 PM, Alex Sierra wrote:
> > Test cases such as migrate_fault and migrate_multiple were modified to
> > migrate explicitly from device to system memory, without the need for
> > page faults, when using the device coherent type.
> >
> > The snapshot test case was updated to read the memory device type first
> > and, based on that, check for the proper returned results. The
> > migrate_ping_pong test case was added to test explicit migration from
> > device to system memory for both private and coherent zone types.
> >
> > Helpers to migrate from device to system memory and vice versa were
> > also added.
> >
> > Signed-off-by: Alex Sierra <alex.sierra@....com>
> > Acked-by: Felix Kuehling <Felix.Kuehling@....com>
> > ---
> > v2:
> > Set FIXTURE_VARIANT to add multiple device types to the FIXTURE. This
> > will run all the tests for each device type (private and coherent) in
> > case both exist when the hmm-test driver is probed.
> > v4:
> > Check for the number of pages successfully migrated from the coherent
> > device to system memory in the migrate_multiple test.
> > ---
> >   tools/testing/selftests/vm/hmm-tests.c | 123 ++++++++++++++++++++-----
> >   1 file changed, 102 insertions(+), 21 deletions(-)
> >
> > diff --git a/tools/testing/selftests/vm/hmm-tests.c b/tools/testing/selftests/vm/hmm-tests.c
> > index 203323967b50..84ec8c4a1dc7 100644
> > --- a/tools/testing/selftests/vm/hmm-tests.c
> > +++ b/tools/testing/selftests/vm/hmm-tests.c
> > @@ -44,6 +44,14 @@ struct hmm_buffer {
> >   	int		fd;
> >   	uint64_t	cpages;
> >   	uint64_t	faults;
> > +	int		zone_device_type;
> > +};
> > +
> > +enum {
> > +	HMM_PRIVATE_DEVICE_ONE,
> > +	HMM_PRIVATE_DEVICE_TWO,
> > +	HMM_COHERENCE_DEVICE_ONE,
> > +	HMM_COHERENCE_DEVICE_TWO,
> >   };
> >   
> >   #define TWOMEG		(1 << 21)
> > @@ -60,6 +68,21 @@ FIXTURE(hmm)
> >   	unsigned int	page_shift;
> >   };
> >   
> > +FIXTURE_VARIANT(hmm)
> > +{
> > +	int     device_number;
> > +};
> > +
> > +FIXTURE_VARIANT_ADD(hmm, hmm_device_private)
> > +{
> > +	.device_number = HMM_PRIVATE_DEVICE_ONE,
> > +};
> > +
> > +FIXTURE_VARIANT_ADD(hmm, hmm_device_coherent)
> > +{
> > +	.device_number = HMM_COHERENCE_DEVICE_ONE,
> > +};
> > +
> >   FIXTURE(hmm2)
> >   {
> >   	int		fd0;
> > @@ -68,6 +91,24 @@ FIXTURE(hmm2)
> >   	unsigned int	page_shift;
> >   };
> >   
> > +FIXTURE_VARIANT(hmm2)
> > +{
> > +	int     device_number0;
> > +	int     device_number1;
> > +};
> > +
> > +FIXTURE_VARIANT_ADD(hmm2, hmm2_device_private)
> > +{
> > +	.device_number0 = HMM_PRIVATE_DEVICE_ONE,
> > +	.device_number1 = HMM_PRIVATE_DEVICE_TWO,
> > +};
> > +
> > +FIXTURE_VARIANT_ADD(hmm2, hmm2_device_coherent)
> > +{
> > +	.device_number0 = HMM_COHERENCE_DEVICE_ONE,
> > +	.device_number1 = HMM_COHERENCE_DEVICE_TWO,
> > +};
> > +
> >   static int hmm_open(int unit)
> >   {
> >   	char pathname[HMM_PATH_MAX];
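
A side note for anyone less familiar with the kselftest harness: the
fixture-variant plumbing above is what makes every TEST_F run once per
device type. A minimal, illustrative sketch of the mechanism (not part of
the patch; the fixture name, variant names and device numbers below are
made up, and the include path is the one hmm-tests.c already uses):

#include "../kselftest_harness.h"

FIXTURE(dev)
{
        int fd;
};

/* Each TEST_F(dev, ...) is instantiated once per FIXTURE_VARIANT_ADD. */
FIXTURE_VARIANT(dev)
{
        int device_number;
};

FIXTURE_VARIANT_ADD(dev, private)  { .device_number = 0, };
FIXTURE_VARIANT_ADD(dev, coherent) { .device_number = 2, };

FIXTURE_SETUP(dev)
{
        /* 'variant' points at the FIXTURE_VARIANT_ADD entry being run. */
        self->fd = variant->device_number;      /* stand-in for hmm_open() */
}

FIXTURE_TEARDOWN(dev)
{
}

TEST_F(dev, runs_once_per_variant)
{
        ASSERT_GE(self->fd, 0);
}

TEST_HARNESS_MAIN

Each variant shows up as its own test instance in the harness output, so
the tests themselves never need to loop over device types.
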
> > @@ -81,12 +122,19 @@ static int hmm_open(int unit)
> >   	return fd;
> >   }
> >   
> > +static bool hmm_is_coherent_type(int dev_num)
> > +{
> > +	return (dev_num >= HMM_COHERENCE_DEVICE_ONE);
> > +}
> > +
> >   FIXTURE_SETUP(hmm)
> >   {
> >   	self->page_size = sysconf(_SC_PAGE_SIZE);
> >   	self->page_shift = ffs(self->page_size) - 1;
> >   
> > -	self->fd = hmm_open(0);
> > +	self->fd = hmm_open(variant->device_number);
> > +	if (self->fd < 0 && hmm_is_coherent_type(variant->device_number))
> > +		SKIP(exit(0), "DEVICE_COHERENT not available");
> >   	ASSERT_GE(self->fd, 0);
> >   }
> >   
> > @@ -95,9 +143,11 @@ FIXTURE_SETUP(hmm2)
> >   	self->page_size = sysconf(_SC_PAGE_SIZE);
> >   	self->page_shift = ffs(self->page_size) - 1;
> >   
> > -	self->fd0 = hmm_open(0);
> > +	self->fd0 = hmm_open(variant->device_number0);
> > +	if (self->fd0 < 0 && hmm_is_coherent_type(variant->device_number0))
> > +		SKIP(exit(0), "DEVICE_COHERENT not available");
> >   	ASSERT_GE(self->fd0, 0);
> > -	self->fd1 = hmm_open(1);
> > +	self->fd1 = hmm_open(variant->device_number1);
> >   	ASSERT_GE(self->fd1, 0);
> >   }
> >   
> > @@ -144,6 +194,7 @@ static int hmm_dmirror_cmd(int fd,
> >   	}
> >   	buffer->cpages = cmd.cpages;
> >   	buffer->faults = cmd.faults;
> > +	buffer->zone_device_type = cmd.zone_device_type;
> >   
> >   	return 0;
> >   }
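
With zone_device_type copied into struct hmm_buffer here, a test that has
just issued a migrate command can see what kind of device memory it landed
in. A rough sketch (hmm_migrate_sys_to_dev()/..._dev_to_sys() are the
helpers added just below; the HMM_DMIRROR_MEMORY_DEVICE_COHERENT name comes
from the uapi header touched elsewhere in this series, so treat it as an
assumption of this note rather than something this hunk defines):

        ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
        ASSERT_EQ(ret, 0);
        /* zone_device_type was filled in by hmm_dmirror_cmd() above. */
        if (buffer->zone_device_type == HMM_DMIRROR_MEMORY_DEVICE_COHERENT) {
                /* Coherent pages never fault, so move them back explicitly. */
                ASSERT_EQ(hmm_migrate_dev_to_sys(self->fd, buffer, npages), 0);
        }
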
> > @@ -211,6 +262,20 @@ static void hmm_nanosleep(unsigned int n)
> >   	nanosleep(&t, NULL);
> >   }
> >   
> > +static int hmm_migrate_sys_to_dev(int fd,
> > +				   struct hmm_buffer *buffer,
> > +				   unsigned long npages)
> > +{
> > +	return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages);
> > +}
> > +
> > +static int hmm_migrate_dev_to_sys(int fd,
> > +				   struct hmm_buffer *buffer,
> > +				   unsigned long npages)
> > +{
> > +	return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages);
> > +}
> > +
> >   /*
> >    * Simple NULL test of device open/close.
> >    */
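
These wrappers make the migration direction explicit at each call site,
which is what the rest of the patch relies on. The change pattern through
the file is essentially this (the old command name is taken from the
pre-patch code shown in the removed lines below):

        /* Before: one command; device-private pages fault back on CPU access. */
        ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);

        /* After: the direction is spelled out, so coherent pages (which never
         * fault) can be moved back to system memory explicitly when needed. */
        ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
        ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages);
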
> > @@ -875,7 +940,7 @@ TEST_F(hmm, migrate)
> >   		ptr[i] = i;
> >   
> >   	/* Migrate memory to device. */
> > -	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
> > +	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
> >   	ASSERT_EQ(ret, 0);
> >   	ASSERT_EQ(buffer->cpages, npages);
> >   
> > @@ -923,7 +988,7 @@ TEST_F(hmm, migrate_fault)
> >   		ptr[i] = i;
> >   
> >   	/* Migrate memory to device. */
> > -	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
> > +	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
> >   	ASSERT_EQ(ret, 0);
> >   	ASSERT_EQ(buffer->cpages, npages);
> >   
> > @@ -936,7 +1001,7 @@ TEST_F(hmm, migrate_fault)
> >   		ASSERT_EQ(ptr[i], i);
> >   
> >   	/* Migrate memory to the device again. */
> > -	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
> > +	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
> >   	ASSERT_EQ(ret, 0);
> >   	ASSERT_EQ(buffer->cpages, npages);
> >   
> > @@ -976,7 +1041,7 @@ TEST_F(hmm, migrate_shared)
> >   	ASSERT_NE(buffer->ptr, MAP_FAILED);
> >   
> >   	/* Migrate memory to device. */
> > -	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
> > +	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
> >   	ASSERT_EQ(ret, -ENOENT);
> >   
> >   	hmm_buffer_free(buffer);
> > @@ -1015,7 +1080,7 @@ TEST_F(hmm2, migrate_mixed)
> >   	p = buffer->ptr;
> >   
> >   	/* Migrating a protected area should be an error. */
> > -	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, npages);
> > +	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
> >   	ASSERT_EQ(ret, -EINVAL);
> >   
> >   	/* Punch a hole after the first page address. */
> > @@ -1023,7 +1088,7 @@ TEST_F(hmm2, migrate_mixed)
> >   	ASSERT_EQ(ret, 0);
> >   
> >   	/* We expect an error if the vma doesn't cover the range. */
> > -	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 3);
> > +	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 3);
> >   	ASSERT_EQ(ret, -EINVAL);
> >   
> >   	/* Page 2 will be a read-only zero page. */
> > @@ -1055,13 +1120,13 @@ TEST_F(hmm2, migrate_mixed)
> >   
> >   	/* Now try to migrate pages 2-5 to device 1. */
> >   	buffer->ptr = p + 2 * self->page_size;
> > -	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 4);
> > +	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 4);
> >   	ASSERT_EQ(ret, 0);
> >   	ASSERT_EQ(buffer->cpages, 4);
> >   
> >   	/* Page 5 won't be migrated to device 0 because it's on device 1. */
> >   	buffer->ptr = p + 5 * self->page_size;
> > -	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
> > +	ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
> >   	ASSERT_EQ(ret, -ENOENT);
> >   	buffer->ptr = p;
> >   
> > @@ -1070,8 +1135,12 @@ TEST_F(hmm2, migrate_mixed)
> >   }
> >   
> >   /*
> > - * Migrate anonymous memory to device private memory and fault it back to system
> > - * memory multiple times.
> > + * Migrate anonymous memory to device memory and back to system memory
> > + * multiple times. In case of private zone configuration, this is done
> > + * through fault pages accessed by CPU. In case of coherent zone configuration,
> > + * the pages from the device should be explicitly migrated back to system memory.
> > + * The reason is Coherent device zone has coherent access by CPU, therefore
> > + * it will not generate any page fault.
> >    */
> >   TEST_F(hmm, migrate_multiple)
> >   {
> > @@ -1107,8 +1176,7 @@ TEST_F(hmm, migrate_multiple)
> >   			ptr[i] = i;
> >   
> >   		/* Migrate memory to device. */
> > -		ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer,
> > -				      npages);
> > +		ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
> >   		ASSERT_EQ(ret, 0);
> >   		ASSERT_EQ(buffer->cpages, npages);
> >   
> > @@ -1116,7 +1184,13 @@ TEST_F(hmm, migrate_multiple)
> >   		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
> >   			ASSERT_EQ(ptr[i], i);
> >   
> > -		/* Fault pages back to system memory and check them. */
> > +		/* Migrate back to system memory and check them. */
> > +		if (hmm_is_coherent_type(variant->device_number)) {
> > +			ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages);
> > +			ASSERT_EQ(ret, 0);
> > +			ASSERT_EQ(buffer->cpages, npages);
> > +		}
> > +
> >   		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
> >   			ASSERT_EQ(ptr[i], i);
> >   
> > @@ -1354,13 +1428,13 @@ TEST_F(hmm2, snapshot)
> >   
> >   	/* Page 5 will be migrated to device 0. */
> >   	buffer->ptr = p + 5 * self->page_size;
> > -	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
> > +	ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
> >   	ASSERT_EQ(ret, 0);
> >   	ASSERT_EQ(buffer->cpages, 1);
> >   
> >   	/* Page 6 will be migrated to device 1. */
> >   	buffer->ptr = p + 6 * self->page_size;
> > -	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 1);
> > +	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 1);
> >   	ASSERT_EQ(ret, 0);
> >   	ASSERT_EQ(buffer->cpages, 1);
> >   
> > @@ -1377,9 +1451,16 @@ TEST_F(hmm2, snapshot)
> >   	ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
> >   	ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
> >   	ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
> > -	ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
> > -			HMM_DMIRROR_PROT_WRITE);
> > -	ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
> > +	if (!hmm_is_coherent_type(variant->device_number0)) {
> > +		ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
> > +				HMM_DMIRROR_PROT_WRITE);
> > +		ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
> > +	} else {
> > +		ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL |
> > +				HMM_DMIRROR_PROT_WRITE);
> > +		ASSERT_EQ(m[6], HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE |
> > +				HMM_DMIRROR_PROT_WRITE);
> > +	}
> >   
> >   	hmm_buffer_free(buffer);
> >   }
> 



