Date: Wed, 26 Jan 2022 21:22:13 -0600
From: "Sierra Guiza, Alejandro (Alex)" <alex.sierra@....com>
To: Alistair Popple <apopple@...dia.com>, akpm@...ux-foundation.org,
	Felix.Kuehling@....com, linux-mm@...ck.org, rcampbell@...dia.com,
	linux-ext4@...r.kernel.org, linux-xfs@...r.kernel.org
Cc: amd-gfx@...ts.freedesktop.org, dri-devel@...ts.freedesktop.org,
	hch@....de, jgg@...dia.com, jglisse@...hat.com, willy@...radead.org
Subject: Re: [PATCH v3 09/10] tools: update hmm-test to support device coherent type

On 1/20/2022 12:14 AM, Alistair Popple wrote:
> On Tuesday, 11 January 2022 9:32:00 AM AEDT Alex Sierra wrote:
>> Test cases such as migrate_fault and migrate_multiple, were modified to
>> explicit migrate from device to sys memory without the need of page
>> faults, when using device coherent type.
>>
>> Snapshot test case updated to read memory device type first and based
>> on that, get the proper returned results. migrate_ping_pong test case
> Where is the migrate_ping_pong test? Did you perhaps forget to add it? :-)

Migration from device coherent to system memory is tested by
migrate_multiple too, so I've removed the migrate_ping_pong test.
BTW, I just added the "number of pages migrated" check after the
migration from coherent to system memory in the v4 series.

Regards,
Alejandro Sierra

>
>> added to test explicit migration from device to sys memory for both
>> private and coherent zone types.
>>
>> Helpers to migrate from device to sys memory and vicerversa
>> were also added.
>>
>> Signed-off-by: Alex Sierra <alex.sierra@....com>
>> ---
>> v2:
>> Set FIXTURE_VARIANT to add multiple device types to the FIXTURE. This
>> will run all the tests for each device type (private and coherent) in
>> case both existed during hmm-test driver probed.
>> ---
>>  tools/testing/selftests/vm/hmm-tests.c | 122 ++++++++++++++++++++-----
>>  1 file changed, 101 insertions(+), 21 deletions(-)
>>
>> diff --git a/tools/testing/selftests/vm/hmm-tests.c b/tools/testing/selftests/vm/hmm-tests.c
>> index 864f126ffd78..8eb81dfba4b3 100644
>> --- a/tools/testing/selftests/vm/hmm-tests.c
>> +++ b/tools/testing/selftests/vm/hmm-tests.c
>> @@ -44,6 +44,14 @@ struct hmm_buffer {
>>  	int		fd;
>>  	uint64_t	cpages;
>>  	uint64_t	faults;
>> +	int		zone_device_type;
>> +};
>> +
>> +enum {
>> +	HMM_PRIVATE_DEVICE_ONE,
>> +	HMM_PRIVATE_DEVICE_TWO,
>> +	HMM_COHERENCE_DEVICE_ONE,
>> +	HMM_COHERENCE_DEVICE_TWO,
>>  };
>>
>>  #define TWOMEG		(1 << 21)
>> @@ -60,6 +68,21 @@ FIXTURE(hmm)
>>  	unsigned int	page_shift;
>>  };
>>
>> +FIXTURE_VARIANT(hmm)
>> +{
>> +	int	device_number;
>> +};
>> +
>> +FIXTURE_VARIANT_ADD(hmm, hmm_device_private)
>> +{
>> +	.device_number = HMM_PRIVATE_DEVICE_ONE,
>> +};
>> +
>> +FIXTURE_VARIANT_ADD(hmm, hmm_device_coherent)
>> +{
>> +	.device_number = HMM_COHERENCE_DEVICE_ONE,
>> +};
>> +
>>  FIXTURE(hmm2)
>>  {
>>  	int	fd0;
>> @@ -68,6 +91,24 @@ FIXTURE(hmm2)
>>  	unsigned int	page_shift;
>>  };
>>
>> +FIXTURE_VARIANT(hmm2)
>> +{
>> +	int	device_number0;
>> +	int	device_number1;
>> +};
>> +
>> +FIXTURE_VARIANT_ADD(hmm2, hmm2_device_private)
>> +{
>> +	.device_number0 = HMM_PRIVATE_DEVICE_ONE,
>> +	.device_number1 = HMM_PRIVATE_DEVICE_TWO,
>> +};
>> +
>> +FIXTURE_VARIANT_ADD(hmm2, hmm2_device_coherent)
>> +{
>> +	.device_number0 = HMM_COHERENCE_DEVICE_ONE,
>> +	.device_number1 = HMM_COHERENCE_DEVICE_TWO,
>> +};
>> +
>>  static int hmm_open(int unit)
>>  {
>>  	char pathname[HMM_PATH_MAX];
>> @@ -81,12 +122,19 @@ static int hmm_open(int unit)
>>  	return fd;
>>  }
>>
>> +static bool hmm_is_coherent_type(int dev_num)
>> +{
>> +	return (dev_num >= HMM_COHERENCE_DEVICE_ONE);
>> +}
>> +
>>  FIXTURE_SETUP(hmm)
>>  {
>>  	self->page_size = sysconf(_SC_PAGE_SIZE);
>>  	self->page_shift = ffs(self->page_size) - 1;
>>
>> -	self->fd = hmm_open(0);
>> +	self->fd = hmm_open(variant->device_number);
>> +	if (self->fd < 0 && hmm_is_coherent_type(variant->device_number))
>> +		SKIP(exit(0), "DEVICE_COHERENT not available");
>>  	ASSERT_GE(self->fd, 0);
>>  }
>>
>> @@ -95,9 +143,11 @@ FIXTURE_SETUP(hmm2)
>>  	self->page_size = sysconf(_SC_PAGE_SIZE);
>>  	self->page_shift = ffs(self->page_size) - 1;
>>
>> -	self->fd0 = hmm_open(0);
>> +	self->fd0 = hmm_open(variant->device_number0);
>> +	if (self->fd0 < 0 && hmm_is_coherent_type(variant->device_number0))
>> +		SKIP(exit(0), "DEVICE_COHERENT not available");
>>  	ASSERT_GE(self->fd0, 0);
>> -	self->fd1 = hmm_open(1);
>> +	self->fd1 = hmm_open(variant->device_number1);
>>  	ASSERT_GE(self->fd1, 0);
>>  }
>>
>> @@ -144,6 +194,7 @@ static int hmm_dmirror_cmd(int fd,
>>  	}
>>  	buffer->cpages = cmd.cpages;
>>  	buffer->faults = cmd.faults;
>> +	buffer->zone_device_type = cmd.zone_device_type;
>>
>>  	return 0;
>>  }
>> @@ -211,6 +262,20 @@ static void hmm_nanosleep(unsigned int n)
>>  	nanosleep(&t, NULL);
>>  }
>>
>> +static int hmm_migrate_sys_to_dev(int fd,
>> +				   struct hmm_buffer *buffer,
>> +				   unsigned long npages)
>> +{
>> +	return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages);
>> +}
>> +
>> +static int hmm_migrate_dev_to_sys(int fd,
>> +				   struct hmm_buffer *buffer,
>> +				   unsigned long npages)
>> +{
>> +	return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages);
>> +}
>> +
>>  /*
>>   * Simple NULL test of device open/close.
>>   */
>> @@ -875,7 +940,7 @@ TEST_F(hmm, migrate)
>>  		ptr[i] = i;
>>
>>  	/* Migrate memory to device. */
>> -	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
>> +	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
>>  	ASSERT_EQ(ret, 0);
>>  	ASSERT_EQ(buffer->cpages, npages);
>>
>> @@ -923,7 +988,7 @@ TEST_F(hmm, migrate_fault)
>>  		ptr[i] = i;
>>
>>  	/* Migrate memory to device. */
>> -	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
>> +	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
>>  	ASSERT_EQ(ret, 0);
>>  	ASSERT_EQ(buffer->cpages, npages);
>>
>> @@ -936,7 +1001,7 @@ TEST_F(hmm, migrate_fault)
>>  		ASSERT_EQ(ptr[i], i);
>>
>>  	/* Migrate memory to the device again. */
>> -	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
>> +	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
>>  	ASSERT_EQ(ret, 0);
>>  	ASSERT_EQ(buffer->cpages, npages);
>>
>> @@ -976,7 +1041,7 @@ TEST_F(hmm, migrate_shared)
>>  	ASSERT_NE(buffer->ptr, MAP_FAILED);
>>
>>  	/* Migrate memory to device. */
>> -	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
>> +	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
>>  	ASSERT_EQ(ret, -ENOENT);
>>
>>  	hmm_buffer_free(buffer);
>> @@ -1015,7 +1080,7 @@ TEST_F(hmm2, migrate_mixed)
>>  	p = buffer->ptr;
>>
>>  	/* Migrating a protected area should be an error. */
>> -	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, npages);
>> +	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
>>  	ASSERT_EQ(ret, -EINVAL);
>>
>>  	/* Punch a hole after the first page address. */
>> @@ -1023,7 +1088,7 @@ TEST_F(hmm2, migrate_mixed)
>>  	ASSERT_EQ(ret, 0);
>>
>>  	/* We expect an error if the vma doesn't cover the range. */
>> -	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 3);
>> +	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 3);
>>  	ASSERT_EQ(ret, -EINVAL);
>>
>>  	/* Page 2 will be a read-only zero page. */
>> @@ -1055,13 +1120,13 @@ TEST_F(hmm2, migrate_mixed)
>>
>>  	/* Now try to migrate pages 2-5 to device 1. */
>>  	buffer->ptr = p + 2 * self->page_size;
>> -	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 4);
>> +	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 4);
>>  	ASSERT_EQ(ret, 0);
>>  	ASSERT_EQ(buffer->cpages, 4);
>>
>>  	/* Page 5 won't be migrated to device 0 because it's on device 1. */
>>  	buffer->ptr = p + 5 * self->page_size;
>> -	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
>> +	ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
>>  	ASSERT_EQ(ret, -ENOENT);
>>  	buffer->ptr = p;
>>
>> @@ -1070,8 +1135,12 @@ TEST_F(hmm2, migrate_mixed)
>>  }
>>
>>  /*
>> - * Migrate anonymous memory to device private memory and fault it back to system
>> - * memory multiple times.
>> + * Migrate anonymous memory to device memory and back to system memory
>> + * multiple times. In case of private zone configuration, this is done
>> + * through fault pages accessed by CPU. In case of coherent zone configuration,
>> + * the pages from the device should be explicitly migrated back to system memory.
>> + * The reason is Coherent device zone has coherent access by CPU, therefore
>> + * it will not generate any page fault.
>>   */
>>  TEST_F(hmm, migrate_multiple)
>>  {
>> @@ -1107,8 +1176,7 @@ TEST_F(hmm, migrate_multiple)
>>  			ptr[i] = i;
>>
>>  		/* Migrate memory to device. */
>> -		ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer,
>> -				      npages);
>> +		ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
>>  		ASSERT_EQ(ret, 0);
>>  		ASSERT_EQ(buffer->cpages, npages);
>>
>> @@ -1116,7 +1184,12 @@ TEST_F(hmm, migrate_multiple)
>>  		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
>>  			ASSERT_EQ(ptr[i], i);
>>
>> -		/* Fault pages back to system memory and check them. */
>> +		/* Migrate back to system memory and check them. */
>> +		if (hmm_is_coherent_type(variant->device_number)) {
>> +			ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages);
>
> So I think this will still pass even if nothing migrates so as mentioned on
> the previous patch I think we need to check for the number of pages that
> actually migrated. Alternatively I suppose you could do a snapshot and check
> that, but that seems like it would be harder. Otherwise I think this looks
> good.
>
>> +			ASSERT_EQ(ret, 0);
>> +		}
>> +
>>  		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
>>  			ASSERT_EQ(ptr[i], i);
>>
>> @@ -1312,13 +1385,13 @@ TEST_F(hmm2, snapshot)
>>
>>  	/* Page 5 will be migrated to device 0. */
>>  	buffer->ptr = p + 5 * self->page_size;
>> -	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
>> +	ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
>>  	ASSERT_EQ(ret, 0);
>>  	ASSERT_EQ(buffer->cpages, 1);
>>
>>  	/* Page 6 will be migrated to device 1. */
>>  	buffer->ptr = p + 6 * self->page_size;
>> -	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 1);
>> +	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 1);
>>  	ASSERT_EQ(ret, 0);
>>  	ASSERT_EQ(buffer->cpages, 1);
>>
>> @@ -1335,9 +1408,16 @@ TEST_F(hmm2, snapshot)
>>  	ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
>>  	ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
>>  	ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
>> -	ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
>> -			HMM_DMIRROR_PROT_WRITE);
>> -	ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
>> +	if (!hmm_is_coherent_type(variant->device_number0)) {
>> +		ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
>> +				HMM_DMIRROR_PROT_WRITE);
>> +		ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
>> +	} else {
>> +		ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL |
>> +				HMM_DMIRROR_PROT_WRITE);
>> +		ASSERT_EQ(m[6], HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE |
>> +				HMM_DMIRROR_PROT_WRITE);
>> +	}
>>
>>  	hmm_buffer_free(buffer);
>>  }
>>
>
>
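For context, the "number of pages migrated" check mentioned in the reply, and requested in the review comment above, would presumably mirror the buffer->cpages assertions the patch already makes after each sys-to-dev migration. A minimal sketch of how the migrate_multiple hunk might look with that check added (the actual v4 code is not shown in this thread, so the extra assertion here is an assumption):

	/* Migrate back to system memory and check them. */
	if (hmm_is_coherent_type(variant->device_number)) {
		ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages);
		ASSERT_EQ(ret, 0);
		/* Assumed v4 addition: verify every page actually migrated back. */
		ASSERT_EQ(buffer->cpages, npages);
	}
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

This keeps the test from silently passing when the explicit dev-to-sys migration returns success but migrates nothing, which is the failure mode the reviewer points out.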
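The FIXTURE_VARIANT machinery referenced in the v2 note comes from the kernel's kselftest harness (kselftest_harness.h): each FIXTURE_VARIANT_ADD entry re-runs every TEST_F of that fixture, with the entry's values reachable through the variant pointer in both FIXTURE_SETUP and the test body. That is how the same tests cover both private and coherent devices when both are probed. A minimal standalone sketch of the pattern, reusing hmm_open() and the device enum from the patch above (the fixture, variant, and test names here are illustrative, not part of the patch):

	#include <unistd.h>
	#include "../kselftest_harness.h"

	FIXTURE(demo)
	{
		int fd;
	};

	FIXTURE_VARIANT(demo)
	{
		int device_number;
	};

	/* Each _ADD entry causes every TEST_F(demo, ...) to run once more. */
	FIXTURE_VARIANT_ADD(demo, demo_private)
	{
		.device_number = HMM_PRIVATE_DEVICE_ONE,
	};

	FIXTURE_VARIANT_ADD(demo, demo_coherent)
	{
		.device_number = HMM_COHERENCE_DEVICE_ONE,
	};

	FIXTURE_SETUP(demo)
	{
		/* variant-> carries the values from the matching _ADD block. */
		self->fd = hmm_open(variant->device_number);
	}

	FIXTURE_TEARDOWN(demo)
	{
		if (self->fd >= 0)
			close(self->fd);
	}

	TEST_F(demo, open_succeeds)
	{
		/* Runs twice: once per variant. */
		ASSERT_GE(self->fd, 0);
	}

	TEST_HARNESS_MAIN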