
Commit 3f6bfd4

lianux-mm authored and akpm00 committed
selftests/mm: reuse FORCE_READ to replace "asm volatile("" : "+r" (XXX));"
Patch series "selftests/mm: reuse FORCE_READ to replace "asm volatile("" : "+r" (XXX));" and some cleanup", v2.

This series introduces a common FORCE_READ() macro to replace the cryptic asm volatile("" : "+r" (variable)); construct used in several mm selftests. This improves code readability and maintainability by removing duplicated, hard-to-understand code.

This patch (of 2):

Several mm selftests use the `asm volatile("" : "+r" (variable));` construct to force a read of a variable, preventing the compiler from optimizing away the memory access. This idiom is cryptic and duplicated across multiple test files. Following a suggestion from David[1], this patch refactors this common pattern into a FORCE_READ() macro.

Link: https://lkml.kernel.org/r/20250717131857.59909-1-lianux.mm@gmail.com
Link: https://lkml.kernel.org/r/20250717131857.59909-2-lianux.mm@gmail.com
Link: https://lore.kernel.org/lkml/4a3e0759-caa1-4cfa-bc3f-402593f1eee3@redhat.com/ [1]
Signed-off-by: wang lian <lianux.mm@gmail.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Jann Horn <jannh@google.com>
Cc: Kairui Song <ryncsn@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent 7efa1cd commit 3f6bfd4
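For context, a minimal self-contained sketch contrasting the old idiom with the new macro. The one-page anonymous mapping and the hardcoded 4096-byte page size are illustrative only and not taken from any single test:

#include <sys/mman.h>

/* The helper this series adds to vm_util.h. */
#define FORCE_READ(x) (*(volatile typeof(x) *)x)

int main(void)
{
	/* Assume 4 KiB pages for this sketch. */
	char *mem = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char tmp;

	if (mem == MAP_FAILED)
		return 1;

	/* Old idiom: load into a temporary, then tie the temporary to a
	 * register with an empty asm so the compiler cannot discard the load.
	 */
	tmp = *mem;
	asm volatile("" : "+r" (tmp));

	/* New idiom: a single volatile access through the pointer; volatile
	 * semantics alone keep the read from being optimized away.
	 */
	FORCE_READ(mem);

	munmap(mem, 4096);
	return 0;
}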

7 files changed, 31 insertions(+), 39 deletions(-)

tools/testing/selftests/mm/cow.c (15 additions, 15 deletions)

@@ -1534,7 +1534,7 @@ static void test_ro_fast_pin(char *mem, const char *smem, size_t size)
 
 static void run_with_zeropage(non_anon_test_fn fn, const char *desc)
 {
-	char *mem, *smem, tmp;
+	char *mem, *smem;
 
 	log_test_start("%s ... with shared zeropage", desc);
 
@@ -1554,8 +1554,8 @@ static void run_with_zeropage(non_anon_test_fn fn, const char *desc)
 	}
 
 	/* Read from the page to populate the shared zeropage. */
-	tmp = *mem + *smem;
-	asm volatile("" : "+r" (tmp));
+	FORCE_READ(mem);
+	FORCE_READ(smem);
 
 	fn(mem, smem, pagesize);
 munmap:
@@ -1566,7 +1566,7 @@ static void run_with_zeropage(non_anon_test_fn fn, const char *desc)
 
 static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc)
 {
-	char *mem, *smem, *mmap_mem, *mmap_smem, tmp;
+	char *mem, *smem, *mmap_mem, *mmap_smem;
 	size_t mmap_size;
 	int ret;
 
@@ -1617,8 +1617,8 @@ static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc)
 	 * the first sub-page and test if we get another sub-page populated
 	 * automatically.
 	 */
-	tmp = *mem + *smem;
-	asm volatile("" : "+r" (tmp));
+	FORCE_READ(mem);
+	FORCE_READ(smem);
 	if (!pagemap_is_populated(pagemap_fd, mem + pagesize) ||
 	    !pagemap_is_populated(pagemap_fd, smem + pagesize)) {
 		ksft_test_result_skip("Did not get THPs populated\n");
@@ -1634,7 +1634,7 @@ static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc)
 
 static void run_with_memfd(non_anon_test_fn fn, const char *desc)
 {
-	char *mem, *smem, tmp;
+	char *mem, *smem;
 	int fd;
 
 	log_test_start("%s ... with memfd", desc);
@@ -1668,8 +1668,8 @@ static void run_with_memfd(non_anon_test_fn fn, const char *desc)
 	}
 
 	/* Fault the page in. */
-	tmp = *mem + *smem;
-	asm volatile("" : "+r" (tmp));
+	FORCE_READ(mem);
+	FORCE_READ(smem);
 
 	fn(mem, smem, pagesize);
 munmap:
@@ -1682,7 +1682,7 @@ static void run_with_memfd(non_anon_test_fn fn, const char *desc)
 
 static void run_with_tmpfile(non_anon_test_fn fn, const char *desc)
 {
-	char *mem, *smem, tmp;
+	char *mem, *smem;
 	FILE *file;
 	int fd;
 
@@ -1724,8 +1724,8 @@ static void run_with_tmpfile(non_anon_test_fn fn, const char *desc)
 	}
 
 	/* Fault the page in. */
-	tmp = *mem + *smem;
-	asm volatile("" : "+r" (tmp));
+	FORCE_READ(mem);
+	FORCE_READ(smem);
 
 	fn(mem, smem, pagesize);
 munmap:
@@ -1740,7 +1740,7 @@ static void run_with_memfd_hugetlb(non_anon_test_fn fn, const char *desc,
 				   size_t hugetlbsize)
 {
 	int flags = MFD_HUGETLB;
-	char *mem, *smem, tmp;
+	char *mem, *smem;
 	int fd;
 
 	log_test_start("%s ... with memfd hugetlb (%zu kB)", desc,
@@ -1778,8 +1778,8 @@ static void run_with_memfd_hugetlb(non_anon_test_fn fn, const char *desc,
 	}
 
 	/* Fault the page in. */
-	tmp = *mem + *smem;
-	asm volatile("" : "+r" (tmp));
+	FORCE_READ(mem);
+	FORCE_READ(smem);
 
 	fn(mem, smem, hugetlbsize);
 munmap:

tools/testing/selftests/mm/guard-regions.c (0 additions, 7 deletions)

@@ -35,13 +35,6 @@
 static volatile sig_atomic_t signal_jump_set;
 static sigjmp_buf signal_jmp_buf;
 
-/*
- * Ignore the checkpatch warning, we must read from x but don't want to do
- * anything with it in order to trigger a read page fault. We therefore must use
- * volatile to stop the compiler from optimising this away.
- */
-#define FORCE_READ(x) (*(volatile typeof(x) *)x)
-
 /*
  * How is the test backing the mapping being tested?
  */

tools/testing/selftests/mm/hugetlb-madvise.c (1 addition, 4 deletions)

@@ -47,14 +47,11 @@ void write_fault_pages(void *addr, unsigned long nr_pages)
 
 void read_fault_pages(void *addr, unsigned long nr_pages)
 {
-	volatile unsigned long dummy = 0;
 	unsigned long i;
 
 	for (i = 0; i < nr_pages; i++) {
-		dummy += *((unsigned long *)(addr + (i * huge_page_size)));
-
 		/* Prevent the compiler from optimizing out the entire loop: */
-		asm volatile("" : "+r" (dummy));
+		FORCE_READ(((unsigned long *)(addr + (i * huge_page_size))));
 	}
 }

tools/testing/selftests/mm/migration.c (6 additions, 7 deletions)

@@ -16,6 +16,7 @@
 #include <sys/types.h>
 #include <signal.h>
 #include <time.h>
+#include "vm_util.h"
 
 #define TWOMEG (2<<20)
 #define RUNTIME (20)
@@ -103,15 +104,13 @@ int migrate(uint64_t *ptr, int n1, int n2)
 
 void *access_mem(void *ptr)
 {
-	volatile uint64_t y = 0;
-	volatile uint64_t *x = ptr;
-
 	while (1) {
 		pthread_testcancel();
-		y += *x;
-
-		/* Prevent the compiler from optimizing out the writes to y: */
-		asm volatile("" : "+r" (y));
+		/* Force a read from the memory pointed to by ptr. This ensures
+		 * the memory access actually happens and prevents the compiler
+		 * from optimizing away this entire loop.
+		 */
+		FORCE_READ((uint64_t *)ptr);
 	}
 
 	return NULL;

tools/testing/selftests/mm/pagemap_ioctl.c (1 addition, 3 deletions)

@@ -1525,9 +1525,7 @@ void zeropfn_tests(void)
 
 	ret = madvise(mem, hpage_size, MADV_HUGEPAGE);
 	if (!ret) {
-		char tmp = *mem;
-
-		asm volatile("" : "+r" (tmp));
+		FORCE_READ(mem);
 
 		ret = pagemap_ioctl(mem, hpage_size, &vec, 1, 0,
 				    0, PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO);

tools/testing/selftests/mm/split_huge_page_test.c (1 addition, 3 deletions)

@@ -398,7 +398,6 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
			       char **addr)
 {
 	size_t i;
-	int dummy = 0;
 	unsigned char buf[1024];
 
 	srand(time(NULL));
@@ -440,8 +439,7 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
 	madvise(*addr, fd_size, MADV_HUGEPAGE);
 
 	for (size_t i = 0; i < fd_size; i++)
-		dummy += *(*addr + i);
-	asm volatile("" : "+r" (dummy));
+		FORCE_READ((*addr + i));
 
 	if (!check_huge_file(*addr, fd_size / pmd_pagesize, pmd_pagesize)) {
 		ksft_print_msg("No large pagecache folio generated, please provide a filesystem supporting large folio\n");

tools/testing/selftests/mm/vm_util.h (7 additions, 0 deletions)

@@ -18,6 +18,13 @@
 #define PM_SWAP BIT_ULL(62)
 #define PM_PRESENT BIT_ULL(63)
 
+/*
+ * Ignore the checkpatch warning, we must read from x but don't want to do
+ * anything with it in order to trigger a read page fault. We therefore must use
+ * volatile to stop the compiler from optimising this away.
+ */
+#define FORCE_READ(x) (*(volatile typeof(x) *)x)
+
 extern unsigned int __page_size;
 extern unsigned int __page_shift;

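With the macro now exported from vm_util.h, any test that includes the header gets the idiom for free. As a usage sketch, a hypothetical read-fault loop (not part of this diff, named here for illustration) in the style of read_fault_pages() above could look like:

#include <stddef.h>
#include "vm_util.h"

/* Hypothetical helper: fault in every page of a mapping with reads only. */
static void read_fault_range(char *addr, size_t size, size_t pagesize)
{
	size_t off;

	for (off = 0; off < size; off += pagesize)
		/* Each volatile read faults the page in; none can be elided. */
		FORCE_READ((unsigned long *)(addr + off));
}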