
Commit 4db0c3c

Jason Low authored and torvalds committed
mm: remove rest of ACCESS_ONCE() usages
We converted some of the usages of ACCESS_ONCE to READ_ONCE in the mm/ tree since it doesn't work reliably on non-scalar types. This patch removes the rest of the ACCESS_ONCE usages and uses the new READ_ONCE API for the read accesses. This makes things cleaner, instead of using separate/multiple sets of APIs.

Signed-off-by: Jason Low <jason.low2@hp.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Davidlohr Bueso <dave@stgolabs.net>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
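For illustration, here is a minimal userspace sketch of the conversion pattern, not kernel code: the READ_ONCE() below is a simplified volatile-cast stand-in (the kernel's real macro also handles non-scalar types), and fault_around_bytes plus the hard-coded page shift are illustrative assumptions mirroring the mm/memory.c hunk.

/* Minimal sketch, not the kernel implementation: READ_ONCE() here is a
 * simplified volatile-cast stand-in, and the names below are illustrative. */
#include <stdio.h>

#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

static unsigned long fault_around_bytes = 65536;	/* tunable that may change under us */

int main(void)
{
	/* Mirrors the mm/memory.c hunk: read the tunable exactly once,
	 * then derive nr_pages from that single snapshot. */
	unsigned long nr_pages = READ_ONCE(fault_around_bytes) >> 12;	/* 12 stands in for PAGE_SHIFT */

	printf("nr_pages = %lu\n", nr_pages);
	return 0;
}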
1 parent 9d8c47e commit 4db0c3c

File tree

11 files changed: +33 -33 lines


mm/huge_memory.c

Lines changed: 2 additions & 2 deletions
@@ -183,7 +183,7 @@ static struct page *get_huge_zero_page(void)
 	struct page *zero_page;
 retry:
 	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
-		return ACCESS_ONCE(huge_zero_page);
+		return READ_ONCE(huge_zero_page);

 	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
 			HPAGE_PMD_ORDER);
@@ -202,7 +202,7 @@ static struct page *get_huge_zero_page(void)
 	/* We take additional reference here. It will be put back by shrinker */
 	atomic_set(&huge_zero_refcount, 2);
 	preempt_enable();
-	return ACCESS_ONCE(huge_zero_page);
+	return READ_ONCE(huge_zero_page);
 }

 static void put_huge_zero_page(void)

mm/internal.h

Lines changed: 2 additions & 2 deletions
@@ -224,13 +224,13 @@ static inline unsigned long page_order(struct page *page)
  * PageBuddy() should be checked first by the caller to minimize race window,
  * and invalid values must be handled gracefully.
  *
- * ACCESS_ONCE is used so that if the caller assigns the result into a local
+ * READ_ONCE is used so that if the caller assigns the result into a local
  * variable and e.g. tests it for valid range before using, the compiler cannot
  * decide to remove the variable and inline the page_private(page) multiple
  * times, potentially observing different values in the tests and the actual
  * use of the result.
  */
-#define page_order_unsafe(page)	ACCESS_ONCE(page_private(page))
+#define page_order_unsafe(page)	READ_ONCE(page_private(page))

 static inline bool is_cow_mapping(vm_flags_t flags)
 {
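The mm/internal.h comment above describes the hazard being guarded against: without READ_ONCE() the compiler may reload page_private(page), so a range check and the later use can observe different values. Below is a self-contained sketch of that check-then-use pattern; the names (shared_order, the limit of 11) are purely illustrative, not kernel symbols.

/* Illustrative only: one load feeds both the validity test and the use. */
#include <stdio.h>

#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

static unsigned long shared_order;	/* imagine another CPU updating this concurrently */

static unsigned long checked_order(void)
{
	unsigned long order = READ_ONCE(shared_order);	/* exactly one load */

	if (order >= 11)	/* handle invalid values gracefully */
		return 0;
	return order;		/* guaranteed to be the value that passed the check */
}

int main(void)
{
	shared_order = 3;
	printf("order = %lu\n", checked_order());
	return 0;
}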

mm/ksm.c

Lines changed: 5 additions & 5 deletions
@@ -542,7 +542,7 @@ static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
 	expected_mapping = (void *)stable_node +
 				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
 again:
-	kpfn = ACCESS_ONCE(stable_node->kpfn);
+	kpfn = READ_ONCE(stable_node->kpfn);
 	page = pfn_to_page(kpfn);

 	/*
@@ -551,7 +551,7 @@ static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
 	 * but on Alpha we need to be more careful.
 	 */
 	smp_read_barrier_depends();
-	if (ACCESS_ONCE(page->mapping) != expected_mapping)
+	if (READ_ONCE(page->mapping) != expected_mapping)
 		goto stale;

 	/*
@@ -577,14 +577,14 @@ static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
 		cpu_relax();
 	}

-	if (ACCESS_ONCE(page->mapping) != expected_mapping) {
+	if (READ_ONCE(page->mapping) != expected_mapping) {
 		put_page(page);
 		goto stale;
 	}

 	if (lock_it) {
 		lock_page(page);
-		if (ACCESS_ONCE(page->mapping) != expected_mapping) {
+		if (READ_ONCE(page->mapping) != expected_mapping) {
 			unlock_page(page);
 			put_page(page);
 			goto stale;
@@ -600,7 +600,7 @@ static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
 	 * before checking whether node->kpfn has been changed.
 	 */
 	smp_rmb();
-	if (ACCESS_ONCE(stable_node->kpfn) != kpfn)
+	if (READ_ONCE(stable_node->kpfn) != kpfn)
 		goto again;
 	remove_node_from_stable_tree(stable_node);
 	return NULL;

mm/memcontrol.c

Lines changed: 9 additions & 9 deletions
@@ -674,7 +674,7 @@ static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
 {
 	unsigned long nr_pages = page_counter_read(&memcg->memory);
-	unsigned long soft_limit = ACCESS_ONCE(memcg->soft_limit);
+	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
 	unsigned long excess = 0;

 	if (nr_pages > soft_limit)
@@ -1042,7 +1042,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 			goto out_unlock;

 	do {
-		pos = ACCESS_ONCE(iter->position);
+		pos = READ_ONCE(iter->position);
 		/*
 		 * A racing update may change the position and
 		 * put the last reference, hence css_tryget(),
@@ -1359,13 +1359,13 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
 	unsigned long limit;

 	count = page_counter_read(&memcg->memory);
-	limit = ACCESS_ONCE(memcg->memory.limit);
+	limit = READ_ONCE(memcg->memory.limit);
 	if (count < limit)
 		margin = limit - count;

 	if (do_swap_account) {
 		count = page_counter_read(&memcg->memsw);
-		limit = ACCESS_ONCE(memcg->memsw.limit);
+		limit = READ_ONCE(memcg->memsw.limit);
 		if (count <= limit)
 			margin = min(margin, limit - count);
 	}
@@ -2637,7 +2637,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 		return cachep;

 	memcg = get_mem_cgroup_from_mm(current->mm);
-	kmemcg_id = ACCESS_ONCE(memcg->kmemcg_id);
+	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
 	if (kmemcg_id < 0)
 		goto out;

@@ -5007,7 +5007,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
 	 * tunable will only affect upcoming migrations, not the current one.
 	 * So we need to save it, and keep it going.
 	 */
-	move_flags = ACCESS_ONCE(memcg->move_charge_at_immigrate);
+	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
 	if (move_flags) {
 		struct mm_struct *mm;
 		struct mem_cgroup *from = mem_cgroup_from_task(p);
@@ -5241,7 +5241,7 @@ static u64 memory_current_read(struct cgroup_subsys_state *css,
 static int memory_low_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-	unsigned long low = ACCESS_ONCE(memcg->low);
+	unsigned long low = READ_ONCE(memcg->low);

 	if (low == PAGE_COUNTER_MAX)
 		seq_puts(m, "max\n");
@@ -5271,7 +5271,7 @@ static ssize_t memory_low_write(struct kernfs_open_file *of,
 static int memory_high_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-	unsigned long high = ACCESS_ONCE(memcg->high);
+	unsigned long high = READ_ONCE(memcg->high);

 	if (high == PAGE_COUNTER_MAX)
 		seq_puts(m, "max\n");
@@ -5301,7 +5301,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
 static int memory_max_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-	unsigned long max = ACCESS_ONCE(memcg->memory.limit);
+	unsigned long max = READ_ONCE(memcg->memory.limit);

 	if (max == PAGE_COUNTER_MAX)
 		seq_puts(m, "max\n");

mm/memory.c

Lines changed: 1 addition & 1 deletion
@@ -2845,7 +2845,7 @@ static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
 	struct vm_fault vmf;
 	int off;

-	nr_pages = ACCESS_ONCE(fault_around_bytes) >> PAGE_SHIFT;
+	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
 	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;

 	start_addr = max(address & mask, vma->vm_start);

mm/mmap.c

Lines changed: 4 additions & 4 deletions
@@ -1133,7 +1133,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
  * by another page fault trying to merge _that_. But that's ok: if it
  * is being set up, that automatically means that it will be a singleton
  * acceptable for merging, so we can do all of this optimistically. But
- * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
+ * we do that READ_ONCE() to make sure that we never re-load the pointer.
  *
  * IOW: that the "list_is_singular()" test on the anon_vma_chain only
  * matters for the 'stable anon_vma' case (ie the thing we want to avoid
@@ -1147,7 +1147,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
 {
 	if (anon_vma_compatible(a, b)) {
-		struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);
+		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);

 		if (anon_vma && list_is_singular(&old->anon_vma_chain))
 			return anon_vma;
@@ -2100,15 +2100,15 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 	actual_size = size;
 	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
 		actual_size -= PAGE_SIZE;
-	if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+	if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;

 	/* mlock limit tests */
 	if (vma->vm_flags & VM_LOCKED) {
 		unsigned long locked;
 		unsigned long limit;
 		locked = mm->locked_vm + grow;
-		limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
+		limit = READ_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
 		limit >>= PAGE_SHIFT;
 		if (locked > limit && !capable(CAP_IPC_LOCK))
 			return -ENOMEM;

mm/page_alloc.c

Lines changed: 3 additions & 3 deletions
@@ -1371,7 +1371,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 	int to_drain, batch;

 	local_irq_save(flags);
-	batch = ACCESS_ONCE(pcp->batch);
+	batch = READ_ONCE(pcp->batch);
 	to_drain = min(pcp->count, batch);
 	if (to_drain > 0) {
 		free_pcppages_bulk(zone, to_drain, pcp);
@@ -1570,7 +1570,7 @@ void free_hot_cold_page(struct page *page, bool cold)
 	list_add_tail(&page->lru, &pcp->lists[migratetype]);
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
-		unsigned long batch = ACCESS_ONCE(pcp->batch);
+		unsigned long batch = READ_ONCE(pcp->batch);
 		free_pcppages_bulk(zone, batch, pcp);
 		pcp->count -= batch;
 	}
@@ -6207,7 +6207,7 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
 	mask <<= (BITS_PER_LONG - bitidx - 1);
 	flags <<= (BITS_PER_LONG - bitidx - 1);

-	word = ACCESS_ONCE(bitmap[word_bitidx]);
+	word = READ_ONCE(bitmap[word_bitidx]);
 	for (;;) {
 		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
 		if (word == old_word)
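The set_pfnblock_flags_mask() hunk above shows the other recurring pattern in this patch: take one snapshot with READ_ONCE(), then retry with cmpxchg() until the word is updated atomically. Below is a hedged userspace sketch of the same loop using C11 atomics in place of the kernel's cmpxchg(); the bitmap word and mask values are made up for illustration.

/* Illustrative retry loop: snapshot once, then compare-and-swap until it sticks. */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long bitmap_word;

static void set_flags_mask(unsigned long flags, unsigned long mask)
{
	unsigned long word = atomic_load(&bitmap_word);	/* single initial read */

	for (;;) {
		unsigned long new_word = (word & ~mask) | flags;

		/* On failure, 'word' is refreshed with the current value and we retry. */
		if (atomic_compare_exchange_weak(&bitmap_word, &word, new_word))
			break;
	}
}

int main(void)
{
	atomic_store(&bitmap_word, 0xf0UL);
	set_flags_mask(0x05UL, 0x0fUL);
	printf("word = %#lx\n", atomic_load(&bitmap_word));
	return 0;
}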

mm/rmap.c

Lines changed: 3 additions & 3 deletions
@@ -456,7 +456,7 @@ struct anon_vma *page_get_anon_vma(struct page *page)
 	unsigned long anon_mapping;

 	rcu_read_lock();
-	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
+	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
 	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 		goto out;
 	if (!page_mapped(page))
@@ -500,14 +500,14 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
 	unsigned long anon_mapping;

 	rcu_read_lock();
-	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
+	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
 	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 		goto out;
 	if (!page_mapped(page))
 		goto out;

 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
-	root_anon_vma = ACCESS_ONCE(anon_vma->root);
+	root_anon_vma = READ_ONCE(anon_vma->root);
 	if (down_read_trylock(&root_anon_vma->rwsem)) {
 		/*
 		 * If the page is still mapped, then this anon_vma is still

mm/slub.c

Lines changed: 2 additions & 2 deletions
@@ -4277,7 +4277,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			int node;
 			struct page *page;

-			page = ACCESS_ONCE(c->page);
+			page = READ_ONCE(c->page);
 			if (!page)
 				continue;

@@ -4292,7 +4292,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			total += x;
 			nodes[node] += x;

-			page = ACCESS_ONCE(c->partial);
+			page = READ_ONCE(c->partial);
 			if (page) {
 				node = page_to_nid(page);
 				if (flags & SO_TOTAL)

mm/swap_state.c

Lines changed: 1 addition & 1 deletion
@@ -390,7 +390,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
 	unsigned int pages, max_pages, last_ra;
 	static atomic_t last_readahead_pages;

-	max_pages = 1 << ACCESS_ONCE(page_cluster);
+	max_pages = 1 << READ_ONCE(page_cluster);
 	if (max_pages <= 1)
 		return 1;

