 #include <linux/dax.h>
 #include <linux/oom.h>
 #include <linux/numa.h>
+#include <linux/vmalloc.h>
 
 #include <trace/events/kmem.h>
 
@@ -2202,15 +2203,16 @@ EXPORT_SYMBOL(vm_iomap_memory);
 
 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long addr, unsigned long end,
-				     pte_fn_t fn, void *data, bool create)
+				     pte_fn_t fn, void *data, bool create,
+				     pgtbl_mod_mask *mask)
 {
	pte_t *pte;
	int err = 0;
	spinlock_t *uninitialized_var(ptl);
 
	if (create) {
		pte = (mm == &init_mm) ?
-			pte_alloc_kernel(pmd, addr) :
+			pte_alloc_kernel_track(pmd, addr, mask) :
			pte_alloc_map_lock(mm, pmd, addr, &ptl);
		if (!pte)
			return -ENOMEM;
@@ -2231,6 +2233,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
			break;
		}
	} while (addr += PAGE_SIZE, addr != end);
+	*mask |= PGTBL_PTE_MODIFIED;
 
	arch_leave_lazy_mmu_mode();
 
@@ -2241,7 +2244,8 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 
 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
				     unsigned long addr, unsigned long end,
-				     pte_fn_t fn, void *data, bool create)
+				     pte_fn_t fn, void *data, bool create,
+				     pgtbl_mod_mask *mask)
 {
	pmd_t *pmd;
	unsigned long next;
@@ -2250,7 +2254,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
	BUG_ON(pud_huge(*pud));
 
	if (create) {
-		pmd = pmd_alloc(mm, pud, addr);
+		pmd = pmd_alloc_track(mm, pud, addr, mask);
		if (!pmd)
			return -ENOMEM;
	} else {
@@ -2260,7 +2264,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
		next = pmd_addr_end(addr, end);
		if (create || !pmd_none_or_clear_bad(pmd)) {
			err = apply_to_pte_range(mm, pmd, addr, next, fn, data,
-						 create);
+						 create, mask);
			if (err)
				break;
		}
@@ -2270,14 +2274,15 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
 
 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
				     unsigned long addr, unsigned long end,
-				     pte_fn_t fn, void *data, bool create)
+				     pte_fn_t fn, void *data, bool create,
+				     pgtbl_mod_mask *mask)
 {
	pud_t *pud;
	unsigned long next;
	int err = 0;
 
	if (create) {
-		pud = pud_alloc(mm, p4d, addr);
+		pud = pud_alloc_track(mm, p4d, addr, mask);
		if (!pud)
			return -ENOMEM;
	} else {
@@ -2287,7 +2292,7 @@ static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
		next = pud_addr_end(addr, end);
		if (create || !pud_none_or_clear_bad(pud)) {
			err = apply_to_pmd_range(mm, pud, addr, next, fn, data,
-						 create);
+						 create, mask);
			if (err)
				break;
		}
@@ -2297,14 +2302,15 @@ static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
 
 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
				     unsigned long addr, unsigned long end,
-				     pte_fn_t fn, void *data, bool create)
+				     pte_fn_t fn, void *data, bool create,
+				     pgtbl_mod_mask *mask)
 {
	p4d_t *p4d;
	unsigned long next;
	int err = 0;
 
	if (create) {
-		p4d = p4d_alloc(mm, pgd, addr);
+		p4d = p4d_alloc_track(mm, pgd, addr, mask);
		if (!p4d)
			return -ENOMEM;
	} else {
@@ -2314,7 +2320,7 @@ static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
		next = p4d_addr_end(addr, end);
		if (create || !p4d_none_or_clear_bad(p4d)) {
			err = apply_to_pud_range(mm, p4d, addr, next, fn, data,
-						 create);
+						 create, mask);
			if (err)
				break;
		}
@@ -2327,8 +2333,9 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
				 void *data, bool create)
 {
	pgd_t *pgd;
-	unsigned long next;
+	unsigned long start = addr, next;
	unsigned long end = addr + size;
+	pgtbl_mod_mask mask = 0;
	int err = 0;
 
	if (WARN_ON(addr >= end))
@@ -2339,11 +2346,14 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
		next = pgd_addr_end(addr, end);
		if (!create && pgd_none_or_clear_bad(pgd))
			continue;
-		err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create);
+		err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
 
+	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
+		arch_sync_kernel_mappings(start, start + size);
+
	return err;
 }
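The helpers these hunks lean on (pte_alloc_kernel_track(), pmd/pud/p4d_alloc_track(), the PGTBL_*_MODIFIED bits, ARCH_PAGE_TABLE_SYNC_MASK and arch_sync_kernel_mappings()) come from the companion header changes and are not shown in this file. As a rough, self-contained sketch of the idea rather than the kernel implementation, the userspace C model below shows how a page-table walk can record which levels it touched in a mask and issue a single sync afterwards only if the architecture cares about those levels; the bit values, the example ARCH_PAGE_TABLE_SYNC_MASK, the alloc_track() stand-in and the printf-based sync function are illustrative assumptions, not kernel code.

/*
 * Minimal userspace model of the pgtbl_mod_mask pattern: each page-table
 * level that gains a new entry sets a bit in a mask, and one sync pass
 * runs at the end only when an "interesting" level was modified.
 */
#include <stdio.h>

typedef unsigned int pgtbl_mod_mask;

#define PGTBL_PGD_MODIFIED	(1u << 0)
#define PGTBL_P4D_MODIFIED	(1u << 1)
#define PGTBL_PUD_MODIFIED	(1u << 2)
#define PGTBL_PMD_MODIFIED	(1u << 3)
#define PGTBL_PTE_MODIFIED	(1u << 4)

/* Pretend this architecture only needs a sync when PGD/P4D entries change. */
#define ARCH_PAGE_TABLE_SYNC_MASK	(PGTBL_PGD_MODIFIED | PGTBL_P4D_MODIFIED)

/* Stand-in for the arch hook; here it just reports the range. */
static void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
	printf("sync kernel mappings for [%#lx, %#lx)\n", start, end);
}

/* Stand-in for one level of the walk allocating a new table. */
static void alloc_track(pgtbl_mod_mask *mask, pgtbl_mod_mask level_bit)
{
	*mask |= level_bit;
}

int main(void)
{
	unsigned long start = 0xc0000000UL, size = 0x200000UL;
	pgtbl_mod_mask mask = 0;

	/* The walk "allocated" a PMD and populated PTEs, but no PGD/P4D entry. */
	alloc_track(&mask, PGTBL_PMD_MODIFIED);
	alloc_track(&mask, PGTBL_PTE_MODIFIED);

	/* Mirrors the tail of __apply_to_page_range() above. */
	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, start + size);
	else
		printf("no sync needed (mask=%#x)\n", mask);

	return 0;
}

With the example mask above no sync is triggered; flipping one of the tracked bits to PGTBL_PGD_MODIFIED would make the final check fire, which is the same decision __apply_to_page_range() now makes once per call instead of per allocation.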