114 | 114 | * returns the smallest TLB entry size unmapped in this range.
115 | 115 | *
116 | 116 | * If an architecture does not provide tlb_flush() a default implementation
117 | | - * based on flush_tlb_range() will be used. |
| 117 | + * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is |
| 118 | + * specified, in which case we'll default to flush_tlb_mm(). |
118 | 119 | *
119 | 120 | * Additionally there are a few opt-in features:
120 | 121 | *

140 | 141 | * the page-table pages. Required if you use HAVE_RCU_TABLE_FREE and your
141 | 142 | * architecture uses the Linux page-tables natively.
142 | 143 | *
| 144 | + * MMU_GATHER_NO_RANGE |
| 145 | + * |
| 146 | + * Use this if your architecture lacks an efficient flush_tlb_range(). |
143 | 147 | */
144 | 148 | #define HAVE_GENERIC_MMU_GATHER
145 | 149 |
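As an aside for readers new to this file, here is a rough sketch, not part of the patch, of how a caller drives the generic mmu_gather; the function name my_zap_demo() is invented, and the four-argument tlb_gather_mmu()/tlb_finish_mmu() signatures are the ones in use around the time of this change (later kernels changed them). Whichever tlb_flush() ends up selected, be it an architecture's own, the flush_tlb_range() based default, or the flush_tlb_mm() based MMU_GATHER_NO_RANGE variant added below, it is reached from tlb_finish_mmu() via tlb_flush_mmu_tlbonly() once something has actually been unmapped.

#include <linux/mm.h>
#include <asm/tlb.h>

/* Illustrative sketch, not part of this patch: my_zap_demo() is a made-up caller. */
static void my_zap_demo(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* begin gathering over [start, end) */
	/*
	 * Page-table teardown would go here; every range that actually gets
	 * unmapped widens tlb.start/tlb.end via __tlb_adjust_range(), so a
	 * non-zero tlb.end records that a TLB flush is owed.
	 */
	tlb_finish_mmu(&tlb, start, end);	/* -> tlb_flush_mmu() -> tlb_flush_mmu_tlbonly() -> tlb_flush() */
}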
@@ -302,12 +306,45 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
302 | 306 | */
303 | 307 | }
304 | 308 |
| 309 | +#ifdef CONFIG_MMU_GATHER_NO_RANGE |
| 310 | + |
| 311 | +#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma) |
| 312 | +#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma() |
| 313 | +#endif |
| 314 | + |
| 315 | +/* |
| 316 | + * When an architecture does not have efficient means of range flushing TLBs, |
| 317 | + * there is no point in doing intermediate flushes on tlb_end_vma() to keep the |
| 318 | + * range small. We equally don't have to worry about page granularity or other |
| 319 | + * things. |
| 320 | + * |
| 321 | + * All we need to do is issue a full flush for any !0 range. |
| 322 | + */ |
| 323 | +static inline void tlb_flush(struct mmu_gather *tlb) |
| 324 | +{ |
| 325 | + if (tlb->end) |
| 326 | + flush_tlb_mm(tlb->mm); |
| 327 | +} |
| 328 | + |
| 329 | +static inline void |
| 330 | +tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { } |
| 331 | + |
| 332 | +#define tlb_end_vma tlb_end_vma |
| 333 | +static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { } |
| 334 | + |
| 335 | +#else /* CONFIG_MMU_GATHER_NO_RANGE */ |
305 | 337 | #ifndef tlb_flush
306 | 338 |
307 | 339 | #if defined(tlb_start_vma) || defined(tlb_end_vma)
308 | 340 | #error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
309 | 341 | #endif
310 | 342 |
| 343 | +/* |
| 344 | + * When an architecture does not provide its own tlb_flush() implementation |
| 345 | + * but does have a reasonably efficient flush_tlb_range() implementation, |
| 346 | + * use that. |
| 347 | + */ |
311 | 348 | static inline void tlb_flush(struct mmu_gather *tlb)
312 | 349 | {
313 | 350 | if (tlb->fullmm || tlb->need_flush_all) {
@@ -348,6 +385,8 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
348 | 385 |
349 | 386 | #endif
350 | 387 |
| 388 | +#endif /* CONFIG_MMU_GATHER_NO_RANGE */ |
| 389 | + |
351 | 390 | static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
352 | 391 | {
353 | 392 | if (!tlb->end)
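To see what the MMU_GATHER_NO_RANGE trade-off means in practice: with it selected, unmapping even a handful of pages from one VMA ends in flush_tlb_mm() over the whole address space. That is only a sensible default when the architecture's flush_tlb_range() could not have done any better, typically because the hardware has no ranged invalidate and the "ranged" flush would have been a full flush anyway. A hypothetical illustration of such an architecture, not taken from any real port:

#include <linux/mm_types.h>
#include <asm/tlbflush.h>

/*
 * Hypothetical arch helper, for illustration only.  Without a ranged TLB
 * invalidate instruction the start/end arguments cannot be honoured, so
 * the ranged flush degenerates into a full-mm flush.  On such a platform
 * MMU_GATHER_NO_RANGE simply stops the generic code from tracking a range
 * that nothing can use.
 */
static inline void example_flush_tlb_range(struct vm_area_struct *vma,
					   unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}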