@@ -15,24 +15,56 @@
 #define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
 #endif
 
+struct tlb_state {
+#ifdef CONFIG_SMP
+	struct mm_struct *active_mm;
+	int state;
+#endif
+
+	/*
+	 * Access to this CR4 shadow and to H/W CR4 is protected by
+	 * disabling interrupts when modifying either one.
+	 */
+	unsigned long cr4;
+};
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
+
+/* Initialize cr4 shadow for this CPU. */
+static inline void cr4_init_shadow(void)
+{
+	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
+}
+
 /* Set in this cpu's CR4. */
 static inline void cr4_set_bits(unsigned long mask)
 {
 	unsigned long cr4;
 
-	cr4 = read_cr4();
-	cr4 |= mask;
-	write_cr4(cr4);
+	cr4 = this_cpu_read(cpu_tlbstate.cr4);
+	if ((cr4 | mask) != cr4) {
+		cr4 |= mask;
+		this_cpu_write(cpu_tlbstate.cr4, cr4);
+		__write_cr4(cr4);
+	}
 }
 
 /* Clear in this cpu's CR4. */
 static inline void cr4_clear_bits(unsigned long mask)
 {
 	unsigned long cr4;
 
-	cr4 = read_cr4();
-	cr4 &= ~mask;
-	write_cr4(cr4);
+	cr4 = this_cpu_read(cpu_tlbstate.cr4);
+	if ((cr4 & ~mask) != cr4) {
+		cr4 &= ~mask;
+		this_cpu_write(cpu_tlbstate.cr4, cr4);
+		__write_cr4(cr4);
+	}
+}
+
+/* Read the CR4 shadow. */
+static inline unsigned long cr4_read_shadow(void)
+{
+	return this_cpu_read(cpu_tlbstate.cr4);
 }
 
 /*
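The hunk above replaces a direct CR4 read-modify-write with a per-CPU shadow: reads come from the cached copy, and the comparatively expensive register write is skipped entirely when no bit actually changes. A minimal user-space sketch of that pattern, assuming hw_cr4 and hw_write_cr4() as hypothetical stand-ins for the real register and its write path:

/*
 * Sketch of the shadow pattern, not kernel code. hw_cr4 and
 * hw_write_cr4() model a slow hardware register; cr4_shadow plays
 * the role of the per-CPU cpu_tlbstate.cr4 field in the patch.
 */
#include <stdio.h>

static unsigned long hw_cr4;		/* stand-in for the real CR4 */
static unsigned long cr4_shadow;	/* stand-in for cpu_tlbstate.cr4 */

static void hw_write_cr4(unsigned long val) { hw_cr4 = val; }

static void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4 = cr4_shadow;		/* cheap: no register read */

	if ((cr4 | mask) != cr4) {		/* would any bit change? */
		cr4 |= mask;
		cr4_shadow = cr4;		/* keep the shadow coherent */
		hw_write_cr4(cr4);		/* slow write, only if needed */
	}
}

int main(void)
{
	cr4_shadow = hw_cr4 = 0x80;	/* e.g. PGE already set */
	cr4_set_bits(0x80);		/* no-op: bit already set, no write */
	cr4_set_bits(0x4);		/* new bit: shadow and register updated */
	printf("cr4 = %#lx\n", hw_cr4);	/* prints 0x84 */
	return 0;
}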
@@ -61,7 +93,7 @@ static inline void __native_flush_tlb_global_irq_disabled(void)
 {
 	unsigned long cr4;
 
-	cr4 = native_read_cr4();
+	cr4 = this_cpu_read(cpu_tlbstate.cr4);
 	/* clear PGE */
 	native_write_cr4(cr4 & ~X86_CR4_PGE);
 	/* write old PGE again and flush TLBs */
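This hunk only changes where the old CR4 value comes from; the flush mechanism is untouched. It works because clearing CR4.PGE invalidates all TLB entries, including global ones, and restoring it flushes again. Assuming the standard remainder of the function in this tree (the trailing write-back is cut off by the diff context), the patched helper reads as:

static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE: invalidates everything, global entries included */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}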
@@ -221,12 +253,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2
 
-struct tlb_state {
-	struct mm_struct *active_mm;
-	int state;
-};
-DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
-
 static inline void reset_lazy_tlbstate(void)
 {
 	this_cpu_write(cpu_tlbstate.state, 0);
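This last hunk is the other half of the move: struct tlb_state now lives at the top of the header so the cr4 shadow shares the cache-aligned per-CPU structure with the lazy-TLB fields, which become CONFIG_SMP-only, and cpu_tlbstate exists on UP builds as well. Code that previously had to read CR4 from hardware to test a bit can use the shadow instead; a hypothetical caller (kernel_uses_global_pages() is illustrative, not part of the patch):

/* Hypothetical caller: test a CR4 bit without touching the register. */
static inline bool kernel_uses_global_pages(void)
{
	return !!(cr4_read_shadow() & X86_CR4_PGE);
}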