@@ -37,54 +37,6 @@ static inline u64 kvm_get_parange(u64 mmfr0)
 
 typedef u64 kvm_pte_t;
 
-/*
- * RCU cannot be used in a non-kernel context such as the hyp. As such, page
- * table walkers used in hyp do not call into RCU and instead use other
- * synchronization mechanisms (such as a spinlock).
- */
-#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
-
-typedef kvm_pte_t *kvm_pteref_t;
-
-static inline kvm_pte_t *kvm_dereference_pteref(kvm_pteref_t pteref, bool shared)
-{
-	return pteref;
-}
-
-static inline void kvm_pgtable_walk_begin(void) {}
-static inline void kvm_pgtable_walk_end(void) {}
-
-static inline bool kvm_pgtable_walk_lock_held(void)
-{
-	return true;
-}
-
-#else
-
-typedef kvm_pte_t __rcu *kvm_pteref_t;
-
-static inline kvm_pte_t *kvm_dereference_pteref(kvm_pteref_t pteref, bool shared)
-{
-	return rcu_dereference_check(pteref, !shared);
-}
-
-static inline void kvm_pgtable_walk_begin(void)
-{
-	rcu_read_lock();
-}
-
-static inline void kvm_pgtable_walk_end(void)
-{
-	rcu_read_unlock();
-}
-
-static inline bool kvm_pgtable_walk_lock_held(void)
-{
-	return rcu_read_lock_held();
-}
-
-#endif
-
 #define KVM_PTE_VALID			BIT(0)
 
 #define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
@@ -212,29 +164,6 @@ enum kvm_pgtable_prot {
 typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
					   enum kvm_pgtable_prot prot);
 
-/**
- * struct kvm_pgtable - KVM page-table.
- * @ia_bits:		Maximum input address size, in bits.
- * @start_level:	Level at which the page-table walk starts.
- * @pgd:		Pointer to the first top-level entry of the page-table.
- * @mm_ops:		Memory management callbacks.
- * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
- * @flags:		Stage-2 page-table flags.
- * @force_pte_cb:	Function that returns true if page level mappings must
- *			be used instead of block mappings.
- */
-struct kvm_pgtable {
-	u32					ia_bits;
-	u32					start_level;
-	kvm_pteref_t				pgd;
-	struct kvm_pgtable_mm_ops		*mm_ops;
-
-	/* Stage-2 only */
-	struct kvm_s2_mmu			*mmu;
-	enum kvm_pgtable_stage2_flags		flags;
-	kvm_pgtable_force_pte_cb_t		force_pte_cb;
-};
-
 /**
  * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
  * @KVM_PGTABLE_WALK_LEAF:		Visit leaf entries, including invalid
@@ -285,6 +214,79 @@ struct kvm_pgtable_walker {
	const enum kvm_pgtable_walk_flags	flags;
 };
 
+/*
+ * RCU cannot be used in a non-kernel context such as the hyp. As such, page
+ * table walkers used in hyp do not call into RCU and instead use other
+ * synchronization mechanisms (such as a spinlock).
+ */
+#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
+
+typedef kvm_pte_t *kvm_pteref_t;
+
+static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
+						kvm_pteref_t pteref)
+{
+	return pteref;
+}
+
+static inline void kvm_pgtable_walk_begin(void) {}
+static inline void kvm_pgtable_walk_end(void) {}
+
+static inline bool kvm_pgtable_walk_lock_held(void)
+{
+	return true;
+}
+
+#else
+
+typedef kvm_pte_t __rcu *kvm_pteref_t;
+
+static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
+						kvm_pteref_t pteref)
+{
+	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
+}
+
+static inline void kvm_pgtable_walk_begin(void)
+{
+	rcu_read_lock();
+}
+
+static inline void kvm_pgtable_walk_end(void)
+{
+	rcu_read_unlock();
+}
+
+static inline bool kvm_pgtable_walk_lock_held(void)
+{
+	return rcu_read_lock_held();
+}
+
+#endif
+
+/**
+ * struct kvm_pgtable - KVM page-table.
+ * @ia_bits:		Maximum input address size, in bits.
+ * @start_level:	Level at which the page-table walk starts.
+ * @pgd:		Pointer to the first top-level entry of the page-table.
+ * @mm_ops:		Memory management callbacks.
+ * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
+ * @flags:		Stage-2 page-table flags.
+ * @force_pte_cb:	Function that returns true if page level mappings must
+ *			be used instead of block mappings.
+ */
+struct kvm_pgtable {
+	u32					ia_bits;
+	u32					start_level;
+	kvm_pteref_t				pgd;
+	struct kvm_pgtable_mm_ops		*mm_ops;
+
+	/* Stage-2 only */
+	struct kvm_s2_mmu			*mmu;
+	enum kvm_pgtable_stage2_flags		flags;
+	kvm_pgtable_force_pte_cb_t		force_pte_cb;
+};
+
 /**
  * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
  * @pgt:	Uninitialised page-table structure to initialise.
0 commit comments