 use crate::TdxBacked;
 use crate::UhProcessor;
+use atomic_ringbuf::AtomicRingBuffer;
 use hcl::GuestVtl;
 use hcl::ioctl::ProcessorRunner;
 use hcl::ioctl::tdx::Tdx;
 use hvdef::hypercall::HvGvaRange;
 use inspect::Inspect;
-use parking_lot::Mutex;
-use parking_lot::MutexGuard;
 use safeatomic::AtomicSliceOps;
 use std::num::Wrapping;
 use std::sync::atomic::AtomicU32;
-use std::sync::atomic::AtomicU64;
-use std::sync::atomic::AtomicUsize;
 use std::sync::atomic::Ordering;
 use x86defs::tdx::TdGlaVmAndFlags;
 use x86defs::tdx::TdxGlaListInfo;
@@ -29,7 +26,7 @@ pub(super) const FLUSH_GVA_LIST_SIZE: usize = 32;
 #[derive(Debug, Inspect)]
 pub(super) struct TdxPartitionFlushState {
     /// A fixed-size ring buffer of GVAs that need to be flushed.
-    pub(super) gva_list: AtomicTlbRingBuffer,
+    pub(super) gva_list: AtomicRingBuffer<FLUSH_GVA_LIST_SIZE, HvGvaRange>,
     /// The number of times an entire TLB flush has been requested.
     pub(super) flush_entire_counter: AtomicU32,
     /// The number of times a non-global TLB flush has been requested.
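For readers unfamiliar with the `atomic_ringbuf` support crate: its exact API is not visible in this diff, but the call sites below imply a const-generic ring buffer with `new()`, a lifetime `count()` returned as `Wrapping<usize>`, and a fallible `try_copy()`. The following single-threaded stand-in only illustrates that inferred calling convention; the names and signatures are assumptions drawn from this diff, not the crate's real, lock-free implementation.

```rust
use std::num::Wrapping;

/// Simplified, single-threaded stand-in for the inferred interface.
/// The real type uses atomics and interior mutability; this sketch only
/// mirrors the method shapes used by this file.
struct RingBufferSketch<const N: usize, T> {
    buffer: [T; N],
    /// Items pushed over the buffer's lifetime; only the most recent `N`
    /// of them are still stored.
    count: Wrapping<usize>,
}

impl<const N: usize, T: Copy + Default> RingBufferSketch<N, T> {
    fn new() -> Self {
        Self {
            buffer: [T::default(); N],
            count: Wrapping(0),
        }
    }

    /// Lifetime count, used for the quick "anything new?" comparison.
    fn count(&self) -> Wrapping<usize> {
        self.count
    }

    /// Append items, overwriting the oldest slots once the ring is full.
    fn extend(&mut self, items: impl ExactSizeIterator<Item = T>) {
        for item in items {
            self.buffer[self.count.0 % N] = item;
            self.count += 1;
        }
    }

    /// Copy the entries starting at lifetime index `start` into `dest`,
    /// failing if that range has already been overwritten.
    fn try_copy(&self, start: usize, dest: &mut [T]) -> bool {
        if (self.count - Wrapping(start)).0 > N {
            return false;
        }
        for (i, slot) in dest.iter_mut().enumerate() {
            *slot = self.buffer[start.wrapping_add(i) % N];
        }
        true
    }
}

fn main() {
    let mut ring: RingBufferSketch<32, u64> = RingBufferSketch::new();
    ring.extend([0x1000u64, 0x2000].into_iter());
    let mut out = [0u64; 2];
    assert!(ring.try_copy(0, &mut out));
    assert_eq!(out, [0x1000, 0x2000]);
    assert_eq!(ring.count(), Wrapping(2));
}
```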
@@ -40,7 +37,7 @@ pub(super) struct TdxPartitionFlushState {
 impl TdxPartitionFlushState {
     pub(super) fn new() -> Self {
         Self {
-            gva_list: AtomicTlbRingBuffer::new(),
+            gva_list: AtomicRingBuffer::new(),
             flush_entire_counter: AtomicU32::new(0),
             flush_entire_non_global_counter: AtomicU32::new(0),
         }
@@ -112,7 +109,7 @@ impl UhProcessor<'_, TdxBacked> {
         if flush_entire_required {
             self_flush_state.flush_entire_counter = Wrapping(partition_flush_entire);
             self_flush_state.flush_entire_non_global_counter = Wrapping(partition_flush_non_global);
-            self_flush_state.gva_list_count = Wrapping(partition_flush_state.gva_list.count());
+            self_flush_state.gva_list_count = partition_flush_state.gva_list.count();
             Self::set_flush_entire(
                 true,
                 &mut self.backing.vtls[target_vtl].private_regs.vp_entry_flags,
@@ -139,7 +136,7 @@ impl UhProcessor<'_, TdxBacked> {
         flush_page: &user_driver::memory::MemoryBlock,
     ) -> bool {
         // Check quickly to see whether any new addresses are in the list.
-        let partition_list_count = Wrapping(partition_flush_state.gva_list.count());
+        let partition_list_count = partition_flush_state.gva_list.count();
         if partition_list_count == *gva_list_count {
             return true;
         }
@@ -151,10 +148,10 @@ impl UhProcessor<'_, TdxBacked> {
         }
 
         // The last `count_diff` addresses are the new ones, copy them locally.
-        let flush_addrs = &mut [0; FLUSH_GVA_LIST_SIZE][..count_diff];
+        let flush_addrs = &mut [HvGvaRange(0); FLUSH_GVA_LIST_SIZE][..count_diff];
         if !partition_flush_state
             .gva_list
-            .try_copy(*gva_list_count, flush_addrs)
+            .try_copy(gva_list_count.0, flush_addrs)
         {
             return false;
         }
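A note on the counters used above: both the per-VP `gva_list_count` and the partition-wide count are `Wrapping<usize>`, so the "how many new entries" subtraction stays correct even across integer overflow, and a gap larger than the ring means the tail can no longer be trusted (the same wraparound bound appears in the removed `try_copy` at the end of this diff). A small self-contained illustration of that arithmetic, with made-up values; only `FLUSH_GVA_LIST_SIZE` and the variable names are taken from this file:

```rust
use std::num::Wrapping;

const FLUSH_GVA_LIST_SIZE: usize = 32;

fn main() {
    // Lifetime counts chosen near the overflow boundary.
    let gva_list_count = Wrapping(usize::MAX - 2); // entries this VP has already flushed
    let partition_list_count = gva_list_count + Wrapping(5); // wraps past usize::MAX

    // Entries this VP has not seen yet; correct despite the wraparound.
    let count_diff = (partition_list_count - gva_list_count).0;
    assert_eq!(count_diff, 5);

    if count_diff > FLUSH_GVA_LIST_SIZE {
        // More than a whole ring of entries arrived since the last flush, so
        // some of them are already overwritten: fall back to a full TLB flush.
        println!("flush entire");
    } else {
        // Copy only the last `count_diff` GVAs and flush them individually.
        println!("flush {count_diff} GVAs");
    }
}
```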
@@ -165,7 +162,7 @@ impl UhProcessor<'_, TdxBacked> {
 
         if count_diff == 1 {
             runner
-                .invgla(gla_flags, TdxGlaListInfo::from(flush_addrs[0]))
+                .invgla(gla_flags, TdxGlaListInfo::from(flush_addrs[0].0))
                 .unwrap();
         } else {
             gla_flags.set_list(true);
@@ -198,91 +195,3 @@ impl UhProcessor<'_, TdxBacked> {
         }
     }
 }
-
-#[derive(Debug, Inspect)]
-pub(super) struct AtomicTlbRingBuffer {
-    /// The contents of the buffer.
-    #[inspect(hex, with = "|x| inspect::iter_by_index(x.iter())")]
-    buffer: Box<[AtomicU64; FLUSH_GVA_LIST_SIZE]>,
-    /// The number of GVAs that have been added over the lifetime of the VM.
-    gva_list_count: AtomicUsize,
-    /// The number of GVAs that have started being added to the list over the
-    /// lifetime of the VM.
-    in_progress_count: AtomicUsize,
-    /// A guard to ensure that only one thread is writing to the list at a time.
-    write_lock: Mutex<()>,
-}
-
-pub(super) struct AtomicTlbRingBufferWriteGuard<'a> {
-    buf: &'a AtomicTlbRingBuffer,
-    _write_lock: MutexGuard<'a, ()>,
-}
-
-impl AtomicTlbRingBuffer {
-    fn new() -> Self {
-        Self {
-            buffer: Box::new(std::array::from_fn(|_| AtomicU64::new(0))),
-            gva_list_count: AtomicUsize::new(0),
-            in_progress_count: AtomicUsize::new(0),
-            write_lock: Mutex::new(()),
-        }
-    }
-
-    fn count(&self) -> usize {
-        self.gva_list_count.load(Ordering::Acquire)
-    }
-
-    pub fn write(&self) -> AtomicTlbRingBufferWriteGuard<'_> {
-        let write_lock = self.write_lock.lock();
-        AtomicTlbRingBufferWriteGuard {
-            buf: self,
-            _write_lock: write_lock,
-        }
-    }
-
-    fn try_copy(&self, start_count: Wrapping<usize>, flush_addrs: &mut [u64]) -> bool {
-        let mut index = start_count;
-        for flush_addr in flush_addrs.iter_mut() {
-            *flush_addr = self.buffer[index.0 % FLUSH_GVA_LIST_SIZE].load(Ordering::Relaxed);
-            index += 1;
-        }
-        std::sync::atomic::fence(Ordering::Acquire);
-
-        // Check to see whether any additional entries have been added
-        // that would have caused a wraparound. If so, the local list is
-        // incomplete and the copy has failed.
-        if (Wrapping(self.in_progress_count.load(Ordering::Acquire)) - start_count).0
-            > FLUSH_GVA_LIST_SIZE
-        {
-            return false;
-        }
-        true
-    }
-}
-
-impl AtomicTlbRingBufferWriteGuard<'_> {
-    pub fn extend(&self, items: impl ExactSizeIterator<Item = HvGvaRange>) {
-        debug_assert_eq!(
-            self.buf.in_progress_count.load(Ordering::Relaxed),
-            self.buf.gva_list_count.load(Ordering::Relaxed)
-        );
-        // Adding a new item to the buffer must be done in three steps:
-        // 1. Indicate that an entry is about to be added so that any flush
-        //    code executing simultaneously will know that it might lose an
-        //    entry that it is expecting to see.
-        // 2. Add the entry.
-        // 3. Increment the valid entry count so that any flush code executing
-        //    simultaneously will know it is valid.
-        let len = items.len();
-        let start_count = self.buf.in_progress_count.load(Ordering::Relaxed);
-        let end_count = start_count.wrapping_add(len);
-        self.buf
-            .in_progress_count
-            .store(end_count, Ordering::Relaxed);
-        for (i, v) in items.enumerate() {
-            self.buf.buffer[(start_count.wrapping_add(i)) % FLUSH_GVA_LIST_SIZE]
-                .store(v.0, Ordering::Release);
-        }
-        self.buf.gva_list_count.store(end_count, Ordering::Release);
-    }
-}
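What the deleted code implemented, and what `atomic_ringbuf::AtomicRingBuffer` is now relied on to provide, is a seqlock-style protocol: a writer advances `in_progress_count` before storing entries and `gva_list_count` after, while readers copy optimistically and then check whether the writer lapped them. Below is a condensed, self-contained sketch of that protocol, reduced to plain `u64` entries and a single-threaded demo; it paraphrases the removed implementation above rather than the crate's actual code.

```rust
use std::sync::atomic::{fence, AtomicU64, AtomicUsize, Ordering};

const SIZE: usize = 32;

struct RingSketch {
    buffer: [AtomicU64; SIZE],
    /// Lifetime count of fully published entries.
    valid_count: AtomicUsize,
    /// Lifetime count of entries whose slots may already be in flux.
    in_progress_count: AtomicUsize,
}

impl RingSketch {
    fn new() -> Self {
        Self {
            buffer: std::array::from_fn(|_| AtomicU64::new(0)),
            valid_count: AtomicUsize::new(0),
            in_progress_count: AtomicUsize::new(0),
        }
    }

    fn count(&self) -> usize {
        self.valid_count.load(Ordering::Acquire)
    }

    /// Writer side; assumed to be serialized externally, as the removed code
    /// did with its `Mutex<()>` write guard.
    fn extend(&self, items: &[u64]) {
        let start = self.in_progress_count.load(Ordering::Relaxed);
        let end = start.wrapping_add(items.len());
        // 1. Announce the slots about to be overwritten so concurrent readers
        //    can tell that their optimistic copy may be torn.
        self.in_progress_count.store(end, Ordering::Relaxed);
        // 2. Publish the entries.
        for (i, &v) in items.iter().enumerate() {
            self.buffer[start.wrapping_add(i) % SIZE].store(v, Ordering::Release);
        }
        // 3. Mark them valid for readers.
        self.valid_count.store(end, Ordering::Release);
    }

    /// Reader side: copy the entries starting at lifetime index `start`,
    /// rejecting the copy if a writer has since lapped the ring.
    fn try_copy(&self, start: usize, dest: &mut [u64]) -> bool {
        for (i, slot) in dest.iter_mut().enumerate() {
            *slot = self.buffer[start.wrapping_add(i) % SIZE].load(Ordering::Relaxed);
        }
        fence(Ordering::Acquire);
        // If the writer moved more than one full ring past `start`, part of
        // what was just read has been overwritten.
        self.in_progress_count.load(Ordering::Acquire).wrapping_sub(start) <= SIZE
    }
}

fn main() {
    let ring = RingSketch::new();
    ring.extend(&[0x1000, 0x2000, 0x3000]);
    let seen = 0;
    let new = ring.count() - seen;
    let mut out = vec![0u64; new];
    assert!(ring.try_copy(seen, &mut out));
    assert_eq!(out, [0x1000, 0x2000, 0x3000]);
}
```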