@@ -69,7 +69,18 @@ static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
 	return 0;
 }
 
-int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
+static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
+{
+#if defined(CONFIG_32BIT)
+	csr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
+	csr_write(CSR_VSTIMECMPH, ncycles >> 32);
+#else
+	csr_write(CSR_VSTIMECMP, ncycles);
+#endif
+	return 0;
+}
+
+static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
 {
 	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
 	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
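The new helper above writes the Sstc vstimecmp CSRs directly. The CSR numbers themselves are not part of this hunk; the sketch below shows what the companion definitions in arch/riscv/include/asm/csr.h are assumed to look like, with encodings taken from the Sstc extension specification rather than from this diff.

/* Assumed companion change (not shown in this diff): Sstc
 * virtual-supervisor timer compare CSRs programmed by
 * kvm_riscv_vcpu_update_vstimecmp() above.
 */
#define CSR_VSTIMECMP		0x24d
#define CSR_VSTIMECMPH		0x25d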
@@ -88,6 +99,65 @@ int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
 	return 0;
 }
 
+int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	return t->timer_next_event(vcpu, ncycles);
+}
+
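The wrapper above dispatches through a per-vcpu callback. The struct below is a minimal sketch of what arch/riscv/include/asm/kvm_vcpu_timer.h is assumed to gain alongside this file; the field names mirror the users visible in this diff (t->sstc_enabled, t->timer_next_event), but the exact header change is not shown here.

struct kvm_vcpu_timer {
	/* ... existing fields used in this diff: init_done, next_set,
	 *     next_cycles, hrt ...
	 */

	/* Flag to check if Sstc is enabled for this vcpu (assumed addition) */
	bool sstc_enabled;
	/* Programs vstimecmp (Sstc) or arms the hrtimer fallback */
	int (*timer_next_event)(struct kvm_vcpu *vcpu, u64 ncycles);
};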
+static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
+{
+	u64 delta_ns;
+	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
+	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
+	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
+
+	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
+		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
+		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
+		return HRTIMER_RESTART;
+	}
+
+	t->next_set = false;
+	kvm_vcpu_kick(vcpu);
+
+	return HRTIMER_NORESTART;
+}
+
+bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
+
+	if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
+	    kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
+		return true;
+	else
+		return false;
+}
+
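kvm_riscv_vcpu_timer_pending() is non-static, so it is presumably consulted from the generic vcpu paths in arch/riscv/kvm/vcpu.c. A plausible caller, shown here only as an assumption and not as part of this diff, is the pending-timer hook that keeps a blocked vcpu runnable:

/* Assumed caller in arch/riscv/kvm/vcpu.c (not part of this hunk) */
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_timer_pending(vcpu);
}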
+static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
+	u64 delta_ns;
+
+	if (!t->init_done)
+		return;
+
+	delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
+	if (delta_ns) {
+		hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
+		t->next_set = true;
+	}
+}
+
+static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
+{
+	kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
+}
+
 int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
 				 const struct kvm_one_reg *reg)
 {
@@ -180,10 +250,20 @@ int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 
 	hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
 	t->init_done = true;
 	t->next_set = false;
 
+	/* Enable sstc for every vcpu if available in hardware */
+	if (riscv_isa_extension_available(NULL, SSTC)) {
+		t->sstc_enabled = true;
+		t->hrt.function = kvm_riscv_vcpu_vstimer_expired;
+		t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
+	} else {
+		t->sstc_enabled = false;
+		t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
+		t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
+	}
+
 	return 0;
 }
 
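For orientation, the callback selected at init time is ultimately driven by the guest's sbi_set_timer() call. The fragment below is only a sketch of how the SBI TIME extension handler (arch/riscv/kvm/vcpu_sbi_replace.c in mainline) is assumed to hand the requested expiry to the timer code; the surrounding handler signature is elided and the details are an assumption, not part of this diff.

	/* Sketch (assumption): inside the SBI TIME handler, where cp is the
	 * guest register context (&vcpu->arch.guest_context).
	 */
	if (cp->a6 == SBI_EXT_TIME_SET_TIMER) {
#if __riscv_xlen == 32
		next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
#else
		next_cycle = (u64)cp->a0;
#endif
		kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
	}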
@@ -199,21 +279,73 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
 
 int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
 {
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	t->next_cycles = -1ULL;
 	return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
 }
 
-void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
+static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
 {
 	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
 
-#ifdef CONFIG_64BIT
-	csr_write(CSR_HTIMEDELTA, gt->time_delta);
-#else
+#if defined(CONFIG_32BIT)
 	csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
 	csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
+#else
+	csr_write(CSR_HTIMEDELTA, gt->time_delta);
 #endif
 }
 
+void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_csr *csr;
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	kvm_riscv_vcpu_update_timedelta(vcpu);
+
+	if (!t->sstc_enabled)
+		return;
+
+	csr = &vcpu->arch.guest_csr;
+#if defined(CONFIG_32BIT)
+	csr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
+	csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
+#else
+	csr_write(CSR_VSTIMECMP, t->next_cycles);
+#endif
+
+	/* timer should be enabled for the remaining operations */
+	if (unlikely(!t->init_done))
+		return;
+
+	kvm_riscv_vcpu_timer_unblocking(vcpu);
+}
+
+void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_csr *csr;
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	if (!t->sstc_enabled)
+		return;
+
+	csr = &vcpu->arch.guest_csr;
+	t = &vcpu->arch.timer;
+#if defined(CONFIG_32BIT)
+	t->next_cycles = csr_read(CSR_VSTIMECMP);
+	t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
+#else
+	t->next_cycles = csr_read(CSR_VSTIMECMP);
+#endif
+	/* timer should be enabled for the remaining operations */
+	if (unlikely(!t->init_done))
+		return;
+
+	if (kvm_vcpu_is_blocking(vcpu))
+		kvm_riscv_vcpu_timer_blocking(vcpu);
+}
+
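The new save/restore pair only has an effect through callers in the generic vcpu load/put path. The fragment below sketches the assumed wiring in arch/riscv/kvm/vcpu.c via the kvm_arch_vcpu_load()/kvm_arch_vcpu_put() hooks; it is shown for context and is not part of this hunk, and the omitted bodies are elided on purpose.

/* Assumed call sites in arch/riscv/kvm/vcpu.c (not shown in this diff) */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* ... restore other guest CSR state for this physical cpu ... */
	kvm_riscv_vcpu_timer_restore(vcpu);	/* re-arm vstimecmp/htimedelta */
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_timer_save(vcpu);	/* snapshot vstimecmp, maybe arm hrtimer */
	/* ... save other guest CSR state ... */
}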
 void kvm_riscv_guest_timer_init(struct kvm *kvm)
 {
 	struct kvm_guest_timer *gt = &kvm->arch.timer;