@@ -141,8 +141,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 
 	kvm_riscv_vcpu_aia_reset(vcpu);
 
-	WRITE_ONCE(vcpu->arch.irqs_pending, 0);
-	WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
+	bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
+	bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);
 
 	kvm_riscv_vcpu_pmu_reset(vcpu);
 
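Note: this hunk presumes that irqs_pending and irqs_pending_mask were converted, earlier in this series, from plain unsigned long fields into bitmaps sized by KVM_RISCV_VCPU_NR_IRQS, which is why WRITE_ONCE() is replaced by bitmap_zero(). A minimal kernel-context sketch of what the converted fields would look like (the member names match the diff; the struct name and the width 64 are assumptions):

	/* Sketch only: the real declarations live in
	 * arch/riscv/include/asm/kvm_host.h with many more members. */
	#include <linux/types.h>	/* DECLARE_BITMAP() */
	#include <linux/bitmap.h>	/* bitmap_zero() */

	#define KVM_RISCV_VCPU_NR_IRQS	64	/* assumed bitmap width */

	struct vcpu_arch_sketch {
		/* VCPU interrupts: bitmaps now, so set_bit()/clear_bit()
		 * take the array directly instead of the address of a
		 * single word, as the later hunks show. */
		DECLARE_BITMAP(irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
		DECLARE_BITMAP(irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);
	};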
@@ -474,6 +474,7 @@ static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
 	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
 		kvm_riscv_vcpu_flush_interrupts(vcpu);
 		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
+		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
 	} else
 		*out_val = ((unsigned long *)csr)[reg_num];
 
@@ -497,7 +498,7 @@ static inline int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
 	((unsigned long *)csr)[reg_num] = reg_val;
 
 	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
-		WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
+		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);
 
 	return 0;
 }
@@ -799,9 +800,9 @@ void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
 	unsigned long mask, val;
 
-	if (READ_ONCE(vcpu->arch.irqs_pending_mask)) {
-		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0);
-		val = READ_ONCE(vcpu->arch.irqs_pending) & mask;
+	if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) {
+		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0);
+		val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask;
 
 		csr->hvip &= ~mask;
 		csr->hvip |= val;
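The flush path above still operates only on word [0] of the bitmaps, since hvip can hold at most XLEN local interrupts; higher words are presumably covered by the AIA paths (cf. the "Check AIA high interrupts" comment in the last hunk). A hedged sketch of the consume side of this protocol, with C11 atomics standing in for xchg_acquire()/READ_ONCE(); every name here is illustrative, not the kernel code:

	#include <stdatomic.h>

	static unsigned long flush_word0_sketch(atomic_ulong *pending_mask,
						atomic_ulong *pending,
						unsigned long *hvip)
	{
		/* Atomically claim every bit published since the last flush. */
		unsigned long mask = atomic_exchange_explicit(pending_mask, 0,
							      memory_order_acquire);
		/* The acquire above pairs with the producer-side barrier, so
		 * this read observes the pending bit that was set first. */
		unsigned long val = atomic_load_explicit(pending,
							 memory_order_relaxed) & mask;

		*hvip &= ~mask;	/* clear bits the producer retracted */
		*hvip |= val;	/* latch bits the producer raised */
		return mask;
	}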
@@ -825,12 +826,12 @@ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
 	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
 		if (hvip & (1UL << IRQ_VS_SOFT)) {
 			if (!test_and_set_bit(IRQ_VS_SOFT,
-					      &v->irqs_pending_mask))
-				set_bit(IRQ_VS_SOFT, &v->irqs_pending);
+					      v->irqs_pending_mask))
+				set_bit(IRQ_VS_SOFT, v->irqs_pending);
 		} else {
 			if (!test_and_set_bit(IRQ_VS_SOFT,
-					      &v->irqs_pending_mask))
-				clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
+					      v->irqs_pending_mask))
+				clear_bit(IRQ_VS_SOFT, v->irqs_pending);
 		}
 	}
 
@@ -843,14 +844,20 @@ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
 
 int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
 {
-	if (irq != IRQ_VS_SOFT &&
+	/*
+	 * We only allow VS-mode software, timer, and external
+	 * interrupts when irq is one of the local interrupts
+	 * defined by RISC-V privilege specification.
+	 */
+	if (irq < IRQ_LOCAL_MAX &&
+	    irq != IRQ_VS_SOFT &&
 	    irq != IRQ_VS_TIMER &&
 	    irq != IRQ_VS_EXT)
 		return -EINVAL;
 
-	set_bit(irq, &vcpu->arch.irqs_pending);
+	set_bit(irq, vcpu->arch.irqs_pending);
 	smp_mb__before_atomic();
-	set_bit(irq, &vcpu->arch.irqs_pending_mask);
+	set_bit(irq, vcpu->arch.irqs_pending_mask);
 
 	kvm_vcpu_kick(vcpu);
 
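And the matching produce side: the pending bit must become visible before the mask bit, otherwise the flush sketch above could claim a mask bit and read a stale pending word. The kernel expresses this with smp_mb__before_atomic(); a C11 release fence is the closest portable analogue. Again a single-word sketch under the same assumed names, not the kernel implementation:

	#include <stdatomic.h>

	static void set_irq_sketch(atomic_ulong *pending,
				   atomic_ulong *pending_mask,
				   unsigned int irq)
	{
		/* 1. Publish the new pending state... */
		atomic_fetch_or_explicit(pending, 1UL << irq,
					 memory_order_relaxed);
		/* 2. ...order it before the mask update (the kernel uses
		 * smp_mb__before_atomic() here)... */
		atomic_thread_fence(memory_order_release);
		/* 3. ...then tell the flush path which bit changed. */
		atomic_fetch_or_explicit(pending_mask, 1UL << irq,
					 memory_order_relaxed);
	}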
@@ -859,25 +866,33 @@ int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
 
 int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
 {
-	if (irq != IRQ_VS_SOFT &&
+	/*
+	 * We only allow VS-mode software, timer, and external
+	 * interrupts when irq is one of the local interrupts
+	 * defined by RISC-V privilege specification.
+	 */
+	if (irq < IRQ_LOCAL_MAX &&
+	    irq != IRQ_VS_SOFT &&
 	    irq != IRQ_VS_TIMER &&
 	    irq != IRQ_VS_EXT)
 		return -EINVAL;
 
-	clear_bit(irq, &vcpu->arch.irqs_pending);
+	clear_bit(irq, vcpu->arch.irqs_pending);
 	smp_mb__before_atomic();
-	set_bit(irq, &vcpu->arch.irqs_pending_mask);
+	set_bit(irq, vcpu->arch.irqs_pending_mask);
 
 	return 0;
 }
 
-bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask)
+bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
 {
 	unsigned long ie;
 
 	ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
-		<< VSIP_TO_HVIP_SHIFT) & mask;
-	if (READ_ONCE(vcpu->arch.irqs_pending) & ie)
+		<< VSIP_TO_HVIP_SHIFT) & (unsigned long)mask;
+	ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK &
+		(unsigned long)mask;
+	if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie)
 		return true;
 
 	/* Check AIA high interrupts */
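The widened u64 mask together with the extra ie term is what lets non-standard local interrupts (the vsie bits that ~IRQ_LOCAL_MASK keeps, presumably those at or above IRQ_LOCAL_MAX) participate in wakeup checks at their native bit positions. A worked sketch of the computation with illustrative constants; the real VSIP_*/IRQ_* values come from the RISC-V headers and may differ:

	#include <stdint.h>

	#define VSIP_TO_HVIP_SHIFT_X	1	/* IRQ_VS_SOFT - IRQ_S_SOFT */
	#define VSIP_VALID_MASK_X	((1UL << 1) | (1UL << 5) | (1UL << 9))
	#define IRQ_LOCAL_MASK_X	((1UL << 16) - 1)	/* assumed range */

	static unsigned long enabled_irqs_sketch(unsigned long vsie,
						 uint64_t mask)
	{
		unsigned long ie;

		/* Standard S-level bits, shifted up to their HVIP positions. */
		ie = ((vsie & VSIP_VALID_MASK_X) << VSIP_TO_HVIP_SHIFT_X)
			& (unsigned long)mask;
		/* Non-standard local interrupts stay at their native
		 * positions in vsie. */
		ie |= vsie & ~IRQ_LOCAL_MASK_X & (unsigned long)mask;
		return ie;
	}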