@@ -271,7 +271,6 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
 	struct sbiret ret;
 	int idx;
 	uint64_t cbase = 0;
-	uint64_t cmask = GENMASK_ULL(rvpmu->num_counters - 1, 0);
 	unsigned long cflags = 0;
 
 	if (event->attr.exclude_kernel)
@@ -281,11 +280,12 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
 
 	/* retrieve the available counter index */
 #if defined(CONFIG_32BIT)
-	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
-			cflags, hwc->event_base, hwc->config, hwc->config >> 32);
+	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
+			rvpmu->cmask, cflags, hwc->event_base, hwc->config,
+			hwc->config >> 32);
 #else
-	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
-			cflags, hwc->event_base, hwc->config, 0);
+	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
+			rvpmu->cmask, cflags, hwc->event_base, hwc->config, 0);
 #endif
 	if (ret.error) {
 		pr_debug("Not able to find a counter for event %lx config %llx\n",
@@ -294,7 +294,7 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
 	}
 
 	idx = ret.value;
-	if (idx >= rvpmu->num_counters || !pmu_ctr_list[idx].value)
+	if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value)
 		return -ENOENT;
 
 	/* Additional sanity check for the counter id */
@@ -463,7 +463,7 @@ static int pmu_sbi_find_num_ctrs(void)
 	return sbi_err_map_linux_errno(ret.error);
 }
 
-static int pmu_sbi_get_ctrinfo(int nctr)
+static int pmu_sbi_get_ctrinfo(int nctr, unsigned long *mask)
 {
 	struct sbiret ret;
 	int i, num_hw_ctr = 0, num_fw_ctr = 0;
@@ -478,6 +478,9 @@ static int pmu_sbi_get_ctrinfo(int nctr)
 		if (ret.error)
 			/* The logical counter ids are not expected to be contiguous */
 			continue;
+
+		*mask |= BIT(i);
+
 		cinfo.value = ret.value;
 		if (cinfo.type == SBI_PMU_CTR_TYPE_FW)
 			num_fw_ctr++;
@@ -498,7 +501,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
 	 * which may include counters that are not enabled yet.
 	 */
 	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
-		  0, GENMASK_ULL(pmu->num_counters - 1, 0), 0, 0, 0, 0);
+		  0, pmu->cmask, 0, 0, 0, 0);
 }
 
 static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
@@ -788,8 +791,9 @@ static void riscv_pmu_destroy(struct riscv_pmu *pmu)
 static int pmu_sbi_device_probe(struct platform_device *pdev)
 {
 	struct riscv_pmu *pmu = NULL;
-	int num_counters;
+	unsigned long cmask = 0;
 	int ret = -ENODEV;
+	int num_counters;
 
 	pr_info("SBI PMU extension is available\n");
 	pmu = riscv_pmu_alloc();
@@ -803,7 +807,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
 	}
 
 	/* cache all the information about counters now */
-	if (pmu_sbi_get_ctrinfo(num_counters))
+	if (pmu_sbi_get_ctrinfo(num_counters, &cmask))
 		goto out_free;
 
 	ret = pmu_sbi_setup_irqs(pmu, pdev);
@@ -812,8 +816,9 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
 		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
 	}
+
 	pmu->pmu.attr_groups = riscv_pmu_attr_groups;
-	pmu->num_counters = num_counters;
+	pmu->cmask = cmask;
 	pmu->ctr_start = pmu_sbi_ctr_start;
 	pmu->ctr_stop = pmu_sbi_ctr_stop;
 	pmu->event_map = pmu_sbi_event_map;
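The patch replaces a plain counter count with a bitmask of valid counter indices: the mask is built while probing each counter in pmu_sbi_get_ctrinfo() and is later consulted (via test_bit()) when validating the index returned by the SBI firmware, instead of a simple upper-bound check that would wrongly accept holes in a non-contiguous counter ID space. Below is a minimal, self-contained userspace sketch of that pattern; the names (probe_counter, NUM_SLOTS) are hypothetical and it uses plain bit operations rather than the kernel's BIT()/test_bit() helpers, so it illustrates the idea only and is not the driver code.

	/*
	 * Sketch: build a bitmask of usable counter indices by probing each
	 * slot, then use mask membership instead of an upper-bound check.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define NUM_SLOTS 8	/* assumed number of counter slots to probe */

	/* Stand-in for the per-counter probe; pretend slots 2 and 5 are absent. */
	static bool probe_counter(int i)
	{
		return i != 2 && i != 5;
	}

	int main(void)
	{
		unsigned long cmask = 0;
		int i;

		for (i = 0; i < NUM_SLOTS; i++)
			if (probe_counter(i))
				cmask |= 1UL << i;	/* analogous to *mask |= BIT(i) */

		/* Membership test, analogous to test_bit(idx, &rvpmu->cmask). */
		for (i = 0; i < NUM_SLOTS; i++)
			printf("counter %d usable: %s\n", i,
			       (cmask & (1UL << i)) ? "yes" : "no");

		return 0;
	}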