 #include <asm/csr.h>
 #include <asm/kvm_vcpu_sbi.h>
 #include <asm/kvm_vcpu_pmu.h>
+#include <asm/sbi.h>
 #include <linux/bitops.h>
 
 #define kvm_pmu_num_counters(pmu) ((pmu)->num_hw_ctrs + (pmu)->num_fw_ctrs)
@@ -311,6 +312,80 @@ int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
         return ret;
 }
 
+static void kvm_pmu_clear_snapshot_area(struct kvm_vcpu *vcpu)
+{
+        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
+        int snapshot_area_size = sizeof(struct riscv_pmu_snapshot_data);
+
+        if (kvpmu->sdata) {
+                if (kvpmu->snapshot_addr != INVALID_GPA) {
+                        memset(kvpmu->sdata, 0, snapshot_area_size);
+                        kvm_vcpu_write_guest(vcpu, kvpmu->snapshot_addr,
+                                             kvpmu->sdata, snapshot_area_size);
+                } else {
+                        pr_warn("snapshot address invalid\n");
+                }
+                kfree(kvpmu->sdata);
+                kvpmu->sdata = NULL;
+        }
+        kvpmu->snapshot_addr = INVALID_GPA;
+}
+
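For context, the area being zeroed and handed back to the guest here is the per-hart shared memory defined by the SBI v2.0 PMU snapshot extension. Its layout comes from `<asm/sbi.h>` (newly included above) and is declared along these lines:

```c
/* Per-hart PMU snapshot area (one 4 KiB page), per the SBI v2.0 spec */
struct riscv_pmu_snapshot_data {
        u64 ctr_overflow_mask;  /* bitmap of counters that overflowed */
        u64 ctr_values[64];     /* latest value of each counter */
        u64 reserved[447];      /* pads the structure to 4096 bytes */
};
```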
+int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long saddr_low,
+                                          unsigned long saddr_high, unsigned long flags,
+                                          struct kvm_vcpu_sbi_return *retdata)
+{
+        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
+        int snapshot_area_size = sizeof(struct riscv_pmu_snapshot_data);
+        int sbiret = 0;
+        gpa_t saddr;
+        unsigned long hva;
+        bool writable;
+
+        if (!kvpmu || flags) {
+                sbiret = SBI_ERR_INVALID_PARAM;
+                goto out;
+        }
+
+        if (saddr_low == SBI_SHMEM_DISABLE && saddr_high == SBI_SHMEM_DISABLE) {
+                kvm_pmu_clear_snapshot_area(vcpu);
+                return 0;
+        }
+
+        saddr = saddr_low;
+
+        if (saddr_high != 0) {
+                if (IS_ENABLED(CONFIG_32BIT)) {
+                        saddr |= ((gpa_t)saddr_high << 32);
+                } else {
+                        sbiret = SBI_ERR_INVALID_ADDRESS;
+                        goto out;
+                }
+        }
+
+        hva = kvm_vcpu_gfn_to_hva_prot(vcpu, saddr >> PAGE_SHIFT, &writable);
+        if (kvm_is_error_hva(hva) || !writable) {
+                sbiret = SBI_ERR_INVALID_ADDRESS;
+                goto out;
+        }
+
+        kvpmu->sdata = kzalloc(snapshot_area_size, GFP_ATOMIC);
+        if (!kvpmu->sdata)
+                return -ENOMEM;
+
+        if (kvm_vcpu_write_guest(vcpu, saddr, kvpmu->sdata, snapshot_area_size)) {
+                kfree(kvpmu->sdata);
+                kvpmu->sdata = NULL;
+                sbiret = SBI_ERR_FAILURE;
+                goto out;
+        }
+
+        kvpmu->snapshot_addr = saddr;
+
+out:
+        retdata->err_val = sbiret;
+
+        return 0;
+}
+
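For illustration, a hypothetical guest-side counterpart that registers the shared page. The helper name and `snap_page` are invented for this sketch; `sbi_ecall()`, `sbi_err_map_linux_errno()`, and the SBI constants are the ones Linux already provides. The high/low split deliberately mirrors the `saddr_high` handling above: split on 32-bit, zero on 64-bit.

```c
/* Hypothetical guest helper: register a zeroed, page-aligned buffer */
static int pmu_snapshot_setup(void *snap_page)
{
        phys_addr_t pa = __pa(snap_page);
        unsigned long hi = IS_ENABLED(CONFIG_32BIT) ? (u64)pa >> 32 : 0;
        struct sbiret ret;

        ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
                        (unsigned long)pa, hi, 0 /* flags must be zero */,
                        0, 0, 0);
        return ret.error ? sbi_err_map_linux_errno(ret.error) : 0;
}
```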
 int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu,
                                 struct kvm_vcpu_sbi_return *retdata)
 {
@@ -344,20 +419,38 @@ int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
         int i, pmc_index, sbiret = 0;
         struct kvm_pmc *pmc;
         int fevent_code;
+        bool snap_flag_set = flags & SBI_PMU_START_FLAG_INIT_SNAPSHOT;
 
         if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) {
                 sbiret = SBI_ERR_INVALID_PARAM;
                 goto out;
         }
 
+        if (snap_flag_set) {
+                if (kvpmu->snapshot_addr == INVALID_GPA) {
+                        sbiret = SBI_ERR_NO_SHMEM;
+                        goto out;
+                }
+                if (kvm_vcpu_read_guest(vcpu, kvpmu->snapshot_addr, kvpmu->sdata,
+                                        sizeof(struct riscv_pmu_snapshot_data))) {
+                        pr_warn("Unable to read snapshot shared memory while starting counters\n");
+                        sbiret = SBI_ERR_FAILURE;
+                        goto out;
+                }
+        }
+
         /* Start the counters that have been configured and requested by the guest */
         for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
                 pmc_index = i + ctr_base;
                 if (!test_bit(pmc_index, kvpmu->pmc_in_use))
                         continue;
                 pmc = &kvpmu->pmc[pmc_index];
-                if (flags & SBI_PMU_START_FLAG_SET_INIT_VALUE)
+                if (flags & SBI_PMU_START_FLAG_SET_INIT_VALUE) {
                         pmc->counter_val = ival;
+                } else if (snap_flag_set) {
+                        /* The counter indices in the snapshot are relative to the counter base */
+                        pmc->counter_val = kvpmu->sdata->ctr_values[i];
+                }
+
                 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
                         fevent_code = get_event_code(pmc->event_idx);
                         if (fevent_code >= SBI_PMU_FW_MAX) {
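The guest-side trigger for this path, sketched for illustration (`ctr_base` and `ctr_mask` are assumed to describe already-configured counters): starting with `SBI_PMU_START_FLAG_INIT_SNAPSHOT` asks KVM to seed each counter from `ctr_values[]` in the shared page, so the initial-value argument goes unused.

```c
/* Hypothetical guest-side sketch: resume counters from snapshot values */
struct sbiret ret;

ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START,
                ctr_base,                          /* first counter index */
                ctr_mask,                          /* bitmask relative to ctr_base */
                SBI_PMU_START_FLAG_INIT_SNAPSHOT,  /* take values from shmem */
                0, 0, 0);                          /* initial value ignored */
```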
@@ -398,14 +491,22 @@ int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
 {
         struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
         int i, pmc_index, sbiret = 0;
+        u64 enabled, running;
         struct kvm_pmc *pmc;
         int fevent_code;
+        bool snap_flag_set = flags & SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT;
+        bool shmem_needs_update = false;
 
         if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) {
                 sbiret = SBI_ERR_INVALID_PARAM;
                 goto out;
         }
 
+        if (snap_flag_set && kvpmu->snapshot_addr == INVALID_GPA) {
+                sbiret = SBI_ERR_NO_SHMEM;
+                goto out;
+        }
+
         /* Stop the counters that have been configured and requested by the guest */
         for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
                 pmc_index = i + ctr_base;
@@ -438,12 +539,28 @@ int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
                 } else {
                         sbiret = SBI_ERR_INVALID_PARAM;
                 }
+
+                if (snap_flag_set && !sbiret) {
+                        if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW)
+                                pmc->counter_val = kvpmu->fw_event[fevent_code].value;
+                        else if (pmc->perf_event)
+                                pmc->counter_val += perf_event_read_value(pmc->perf_event,
+                                                                          &enabled, &running);
+                        /* TODO: Add counter overflow support when sscofpmf support is added */
+                        kvpmu->sdata->ctr_values[i] = pmc->counter_val;
+                        shmem_needs_update = true;
+                }
+
                 if (flags & SBI_PMU_STOP_FLAG_RESET) {
                         pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
                         clear_bit(pmc_index, kvpmu->pmc_in_use);
                 }
         }
 
+        if (shmem_needs_update)
+                kvm_vcpu_write_guest(vcpu, kvpmu->snapshot_addr, kvpmu->sdata,
+                                     sizeof(struct riscv_pmu_snapshot_data));
+
 out:
         retdata->err_val = sbiret;
 
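And the matching guest-side stop, again only a sketch (`snap` is assumed to point at the page registered earlier): one call with `SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT` makes KVM publish every stopped counter's final value through the shared page, so the guest avoids a per-counter read afterwards.

```c
/* Hypothetical guest-side sketch: stop counters, read results from shmem */
struct riscv_pmu_snapshot_data *snap = snap_page;
struct sbiret ret;

ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
                ctr_base, ctr_mask, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT, 0, 0, 0);
if (!ret.error)
        /* values are indexed relative to ctr_base, as written above */
        pr_info("counter %lu: %llu\n", ctr_base, snap->ctr_values[0]);
```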
@@ -566,6 +683,7 @@ void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu)
         kvpmu->num_hw_ctrs = num_hw_ctrs + 1;
         kvpmu->num_fw_ctrs = SBI_PMU_FW_MAX;
         memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event));
+        kvpmu->snapshot_addr = INVALID_GPA;
 
         if (kvpmu->num_hw_ctrs > RISCV_KVM_MAX_HW_CTRS) {
                 pr_warn_once("Limiting the hardware counters to 32 as specified by the ISA");
@@ -625,6 +743,7 @@ void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu)
         }
         bitmap_zero(kvpmu->pmc_in_use, RISCV_MAX_COUNTERS);
         memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event));
+        kvm_pmu_clear_snapshot_area(vcpu);
 }
 
 void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu)