3 | 3 | #include <linux/kvm_host.h> |
4 | 4 | #include <asm/asm-prototypes.h> |
5 | 5 | #include <asm/dbell.h> |
6 | | -#include <asm/kvm_ppc.h> |
7 | | -#include <asm/pmc.h> |
8 | 6 | #include <asm/ppc-opcode.h> |
9 | 7 |
10 | 8 | #include "book3s_hv.h" |
11 | 9 |
12 | | -static void freeze_pmu(unsigned long mmcr0, unsigned long mmcra) |
13 | | -{ |
14 | | - if (!(mmcr0 & MMCR0_FC)) |
15 | | - goto do_freeze; |
16 | | - if (mmcra & MMCRA_SAMPLE_ENABLE) |
17 | | - goto do_freeze; |
18 | | - if (cpu_has_feature(CPU_FTR_ARCH_31)) { |
19 | | - if (!(mmcr0 & MMCR0_PMCCEXT)) |
20 | | - goto do_freeze; |
21 | | - if (!(mmcra & MMCRA_BHRB_DISABLE)) |
22 | | - goto do_freeze; |
23 | | - } |
24 | | - return; |
25 | | - |
26 | | -do_freeze: |
27 | | - mmcr0 = MMCR0_FC; |
28 | | - mmcra = 0; |
29 | | - if (cpu_has_feature(CPU_FTR_ARCH_31)) { |
30 | | - mmcr0 |= MMCR0_PMCCEXT; |
31 | | - mmcra = MMCRA_BHRB_DISABLE; |
32 | | - } |
33 | | - |
34 | | - mtspr(SPRN_MMCR0, mmcr0); |
35 | | - mtspr(SPRN_MMCRA, mmcra); |
36 | | - isync(); |
37 | | -} |
38 | | - |
39 | | -void switch_pmu_to_guest(struct kvm_vcpu *vcpu, |
40 | | - struct p9_host_os_sprs *host_os_sprs) |
41 | | -{ |
42 | | - struct lppaca *lp; |
43 | | - int load_pmu = 1; |
44 | | - |
45 | | - lp = vcpu->arch.vpa.pinned_addr; |
46 | | - if (lp) |
47 | | - load_pmu = lp->pmcregs_in_use; |
48 | | - |
49 | | - /* Save host */ |
50 | | - if (ppc_get_pmu_inuse()) { |
51 | | - /* |
52 | | - * It might be better to put PMU handling (at least for the |
53 | | - * host) in the perf subsystem because it knows more about what |
54 | | - * is being used. |
55 | | - */ |
56 | | - |
57 | | - /* POWER9, POWER10 do not implement HPMC or SPMC */ |
58 | | - |
59 | | - host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0); |
60 | | - host_os_sprs->mmcra = mfspr(SPRN_MMCRA); |
61 | | - |
62 | | - freeze_pmu(host_os_sprs->mmcr0, host_os_sprs->mmcra); |
63 | | - |
64 | | - host_os_sprs->pmc1 = mfspr(SPRN_PMC1); |
65 | | - host_os_sprs->pmc2 = mfspr(SPRN_PMC2); |
66 | | - host_os_sprs->pmc3 = mfspr(SPRN_PMC3); |
67 | | - host_os_sprs->pmc4 = mfspr(SPRN_PMC4); |
68 | | - host_os_sprs->pmc5 = mfspr(SPRN_PMC5); |
69 | | - host_os_sprs->pmc6 = mfspr(SPRN_PMC6); |
70 | | - host_os_sprs->mmcr1 = mfspr(SPRN_MMCR1); |
71 | | - host_os_sprs->mmcr2 = mfspr(SPRN_MMCR2); |
72 | | - host_os_sprs->sdar = mfspr(SPRN_SDAR); |
73 | | - host_os_sprs->siar = mfspr(SPRN_SIAR); |
74 | | - host_os_sprs->sier1 = mfspr(SPRN_SIER); |
75 | | - |
76 | | - if (cpu_has_feature(CPU_FTR_ARCH_31)) { |
77 | | - host_os_sprs->mmcr3 = mfspr(SPRN_MMCR3); |
78 | | - host_os_sprs->sier2 = mfspr(SPRN_SIER2); |
79 | | - host_os_sprs->sier3 = mfspr(SPRN_SIER3); |
80 | | - } |
81 | | - } |
82 | | - |
83 | | -#ifdef CONFIG_PPC_PSERIES |
84 | | - /* After saving PMU, before loading guest PMU, flip pmcregs_in_use */ |
85 | | - if (kvmhv_on_pseries()) { |
86 | | - barrier(); |
87 | | - get_lppaca()->pmcregs_in_use = load_pmu; |
88 | | - barrier(); |
89 | | - } |
90 | | -#endif |
91 | | - |
92 | | - /* |
93 | | - * Load guest. If the VPA said the PMCs are not in use but the guest |
94 | | - * tried to access them anyway, HFSCR[PM] will be set by the HFAC |
95 | | - * fault so we can make forward progress. |
96 | | - */ |
97 | | - if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) { |
98 | | - mtspr(SPRN_PMC1, vcpu->arch.pmc[0]); |
99 | | - mtspr(SPRN_PMC2, vcpu->arch.pmc[1]); |
100 | | - mtspr(SPRN_PMC3, vcpu->arch.pmc[2]); |
101 | | - mtspr(SPRN_PMC4, vcpu->arch.pmc[3]); |
102 | | - mtspr(SPRN_PMC5, vcpu->arch.pmc[4]); |
103 | | - mtspr(SPRN_PMC6, vcpu->arch.pmc[5]); |
104 | | - mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]); |
105 | | - mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]); |
106 | | - mtspr(SPRN_SDAR, vcpu->arch.sdar); |
107 | | - mtspr(SPRN_SIAR, vcpu->arch.siar); |
108 | | - mtspr(SPRN_SIER, vcpu->arch.sier[0]); |
109 | | - |
110 | | - if (cpu_has_feature(CPU_FTR_ARCH_31)) { |
111 | | - mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]); |
112 | | - mtspr(SPRN_SIER2, vcpu->arch.sier[1]); |
113 | | - mtspr(SPRN_SIER3, vcpu->arch.sier[2]); |
114 | | - } |
115 | | - |
116 | | - /* Set MMCRA then MMCR0 last */ |
117 | | - mtspr(SPRN_MMCRA, vcpu->arch.mmcra); |
118 | | - mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]); |
119 | | - /* No isync necessary because we're starting counters */ |
120 | | - |
121 | | - if (!vcpu->arch.nested && |
122 | | - (vcpu->arch.hfscr_permitted & HFSCR_PM)) |
123 | | - vcpu->arch.hfscr |= HFSCR_PM; |
124 | | - } |
125 | | -} |
126 | | -EXPORT_SYMBOL_GPL(switch_pmu_to_guest); |
127 | | - |
128 | | -void switch_pmu_to_host(struct kvm_vcpu *vcpu, |
129 | | - struct p9_host_os_sprs *host_os_sprs) |
130 | | -{ |
131 | | - struct lppaca *lp; |
132 | | - int save_pmu = 1; |
133 | | - |
134 | | - lp = vcpu->arch.vpa.pinned_addr; |
135 | | - if (lp) |
136 | | - save_pmu = lp->pmcregs_in_use; |
137 | | - if (IS_ENABLED(CONFIG_KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND)) { |
138 | | - /* |
139 | | - * Save pmu if this guest is capable of running nested guests. |
140 | | - * This option is for old L1s that do not set their |
141 | | - * lppaca->pmcregs_in_use properly when entering their L2. |
142 | | - */ |
143 | | - save_pmu |= nesting_enabled(vcpu->kvm); |
144 | | - } |
145 | | - |
146 | | - if (save_pmu) { |
147 | | - vcpu->arch.mmcr[0] = mfspr(SPRN_MMCR0); |
148 | | - vcpu->arch.mmcra = mfspr(SPRN_MMCRA); |
149 | | - |
150 | | - freeze_pmu(vcpu->arch.mmcr[0], vcpu->arch.mmcra); |
151 | | - |
152 | | - vcpu->arch.pmc[0] = mfspr(SPRN_PMC1); |
153 | | - vcpu->arch.pmc[1] = mfspr(SPRN_PMC2); |
154 | | - vcpu->arch.pmc[2] = mfspr(SPRN_PMC3); |
155 | | - vcpu->arch.pmc[3] = mfspr(SPRN_PMC4); |
156 | | - vcpu->arch.pmc[4] = mfspr(SPRN_PMC5); |
157 | | - vcpu->arch.pmc[5] = mfspr(SPRN_PMC6); |
158 | | - vcpu->arch.mmcr[1] = mfspr(SPRN_MMCR1); |
159 | | - vcpu->arch.mmcr[2] = mfspr(SPRN_MMCR2); |
160 | | - vcpu->arch.sdar = mfspr(SPRN_SDAR); |
161 | | - vcpu->arch.siar = mfspr(SPRN_SIAR); |
162 | | - vcpu->arch.sier[0] = mfspr(SPRN_SIER); |
163 | | - |
164 | | - if (cpu_has_feature(CPU_FTR_ARCH_31)) { |
165 | | - vcpu->arch.mmcr[3] = mfspr(SPRN_MMCR3); |
166 | | - vcpu->arch.sier[1] = mfspr(SPRN_SIER2); |
167 | | - vcpu->arch.sier[2] = mfspr(SPRN_SIER3); |
168 | | - } |
169 | | - |
170 | | - } else if (vcpu->arch.hfscr & HFSCR_PM) { |
171 | | - /* |
172 | | - * The guest accessed PMC SPRs without specifying they should |
173 | | - * be preserved, or it cleared pmcregs_in_use after the last |
174 | | - * access. Just ensure they are frozen. |
175 | | - */ |
176 | | - freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA)); |
177 | | - |
178 | | - /* |
179 | | - * Demand-fault PMU register access in the guest. |
180 | | - * |
181 | | - * This is used to grab the guest's VPA pmcregs_in_use value |
182 | | - * and reflect it into the host's VPA in the case of a nested |
183 | | - * hypervisor. |
184 | | - * |
185 | | - * It also avoids having to zero-out SPRs after each guest |
186 | | - * exit, to avoid side-channels. |
187 | | - * |
188 | | - * This is cleared here when we exit the guest, so later HFSCR |
189 | | - * interrupt handling can add it back to run the guest with |
190 | | - * PM enabled next time. |
191 | | - */ |
192 | | - if (!vcpu->arch.nested) |
193 | | - vcpu->arch.hfscr &= ~HFSCR_PM; |
194 | | - } /* otherwise the PMU should still be frozen */ |
195 | | - |
196 | | -#ifdef CONFIG_PPC_PSERIES |
197 | | - if (kvmhv_on_pseries()) { |
198 | | - barrier(); |
199 | | - get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse(); |
200 | | - barrier(); |
201 | | - } |
202 | | -#endif |
203 | | - |
204 | | - if (ppc_get_pmu_inuse()) { |
205 | | - mtspr(SPRN_PMC1, host_os_sprs->pmc1); |
206 | | - mtspr(SPRN_PMC2, host_os_sprs->pmc2); |
207 | | - mtspr(SPRN_PMC3, host_os_sprs->pmc3); |
208 | | - mtspr(SPRN_PMC4, host_os_sprs->pmc4); |
209 | | - mtspr(SPRN_PMC5, host_os_sprs->pmc5); |
210 | | - mtspr(SPRN_PMC6, host_os_sprs->pmc6); |
211 | | - mtspr(SPRN_MMCR1, host_os_sprs->mmcr1); |
212 | | - mtspr(SPRN_MMCR2, host_os_sprs->mmcr2); |
213 | | - mtspr(SPRN_SDAR, host_os_sprs->sdar); |
214 | | - mtspr(SPRN_SIAR, host_os_sprs->siar); |
215 | | - mtspr(SPRN_SIER, host_os_sprs->sier1); |
216 | | - |
217 | | - if (cpu_has_feature(CPU_FTR_ARCH_31)) { |
218 | | - mtspr(SPRN_MMCR3, host_os_sprs->mmcr3); |
219 | | - mtspr(SPRN_SIER2, host_os_sprs->sier2); |
220 | | - mtspr(SPRN_SIER3, host_os_sprs->sier3); |
221 | | - } |
222 | | - |
223 | | - /* Set MMCRA then MMCR0 last */ |
224 | | - mtspr(SPRN_MMCRA, host_os_sprs->mmcra); |
225 | | - mtspr(SPRN_MMCR0, host_os_sprs->mmcr0); |
226 | | - isync(); |
227 | | - } |
228 | | -} |
229 | | -EXPORT_SYMBOL_GPL(switch_pmu_to_host); |
230 | | - |
231 | 10 | static void load_spr_state(struct kvm_vcpu *vcpu, |
232 | 11 | struct p9_host_os_sprs *host_os_sprs) |
233 | 12 | { |