@@ -33,10 +33,8 @@ void vp_synchronize_vectors(struct virtio_device *vdev)
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	int i;
 
-	if (vp_dev->intx_enabled)
-		synchronize_irq(vp_dev->pci_dev->irq);
-
-	for (i = 0; i < vp_dev->msix_vectors; ++i)
+	synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0));
+	for (i = 1; i < vp_dev->msix_vectors; i++)
 		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
 }
 
@@ -99,77 +97,10 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
 	return vp_vring_interrupt(irq, opaque);
 }
 
-static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
-				   bool per_vq_vectors)
-{
-	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	const char *name = dev_name(&vp_dev->vdev.dev);
-	unsigned i, v;
-	int err = -ENOMEM;
-
-	vp_dev->msix_vectors = nvectors;
-
-	vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
-				     GFP_KERNEL);
-	if (!vp_dev->msix_names)
-		goto error;
-	vp_dev->msix_affinity_masks
-		= kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
-			  GFP_KERNEL);
-	if (!vp_dev->msix_affinity_masks)
-		goto error;
-	for (i = 0; i < nvectors; ++i)
-		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
-					GFP_KERNEL))
-			goto error;
-
-	err = pci_alloc_irq_vectors(vp_dev->pci_dev, nvectors, nvectors,
-			PCI_IRQ_MSIX);
-	if (err < 0)
-		goto error;
-	vp_dev->msix_enabled = 1;
-
-	/* Set the vector used for configuration */
-	v = vp_dev->msix_used_vectors;
-	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
-		 "%s-config", name);
-	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
-			  vp_config_changed, 0, vp_dev->msix_names[v],
-			  vp_dev);
-	if (err)
-		goto error;
-	++vp_dev->msix_used_vectors;
-
-	v = vp_dev->config_vector(vp_dev, v);
-	/* Verify we had enough resources to assign the vector */
-	if (v == VIRTIO_MSI_NO_VECTOR) {
-		err = -EBUSY;
-		goto error;
-	}
-
-	if (!per_vq_vectors) {
-		/* Shared vector for all VQs */
-		v = vp_dev->msix_used_vectors;
-		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
-			 "%s-virtqueues", name);
-		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
-				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
-				  vp_dev);
-		if (err)
-			goto error;
-		++vp_dev->msix_used_vectors;
-	}
-	return 0;
-error:
-	return err;
-}
-
-/* the config->del_vqs() implementation */
-void vp_del_vqs(struct virtio_device *vdev)
+static void vp_remove_vqs(struct virtio_device *vdev)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	struct virtqueue *vq, *n;
-	int i;
 
 	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
 		if (vp_dev->msix_vector_map) {
@@ -181,35 +112,33 @@ void vp_del_vqs(struct virtio_device *vdev)
 		}
 		vp_dev->del_vq(vq);
 	}
+}
 
-	if (vp_dev->intx_enabled) {
-		free_irq(vp_dev->pci_dev->irq, vp_dev);
-		vp_dev->intx_enabled = 0;
-	}
+/* the config->del_vqs() implementation */
+void vp_del_vqs(struct virtio_device *vdev)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	int i;
 
-	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
-		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
+	if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs)))
+		return;
 
-	for (i = 0; i < vp_dev->msix_vectors; i++)
-		if (vp_dev->msix_affinity_masks[i])
-			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+	vp_remove_vqs(vdev);
 
 	if (vp_dev->msix_enabled) {
+		for (i = 0; i < vp_dev->msix_vectors; i++)
+			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+
 		/* Disable the vector used for configuration */
 		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
 
-		pci_free_irq_vectors(vp_dev->pci_dev);
-		vp_dev->msix_enabled = 0;
+		kfree(vp_dev->msix_affinity_masks);
+		kfree(vp_dev->msix_names);
+		kfree(vp_dev->msix_vector_map);
 	}
 
-	vp_dev->msix_vectors = 0;
-	vp_dev->msix_used_vectors = 0;
-	kfree(vp_dev->msix_names);
-	vp_dev->msix_names = NULL;
-	kfree(vp_dev->msix_affinity_masks);
-	vp_dev->msix_affinity_masks = NULL;
-	kfree(vp_dev->msix_vector_map);
-	vp_dev->msix_vector_map = NULL;
+	free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
+	pci_free_irq_vectors(vp_dev->pci_dev);
 }
 
 static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
@@ -219,79 +148,122 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
 		bool per_vq_vectors)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	const char *name = dev_name(&vp_dev->vdev.dev);
+	int i, err = -ENOMEM, allocated_vectors, nvectors;
 	u16 msix_vec;
-	int i, err, nvectors, allocated_vectors;
+
+	nvectors = 1;
+	for (i = 0; i < nvqs; i++)
+		if (callbacks[i])
+			nvectors++;
 
 	if (per_vq_vectors) {
-		/* Best option: one for change interrupt, one per vq. */
-		nvectors = 1;
-		for (i = 0; i < nvqs; ++i)
-			if (callbacks[i])
-				++nvectors;
+		err = pci_alloc_irq_vectors(vp_dev->pci_dev, nvectors, nvectors,
+				PCI_IRQ_MSIX);
 	} else {
-		/* Second best: one for change, shared for all vqs. */
-		nvectors = 2;
+		err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2,
+				PCI_IRQ_MSIX);
 	}
+	if (err < 0)
+		return err;
 
-	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
+	vp_dev->msix_vectors = nvectors;
+	vp_dev->msix_names = kmalloc_array(nvectors,
+			sizeof(*vp_dev->msix_names), GFP_KERNEL);
+	if (!vp_dev->msix_names)
+		goto out_free_irq_vectors;
+
+	vp_dev->msix_affinity_masks = kcalloc(nvectors,
+			sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL);
+	if (!vp_dev->msix_affinity_masks)
+		goto out_free_msix_names;
+
+	for (i = 0; i < nvectors; ++i) {
+		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
+				GFP_KERNEL))
+			goto out_free_msix_affinity_masks;
+	}
+
+	/* Set the vector used for configuration */
+	snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names),
+		 "%s-config", name);
+	err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed,
+			0, vp_dev->msix_names[0], vp_dev);
 	if (err)
-		goto error_find;
+		goto out_free_irq_vectors;
 
-	if (per_vq_vectors) {
-		vp_dev->msix_vector_map = kmalloc_array(nvqs,
-				sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
-		if (!vp_dev->msix_vector_map)
-			goto error_find;
+	/* Verify we had enough resources to assign the vector */
+	if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) {
+		err = -EBUSY;
+		goto out_free_config_irq;
 	}
 
-	allocated_vectors = vp_dev->msix_used_vectors;
+	vp_dev->msix_vector_map = kmalloc_array(nvqs,
+			sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
+	if (!vp_dev->msix_vector_map)
+		goto out_disable_config_irq;
+
+	allocated_vectors = 1; /* vector 0 is the config interrupt */
 	for (i = 0; i < nvqs; ++i) {
 		if (!names[i]) {
 			vqs[i] = NULL;
 			continue;
 		}
 
-		if (!callbacks[i])
-			msix_vec = VIRTIO_MSI_NO_VECTOR;
-		else if (per_vq_vectors)
-			msix_vec = allocated_vectors++;
+		if (callbacks[i])
+			msix_vec = allocated_vectors;
 		else
-			msix_vec = VP_MSIX_VQ_VECTOR;
+			msix_vec = VIRTIO_MSI_NO_VECTOR;
+
 		vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
 				msix_vec);
 		if (IS_ERR(vqs[i])) {
 			err = PTR_ERR(vqs[i]);
-			goto error_find;
+			goto out_remove_vqs;
 		}
 
-		if (!per_vq_vectors)
-			continue;
-
 		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
 			vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
 			continue;
 		}
 
-		/* allocate per-vq irq if available and necessary */
-		snprintf(vp_dev->msix_names[msix_vec],
-			 sizeof *vp_dev->msix_names,
-			 "%s-%s",
+		snprintf(vp_dev->msix_names[i + 1],
+			 sizeof(*vp_dev->msix_names), "%s-%s",
			 dev_name(&vp_dev->vdev.dev), names[i]);
 		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
-				  vring_interrupt, 0,
-				  vp_dev->msix_names[msix_vec],
-				  vqs[i]);
+				  vring_interrupt, IRQF_SHARED,
+				  vp_dev->msix_names[i + 1], vqs[i]);
 		if (err) {
 			/* don't free this irq on error */
 			vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
-			goto error_find;
+			goto out_remove_vqs;
 		}
 		vp_dev->msix_vector_map[i] = msix_vec;
+
+		if (per_vq_vectors)
+			allocated_vectors++;
 	}
+
+	vp_dev->msix_enabled = 1;
 	return 0;
 
-error_find:
-	vp_del_vqs(vdev);
+out_remove_vqs:
+	vp_remove_vqs(vdev);
+	kfree(vp_dev->msix_vector_map);
+out_disable_config_irq:
+	vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
+out_free_config_irq:
+	free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
+out_free_msix_affinity_masks:
+	for (i = 0; i < nvectors; i++) {
+		if (vp_dev->msix_affinity_masks[i])
+			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+	}
+	kfree(vp_dev->msix_affinity_masks);
+out_free_msix_names:
+	kfree(vp_dev->msix_names);
+out_free_irq_vectors:
+	pci_free_irq_vectors(vp_dev->pci_dev);
 	return err;
 }
 
@@ -305,9 +277,8 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
 	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
 			dev_name(&vdev->dev), vp_dev);
 	if (err)
-		goto out_del_vqs;
+		return err;
 
-	vp_dev->intx_enabled = 1;
 	for (i = 0; i < nvqs; ++i) {
 		if (!names[i]) {
 			vqs[i] = NULL;
@@ -317,13 +288,15 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
 				VIRTIO_MSI_NO_VECTOR);
 		if (IS_ERR(vqs[i])) {
 			err = PTR_ERR(vqs[i]);
-			goto out_del_vqs;
+			goto out_remove_vqs;
 		}
 	}
 
 	return 0;
-out_del_vqs:
-	vp_del_vqs(vdev);
+
+out_remove_vqs:
+	vp_remove_vqs(vdev);
+	free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
 	return err;
 }
 
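The conversion above leans entirely on the generic PCI IRQ allocation API: vector 0 always carries the configuration-change interrupt, and pci_irq_vector() maps a vector index to the Linux IRQ number passed to request_irq()/free_irq(). This is also why the intx_enabled flag can go away: with no MSI-X vectors allocated, pci_irq_vector(pdev, 0) resolves to the legacy pdev->irq, so both teardown paths can free vector 0 the same way. Below is a minimal, self-contained sketch of that allocation pattern under the same assumptions (one config vector plus one vector per queue, MSI-X only); the demo_* names are illustrative and are not part of this patch.

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t demo_config_irq(int irq, void *data)
{
	/* acknowledge a configuration-change interrupt */
	return IRQ_HANDLED;
}

static irqreturn_t demo_queue_irq(int irq, void *data)
{
	/* acknowledge a per-queue interrupt */
	return IRQ_HANDLED;
}

static int demo_setup_irqs(struct pci_dev *pdev, int nqueues)
{
	int nvec, i, err;

	/* one vector for config changes plus one per queue */
	nvec = pci_alloc_irq_vectors(pdev, nqueues + 1, nqueues + 1,
				     PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;

	/* vector 0: configuration-change interrupt */
	err = request_irq(pci_irq_vector(pdev, 0), demo_config_irq, 0,
			  "demo-config", pdev);
	if (err)
		goto out_free_vectors;

	/* vectors 1..nqueues: one interrupt per queue */
	for (i = 0; i < nqueues; i++) {
		err = request_irq(pci_irq_vector(pdev, i + 1), demo_queue_irq,
				  0, "demo-queue", pdev);
		if (err)
			goto out_free_queue_irqs;
	}
	return 0;

out_free_queue_irqs:
	while (i-- > 0)
		free_irq(pci_irq_vector(pdev, i + 1), pdev);
	free_irq(pci_irq_vector(pdev, 0), pdev);
out_free_vectors:
	pci_free_irq_vectors(pdev);
	return err;
}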