@@ -566,67 +566,28 @@ static void virtio_transport_rx_work(struct work_struct *work)
 	mutex_unlock(&vsock->rx_lock);
 }
 
-static int virtio_vsock_probe(struct virtio_device *vdev)
+static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
 {
-	vq_callback_t *callbacks[] = {
-		virtio_vsock_rx_done,
-		virtio_vsock_tx_done,
-		virtio_vsock_event_done,
-	};
+	struct virtio_device *vdev = vsock->vdev;
 	static const char * const names[] = {
 		"rx",
 		"tx",
 		"event",
 	};
-	struct virtio_vsock *vsock = NULL;
+	vq_callback_t *callbacks[] = {
+		virtio_vsock_rx_done,
+		virtio_vsock_tx_done,
+		virtio_vsock_event_done,
+	};
 	int ret;
 
-	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
-	if (ret)
-		return ret;
-
-	/* Only one virtio-vsock device per guest is supported */
-	if (rcu_dereference_protected(the_virtio_vsock,
-				      lockdep_is_held(&the_virtio_vsock_mutex))) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
-	if (!vsock) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	vsock->vdev = vdev;
-
-	ret = virtio_find_vqs(vsock->vdev, VSOCK_VQ_MAX,
-			      vsock->vqs, callbacks, names,
+	ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, callbacks, names,
 			      NULL);
 	if (ret < 0)
-		goto out;
+		return ret;
 
 	virtio_vsock_update_guest_cid(vsock);
 
-	vsock->rx_buf_nr = 0;
-	vsock->rx_buf_max_nr = 0;
-	atomic_set(&vsock->queued_replies, 0);
-
-	mutex_init(&vsock->tx_lock);
-	mutex_init(&vsock->rx_lock);
-	mutex_init(&vsock->event_lock);
-	spin_lock_init(&vsock->send_pkt_list_lock);
-	INIT_LIST_HEAD(&vsock->send_pkt_list);
-	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
-	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
-	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
-	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
-
-	if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
-		vsock->seqpacket_allow = true;
-
-	vdev->priv = vsock;
-
 	virtio_device_ready(vdev);
 
 	mutex_lock(&vsock->tx_lock);
@@ -643,30 +604,15 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
 	vsock->event_run = true;
 	mutex_unlock(&vsock->event_lock);
 
-	rcu_assign_pointer(the_virtio_vsock, vsock);
-
-	mutex_unlock(&the_virtio_vsock_mutex);
-
 	return 0;
-
-out:
-	kfree(vsock);
-	mutex_unlock(&the_virtio_vsock_mutex);
-	return ret;
 }
 
-static void virtio_vsock_remove(struct virtio_device *vdev)
+static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
 {
-	struct virtio_vsock *vsock = vdev->priv;
+	struct virtio_device *vdev = vsock->vdev;
 	struct virtio_vsock_pkt *pkt;
 
-	mutex_lock(&the_virtio_vsock_mutex);
-
-	vdev->priv = NULL;
-	rcu_assign_pointer(the_virtio_vsock, NULL);
-	synchronize_rcu();
-
-	/* Reset all connected sockets when the device disappear */
+	/* Reset all connected sockets when the VQs disappear */
 	vsock_for_each_connected_socket(&virtio_transport.transport,
 					virtio_vsock_reset_sock);
 
@@ -711,6 +657,78 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
 
 	/* Delete virtqueues and flush outstanding callbacks if any */
 	vdev->config->del_vqs(vdev);
+}
+
+static int virtio_vsock_probe(struct virtio_device *vdev)
+{
+	struct virtio_vsock *vsock = NULL;
+	int ret;
+
+	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
+	if (ret)
+		return ret;
+
+	/* Only one virtio-vsock device per guest is supported */
+	if (rcu_dereference_protected(the_virtio_vsock,
+				      lockdep_is_held(&the_virtio_vsock_mutex))) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
+	if (!vsock) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	vsock->vdev = vdev;
+
+	vsock->rx_buf_nr = 0;
+	vsock->rx_buf_max_nr = 0;
+	atomic_set(&vsock->queued_replies, 0);
+
+	mutex_init(&vsock->tx_lock);
+	mutex_init(&vsock->rx_lock);
+	mutex_init(&vsock->event_lock);
+	spin_lock_init(&vsock->send_pkt_list_lock);
+	INIT_LIST_HEAD(&vsock->send_pkt_list);
+	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
+	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
+	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
+	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
+
+	if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
+		vsock->seqpacket_allow = true;
+
+	vdev->priv = vsock;
+
+	ret = virtio_vsock_vqs_init(vsock);
+	if (ret < 0)
+		goto out;
+
+	rcu_assign_pointer(the_virtio_vsock, vsock);
+
+	mutex_unlock(&the_virtio_vsock_mutex);
+
+	return 0;
+
+out:
+	kfree(vsock);
+	mutex_unlock(&the_virtio_vsock_mutex);
+	return ret;
+}
+
+static void virtio_vsock_remove(struct virtio_device *vdev)
+{
+	struct virtio_vsock *vsock = vdev->priv;
+
+	mutex_lock(&the_virtio_vsock_mutex);
+
+	vdev->priv = NULL;
+	rcu_assign_pointer(the_virtio_vsock, NULL);
+	synchronize_rcu();
+
+	virtio_vsock_vqs_del(vsock);
 
 	/* Other works can be queued before 'config->del_vqs()', so we flush
 	 * all works before to free the vsock object to avoid use after free.