@@ -158,7 +158,6 @@ struct nvme_fc_ctrl {
 	u32			cnum;
 
 	bool			ioq_live;
-	atomic_t		err_work_active;
 	u64			association_id;
 	struct nvmefc_ls_rcv_op	*rcv_disconn;
 
@@ -168,7 +167,6 @@ struct nvme_fc_ctrl {
 	struct blk_mq_tag_set	tag_set;
 
 	struct delayed_work	connect_work;
-	struct work_struct	err_work;
 
 	struct kref		ref;
 	unsigned long		flags;
@@ -2415,11 +2413,11 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
 	nvme_fc_ctrl_put(ctrl);
 }
 
+static void __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl);
+
 static void
 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
 {
-	int active;
-
 	/*
 	 * if an error (io timeout, etc) while (re)connecting,
 	 * it's an error on creating the new association.
@@ -2428,11 +2426,14 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
 	 * ios hitting this path before things are cleaned up.
 	 */
 	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
-		active = atomic_xchg(&ctrl->err_work_active, 1);
-		if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
-			atomic_set(&ctrl->err_work_active, 0);
-			WARN_ON(1);
-		}
+		__nvme_fc_terminate_io(ctrl);
+
+		/*
+		 * Rescheduling the connection after recovering
+		 * from the io error is left to the reconnect work
+		 * item, which is what should have stalled waiting on
+		 * the io that had the error that scheduled this work.
+		 */
 		return;
 	}
 
@@ -3240,7 +3241,6 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
-	cancel_work_sync(&ctrl->err_work);
 	cancel_delayed_work_sync(&ctrl->connect_work);
 	/*
 	 * kill the association on the link side. this will block
@@ -3351,23 +3351,6 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
 			ctrl->cnum);
 }
 
-static void
-nvme_fc_connect_err_work(struct work_struct *work)
-{
-	struct nvme_fc_ctrl *ctrl =
-			container_of(work, struct nvme_fc_ctrl, err_work);
-
-	__nvme_fc_terminate_io(ctrl);
-
-	atomic_set(&ctrl->err_work_active, 0);
-
-	/*
-	 * Rescheduling the connection after recovering
-	 * from the io error is left to the reconnect work
-	 * item, which is what should have stalled waiting on
-	 * the io that had the error that scheduled this work.
-	 */
-}
 
 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
 	.name			= "fc",
@@ -3495,15 +3478,13 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	ctrl->dev = lport->dev;
 	ctrl->cnum = idx;
 	ctrl->ioq_live = false;
-	atomic_set(&ctrl->err_work_active, 0);
 	init_waitqueue_head(&ctrl->ioabort_wait);
 
 	get_device(ctrl->dev);
 	kref_init(&ctrl->ref);
 
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
 	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
-	INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
 	spin_lock_init(&ctrl->lock);
 
 	/* io queue count */
@@ -3596,7 +3577,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 fail_ctrl:
 	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
 	cancel_work_sync(&ctrl->ctrl.reset_work);
-	cancel_work_sync(&ctrl->err_work);
 	cancel_delayed_work_sync(&ctrl->connect_work);
 
 	ctrl->ctrl.opts = NULL;
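For readability, below is a consolidated sketch of how the NVME_CTRL_CONNECTING branch of nvme_fc_error_recovery() reads once the two hunks above are applied. It is assembled only from the diff context shown here, not from additional patch content, and the non-CONNECTING remainder of the function is unchanged by this diff and therefore elided.

static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
	/*
	 * An error (io timeout, etc) while (re)connecting is an error on
	 * creating the new association. Terminate the I/O inline; the
	 * separate err_work item no longer exists after this change.
	 */
	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
		__nvme_fc_terminate_io(ctrl);

		/*
		 * Rescheduling the connection after recovering from the
		 * io error is left to the reconnect work item, which is
		 * what should have stalled waiting on the io that had
		 * the error that scheduled this work.
		 */
		return;
	}

	/* ... non-CONNECTING recovery path, unchanged by this diff ... */
}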