@@ -56,6 +56,7 @@ static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
 static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
 
 static void smc_link_up_work(struct work_struct *work);
+static void smc_link_down_work(struct work_struct *work);
 
 /* return head of link group list and its lock for a given link group */
 static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
@@ -320,6 +321,7 @@ static int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
 	lnk->smcibdev = ini->ib_dev;
 	lnk->ibport = ini->ib_port;
 	lnk->path_mtu = ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
+	INIT_WORK(&lnk->link_down_wrk, smc_link_down_work);
 	if (!ini->ib_dev->initialized) {
 		rc = (int)smc_ib_setup_per_ibdev(ini->ib_dev);
 		if (rc)
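
The INIT_WORK() call above implies a companion change that this hunk does not show: struct smc_link must gain a work item for the new handler. A sketch of the assumed addition to net/smc/smc_core.h, illustrative only:

struct smc_link {
	/* ... existing members such as smcibdev, ibport, path_mtu ... */
	struct work_struct	link_down_wrk;	/* assumed member: scheduled by
						 * smcr_link_down_cond_sched(),
						 * run by smc_link_down_work()
						 */
	/* ... */
};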
@@ -818,36 +820,6 @@ void smc_lgr_terminate_sched(struct smc_link_group *lgr)
 	schedule_work(&lgr->terminate_work);
 }
 
-/* Called when IB port is terminated */
-void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
-{
-	struct smc_link_group *lgr, *l;
-	LIST_HEAD(lgr_free_list);
-	int i;
-
-	spin_lock_bh(&smc_lgr_list.lock);
-	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
-		if (lgr->is_smcd)
-			continue;
-		/* tbd - terminate only when no more links are active */
-		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
-			if (!smc_link_usable(&lgr->lnk[i]))
-				continue;
-			if (lgr->lnk[i].smcibdev == smcibdev &&
-			    lgr->lnk[i].ibport == ibport) {
-				list_move(&lgr->list, &lgr_free_list);
-				lgr->freeing = 1;
-			}
-		}
-	}
-	spin_unlock_bh(&smc_lgr_list.lock);
-
-	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
-		list_del_init(&lgr->list);
-		__smc_lgr_terminate(lgr, false);
-	}
-}
-
 /* Called when peer lgr shutdown (regularly or abnormally) is received */
 void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
 {
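
With smc_port_terminate() removed, whatever invoked it on an IB port error presumably now calls smcr_port_err() instead. That caller is not part of this diff; the following sketch of the port event worker in net/smc/smc_ib.c shows how the switch might look, assuming the existing port_event_mask and smc_ib_port_active() infrastructure:

static void smc_ib_port_event_work(struct work_struct *work)
{
	struct smc_ib_device *smcibdev = container_of(
		work, struct smc_ib_device, port_event_work);
	u8 port_idx;

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
		if (!smc_ib_port_active(smcibdev, port_idx + 1))
			smcr_port_err(smcibdev, port_idx + 1); /* was smc_port_terminate() */
	}
}

Note the granularity change: the old function froze whole link groups, while smcr_port_err() takes down only the links on the failing port and lets smcr_link_down() try to move traffic to a surviving link.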
@@ -1000,6 +972,79 @@ void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
 	}
 }
 
+/* link is down - switch connections to alternate link,
+ * must be called under lgr->llc_conf_mutex lock
+ */
+static void smcr_link_down(struct smc_link *lnk)
+{
+	struct smc_link_group *lgr = lnk->lgr;
+	struct smc_link *to_lnk;
+	int del_link_id;
+
+	if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
+		return;
+
+	smc_ib_modify_qp_reset(lnk);
+	to_lnk = NULL;
+	/* tbd: call to_lnk = smc_switch_conns(lgr, lnk, true); */
+	if (!to_lnk) { /* no backup link available */
+		smcr_link_clear(lnk);
+		return;
+	}
+	lgr->type = SMC_LGR_SINGLE;
+	del_link_id = lnk->link_id;
+
+	if (lgr->role == SMC_SERV) {
+		/* trigger local delete link processing */
+	} else {
+		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
+			/* another llc task is ongoing */
+			mutex_unlock(&lgr->llc_conf_mutex);
+			wait_event_interruptible_timeout(lgr->llc_waiter,
+				(lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
+				SMC_LLC_WAIT_TIME);
+			mutex_lock(&lgr->llc_conf_mutex);
+		}
+		smc_llc_send_delete_link(to_lnk, del_link_id, SMC_LLC_REQ, true,
+					 SMC_LLC_DEL_LOST_PATH);
+	}
+}
+
+/* must be called under lgr->llc_conf_mutex lock */
+void smcr_link_down_cond(struct smc_link *lnk)
+{
+	if (smc_link_downing(&lnk->state))
+		smcr_link_down(lnk);
+}
+
+/* will get the lgr->llc_conf_mutex lock */
+void smcr_link_down_cond_sched(struct smc_link *lnk)
+{
+	if (smc_link_downing(&lnk->state))
+		schedule_work(&lnk->link_down_wrk);
+}
+
+void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport)
+{
+	struct smc_link_group *lgr, *n;
+	int i;
+
+	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
+		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
+			    SMC_MAX_PNETID_LEN))
+			continue; /* lgr is not affected */
+		if (list_empty(&lgr->list))
+			continue;
+		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+			struct smc_link *lnk = &lgr->lnk[i];
+
+			if (smc_link_usable(lnk) &&
+			    lnk->smcibdev == smcibdev && lnk->ibport == ibport)
+				smcr_link_down_cond_sched(lnk);
+		}
+	}
+}
+
 static void smc_link_up_work(struct work_struct *work)
 {
 	struct smc_ib_up_work *ib_work = container_of(work,
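
Both smcr_link_down_cond() and smcr_link_down_cond_sched() gate on smc_link_downing(), which this diff does not show. Judging by its use, it has to be an atomic test-and-transition so that only the first caller proceeds with the teardown. A sketch of the assumed helper in net/smc/smc_core.h:

static inline int smc_link_downing(enum smc_link_state *state)
{
	/* atomically move ACTIVE -> INACTIVE; true only for the one
	 * caller that performed the transition, so the link is torn
	 * down exactly once even under concurrent error reports
	 */
	return cmpxchg(state, SMC_LNK_ACTIVE, SMC_LNK_INACTIVE) ==
	       SMC_LNK_ACTIVE;
}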
@@ -1014,6 +1059,20 @@ static void smc_link_up_work(struct work_struct *work)
 	kfree(ib_work);
 }
 
+static void smc_link_down_work(struct work_struct *work)
+{
+	struct smc_link *link = container_of(work, struct smc_link,
+					     link_down_wrk);
+	struct smc_link_group *lgr = link->lgr;
+
+	if (list_empty(&lgr->list))
+		return;
+	wake_up_interruptible_all(&lgr->llc_waiter);
+	mutex_lock(&lgr->llc_conf_mutex);
+	smcr_link_down(link);
+	mutex_unlock(&lgr->llc_conf_mutex);
+}
+
 /* Determine vlan of internal TCP socket.
  * @vlan_id: address to store the determined vlan id into
  */
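
The two entry points split by calling context: smcr_link_down_cond() serves callers that already hold lgr->llc_conf_mutex, while smcr_link_down_cond_sched() defers to smc_link_down_work() for contexts that cannot sleep. An illustrative example, not from the patch, of a completion-error path using the scheduled variant:

/* hypothetical caller: a send-completion handler runs in tasklet
 * (atomic) context and therefore must not take llc_conf_mutex itself
 */
static void example_tx_completion(struct smc_link *link, struct ib_wc *wc)
{
	if (wc->status != IB_WC_SUCCESS)
		smcr_link_down_cond_sched(link); /* worker takes the mutex */
}

smc_link_down_work() issues wake_up_interruptible_all(&lgr->llc_waiter) before taking the mutex, presumably to kick any LLC flow sleeping on that waitqueue so it can complete and release llc_conf_mutex instead of stalling the worker.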