@@ -797,6 +797,30 @@ static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
 	return 0;
 }
 
+
+/* Complete the cancelled URBs we unlinked from td_list. */
+static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
+{
+	struct xhci_ring *ring;
+	struct xhci_td *td, *tmp_td;
+
+	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
+				 cancelled_td_list) {
+
+		/*
+		 * Doesn't matter what we pass for status, since the core will
+		 * just overwrite it (because the URB has been unlinked).
+		 */
+		ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
+
+		if (td->cancel_status == TD_CLEARED)
+			xhci_td_cleanup(ep->xhci, td, ring, 0);
+
+		if (ep->xhci->xhc_state & XHCI_STATE_DYING)
+			return;
+	}
+}
+
 static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
 				unsigned int ep_index, enum xhci_ep_reset_type reset_type)
 {
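The new helper above gives back only TDs whose `cancel_status` has reached `TD_CLEARED`; everything else stays on `cancelled_td_list` until a later command completion clears it. The states themselves come from the companion xhci.h change, which is not part of this diff, so the standalone C sketch below uses assumed names (the enum, the plain `next` pointer) purely to illustrate the lifecycle:

```c
#include <stdio.h>

/* Assumed shape of the xhci.h companion change: the states a
 * cancelled TD moves through. Names here are illustrative. */
enum td_cancel_status {
	TD_DIRTY = 0,		/* TD may still be cached by the controller */
	TD_HALTED,		/* TD caused the endpoint to halt */
	TD_CLEARING_CACHE,	/* a Set TR Deq command is in flight for it */
	TD_CLEARED,		/* no-oped or flushed; safe to give back */
};

struct td {
	enum td_cancel_status cancel_status;
	struct td *next;	/* stand-in for the kernel list_head */
};

/* Model of xhci_giveback_invalidated_tds(): complete only the TDs
 * that reached TD_CLEARED, leave the rest queued. */
static void giveback_invalidated(struct td *head)
{
	for (struct td *td = head; td; td = td->next) {
		if (td->cancel_status == TD_CLEARED)
			printf("giving back td %p\n", (void *)td);
	}
}

int main(void)
{
	struct td waiting = { TD_CLEARING_CACHE, NULL };
	struct td done = { TD_CLEARED, &waiting };

	giveback_invalidated(&done);	/* completes only 'done' */
	return 0;
}
```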
@@ -834,15 +858,19 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
 
 	ep->ep_state |= EP_HALTED;
 
+	/* add td to cancelled list and let reset ep handler take care of it */
+	if (reset_type == EP_HARD_RESET) {
+		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
+		if (td && list_empty(&td->cancelled_td_list)) {
+			list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+			td->cancel_status = TD_HALTED;
+		}
+	}
+
 	err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
 	if (err)
 		return;
 
-	if (reset_type == EP_HARD_RESET) {
-		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
-		xhci_cleanup_stalled_ring(xhci, slot_id, ep->ep_index, stream_id,
-					  td);
-	}
 	xhci_ring_cmd_db(xhci);
 }
 
@@ -851,16 +879,20 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
  * We have the xHCI lock, so nothing can modify this list until we drop it.
  * We're also in the event handler, so we can't get re-interrupted if another
  * Stop Endpoint command completes.
+ *
+ * Only call this when the ring is not in a running state.
  */
 
-static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep,
-					 struct xhci_dequeue_state *deq_state)
+static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
 {
 	struct xhci_hcd *xhci;
 	struct xhci_td *td = NULL;
 	struct xhci_td *tmp_td = NULL;
+	struct xhci_td *cached_td = NULL;
 	struct xhci_ring *ring;
+	struct xhci_dequeue_state deq_state;
 	u64 hw_deq;
+	unsigned int slot_id = ep->vdev->slot_id;
 
 	xhci = ep->xhci;
 
@@ -886,14 +918,28 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep,
 
 		if (trb_in_td(xhci, td->start_seg, td->first_trb,
 			      td->last_trb, hw_deq, false)) {
-			xhci_find_new_dequeue_state(xhci, ep->vdev->slot_id,
-						    ep->ep_index,
-						    td->urb->stream_id,
-						    td, deq_state);
+			switch (td->cancel_status) {
+			case TD_CLEARED: /* TD is already no-op */
+			case TD_CLEARING_CACHE: /* set TR deq command already queued */
+				break;
+			case TD_DIRTY: /* TD is cached, clear it */
+			case TD_HALTED:
+				/* FIXME stream case, several stopped rings */
+				cached_td = td;
+				break;
+			}
 		} else {
 			td_to_noop(xhci, ring, td, false);
+			td->cancel_status = TD_CLEARED;
 		}
-
+	}
+	if (cached_td) {
+		cached_td->cancel_status = TD_CLEARING_CACHE;
+		xhci_find_new_dequeue_state(xhci, slot_id, ep->ep_index,
+					    cached_td->urb->stream_id,
+					    cached_td, &deq_state);
+		xhci_queue_new_dequeue_state(xhci, slot_id, ep->ep_index,
+					     &deq_state);
 	}
 	return 0;
 }
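Why this pass produces at most one Set TR Deq command: only the TD that the controller's dequeue pointer actually landed in has hardware-cached state; every other cancelled TD can be turned into no-ops directly on the ring. A minimal host-side sketch of that classification, with `holds_hw_deq` standing in for the real `trb_in_td()` test against `hw_deq` (all names are illustrative):

```c
#include <stdbool.h>
#include <stdio.h>

enum td_cancel_status { TD_DIRTY, TD_HALTED, TD_CLEARING_CACHE, TD_CLEARED };

struct td {
	enum td_cancel_status cancel_status;
	bool holds_hw_deq;	/* stand-in for the trb_in_td() check */
};

/* Model of the classification pass in xhci_invalidate_cancelled_tds():
 * TDs the controller never reached are no-oped on the spot; the single
 * TD holding the hardware dequeue pointer is remembered and flushed
 * with one Set TR Deq command after the loop. */
static struct td *invalidate(struct td *tds, int n)
{
	struct td *cached_td = NULL;

	for (int i = 0; i < n; i++) {
		struct td *td = &tds[i];

		if (!td->holds_hw_deq) {
			td->cancel_status = TD_CLEARED;	/* no-oped in ring */
			continue;
		}
		switch (td->cancel_status) {
		case TD_CLEARED:	/* already no-op */
		case TD_CLEARING_CACHE:	/* command already queued */
			break;
		case TD_DIRTY:
		case TD_HALTED:
			cached_td = td;	/* needs a Set TR Deq */
			break;
		}
	}
	if (cached_td)
		cached_td->cancel_status = TD_CLEARING_CACHE;
	return cached_td;
}

int main(void)
{
	struct td tds[2] = {
		{ TD_DIRTY, false },	/* becomes TD_CLEARED */
		{ TD_DIRTY, true },	/* becomes TD_CLEARING_CACHE */
	};
	struct td *cached = invalidate(tds, 2);

	printf("cached td index: %ld\n", cached ? (long)(cached - tds) : -1L);
	return 0;
}
```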
@@ -912,81 +958,32 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 				    union xhci_trb *trb)
 {
 	unsigned int ep_index;
-	struct xhci_ring *ep_ring;
 	struct xhci_virt_ep *ep;
-	struct xhci_td *cur_td = NULL;
-	struct xhci_td *last_unlinked_td;
 	struct xhci_ep_ctx *ep_ctx;
-	struct xhci_virt_device *vdev;
-	struct xhci_dequeue_state deq_state;
 
 	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
 		if (!xhci->devs[slot_id])
-			xhci_warn(xhci, "Stop endpoint command "
-				"completion for disabled slot %u\n",
-				slot_id);
+			xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
+				  slot_id);
 		return;
 	}
 
-	memset(&deq_state, 0, sizeof(deq_state));
 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
-
 	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
 	if (!ep)
 		return;
 
-	vdev = ep->vdev;
-	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
-	trace_xhci_handle_cmd_stop_ep(ep_ctx);
-
-	last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
-					   struct xhci_td, cancelled_td_list);
-
-	if (list_empty(&ep->cancelled_td_list)) {
-		xhci_stop_watchdog_timer_in_irq(xhci, ep);
-		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
-		return;
-	}
+	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
 
-	xhci_invalidate_cancelled_tds(ep, &deq_state);
+	trace_xhci_handle_cmd_stop_ep(ep_ctx);
 
+	/* will queue a set TR deq if stopped on a cancelled, uncleared TD */
+	xhci_invalidate_cancelled_tds(ep);
 	xhci_stop_watchdog_timer_in_irq(xhci, ep);
 
-	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
-	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
-					     &deq_state);
-		xhci_ring_cmd_db(xhci);
-	} else {
-		/* Otherwise ring the doorbell(s) to restart queued transfers */
-		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
-	}
-
-	/*
-	 * Drop the lock and complete the URBs in the cancelled TD list.
-	 * New TDs to be cancelled might be added to the end of the list before
-	 * we can complete all the URBs for the TDs we already unlinked.
-	 * So stop when we've completed the URB for the last TD we unlinked.
-	 */
-	do {
-		cur_td = list_first_entry(&ep->cancelled_td_list,
-					  struct xhci_td, cancelled_td_list);
-		list_del_init(&cur_td->cancelled_td_list);
-
-		/* Doesn't matter what we pass for status, since the core will
-		 * just overwrite it (because the URB has been unlinked).
-		 */
-		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
-		xhci_td_cleanup(xhci, cur_td, ep_ring, 0);
-
-		/* Stop processing the cancelled list if the watchdog timer is
-		 * running.
-		 */
-		if (xhci->xhc_state & XHCI_STATE_DYING)
-			return;
-	} while (cur_td != last_unlinked_td);
-
-	/* Return to the event handler with xhci->lock re-acquired */
+	/* Ring the doorbell(s) to restart any queued transfers */
+	xhci_giveback_invalidated_tds(ep);
+	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 }
 
 static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
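Compared with the removed version, the stop-endpoint completion handler is now a straight sequence: the dequeue-state bookkeeping and the do/while giveback loop are gone. A toy program showing just that ordering, with stub functions that are not kernel API:

```c
#include <stdio.h>

/* Stubs standing in for the xHCI primitives; none of these are kernel
 * API, they only illustrate the order of operations in the reworked
 * xhci_handle_cmd_stop_ep(). */
static void invalidate_cancelled_tds(void)	{ puts("no-op TDs, maybe queue Set TR Deq"); }
static void stop_watchdog_timer(void)		{ puts("stop watchdog"); }
static void giveback_invalidated_tds(void)	{ puts("give back TD_CLEARED TDs"); }
static void ring_doorbell(void)			{ puts("restart ring"); }

int main(void)
{
	/* Invalidation runs first and may park a TD in TD_CLEARING_CACHE;
	 * such a TD is skipped by the giveback pass here and completed
	 * later, from the Set TR Deq completion handler instead. */
	invalidate_cancelled_tds();
	stop_watchdog_timer();
	giveback_invalidated_tds();
	ring_doorbell();
	return 0;
}
```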
@@ -1202,6 +1199,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
 	struct xhci_virt_ep *ep;
 	struct xhci_ep_ctx *ep_ctx;
 	struct xhci_slot_ctx *slot_ctx;
+	struct xhci_td *td, *tmp_td;
 
 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
@@ -1279,7 +1277,15 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
 				  ep->queued_deq_seg, ep->queued_deq_ptr);
 		}
 	}
-
+	/* HW cached TDs cleared from cache, give them back */
+	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
+				 cancelled_td_list) {
+		ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
+		if (td->cancel_status == TD_CLEARING_CACHE) {
+			td->cancel_status = TD_CLEARED;
+			xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
+		}
+	}
 cleanup:
 	ep->ep_state &= ~SET_DEQ_PENDING;
 	ep->queued_deq_seg = NULL;
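This loop is the second half of the handoff started in xhci_invalidate_cancelled_tds(): TDs parked in TD_CLEARING_CACHE are completed only once the controller acknowledges the new dequeue pointer, and with the status saved on the TD rather than the fixed 0 used for already-unlinked URBs. A small model of the filter, reusing the assumed enum from earlier; the struct fields are illustrative and -ECONNRESET is spelled numerically to stay self-contained:

```c
#include <stdio.h>

enum td_cancel_status { TD_DIRTY, TD_HALTED, TD_CLEARING_CACHE, TD_CLEARED };

struct td {
	enum td_cancel_status cancel_status;
	int status;	/* saved completion status, like td->status above */
};

/* Model of the loop added to xhci_handle_cmd_set_deq(): only TDs that
 * a prior invalidation parked in TD_CLEARING_CACHE are flipped to
 * TD_CLEARED and given back, with their saved status. */
static void handle_set_deq_completion(struct td *tds, int n)
{
	for (int i = 0; i < n; i++) {
		if (tds[i].cancel_status != TD_CLEARING_CACHE)
			continue;
		tds[i].cancel_status = TD_CLEARED;
		printf("td %d given back, status %d\n", i, tds[i].status);
	}
}

int main(void)
{
	struct td tds[2] = {
		{ TD_CLEARED, 0 },		/* given back earlier */
		{ TD_CLEARING_CACHE, -104 },	/* -ECONNRESET, numerically */
	};

	handle_set_deq_completion(tds, 2);	/* completes only td 1 */
	return 0;
}
```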
@@ -1309,27 +1315,15 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
 		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
 			       "Ignoring reset ep completion code of %u", cmd_comp_code);
 
-	/* HW with the reset endpoint quirk needs to have a configure endpoint
-	 * command complete before the endpoint can be used.  Queue that here
-	 * because the HW can't handle two commands being queued in a row.
-	 */
-	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
-		struct xhci_command *command;
+	/* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */
+	xhci_invalidate_cancelled_tds(ep);
 
-		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
-		if (!command)
-			return;
+	if (xhci->quirks & XHCI_RESET_EP_QUIRK)
+		xhci_dbg(xhci, "Note: Removed workaround to queue config ep for this hw");
+	/* Clear our internal halted state */
+	ep->ep_state &= ~EP_HALTED;
 
-		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
-			       "Queueing configure endpoint command");
-		xhci_queue_configure_endpoint(xhci, command,
-					      xhci->devs[slot_id]->in_ctx->dma, slot_id,
-					      false);
-		xhci_ring_cmd_db(xhci);
-	} else {
-		/* Clear our internal halted state */
-		ep->ep_state &= ~EP_HALTED;
-	}
+	xhci_giveback_invalidated_tds(ep);
 
 	/* if this was a soft reset, then restart */
 	if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
@@ -2070,7 +2064,9 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 			xhci_clear_hub_tt_buffer(xhci, td, ep);
 
 		xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
-					EP_HARD_RESET);
+					    EP_HARD_RESET);
+
+		return 0; /* xhci_handle_halted_endpoint marked td cancelled */
 	} else {
 		/* Update ring dequeue pointer */
 		ep_ring->dequeue = td->last_trb;
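The added early return closes a double-completion hole: xhci_handle_halted_endpoint() has just moved the TD onto the cancelled list, so the dequeue-advance and giveback in the else branch must not also run for it. A compressed model of the guard, with hypothetical types:

```c
#include <stdbool.h>
#include <stdio.h>

struct td {
	bool on_cancelled_list;
	bool completed;
};

/* Model of the finish_td() change: once the halted-endpoint path has
 * queued the TD on the cancelled list, finish_td() must return early,
 * otherwise the normal path would complete the same TD a second time. */
static int finish_td(struct td *td, bool ep_halted)
{
	if (ep_halted) {
		td->on_cancelled_list = true;	/* reset-ep flow owns it now */
		return 0;			/* do NOT complete it here */
	}
	td->completed = true;	/* normal path: advance dequeue, give back */
	return 0;
}

int main(void)
{
	struct td td = { false, false };

	finish_td(&td, true);
	printf("completed=%d cancelled=%d\n", td.completed, td.on_cancelled_list);
	return 0;
}
```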