@@ -415,8 +415,9 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
415415 struct at_xdmac_desc * desc = txd_to_at_desc (tx );
416416 struct at_xdmac_chan * atchan = to_at_xdmac_chan (tx -> chan );
417417 dma_cookie_t cookie ;
418+ unsigned long irqflags ;
418419
419- spin_lock_bh (& atchan -> lock );
420+ spin_lock_irqsave (& atchan -> lock , irqflags );
420421 cookie = dma_cookie_assign (tx );
421422
422423 dev_vdbg (chan2dev (tx -> chan ), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n" ,
@@ -425,7 +426,7 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
425426 if (list_is_singular (& atchan -> xfers_list ))
426427 at_xdmac_start_xfer (atchan , desc );
427428
428- spin_unlock_bh (& atchan -> lock );
429+ spin_unlock_irqrestore (& atchan -> lock , irqflags );
429430 return cookie ;
430431}
431432
@@ -563,6 +564,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
563564 struct scatterlist * sg ;
564565 int i ;
565566 unsigned int xfer_size = 0 ;
567+ unsigned long irqflags ;
568+ struct dma_async_tx_descriptor * ret = NULL ;
566569
567570 if (!sgl )
568571 return NULL ;
@@ -578,7 +581,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
578581 flags );
579582
580583 /* Protect dma_sconfig field that can be modified by set_slave_conf. */
581- spin_lock_bh (& atchan -> lock );
584+ spin_lock_irqsave (& atchan -> lock , irqflags );
582585
583586 /* Prepare descriptors. */
584587 for_each_sg (sgl , sg , sg_len , i ) {
@@ -589,8 +592,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
589592 mem = sg_dma_address (sg );
590593 if (unlikely (!len )) {
591594 dev_err (chan2dev (chan ), "sg data length is zero\n" );
592- spin_unlock_bh (& atchan -> lock );
593- return NULL ;
595+ goto spin_unlock ;
594596 }
595597 dev_dbg (chan2dev (chan ), "%s: * sg%d len=%u, mem=0x%08x\n" ,
596598 __func__ , i , len , mem );
@@ -600,8 +602,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
600602 dev_err (chan2dev (chan ), "can't get descriptor\n" );
601603 if (first )
602604 list_splice_init (& first -> descs_list , & atchan -> free_descs_list );
603- spin_unlock_bh (& atchan -> lock );
604- return NULL ;
605+ goto spin_unlock ;
605606 }
606607
607608 /* Linked list descriptor setup. */
@@ -645,13 +646,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
645646 xfer_size += len ;
646647 }
647648
648- spin_unlock_bh (& atchan -> lock );
649649
650650 first -> tx_dma_desc .flags = flags ;
651651 first -> xfer_size = xfer_size ;
652652 first -> direction = direction ;
653+ ret = & first -> tx_dma_desc ;
653654
654- return & first -> tx_dma_desc ;
655+ spin_unlock :
656+ spin_unlock_irqrestore (& atchan -> lock , irqflags );
657+ return ret ;
655658}
656659
657660static struct dma_async_tx_descriptor *
@@ -664,6 +667,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
664667 struct at_xdmac_desc * first = NULL , * prev = NULL ;
665668 unsigned int periods = buf_len / period_len ;
666669 int i ;
670+ unsigned long irqflags ;
667671
668672 dev_dbg (chan2dev (chan ), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n" ,
669673 __func__ , & buf_addr , buf_len , period_len ,
@@ -682,16 +686,16 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
682686 for (i = 0 ; i < periods ; i ++ ) {
683687 struct at_xdmac_desc * desc = NULL ;
684688
685- spin_lock_bh (& atchan -> lock );
689+ spin_lock_irqsave (& atchan -> lock , irqflags );
686690 desc = at_xdmac_get_desc (atchan );
687691 if (!desc ) {
688692 dev_err (chan2dev (chan ), "can't get descriptor\n" );
689693 if (first )
690694 list_splice_init (& first -> descs_list , & atchan -> free_descs_list );
691- spin_unlock_bh (& atchan -> lock );
695+ spin_unlock_irqrestore (& atchan -> lock , irqflags );
692696 return NULL ;
693697 }
694- spin_unlock_bh (& atchan -> lock );
698+ spin_unlock_irqrestore (& atchan -> lock , irqflags );
695699 dev_dbg (chan2dev (chan ),
696700 "%s: desc=0x%p, tx_dma_desc.phys=%pad\n" ,
697701 __func__ , desc , & desc -> tx_dma_desc .phys );
@@ -766,6 +770,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
766770 | AT_XDMAC_CC_SIF (0 )
767771 | AT_XDMAC_CC_MBSIZE_SIXTEEN
768772 | AT_XDMAC_CC_TYPE_MEM_TRAN ;
773+ unsigned long irqflags ;
769774
770775 dev_dbg (chan2dev (chan ), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n" ,
771776 __func__ , & src , & dest , len , flags );
@@ -798,9 +803,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
798803
799804 dev_dbg (chan2dev (chan ), "%s: remaining_size=%zu\n" , __func__ , remaining_size );
800805
801- spin_lock_bh (& atchan -> lock );
806+ spin_lock_irqsave (& atchan -> lock , irqflags );
802807 desc = at_xdmac_get_desc (atchan );
803- spin_unlock_bh (& atchan -> lock );
808+ spin_unlock_irqrestore (& atchan -> lock , irqflags );
804809 if (!desc ) {
805810 dev_err (chan2dev (chan ), "can't get descriptor\n" );
806811 if (first )
@@ -886,6 +891,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
886891 int residue ;
887892 u32 cur_nda , mask , value ;
888893 u8 dwidth = 0 ;
894+ unsigned long flags ;
889895
890896 ret = dma_cookie_status (chan , cookie , txstate );
891897 if (ret == DMA_COMPLETE )
@@ -894,7 +900,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
894900 if (!txstate )
895901 return ret ;
896902
897- spin_lock_bh (& atchan -> lock );
903+ spin_lock_irqsave (& atchan -> lock , flags );
898904
899905 desc = list_first_entry (& atchan -> xfers_list , struct at_xdmac_desc , xfer_node );
900906
@@ -904,8 +910,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
904910 */
905911 if (!desc -> active_xfer ) {
906912 dma_set_residue (txstate , desc -> xfer_size );
907- spin_unlock_bh (& atchan -> lock );
908- return ret ;
913+ goto spin_unlock ;
909914 }
910915
911916 residue = desc -> xfer_size ;
@@ -936,14 +941,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
936941 }
937942 residue += at_xdmac_chan_read (atchan , AT_XDMAC_CUBC ) << dwidth ;
938943
939- spin_unlock_bh (& atchan -> lock );
940-
941944 dma_set_residue (txstate , residue );
942945
943946 dev_dbg (chan2dev (chan ),
944947 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n" ,
945948 __func__ , desc , & desc -> tx_dma_desc .phys , ret , cookie , residue );
946949
950+ spin_unlock :
951+ spin_unlock_irqrestore (& atchan -> lock , flags );
947952 return ret ;
948953}
949954
@@ -964,8 +969,9 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
964969static void at_xdmac_advance_work (struct at_xdmac_chan * atchan )
965970{
966971 struct at_xdmac_desc * desc ;
972+ unsigned long flags ;
967973
968- spin_lock_bh (& atchan -> lock );
974+ spin_lock_irqsave (& atchan -> lock , flags );
969975
970976 /*
971977 * If channel is enabled, do nothing, advance_work will be triggered
@@ -980,7 +986,7 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
980986 at_xdmac_start_xfer (atchan , desc );
981987 }
982988
983- spin_unlock_bh (& atchan -> lock );
989+ spin_unlock_irqrestore (& atchan -> lock , flags );
984990}
985991
986992static void at_xdmac_handle_cyclic (struct at_xdmac_chan * atchan )
@@ -1116,12 +1122,13 @@ static int at_xdmac_device_config(struct dma_chan *chan,
11161122{
11171123 struct at_xdmac_chan * atchan = to_at_xdmac_chan (chan );
11181124 int ret ;
1125+ unsigned long flags ;
11191126
11201127 dev_dbg (chan2dev (chan ), "%s\n" , __func__ );
11211128
1122- spin_lock_bh (& atchan -> lock );
1129+ spin_lock_irqsave (& atchan -> lock , flags );
11231130 ret = at_xdmac_set_slave_config (chan , config );
1124- spin_unlock_bh (& atchan -> lock );
1131+ spin_unlock_irqrestore (& atchan -> lock , flags );
11251132
11261133 return ret ;
11271134}
@@ -1130,18 +1137,19 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
11301137{
11311138 struct at_xdmac_chan * atchan = to_at_xdmac_chan (chan );
11321139 struct at_xdmac * atxdmac = to_at_xdmac (atchan -> chan .device );
1140+ unsigned long flags ;
11331141
11341142 dev_dbg (chan2dev (chan ), "%s\n" , __func__ );
11351143
11361144 if (test_and_set_bit (AT_XDMAC_CHAN_IS_PAUSED , & atchan -> status ))
11371145 return 0 ;
11381146
1139- spin_lock_bh (& atchan -> lock );
1147+ spin_lock_irqsave (& atchan -> lock , flags );
11401148 at_xdmac_write (atxdmac , AT_XDMAC_GRWS , atchan -> mask );
11411149 while (at_xdmac_chan_read (atchan , AT_XDMAC_CC )
11421150 & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP ))
11431151 cpu_relax ();
1144- spin_unlock_bh (& atchan -> lock );
1152+ spin_unlock_irqrestore (& atchan -> lock , flags );
11451153
11461154 return 0 ;
11471155}
@@ -1150,18 +1158,19 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
11501158{
11511159 struct at_xdmac_chan * atchan = to_at_xdmac_chan (chan );
11521160 struct at_xdmac * atxdmac = to_at_xdmac (atchan -> chan .device );
1161+ unsigned long flags ;
11531162
11541163 dev_dbg (chan2dev (chan ), "%s\n" , __func__ );
11551164
1156- spin_lock_bh (& atchan -> lock );
1165+ spin_lock_irqsave (& atchan -> lock , flags );
11571166 if (!at_xdmac_chan_is_paused (atchan )) {
1158- spin_unlock_bh (& atchan -> lock );
1167+ spin_unlock_irqrestore (& atchan -> lock , flags );
11591168 return 0 ;
11601169 }
11611170
11621171 at_xdmac_write (atxdmac , AT_XDMAC_GRWR , atchan -> mask );
11631172 clear_bit (AT_XDMAC_CHAN_IS_PAUSED , & atchan -> status );
1164- spin_unlock_bh (& atchan -> lock );
1173+ spin_unlock_irqrestore (& atchan -> lock , flags );
11651174
11661175 return 0 ;
11671176}
@@ -1171,10 +1180,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
11711180 struct at_xdmac_desc * desc , * _desc ;
11721181 struct at_xdmac_chan * atchan = to_at_xdmac_chan (chan );
11731182 struct at_xdmac * atxdmac = to_at_xdmac (atchan -> chan .device );
1183+ unsigned long flags ;
11741184
11751185 dev_dbg (chan2dev (chan ), "%s\n" , __func__ );
11761186
1177- spin_lock_bh (& atchan -> lock );
1187+ spin_lock_irqsave (& atchan -> lock , flags );
11781188 at_xdmac_write (atxdmac , AT_XDMAC_GD , atchan -> mask );
11791189 while (at_xdmac_read (atxdmac , AT_XDMAC_GS ) & atchan -> mask )
11801190 cpu_relax ();
@@ -1184,7 +1194,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
11841194 at_xdmac_remove_xfer (atchan , desc );
11851195
11861196 clear_bit (AT_XDMAC_CHAN_IS_CYCLIC , & atchan -> status );
1187- spin_unlock_bh (& atchan -> lock );
1197+ spin_unlock_irqrestore (& atchan -> lock , flags );
11881198
11891199 return 0 ;
11901200}
@@ -1194,8 +1204,9 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
11941204 struct at_xdmac_chan * atchan = to_at_xdmac_chan (chan );
11951205 struct at_xdmac_desc * desc ;
11961206 int i ;
1207+ unsigned long flags ;
11971208
1198- spin_lock_bh (& atchan -> lock );
1209+ spin_lock_irqsave (& atchan -> lock , flags );
11991210
12001211 if (at_xdmac_chan_is_enabled (atchan )) {
12011212 dev_err (chan2dev (chan ),
@@ -1226,7 +1237,7 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
12261237 dev_dbg (chan2dev (chan ), "%s: allocated %d descriptors\n" , __func__ , i );
12271238
12281239spin_unlock :
1229- spin_unlock_bh (& atchan -> lock );
1240+ spin_unlock_irqrestore (& atchan -> lock , flags );
12301241 return i ;
12311242}
12321243