@@ -485,17 +485,18 @@ static void m_can_clean(struct net_device *net)
 {
	struct m_can_classdev *cdev = netdev_priv(net);
 
-	if (cdev->tx_skb) {
-		u32 putidx = 0;
+	if (cdev->tx_ops) {
+		for (int i = 0; i != cdev->tx_fifo_size; ++i) {
+			if (!cdev->tx_ops[i].skb)
+				continue;
 
-		net->stats.tx_errors++;
-		if (cdev->version > 30)
-			putidx = FIELD_GET(TXFQS_TFQPI_MASK,
-					   m_can_read(cdev, M_CAN_TXFQS));
-
-		can_free_echo_skb(cdev->net, putidx, NULL);
-		cdev->tx_skb = NULL;
+			net->stats.tx_errors++;
+			cdev->tx_ops[i].skb = NULL;
+		}
 	}
+
+	for (int i = 0; i != cdev->can.echo_skb_max; ++i)
+		can_free_echo_skb(cdev->net, i, NULL);
 }
 
 /* For peripherals, pass skb to rx-offload, which will push skb from
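The per-slot state walked above lives in cdev->tx_ops, an array whose element type is declared in m_can.h and therefore outside this excerpt. A minimal sketch of that element, inferred purely from how its fields are used in the hunks here (not the header's verbatim declaration):

	/* Per-slot TX context, as implied by the uses of op->cdev,
	 * op->work and op->skb below; layout and comments are inferred.
	 */
	struct m_can_tx_op {
		struct m_can_classdev *cdev;	/* back-pointer for the work handler */
		struct work_struct work;	/* one work item per TX FIFO slot */
		struct sk_buff *skb;		/* skb owned by the slot; NULL when free */
	};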
@@ -1685,8 +1686,9 @@ static int m_can_close(struct net_device *dev)
	m_can_clk_stop(cdev);
	free_irq(dev->irq, dev);
 
+	m_can_clean(dev);
+
	if (cdev->is_peripheral) {
-		cdev->tx_skb = NULL;
		destroy_workqueue(cdev->tx_wq);
		cdev->tx_wq = NULL;
		can_rx_offload_disable(&cdev->offload);
@@ -1713,20 +1715,18 @@ static int m_can_next_echo_skb_occupied(struct net_device *dev, u32 putidx)
	return !!cdev->can.echo_skb[next_idx];
 }
 
-static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
+static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev,
+				    struct sk_buff *skb)
 {
-	struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
+	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	u8 len_padded = DIV_ROUND_UP(cf->len, 4);
	struct m_can_fifo_element fifo_element;
	struct net_device *dev = cdev->net;
-	struct sk_buff *skb = cdev->tx_skb;
	u32 cccr, fdflags;
	u32 txfqs;
	int err;
	u32 putidx;
 
-	cdev->tx_skb = NULL;
-
	/* Generate ID field for TX buffer Element */
	/* Common to all supported M_CAN versions */
	if (cf->can_id & CAN_EFF_FLAG) {
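With the skb passed in explicitly, the handler no longer consumes and clears a shared cdev->tx_skb, so there is no hidden single-slot state left to race on. Condensed from the hunks below, the two call patterns become (illustrative excerpt only):

	/* peripheral path: invoked from the per-slot work item */
	m_can_tx_handler(cdev, skb);

	/* non-peripheral path: called synchronously from ndo_start_xmit */
	return m_can_tx_handler(cdev, skb);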
@@ -1850,10 +1850,31 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
 
 static void m_can_tx_work_queue(struct work_struct *ws)
 {
-	struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
-						   tx_work);
+	struct m_can_tx_op *op = container_of(ws, struct m_can_tx_op, work);
+	struct m_can_classdev *cdev = op->cdev;
+	struct sk_buff *skb = op->skb;
 
-	m_can_tx_handler(cdev);
+	op->skb = NULL;
+	m_can_tx_handler(cdev, skb);
+}
+
+static void m_can_tx_queue_skb(struct m_can_classdev *cdev, struct sk_buff *skb)
+{
+	cdev->tx_ops[cdev->next_tx_op].skb = skb;
+	queue_work(cdev->tx_wq, &cdev->tx_ops[cdev->next_tx_op].work);
+
+	++cdev->next_tx_op;
+	if (cdev->next_tx_op >= cdev->tx_fifo_size)
+		cdev->next_tx_op = 0;
+}
+
+static netdev_tx_t m_can_start_peripheral_xmit(struct m_can_classdev *cdev,
+					       struct sk_buff *skb)
+{
+	netif_stop_queue(cdev->net);
+	m_can_tx_queue_skb(cdev, skb);
+
+	return NETDEV_TX_OK;
+}
 
 static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
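m_can_tx_queue_skb() parks each skb in the next slot and advances next_tx_op round-robin; the open-coded increment-and-wrap is equivalent to a modulo step, restated here only for clarity:

	cdev->next_tx_op = (cdev->next_tx_op + 1) % cdev->tx_fifo_size;

Because m_can_start_peripheral_xmit() stops the netdev queue before every enqueue, the TX-completion side (outside this excerpt) is what must wake the queue again once a slot frees up.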
@@ -1864,30 +1885,15 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
	if (can_dev_dropped_skb(dev, skb))
		return NETDEV_TX_OK;
 
-	if (cdev->is_peripheral) {
-		if (cdev->tx_skb) {
-			netdev_err(dev, "hard_xmit called while tx busy\n");
-			return NETDEV_TX_BUSY;
-		}
-
-		if (cdev->can.state == CAN_STATE_BUS_OFF) {
-			m_can_clean(dev);
-		} else {
-			/* Need to stop the queue to avoid numerous requests
-			 * from being sent. Suggested improvement is to create
-			 * a queueing mechanism that will queue the skbs and
-			 * process them in order.
-			 */
-			cdev->tx_skb = skb;
-			netif_stop_queue(cdev->net);
-			queue_work(cdev->tx_wq, &cdev->tx_work);
-		}
-	} else {
-		cdev->tx_skb = skb;
-		return m_can_tx_handler(cdev);
+	if (cdev->can.state == CAN_STATE_BUS_OFF) {
+		m_can_clean(cdev->net);
+		return NETDEV_TX_OK;
	}
 
-	return NETDEV_TX_OK;
+	if (cdev->is_peripheral)
+		return m_can_start_peripheral_xmit(cdev, skb);
+	else
+		return m_can_tx_handler(cdev, skb);
 }
 
 static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)
@@ -1927,15 +1933,17 @@ static int m_can_open(struct net_device *dev)
 
	/* register interrupt handler */
	if (cdev->is_peripheral) {
-		cdev->tx_skb = NULL;
-		cdev->tx_wq = alloc_workqueue("mcan_wq",
-					      WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
+		cdev->tx_wq = alloc_ordered_workqueue("mcan_wq",
+						      WQ_FREEZABLE | WQ_MEM_RECLAIM);
		if (!cdev->tx_wq) {
			err = -ENOMEM;
			goto out_wq_fail;
		}
 
-		INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
+		for (int i = 0; i != cdev->tx_fifo_size; ++i) {
+			cdev->tx_ops[i].cdev = cdev;
+			INIT_WORK(&cdev->tx_ops[i].work, m_can_tx_work_queue);
+		}
 
		err = request_threaded_irq(dev->irq, NULL, m_can_isr,
					   IRQF_ONESHOT,
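Switching to alloc_ordered_workqueue() matters here: an ordered workqueue executes at most one work item at a time, in queueing order, so the per-slot work items introduced above cannot reorder frames on the bus. A self-contained sketch of that ordering guarantee (module and names are hypothetical, not part of the driver):

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct work_struct demo_a, demo_b;

	static void demo_fn(struct work_struct *work)
	{
		pr_info("demo: %s done\n", work == &demo_a ? "a" : "b");
	}

	static int __init demo_init(void)
	{
		struct workqueue_struct *wq;

		wq = alloc_ordered_workqueue("demo_wq",
					     WQ_FREEZABLE | WQ_MEM_RECLAIM);
		if (!wq)
			return -ENOMEM;

		INIT_WORK(&demo_a, demo_fn);
		INIT_WORK(&demo_b, demo_fn);
		queue_work(wq, &demo_a);	/* always executes first */
		queue_work(wq, &demo_b);	/* runs only after demo_a returns */

		destroy_workqueue(wq);		/* drains both items first */
		return 0;
	}

	static void __exit demo_exit(void) { }

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");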
@@ -2228,6 +2236,19 @@ int m_can_class_register(struct m_can_classdev *cdev)
 {
	int ret;
 
+	cdev->tx_fifo_size = max(1, min(cdev->mcfg[MRAM_TXB].num,
+					cdev->mcfg[MRAM_TXE].num));
+	if (cdev->is_peripheral) {
+		cdev->tx_ops =
+			devm_kzalloc(cdev->dev,
+				     cdev->tx_fifo_size * sizeof(*cdev->tx_ops),
+				     GFP_KERNEL);
+		if (!cdev->tx_ops) {
+			dev_err(cdev->dev, "Failed to allocate tx_ops for workqueue\n");
+			return -ENOMEM;
+		}
+	}
+
	if (cdev->pm_clock_support) {
		ret = m_can_clk_start(cdev);
		if (ret)
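tx_fifo_size ends up as the smaller of the TX buffer count (MRAM_TXB) and the TX event FIFO count (MRAM_TXE), since every in-flight frame occupies one element of each; the max(1, ...) keeps at least one slot if a FIFO is configured with zero elements. A worked instance with assumed Message RAM numbers (purely hypothetical):

	/* assume mcfg[MRAM_TXB].num == 16 and mcfg[MRAM_TXE].num == 8 */
	cdev->tx_fifo_size = max(1, min(16, 8));	/* 8 slots, so 8 tx_ops */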