@@ -1033,21 +1033,22 @@ static void init_ring(struct net_device *dev)
 
 	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
+		dma_addr_t addr;
+
 		struct sk_buff *skb =
 			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
 		np->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
-		np->rx_ring[i].frag.addr = cpu_to_le32(
-			dma_map_single(&np->pci_dev->dev, skb->data,
-				np->rx_buf_sz, DMA_FROM_DEVICE));
-		if (dma_mapping_error(&np->pci_dev->dev,
-					np->rx_ring[i].frag.addr)) {
+		addr = dma_map_single(&np->pci_dev->dev, skb->data,
+				      np->rx_buf_sz, DMA_FROM_DEVICE);
+		if (dma_mapping_error(&np->pci_dev->dev, addr)) {
 			dev_kfree_skb(skb);
 			np->rx_skbuff[i] = NULL;
 			break;
 		}
+		np->rx_ring[i].frag.addr = cpu_to_le32(addr);
 		np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag);
 	}
 	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -1088,20 +1089,22 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
 	struct netdev_desc *txdesc;
+	dma_addr_t addr;
 	unsigned entry;
 
 	/* Calculate the next Tx descriptor entry. */
 	entry = np->cur_tx % TX_RING_SIZE;
 	np->tx_skbuff[entry] = skb;
 	txdesc = &np->tx_ring[entry];
 
+	addr = dma_map_single(&np->pci_dev->dev, skb->data, skb->len,
+			      DMA_TO_DEVICE);
+	if (dma_mapping_error(&np->pci_dev->dev, addr))
+		goto drop_frame;
+
 	txdesc->next_desc = 0;
 	txdesc->status = cpu_to_le32((entry << 2) | DisableAlign);
-	txdesc->frag.addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
-			skb->data, skb->len, DMA_TO_DEVICE));
-	if (dma_mapping_error(&np->pci_dev->dev,
-			txdesc->frag.addr))
-		goto drop_frame;
+	txdesc->frag.addr = cpu_to_le32(addr);
 	txdesc->frag.length = cpu_to_le32(skb->len | LastFrag);
 
 	/* Increment cur_tx before tasklet_schedule() */
@@ -1419,22 +1422,24 @@ static void refill_rx (struct net_device *dev)
 	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
 		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
 		struct sk_buff *skb;
+		dma_addr_t addr;
+
 		entry = np->dirty_rx % RX_RING_SIZE;
 		if (np->rx_skbuff[entry] == NULL) {
 			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
 			np->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;		/* Better luck next round. */
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
-			np->rx_ring[entry].frag.addr = cpu_to_le32(
-				dma_map_single(&np->pci_dev->dev, skb->data,
-					np->rx_buf_sz, DMA_FROM_DEVICE));
-			if (dma_mapping_error(&np->pci_dev->dev,
-				    np->rx_ring[entry].frag.addr)) {
+			addr = dma_map_single(&np->pci_dev->dev, skb->data,
+					      np->rx_buf_sz, DMA_FROM_DEVICE);
+			if (dma_mapping_error(&np->pci_dev->dev, addr)) {
 				dev_kfree_skb_irq(skb);
 				np->rx_skbuff[entry] = NULL;
 				break;
 			}
+
+			np->rx_ring[entry].frag.addr = cpu_to_le32(addr);
 		}
 		/* Perhaps we need not reset this field. */
 		np->rx_ring[entry].frag.length =
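
All three hunks apply the same pattern: keep the result of dma_map_single() in a plain dma_addr_t, call dma_mapping_error() on that raw handle, and only write the cpu_to_le32() conversion into the little-endian descriptor field once the mapping is known to be good. The removed lines checked the already-converted __le32 descriptor field instead, which sparse would flag and which stops being a faithful copy of the handle wherever cpu_to_le32() is not a no-op or dma_addr_t is wider than 32 bits. A minimal sketch of the pattern outside this driver, assuming a hypothetical descriptor type (my_desc and my_fill_desc are illustrative names, not part of this driver; the DMA calls are the standard kernel API):

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	/* Illustrative only: a made-up descriptor with a 32-bit LE address field. */
	struct my_desc {
		__le32 addr;
		__le32 length;
	};

	static int my_fill_desc(struct device *dev, struct my_desc *d,
				void *buf, size_t len)
	{
		/* Map into a native dma_addr_t first ... */
		dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		/* ... check the raw handle, not a __le32 copy of it ... */
		if (dma_mapping_error(dev, addr))
			return -ENOMEM;

		/* ... and only then commit it to the descriptor. */
		d->addr = cpu_to_le32(addr);
		d->length = cpu_to_le32(len);
		return 0;
	}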