@@ -63,6 +63,7 @@
 #include "fec.h"
 
 static void set_multicast_list(struct net_device *ndev);
+static void fec_enet_itr_coal_init(struct net_device *ndev);
 
 #define DRIVER_NAME	"fec"
 
@@ -110,6 +111,12 @@ static void set_multicast_list(struct net_device *ndev);
  * independent rings
  */
 #define FEC_QUIRK_HAS_AVB		(1 << 8)
+/* There is a TDAR race condition for multi-queue operation when the software
+ * sets TDAR and the UDMA clears TDAR simultaneously or in a small window
+ * (2-4 cycles). This causes the udma_tx and udma_tx_arbiter state machines
+ * to hang. The issue exists in the i.MX6SX ENET IP.
+ */
+#define FEC_QUIRK_ERR007885		(1 << 9)
 
 static struct platform_device_id fec_devtype[] = {
 	{
@@ -138,7 +145,7 @@ static struct platform_device_id fec_devtype[] = {
 		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
 				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
 				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
-				FEC_QUIRK_HAS_AVB,
+				FEC_QUIRK_HAS_AVB | FEC_QUIRK_ERR007885,
 	}, {
 		/* sentinel */
 	}
@@ -708,6 +715,8 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
 	struct tso_t tso;
 	unsigned int index = 0;
 	int ret;
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 
 	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
 		dev_kfree_skb_any(skb);
@@ -769,7 +778,12 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
 	txq->cur_tx = bdp;
 
 	/* Trigger transmission start */
-	writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
+	if (!(id_entry->driver_data & FEC_QUIRK_ERR007885) ||
+	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
+	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
+	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
+	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
+		writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
 
 	return 0;
 
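
A note on the hunk above, with a minimal stand-alone sketch of its decision logic. As I read the erratum comment, skipping the write while TDAR is still set is safe because the pending trigger will still cause the new descriptors to be fetched, whereas writing in that state risks colliding with the hardware clear inside the 2-4 cycle window; if any of the four back-to-back reads sees TDAR already cleared, the write is issued. The read_tdar() stub below is hypothetical and merely stands in for readl(fep->hwp + FEC_X_DES_ACTIVE(queue)); this is an illustration, not part of the patch.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for readl(fep->hwp + FEC_X_DES_ACTIVE(queue));
 * a real driver reads the hardware register here.
 */
static uint32_t read_tdar(void)
{
	return 0;
}

/* Return true when writing TDAR to kick the transmit DMA should proceed.
 * Without the ERR007885 quirk the write is always issued; with it, the
 * write is issued only if one of up to four back-to-back reads sees TDAR
 * already cleared by the uDMA.
 */
static bool tdar_write_is_safe(bool has_err007885_quirk)
{
	if (!has_err007885_quirk)
		return true;

	return !read_tdar() || !read_tdar() || !read_tdar() || !read_tdar();
}
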
@@ -1095,6 +1109,10 @@ fec_restart(struct net_device *ndev)
 
 	/* Enable interrupts we wish to service */
 	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+
+	/* Init the interrupt coalescing */
+	fec_enet_itr_coal_init(ndev);
+
 }
 
 static void
@@ -2234,12 +2252,141 @@ static int fec_enet_nway_reset(struct net_device *dev)
 	return genphy_restart_aneg(phydev);
 }
 
+/* The ITR clock source is the ENET system clock (clk_ahb).
+ * One TCTT tick is 64 clock cycles (cycle_ns * 64 ns), so the
+ * ICTT value is X us / (cycle_ns * 64).
+ */
+static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	return us * (fep->itr_clk_rate / 64000) / 1000;
+}
+
+/* Set threshold for interrupt coalescing */
+static void fec_enet_itr_coal_set(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
+	int rx_itr, tx_itr;
+
+	if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+		return;
+
+	/* Must be greater than zero to avoid unpredictable behavior */
+	if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
+	    !fep->tx_time_itr || !fep->tx_pkts_itr)
+		return;
+
+	/* Select enet system clock as Interrupt Coalescing
+	 * timer Clock Source
+	 */
+	rx_itr = FEC_ITR_CLK_SEL;
+	tx_itr = FEC_ITR_CLK_SEL;
+
+	/* set ICFT and ICTT */
+	rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
+	rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
+	tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
+	tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
+
+	rx_itr |= FEC_ITR_EN;
+	tx_itr |= FEC_ITR_EN;
+
+	writel(tx_itr, fep->hwp + FEC_TXIC0);
+	writel(rx_itr, fep->hwp + FEC_RXIC0);
+	writel(tx_itr, fep->hwp + FEC_TXIC1);
+	writel(rx_itr, fep->hwp + FEC_RXIC1);
+	writel(tx_itr, fep->hwp + FEC_TXIC2);
+	writel(rx_itr, fep->hwp + FEC_RXIC2);
+}
+
+static int
+fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
+
+	if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+		return -EOPNOTSUPP;
+
+	ec->rx_coalesce_usecs = fep->rx_time_itr;
+	ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
+
+	ec->tx_coalesce_usecs = fep->tx_time_itr;
+	ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
+
+	return 0;
+}
+
+static int
+fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
+
+	unsigned int cycle;
+
+	if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+		return -EOPNOTSUPP;
+
+	if (ec->rx_max_coalesced_frames > 255) {
+		pr_err("Rx coalesced frames exceed hardware limitation\n");
+		return -EINVAL;
+	}
+
+	if (ec->tx_max_coalesced_frames > 255) {
+		pr_err("Tx coalesced frames exceed hardware limitation\n");
+		return -EINVAL;
+	}
+
+	cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
+	if (cycle > 0xFFFF) {
+		pr_err("Rx coalesced usecs exceed hardware limitation\n");
+		return -EINVAL;
+	}
+
+	cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
+	if (cycle > 0xFFFF) {
+		pr_err("Tx coalesced usecs exceed hardware limitation\n");
+		return -EINVAL;
+	}
+
+	fep->rx_time_itr = ec->rx_coalesce_usecs;
+	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
+
+	fep->tx_time_itr = ec->tx_coalesce_usecs;
+	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
+
+	fec_enet_itr_coal_set(ndev);
+
+	return 0;
+}
+
+static void fec_enet_itr_coal_init(struct net_device *ndev)
+{
+	struct ethtool_coalesce ec;
+
+	ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
+	ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
+
+	ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
+	ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
+
+	fec_enet_set_coalesce(ndev, &ec);
+}
+
 static const struct ethtool_ops fec_enet_ethtool_ops = {
 	.get_settings		= fec_enet_get_settings,
 	.set_settings		= fec_enet_set_settings,
 	.get_drvinfo		= fec_enet_get_drvinfo,
 	.nway_reset		= fec_enet_nway_reset,
 	.get_link		= ethtool_op_get_link,
+	.get_coalesce		= fec_enet_get_coalesce,
+	.set_coalesce		= fec_enet_set_coalesce,
 #ifndef CONFIG_M5272
 	.get_pauseparam		= fec_enet_get_pauseparam,
 	.set_pauseparam		= fec_enet_set_pauseparam,
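
To make the ICTT conversion concrete, here is a small stand-alone sketch of the same arithmetic as fec_enet_us_to_itr_clock(), assuming a 66 MHz clk_ahb. The 66 MHz figure is an assumption for illustration only; in the driver the rate comes from clk_get_rate(fep->clk_ahb), stored into fep->itr_clk_rate later in this diff. One coalescing-timer tick is 64 AHB cycles, and the 0xFFFF ceiling checked in fec_enet_set_coalesce() caps the usable delay (roughly 63 ms at 66 MHz).

#include <stdio.h>

/* Mirror of fec_enet_us_to_itr_clock(): convert microseconds into
 * coalescing-timer ticks, where one tick is 64 AHB clock cycles.
 */
static unsigned int us_to_itr_ticks(unsigned long itr_clk_rate, unsigned int us)
{
	return us * (itr_clk_rate / 64000) / 1000;
}

int main(void)
{
	unsigned long ahb_hz = 66000000UL;	/* assumed 66 MHz clk_ahb */
	unsigned int us = 1000;			/* request a 1000 us coalescing delay */
	unsigned int ticks = us_to_itr_ticks(ahb_hz, us);

	/* 66 MHz / 64000 = 1031 ticks per millisecond, so 1000 us maps to
	 * 1031 ticks, well under the 0xFFFF limit enforced by the driver.
	 */
	printf("%u us -> %u ticks (max 65535)\n", us, ticks);
	return 0;
}

Once .get_coalesce and .set_coalesce are hooked into fec_enet_ethtool_ops, these parameters are driven from userspace through the standard ethtool -C options (rx-usecs, rx-frames, tx-usecs, tx-frames).
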
@@ -2890,23 +3037,23 @@ fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
 
 	/* parse the num of tx and rx queues */
 	err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
-	err |= of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
-	if (err) {
+	if (err)
 		*num_tx = 1;
+
+	err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
+	if (err)
 		*num_rx = 1;
-		return;
-	}
 
 	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
-		dev_err(&pdev->dev, "Invalidate num_tx(=%d), fail back to 1\n",
-			*num_tx);
+		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
+			 *num_tx);
 		*num_tx = 1;
 		return;
 	}
 
 	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
-		dev_err(&pdev->dev, "Invalidate num_rx(=%d), fail back to 1\n",
-			*num_rx);
+		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
+			 *num_rx);
 		*num_rx = 1;
 		return;
 	}
@@ -2924,8 +3071,8 @@ fec_probe(struct platform_device *pdev)
 	const struct of_device_id *of_id;
 	static int dev_id;
 	struct device_node *np = pdev->dev.of_node, *phy_node;
-	int num_tx_qs = 1;
-	int num_rx_qs = 1;
+	int num_tx_qs;
+	int num_rx_qs;
 
 	of_id = of_match_device(fec_dt_ids, &pdev->dev);
 	if (of_id)
@@ -3006,6 +3153,8 @@ fec_probe(struct platform_device *pdev)
 		goto failed_clk;
 	}
 
+	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
+
 	/* enet_out is optional, depends on board */
 	fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
 	if (IS_ERR(fep->clk_enet_out))