Skip to content

Commit 2d0cb84

Browse files
chelsiocudbg
authored and davem330 committed
cxgb4: add ETHOFLD hardware queue support
Add support for configuring and managing ETHOFLD hardware queues. Keep the queue count and MSI-X allocation scheme the same as NIC queues. ETHOFLD hardware queues are dynamically allocated/destroyed as TC-MQPRIO Qdisc offload is enabled/disabled on the corresponding interface, respectively.

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent b1396c2 commit 2d0cb84

File tree

9 files changed

+419
-63
lines changed

9 files changed

+419
-63
lines changed

drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -325,6 +325,9 @@ enum cudbg_qdesc_qtype {
325325
CUDBG_QTYPE_CRYPTO_FLQ,
326326
CUDBG_QTYPE_TLS_RXQ,
327327
CUDBG_QTYPE_TLS_FLQ,
328+
CUDBG_QTYPE_ETHOFLD_TXQ,
329+
CUDBG_QTYPE_ETHOFLD_RXQ,
330+
CUDBG_QTYPE_ETHOFLD_FLQ,
328331
CUDBG_QTYPE_MAX,
329332
};
330333

drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c

Lines changed: 21 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -2930,6 +2930,10 @@ void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
29302930
tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE *
29312931
MAX_RXQ_DESC_SIZE;
29322932

2933+
/* ETHOFLD TXQ, RXQ, and FLQ */
2934+
tot_entries += MAX_OFLD_QSETS * 3;
2935+
tot_size += MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
2936+
29332937
tot_size += sizeof(struct cudbg_ver_hdr) +
29342938
sizeof(struct cudbg_qdesc_info) +
29352939
sizeof(struct cudbg_qdesc_entry) * tot_entries;
@@ -3087,6 +3091,23 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
30873091
}
30883092
}
30893093

3094+
/* ETHOFLD TXQ */
3095+
if (s->eohw_txq)
3096+
for (i = 0; i < s->eoqsets; i++)
3097+
QDESC_GET_TXQ(&s->eohw_txq[i].q,
3098+
CUDBG_QTYPE_ETHOFLD_TXQ, out);
3099+
3100+
/* ETHOFLD RXQ and FLQ */
3101+
if (s->eohw_rxq) {
3102+
for (i = 0; i < s->eoqsets; i++)
3103+
QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
3104+
CUDBG_QTYPE_ETHOFLD_RXQ, out);
3105+
3106+
for (i = 0; i < s->eoqsets; i++)
3107+
QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
3108+
CUDBG_QTYPE_ETHOFLD_FLQ, out);
3109+
}
3110+
30903111
out_unlock:
30913112
mutex_unlock(&uld_mutex);
30923113

drivers/net/ethernet/chelsio/cxgb4/cxgb4.h

Lines changed: 20 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -835,6 +835,16 @@ struct sge_eosw_txq {
835835
struct tasklet_struct qresume_tsk; /* Restarts the queue */
836836
};
837837

838+
struct sge_eohw_txq {
839+
spinlock_t lock; /* Per queue lock */
840+
struct sge_txq q; /* HW Txq */
841+
struct adapter *adap; /* Backpointer to adapter */
842+
unsigned long tso; /* # of TSO requests */
843+
unsigned long tx_cso; /* # of Tx checksum offloads */
844+
unsigned long vlan_ins; /* # of Tx VLAN insertions */
845+
unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
846+
};
847+
838848
struct sge {
839849
struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
840850
struct sge_eth_txq ptptxq;
@@ -848,11 +858,16 @@ struct sge {
848858
struct sge_rspq intrq ____cacheline_aligned_in_smp;
849859
spinlock_t intrq_lock;
850860

861+
struct sge_eohw_txq *eohw_txq;
862+
struct sge_ofld_rxq *eohw_rxq;
863+
851864
u16 max_ethqsets; /* # of available Ethernet queue sets */
852865
u16 ethqsets; /* # of active Ethernet queue sets */
853866
u16 ethtxq_rover; /* Tx queue to clean up next */
854867
u16 ofldqsets; /* # of active ofld queue sets */
855868
u16 nqs_per_uld; /* # of Rx queues per ULD */
869+
u16 eoqsets; /* # of ETHOFLD queues */
870+
856871
u16 timer_val[SGE_NTIMERS];
857872
u8 counter_val[SGE_NCOUNTERS];
858873
u16 dbqtimer_tick;
@@ -1466,6 +1481,9 @@ int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
14661481
int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
14671482
struct net_device *dev, unsigned int iqid,
14681483
unsigned int uld_type);
1484+
int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
1485+
struct net_device *dev, u32 iqid);
1486+
void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq);
14691487
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
14701488
int t4_sge_init(struct adapter *adap);
14711489
void t4_sge_start(struct adapter *adap);
@@ -1995,4 +2013,6 @@ int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
19952013
void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
19962014
int cxgb_open(struct net_device *dev);
19972015
int cxgb_close(struct net_device *dev);
2016+
void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
2017+
void cxgb4_quiesce_rx(struct sge_rspq *q);
19982018
#endif /* __CXGB4_H__ */

drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c

Lines changed: 52 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -2658,18 +2658,20 @@ static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld)
26582658

26592659
static int sge_qinfo_show(struct seq_file *seq, void *v)
26602660
{
2661+
int eth_entries, ctrl_entries, eo_entries = 0;
26612662
int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 };
26622663
int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 };
26632664
int uld_txq_entries[CXGB4_TX_MAX] = { 0 };
26642665
const struct sge_uld_txq_info *utxq_info;
26652666
const struct sge_uld_rxq_info *urxq_info;
26662667
struct adapter *adap = seq->private;
26672668
int i, n, r = (uintptr_t)v - 1;
2668-
int eth_entries, ctrl_entries;
26692669
struct sge *s = &adap->sge;
26702670

26712671
eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
26722672
ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
2673+
if (adap->sge.eohw_txq)
2674+
eo_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
26732675

26742676
mutex_lock(&uld_mutex);
26752677
if (s->uld_txq_info)
@@ -2761,6 +2763,54 @@ do { \
27612763
}
27622764

27632765
r -= eth_entries;
2766+
if (r < eo_entries) {
2767+
int base_qset = r * 4;
2768+
const struct sge_ofld_rxq *rx = &s->eohw_rxq[base_qset];
2769+
const struct sge_eohw_txq *tx = &s->eohw_txq[base_qset];
2770+
2771+
n = min(4, s->eoqsets - 4 * r);
2772+
2773+
S("QType:", "ETHOFLD");
2774+
S("Interface:",
2775+
rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
2776+
T("TxQ ID:", q.cntxt_id);
2777+
T("TxQ size:", q.size);
2778+
T("TxQ inuse:", q.in_use);
2779+
T("TxQ CIDX:", q.cidx);
2780+
T("TxQ PIDX:", q.pidx);
2781+
R("RspQ ID:", rspq.abs_id);
2782+
R("RspQ size:", rspq.size);
2783+
R("RspQE size:", rspq.iqe_len);
2784+
R("RspQ CIDX:", rspq.cidx);
2785+
R("RspQ Gen:", rspq.gen);
2786+
S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
2787+
S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
2788+
R("FL ID:", fl.cntxt_id);
2789+
S3("u", "FL size:", rx->fl.size ? rx->fl.size - 8 : 0);
2790+
R("FL pend:", fl.pend_cred);
2791+
R("FL avail:", fl.avail);
2792+
R("FL PIDX:", fl.pidx);
2793+
R("FL CIDX:", fl.cidx);
2794+
RL("RxPackets:", stats.pkts);
2795+
RL("RxImm:", stats.imm);
2796+
RL("RxAN", stats.an);
2797+
RL("RxNoMem", stats.nomem);
2798+
TL("TSO:", tso);
2799+
TL("TxCSO:", tx_cso);
2800+
TL("VLANins:", vlan_ins);
2801+
TL("TxQFull:", q.stops);
2802+
TL("TxQRestarts:", q.restarts);
2803+
TL("TxMapErr:", mapping_err);
2804+
RL("FLAllocErr:", fl.alloc_failed);
2805+
RL("FLLrgAlcErr:", fl.large_alloc_failed);
2806+
RL("FLMapErr:", fl.mapping_err);
2807+
RL("FLLow:", fl.low);
2808+
RL("FLStarving:", fl.starving);
2809+
2810+
goto unlock;
2811+
}
2812+
2813+
r -= eo_entries;
27642814
if (r < uld_txq_entries[CXGB4_TX_OFLD]) {
27652815
const struct sge_uld_txq *tx;
27662816

@@ -3007,6 +3057,7 @@ static int sge_queue_entries(const struct adapter *adap)
30073057
mutex_unlock(&uld_mutex);
30083058

30093059
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
3060+
(adap->sge.eohw_txq ? DIV_ROUND_UP(adap->sge.eoqsets, 4) : 0) +
30103061
tot_uld_entries +
30113062
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
30123063
}

drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c

Lines changed: 59 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -880,6 +880,12 @@ static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
880880
return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
881881
}
882882

883+
void cxgb4_quiesce_rx(struct sge_rspq *q)
884+
{
885+
if (q->handler)
886+
napi_disable(&q->napi);
887+
}
888+
883889
/*
884890
* Wait until all NAPI handlers are descheduled.
885891
*/
@@ -890,8 +896,10 @@ static void quiesce_rx(struct adapter *adap)
890896
for (i = 0; i < adap->sge.ingr_sz; i++) {
891897
struct sge_rspq *q = adap->sge.ingr_map[i];
892898

893-
if (q && q->handler)
894-
napi_disable(&q->napi);
899+
if (!q)
900+
continue;
901+
902+
cxgb4_quiesce_rx(q);
895903
}
896904
}
897905

@@ -913,6 +921,17 @@ static void disable_interrupts(struct adapter *adap)
913921
}
914922
}
915923

924+
void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
925+
{
926+
if (q->handler)
927+
napi_enable(&q->napi);
928+
929+
/* 0-increment GTS to start the timer and enable interrupts */
930+
t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
931+
SEINTARM_V(q->intr_params) |
932+
INGRESSQID_V(q->cntxt_id));
933+
}
934+
916935
/*
917936
* Enable NAPI scheduling and interrupt generation for all Rx queues.
918937
*/
@@ -925,13 +944,8 @@ static void enable_rx(struct adapter *adap)
925944

926945
if (!q)
927946
continue;
928-
if (q->handler)
929-
napi_enable(&q->napi);
930947

931-
/* 0-increment GTS to start the timer and enable interrupts */
932-
t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
933-
SEINTARM_V(q->intr_params) |
934-
INGRESSQID_V(q->cntxt_id));
948+
cxgb4_enable_rx(adap, q);
935949
}
936950
}
937951

@@ -5360,6 +5374,19 @@ static int cfg_queues(struct adapter *adap)
53605374
avail_qsets -= num_ulds * s->ofldqsets;
53615375
}
53625376

5377+
/* ETHOFLD Queues used for QoS offload should follow same
5378+
* allocation scheme as normal Ethernet Queues.
5379+
*/
5380+
if (is_ethofld(adap)) {
5381+
if (avail_qsets < s->max_ethqsets) {
5382+
adap->params.ethofld = 0;
5383+
s->eoqsets = 0;
5384+
} else {
5385+
s->eoqsets = s->max_ethqsets;
5386+
}
5387+
avail_qsets -= s->eoqsets;
5388+
}
5389+
53635390
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
53645391
struct sge_eth_rxq *r = &s->ethrxq[i];
53655392

@@ -5473,9 +5500,9 @@ void cxgb4_free_msix_idx_in_bmap(struct adapter *adap,
54735500

54745501
static int enable_msix(struct adapter *adap)
54755502
{
5503+
u32 eth_need, uld_need = 0, ethofld_need = 0;
5504+
u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0;
54765505
u8 num_uld = 0, nchan = adap->params.nports;
5477-
u32 ethqsets = 0, ofldqsets = 0;
5478-
u32 eth_need, uld_need = 0;
54795506
u32 i, want, need, num_vec;
54805507
struct sge *s = &adap->sge;
54815508
struct msix_entry *entries;
@@ -5499,6 +5526,12 @@ static int enable_msix(struct adapter *adap)
54995526
need += uld_need;
55005527
}
55015528

5529+
if (is_ethofld(adap)) {
5530+
want += s->eoqsets;
5531+
ethofld_need = eth_need;
5532+
need += ethofld_need;
5533+
}
5534+
55025535
want += EXTRA_VECS;
55035536
need += EXTRA_VECS;
55045537

@@ -5531,7 +5564,9 @@ static int enable_msix(struct adapter *adap)
55315564
adap->params.crypto = 0;
55325565
adap->params.ethofld = 0;
55335566
s->ofldqsets = 0;
5567+
s->eoqsets = 0;
55345568
uld_need = 0;
5569+
ethofld_need = 0;
55355570
}
55365571

55375572
num_vec = allocated;
@@ -5543,10 +5578,12 @@ static int enable_msix(struct adapter *adap)
55435578
ethqsets = eth_need;
55445579
if (is_uld(adap))
55455580
ofldqsets = nchan;
5581+
if (is_ethofld(adap))
5582+
eoqsets = ethofld_need;
55465583

55475584
num_vec -= need;
55485585
while (num_vec) {
5549-
if (num_vec < eth_need ||
5586+
if (num_vec < eth_need + ethofld_need ||
55505587
ethqsets > s->max_ethqsets)
55515588
break;
55525589

@@ -5557,6 +5594,10 @@ static int enable_msix(struct adapter *adap)
55575594

55585595
ethqsets++;
55595596
num_vec--;
5597+
if (ethofld_need) {
5598+
eoqsets++;
5599+
num_vec--;
5600+
}
55605601
}
55615602
}
55625603

@@ -5574,6 +5615,8 @@ static int enable_msix(struct adapter *adap)
55745615
ethqsets = s->max_ethqsets;
55755616
if (is_uld(adap))
55765617
ofldqsets = s->ofldqsets;
5618+
if (is_ethofld(adap))
5619+
eoqsets = s->eoqsets;
55775620
}
55785621

55795622
if (ethqsets < s->max_ethqsets) {
@@ -5586,6 +5629,9 @@ static int enable_msix(struct adapter *adap)
55865629
s->nqs_per_uld = s->ofldqsets;
55875630
}
55885631

5632+
if (is_ethofld(adap))
5633+
s->eoqsets = eoqsets;
5634+
55895635
/* map for msix */
55905636
ret = alloc_msix_info(adap, allocated);
55915637
if (ret)
@@ -5597,8 +5643,8 @@ static int enable_msix(struct adapter *adap)
55975643
}
55985644

55995645
dev_info(adap->pdev_dev,
5600-
"%d MSI-X vectors allocated, nic %d per uld %d\n",
5601-
allocated, s->max_ethqsets, s->nqs_per_uld);
5646+
"%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d\n",
5647+
allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld);
56025648

56035649
kfree(entries);
56045650
return 0;

0 commit comments

Comments (0)