
Commit 472e146

jsmart-gh authored and martinkpetersen committed
scsi: lpfc: Correct upcalling nvmet_fc transport during io done downcall
When the transport calls into the lpfc target to release an IO job structure, which corresponds to an exchange, and the driver was waiting for an exchange in order to post a previously received command to the transport, the driver immediately takes the IO job, reuses the context for the prior command, and calls nvmet_fc_rcv_fcp_req() to tell the transport about the newly received command.

The problem is that the IO job release may execute in the context of the back-end driver and its bio completion handlers, i.e. in irq context, and protection code kicks in in the bio and request layers that are subsequently called.

Rework lpfc so that instead of upcalling immediately, it queues the work to a deferred work thread and has the thread make the upcall.

Take advantage of this change to remove code duplicated with the normal command receive path that preps the IO job and upcalls nvmet_fc; create a common routine that both paths use.

Also correct some errors found while reviewing the context freeing and reuse path: basically unlocked operations and a somewhat disjoint set of calls to release associated job elements. Clean up this path and add locks for coherency.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
1 parent f6e8479 commit 472e146
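For orientation, here is a minimal sketch of the deferral pattern the patch adopts. The names below (my_ctxbuf, my_defer_work_fn, my_process_rcv_fcp_req, my_ctxbuf_init, my_ctxbuf_reuse) are illustrative stand-ins, not the driver's symbols; the real change uses lpfc_nvmet_ctxbuf, lpfc_nvmet_fcp_rqst_defer_work and lpfc_nvmet_process_rcv_fcp_req, as shown in the diff below.

#include <linux/workqueue.h>

/* Illustrative stand-in for the per-exchange context buffer. */
struct my_ctxbuf {
	struct work_struct defer_work;
	/* ... per-exchange state ... */
};

/* Stand-in for the upcall into the nvmet_fc transport. */
static void my_process_rcv_fcp_req(struct my_ctxbuf *ctx_buf)
{
	/* nvmet_fc_rcv_fcp_req(...) would be called from here. */
}

/* Runs from the workqueue, i.e. process context, so the nvmet_fc
 * upcall can safely reach the bio/request layers.
 */
static void my_defer_work_fn(struct work_struct *work)
{
	struct my_ctxbuf *ctx_buf =
		container_of(work, struct my_ctxbuf, defer_work);

	my_process_rcv_fcp_req(ctx_buf);
}

/* Done once when the context buffer is created. */
static void my_ctxbuf_init(struct my_ctxbuf *ctx_buf)
{
	INIT_WORK(&ctx_buf->defer_work, my_defer_work_fn);
}

/* Called from the IO-done release path, possibly in irq context:
 * defer the upcall rather than making it inline.
 */
static void my_ctxbuf_reuse(struct workqueue_struct *wq,
			    struct my_ctxbuf *ctx_buf)
{
	if (!queue_work(wq, &ctx_buf->defer_work)) {
		/* Work already pending; drop/abort the exchange instead. */
	}
}

queue_work() returns false only if the work item was already queued; the patch treats that as a drop-and-abort case, mirroring the error path in lpfc_nvmet_ctxbuf_post().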

File tree

3 files changed: +137 -112 lines changed

drivers/scsi/lpfc/lpfc.h

Lines changed: 1 addition & 0 deletions
@@ -144,6 +144,7 @@ struct lpfc_nvmet_ctxbuf {
 	struct lpfc_nvmet_rcv_ctx *context;
 	struct lpfc_iocbq *iocbq;
 	struct lpfc_sglq *sglq;
+	struct work_struct defer_work;
 };
 
 struct lpfc_dma_pool {

drivers/scsi/lpfc/lpfc_nvmet.c

Lines changed: 135 additions & 112 deletions
@@ -73,6 +73,9 @@ static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
 					    uint32_t, uint16_t);
 static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
 				    struct lpfc_nvmet_rcv_ctx *);
+static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
+
+static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
 
 static union lpfc_wqe128 lpfc_tsend_cmd_template;
 static union lpfc_wqe128 lpfc_treceive_cmd_template;
@@ -220,21 +223,19 @@ lpfc_nvmet_cmd_template(void)
 void
 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
 {
-	unsigned long iflag;
+	lockdep_assert_held(&ctxp->ctxlock);
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
 			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
 			ctxp->oxid, ctxp->flag);
 
-	spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
-	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
-		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
-				       iflag);
+	if (ctxp->flag & LPFC_NVMET_CTX_RLS)
 		return;
-	}
+
 	ctxp->flag |= LPFC_NVMET_CTX_RLS;
+	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
-	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
+	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 }
 
 /**
@@ -325,7 +326,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 	struct rqb_dmabuf *nvmebuf;
 	struct lpfc_nvmet_ctx_info *infop;
 	uint32_t *payload;
-	uint32_t size, oxid, sid, rc;
+	uint32_t size, oxid, sid;
 	int cpu;
 	unsigned long iflag;
 
@@ -341,6 +342,20 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 			"6411 NVMET free, already free IO x%x: %d %d\n",
 			ctxp->oxid, ctxp->state, ctxp->entry_cnt);
 	}
+
+	if (ctxp->rqb_buffer) {
+		nvmebuf = ctxp->rqb_buffer;
+		spin_lock_irqsave(&ctxp->ctxlock, iflag);
+		ctxp->rqb_buffer = NULL;
+		if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
+			ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
+			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+		} else {
+			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+			lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+		}
+	}
 	ctxp->state = LPFC_NVMET_STE_FREE;
 
 	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
@@ -388,46 +403,30 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 	}
 #endif
 	atomic_inc(&tgtp->rcv_fcp_cmd_in);
-	/*
-	 * The calling sequence should be:
-	 * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done
-	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
-	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
-	 * the NVME command / FC header is stored.
-	 * A buffer has already been reposted for this IO, so just free
-	 * the nvmebuf.
-	 */
-	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
-				  payload, size);
 
-	/* Process FCP command */
-	if (rc == 0) {
-		ctxp->rqb_buffer = NULL;
-		atomic_inc(&tgtp->rcv_fcp_cmd_out);
-		nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
-		return;
-	}
+	/* flag new work queued, replacement buffer has already
+	 * been reposted
+	 */
+	spin_lock_irqsave(&ctxp->ctxlock, iflag);
+	ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
+	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 
-	/* Processing of FCP command is deferred */
-	if (rc == -EOVERFLOW) {
-		lpfc_nvmeio_data(phba,
-				 "NVMET RCV BUSY: xri x%x sz %d "
-				 "from %06x\n",
-				 oxid, size, sid);
-		atomic_inc(&tgtp->rcv_fcp_cmd_out);
-		return;
+	if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
+		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+				"6181 Unable to queue deferred work "
+				"for oxid x%x. "
+				"FCP Drop IO [x%x x%x x%x]\n",
+				ctxp->oxid,
+				atomic_read(&tgtp->rcv_fcp_cmd_in),
+				atomic_read(&tgtp->rcv_fcp_cmd_out),
+				atomic_read(&tgtp->xmt_fcp_release));
+
+		spin_lock_irqsave(&ctxp->ctxlock, iflag);
+		lpfc_nvmet_defer_release(phba, ctxp);
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
 	}
-	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
-	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-			"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
-			ctxp->oxid, rc,
-			atomic_read(&tgtp->rcv_fcp_cmd_in),
-			atomic_read(&tgtp->rcv_fcp_cmd_out),
-			atomic_read(&tgtp->xmt_fcp_release));
-
-	lpfc_nvmet_defer_release(phba, ctxp);
-	lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
-	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
 	return;
 	}
 	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
@@ -1113,6 +1112,8 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
 	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
 	struct lpfc_hba *phba = ctxp->phba;
+	unsigned long iflag;
+
 
 	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
 			 ctxp->oxid, ctxp->size, smp_processor_id());
@@ -1131,6 +1132,9 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 
 	/* Free the nvmebuf since a new buffer already replaced it */
 	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+	spin_lock_irqsave(&ctxp->ctxlock, iflag);
+	ctxp->rqb_buffer = NULL;
+	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 }
 
 static struct nvmet_fc_target_template lpfc_tgttemplate = {
@@ -1323,6 +1327,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 				"6407 Ran out of NVMET XRIs\n");
 			return -ENOMEM;
 		}
+		INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
 
 		/*
 		 * Add ctx to MRQidx context list. Our initial assumption
@@ -1824,6 +1829,86 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 #endif
 }
 
+static void
+lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
+{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
+	struct lpfc_hba *phba = ctxp->phba;
+	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+	struct lpfc_nvmet_tgtport *tgtp;
+	uint32_t *payload;
+	uint32_t rc;
+	unsigned long iflags;
+
+	if (!nvmebuf) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+			"6159 process_rcv_fcp_req, nvmebuf is NULL, "
+			"oxid: x%x flg: x%x state: x%x\n",
+			ctxp->oxid, ctxp->flag, ctxp->state);
+		spin_lock_irqsave(&ctxp->ctxlock, iflags);
+		lpfc_nvmet_defer_release(phba, ctxp);
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+						 ctxp->oxid);
+		return;
+	}
+
+	payload = (uint32_t *)(nvmebuf->dbuf.virt);
+	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+	/*
+	 * The calling sequence should be:
+	 * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done
+	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
+	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
+	 * the NVME command / FC header is stored.
+	 * A buffer has already been reposted for this IO, so just free
+	 * the nvmebuf.
+	 */
+	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
+				  payload, ctxp->size);
+	/* Process FCP command */
+	if (rc == 0) {
+		atomic_inc(&tgtp->rcv_fcp_cmd_out);
+		return;
+	}
+
+	/* Processing of FCP command is deferred */
+	if (rc == -EOVERFLOW) {
+		lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
+				 "from %06x\n",
+				 ctxp->oxid, ctxp->size, ctxp->sid);
+		atomic_inc(&tgtp->rcv_fcp_cmd_out);
+		atomic_inc(&tgtp->defer_fod);
+		return;
+	}
+	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+			"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
+			ctxp->oxid, rc,
+			atomic_read(&tgtp->rcv_fcp_cmd_in),
+			atomic_read(&tgtp->rcv_fcp_cmd_out),
+			atomic_read(&tgtp->xmt_fcp_release));
+	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
+			 ctxp->oxid, ctxp->size, ctxp->sid);
+	spin_lock_irqsave(&ctxp->ctxlock, iflags);
+	lpfc_nvmet_defer_release(phba, ctxp);
+	spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+	lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
+#endif
+}
+
+static void
+lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
+{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+	struct lpfc_nvmet_ctxbuf *ctx_buf =
+		container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
+
+	lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
+#endif
+}
+
 static struct lpfc_nvmet_ctxbuf *
 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
 			     struct lpfc_nvmet_ctx_info *current_infop)
@@ -1906,7 +1991,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 	struct lpfc_nvmet_ctxbuf *ctx_buf;
 	struct lpfc_nvmet_ctx_info *current_infop;
 	uint32_t *payload;
-	uint32_t size, oxid, sid, rc, qno;
+	uint32_t size, oxid, sid, qno;
 	unsigned long iflag;
 	int current_cpu;
 
@@ -1917,11 +2002,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 	if (!nvmebuf || !phba->targetport) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 				"6157 NVMET FCP Drop IO\n");
-		oxid = 0;
-		size = 0;
-		sid = 0;
-		ctxp = NULL;
-		goto dropit;
+		if (nvmebuf)
+			lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
+		return;
 	}
 
 	/*
@@ -2028,67 +2111,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 #endif
 
 	atomic_inc(&tgtp->rcv_fcp_cmd_in);
-	/*
-	 * The calling sequence should be:
-	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
-	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
-	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
-	 * the NVME command / FC header is stored, so we are free to repost
-	 * the buffer.
-	 */
-	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
-				  payload, size);
-
-	/* Process FCP command */
-	if (rc == 0) {
-		ctxp->rqb_buffer = NULL;
-		atomic_inc(&tgtp->rcv_fcp_cmd_out);
-		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
-		return;
-	}
-
-	/* Processing of FCP command is deferred */
-	if (rc == -EOVERFLOW) {
-		/*
-		 * Post a brand new DMA buffer to RQ and defer
-		 * freeing rcv buffer till .defer_rcv callback
-		 */
-		qno = nvmebuf->idx;
-		lpfc_post_rq_buffer(
-			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
-			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
-
-		lpfc_nvmeio_data(phba,
-				 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
-				 oxid, size, sid);
-		atomic_inc(&tgtp->rcv_fcp_cmd_out);
-		atomic_inc(&tgtp->defer_fod);
-		return;
-	}
-	ctxp->rqb_buffer = nvmebuf;
-
-	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
-	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-			"6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
-			ctxp->oxid, rc,
-			atomic_read(&tgtp->rcv_fcp_cmd_in),
-			atomic_read(&tgtp->rcv_fcp_cmd_out),
-			atomic_read(&tgtp->xmt_fcp_release));
-dropit:
-	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
-			 oxid, size, sid);
-	if (oxid) {
-		lpfc_nvmet_defer_release(phba, ctxp);
-		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
-		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
-		return;
-	}
-
-	if (ctx_buf)
-		lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
-
-	if (nvmebuf)
-		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+	lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
 }
 
 /**

drivers/scsi/lpfc/lpfc_nvmet.h

Lines changed: 1 addition & 0 deletions
@@ -137,6 +137,7 @@ struct lpfc_nvmet_rcv_ctx {
 #define LPFC_NVMET_XBUSY		0x4  /* XB bit set on IO cmpl */
 #define LPFC_NVMET_CTX_RLS		0x8  /* ctx free requested */
 #define LPFC_NVMET_ABTS_RCV		0x10  /* ABTS received on exchange */
+#define LPFC_NVMET_CTX_REUSE_WQ		0x20  /* ctx reused via WQ */
 #define LPFC_NVMET_DEFER_WQFULL		0x40  /* Waiting on a free WQE */
 	struct rqb_dmabuf *rqb_buffer;
 	struct lpfc_nvmet_ctxbuf *ctxbuf;
