Skip to content

Commit 062c496

Browse files
Keith Busch authored and gregkh committed
nvme-pci: Remap CMB SQ entries on every controller reset
commit 815c670 upstream. The controller memory buffer is remapped into a kernel address on each reset, but the driver was setting the submission queue base address only on the very first queue creation. The remapped address is likely to change after a reset, so accessing the old address will hit a kernel bug. This patch fixes that by setting the queue's CMB base address each time the queue is created. Fixes: f63572d ("nvme: unmap CMB and remove sysfs file in reset path") Reported-by: Christian Black <christian.d.black@intel.com> Cc: Jon Derrick <jonathan.derrick@intel.com> Cc: <stable@vger.kernel.org> # 4.9+ Signed-off-by: Keith Busch <keith.busch@intel.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Scott Bauer <scott.bauer@intel.com> Reviewed-by: Jon Derrick <jonathan.derrick@intel.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 70c89bc commit 062c496

File tree

1 file changed

+16
-11
lines changed

1 file changed

+16
-11
lines changed

drivers/nvme/host/pci.c

Lines changed: 16 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1034,17 +1034,15 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
10341034
static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
10351035
int qid, int depth)
10361036
{
1037-
if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
1038-
unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
1039-
dev->ctrl.page_size);
1040-
nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
1041-
nvmeq->sq_cmds_io = dev->cmb + offset;
1042-
} else {
1043-
nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
1044-
&nvmeq->sq_dma_addr, GFP_KERNEL);
1045-
if (!nvmeq->sq_cmds)
1046-
return -ENOMEM;
1047-
}
1037+
1038+
/* CMB SQEs will be mapped before creation */
1039+
if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz))
1040+
return 0;
1041+
1042+
nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
1043+
&nvmeq->sq_dma_addr, GFP_KERNEL);
1044+
if (!nvmeq->sq_cmds)
1045+
return -ENOMEM;
10481046

10491047
return 0;
10501048
}
@@ -1117,6 +1115,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
11171115
struct nvme_dev *dev = nvmeq->dev;
11181116
int result;
11191117

1118+
if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
1119+
unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
1120+
dev->ctrl.page_size);
1121+
nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
1122+
nvmeq->sq_cmds_io = dev->cmb + offset;
1123+
}
1124+
11201125
nvmeq->cq_vector = qid - 1;
11211126
result = adapter_alloc_cq(dev, qid, nvmeq);
11221127
if (result < 0)

0 commit comments

Comments (0)