Commit 99adef3
spi: Provide core support for DMA mapping transfers
The process of DMA mapping buffers for SPI transfers does not vary between devices, so to avoid duplicating code in drivers it can be factored out into the core. This also allows it to be integrated with the ongoing work on factoring common elements out of the data path, including more sharing of dmaengine code.

To use this, masters need to provide a can_dma() operation and, while the hardware is prepared, ensure that DMA channels are provided in dma_tx and dma_rx. The core will then ensure that the buffers are mapped for DMA prior to calling transfer_one_message().

Currently the cleanup on error is not complete; this needs to be improved.

Signed-off-by: Mark Brown <broonie@linaro.org>
1 parent: 38dbfb5

2 files changed: +100 −0
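For orientation before the diffs: the driver-side contract is that a controller provides can_dma() and, while its hardware is prepared, valid dma_tx/dma_rx channels. Below is a minimal sketch of what that might look like for a hypothetical foo_spi driver; the foo_spi_* names, the "tx"/"rx" channel names, and the 32-byte threshold are illustrative assumptions, not part of this commit.

/* Hypothetical controller-driver glue for the new hooks; not part of
 * this commit. */
#include <linux/dmaengine.h>
#include <linux/spi/spi.h>

#define FOO_SPI_DMA_MIN_LEN	32	/* assumed PIO/DMA cutover point */

/* Called by the core for each transfer; returning true asks the core
 * to dma_map_single() the buffers before transfer_one_message() runs. */
static bool foo_spi_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	return xfer->len >= FOO_SPI_DMA_MIN_LEN;
}

/* dma_tx/dma_rx must be valid while the hardware is prepared, since
 * the core dereferences them to pick the device to map against. */
static int foo_spi_prepare_hardware(struct spi_master *master)
{
	master->dma_tx = dma_request_slave_channel(master->dev.parent, "tx");
	if (!master->dma_tx)
		return -ENODEV;

	master->dma_rx = dma_request_slave_channel(master->dev.parent, "rx");
	if (!master->dma_rx) {
		dma_release_channel(master->dma_tx);
		master->dma_tx = NULL;
		return -ENODEV;
	}

	return 0;
}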

drivers/spi/spi.c

Lines changed: 82 additions & 0 deletions
@@ -24,6 +24,8 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/cache.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/mutex.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
@@ -580,6 +582,77 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
 		spi->master->set_cs(spi, !enable);
 }
 
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+	struct device *dev = master->dev.parent;
+	struct device *tx_dev, *rx_dev;
+	struct spi_transfer *xfer;
+
+	if (msg->is_dma_mapped || !master->can_dma)
+		return 0;
+
+	tx_dev = &master->dma_tx->dev->device;
+	rx_dev = &master->dma_rx->dev->device;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (!master->can_dma(master, msg->spi, xfer))
+			continue;
+
+		if (xfer->tx_buf != NULL) {
+			xfer->tx_dma = dma_map_single(tx_dev,
+						      (void *)xfer->tx_buf,
+						      xfer->len,
+						      DMA_TO_DEVICE);
+			if (dma_mapping_error(dev, xfer->tx_dma)) {
+				dev_err(dev, "dma_map_single Tx failed\n");
+				return -ENOMEM;
+			}
+		}
+
+		if (xfer->rx_buf != NULL) {
+			xfer->rx_dma = dma_map_single(rx_dev,
+						      xfer->rx_buf, xfer->len,
+						      DMA_FROM_DEVICE);
+			if (dma_mapping_error(dev, xfer->rx_dma)) {
+				dev_err(dev, "dma_map_single Rx failed\n");
+				dma_unmap_single(tx_dev, xfer->tx_dma,
+						 xfer->len, DMA_TO_DEVICE);
+				return -ENOMEM;
+			}
+		}
+	}
+
+	master->cur_msg_mapped = true;
+
+	return 0;
+}
+
+static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+{
+	struct spi_transfer *xfer;
+	struct device *tx_dev, *rx_dev;
+
+	if (!master->cur_msg_mapped || msg->is_dma_mapped || !master->can_dma)
+		return 0;
+
+	tx_dev = &master->dma_tx->dev->device;
+	rx_dev = &master->dma_rx->dev->device;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (!master->can_dma(master, msg->spi, xfer))
+			continue;
+
+		if (xfer->rx_buf)
+			dma_unmap_single(rx_dev, xfer->rx_dma, xfer->len,
+					 DMA_FROM_DEVICE);
+		if (xfer->tx_buf)
+			dma_unmap_single(tx_dev, xfer->tx_dma, xfer->len,
+					 DMA_TO_DEVICE);
+	}
+
+	return 0;
+}
+
 /*
  * spi_transfer_one_message - Default implementation of transfer_one_message()
  *
@@ -752,6 +825,13 @@ static void spi_pump_messages(struct kthread_work *work)
 		master->cur_msg_prepared = true;
 	}
 
+	ret = spi_map_msg(master, master->cur_msg);
+	if (ret) {
+		master->cur_msg->status = ret;
+		spi_finalize_current_message(master);
+		return;
+	}
+
 	ret = master->transfer_one_message(master, master->cur_msg);
 	if (ret) {
 		dev_err(&master->dev,
@@ -841,6 +921,8 @@ void spi_finalize_current_message(struct spi_master *master)
 	queue_kthread_work(&master->kworker, &master->pump_messages);
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 
+	spi_unmap_msg(master, mesg);
+
 	if (master->cur_msg_prepared && master->unprepare_message) {
 		ret = master->unprepare_message(master, mesg);
 		if (ret) {
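A note on what "cleanup on error is not complete" means in spi_map_msg() above: if an rx mapping fails, only the current transfer's tx buffer is unmapped before returning -ENOMEM, and a tx mapping failure unmaps nothing. Mappings made for earlier transfers in the same message are left in place, and since cur_msg_mapped is never set on these paths, the spi_unmap_msg() call in spi_finalize_current_message() will not undo them either.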

include/linux/spi/spi.h

Lines changed: 18 additions & 0 deletions
@@ -25,6 +25,8 @@
 #include <linux/kthread.h>
 #include <linux/completion.h>
 
+struct dma_chan;
+
 /*
  * INTERFACES between SPI master-side drivers and SPI infrastructure.
  * (There's no SPI slave support for Linux yet...)
@@ -386,6 +388,17 @@ struct spi_master {
 	/* called on release() to free memory provided by spi_master */
 	void			(*cleanup)(struct spi_device *spi);
 
+	/*
+	 * Used to enable core support for DMA handling, if can_dma()
+	 * exists and returns true then the transfer will be mapped
+	 * prior to transfer_one() being called.  The driver should
+	 * not modify or store xfer and dma_tx and dma_rx must be set
+	 * while the device is prepared.
+	 */
+	bool			(*can_dma)(struct spi_master *master,
+					   struct spi_device *spi,
+					   struct spi_transfer *xfer);
+
 	/*
 	 * These hooks are for drivers that want to use the generic
 	 * master transfer queueing mechanism. If these are used, the
@@ -404,6 +417,7 @@ struct spi_master {
 	bool				rt;
 	bool				auto_runtime_pm;
 	bool				cur_msg_prepared;
+	bool				cur_msg_mapped;
 	struct completion		xfer_completion;
 
 	int (*prepare_transfer_hardware)(struct spi_master *master);
@@ -425,6 +439,10 @@ struct spi_master {
 
 	/* gpio chip select */
 	int			*cs_gpios;
+
+	/* DMA channels for use with core dmaengine helpers */
+	struct dma_chan		*dma_tx;
+	struct dma_chan		*dma_rx;
 };
 
 static inline void *spi_master_get_devdata(struct spi_master *master)
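Closing the loop on the new struct fields: matching teardown and probe wiring for the same hypothetical foo_spi driver might look roughly like this (again an illustrative sketch, not code from this commit):

/* Hypothetical counterpart to foo_spi_prepare_hardware() above. */
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int foo_spi_unprepare_hardware(struct spi_master *master)
{
	/* Release the channels acquired while preparing the hardware. */
	dma_release_channel(master->dma_rx);
	dma_release_channel(master->dma_tx);
	master->dma_rx = NULL;
	master->dma_tx = NULL;
	return 0;
}

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;

	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	/* Opt in to core DMA mapping by providing can_dma(). */
	master->can_dma = foo_spi_can_dma;
	master->prepare_transfer_hardware = foo_spi_prepare_hardware;
	master->unprepare_transfer_hardware = foo_spi_unprepare_hardware;
	/* ...mode_bits, num_chipselect, transfer_one_message, etc... */

	return devm_spi_register_master(&pdev->dev, master);
}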
