@@ -24,6 +24,8 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/cache.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/mutex.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
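
The two new headers are needed because the mapping code added below works with dmaengine channels and the streaming DMA-mapping API. The matching struct spi_master additions are not part of this excerpt; the sketch below is inferred purely from how spi_map_msg()/spi_unmap_msg() use the fields and is not quoted from the actual header change.

/* Assumed shape of the new struct spi_master fields (sketch only, not the
 * real header hunk): inferred from the core code added in this patch. */
struct spi_master {
        /* ... existing fields ... */

        /* Controller opts individual transfers in or out of core DMA mapping. */
        bool            (*can_dma)(struct spi_master *master,
                                   struct spi_device *spi,
                                   struct spi_transfer *xfer);

        /* dmaengine channels whose devices the buffers are mapped against. */
        struct dma_chan *dma_tx;
        struct dma_chan *dma_rx;

        /* Set by spi_map_msg() so spi_unmap_msg() knows it has work to do. */
        bool            cur_msg_mapped;

        /* ... */
};
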
@@ -580,6 +582,77 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
                 spi->master->set_cs(spi, !enable);
 }

+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+        struct device *dev = master->dev.parent;
+        struct device *tx_dev, *rx_dev;
+        struct spi_transfer *xfer;
+
+        if (msg->is_dma_mapped || !master->can_dma)
+                return 0;
+
+        tx_dev = &master->dma_tx->dev->device;
+        rx_dev = &master->dma_rx->dev->device;
+
+        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+                if (!master->can_dma(master, msg->spi, xfer))
+                        continue;
+
+                if (xfer->tx_buf != NULL) {
+                        xfer->tx_dma = dma_map_single(tx_dev,
+                                                      (void *)xfer->tx_buf,
+                                                      xfer->len,
+                                                      DMA_TO_DEVICE);
+                        if (dma_mapping_error(dev, xfer->tx_dma)) {
+                                dev_err(dev, "dma_map_single Tx failed\n");
+                                return -ENOMEM;
+                        }
+                }
+
+                if (xfer->rx_buf != NULL) {
+                        xfer->rx_dma = dma_map_single(rx_dev,
+                                                      xfer->rx_buf, xfer->len,
+                                                      DMA_FROM_DEVICE);
+                        if (dma_mapping_error(dev, xfer->rx_dma)) {
+                                dev_err(dev, "dma_map_single Rx failed\n");
+                                dma_unmap_single(tx_dev, xfer->tx_dma,
+                                                 xfer->len, DMA_TO_DEVICE);
+                                return -ENOMEM;
+                        }
+                }
+        }
+
+        master->cur_msg_mapped = true;
+
+        return 0;
+}
+
+static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+{
+        struct spi_transfer *xfer;
+        struct device *tx_dev, *rx_dev;
+
+        if (!master->cur_msg_mapped || msg->is_dma_mapped || !master->can_dma)
+                return 0;
+
+        tx_dev = &master->dma_tx->dev->device;
+        rx_dev = &master->dma_rx->dev->device;
+
+        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+                if (!master->can_dma(master, msg->spi, xfer))
+                        continue;
+
+                if (xfer->rx_buf)
+                        dma_unmap_single(rx_dev, xfer->rx_dma, xfer->len,
+                                         DMA_FROM_DEVICE);
+                if (xfer->tx_buf)
+                        dma_unmap_single(tx_dev, xfer->tx_dma, xfer->len,
+                                         DMA_TO_DEVICE);
+        }
+
+        return 0;
+}
+
 /*
  * spi_transfer_one_message - Default implementation of transfer_one_message()
  *
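
The core only maps transfers that the controller driver opts in through can_dma(), and only mapping against the dma_tx/dma_rx channels the driver provides. A hypothetical controller probe might wire this up as below; the foo_spi_* names and the size threshold are illustrative, not part of this patch.

#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

#define FOO_SPI_DMA_MIN_BYTES   16      /* arbitrary cut-off, illustrative */

/* Keep short transfers on PIO; mapping overhead dominates at small lengths. */
static bool foo_spi_can_dma(struct spi_master *master,
                            struct spi_device *spi,
                            struct spi_transfer *xfer)
{
        return xfer->len >= FOO_SPI_DMA_MIN_BYTES;
}

static int foo_spi_probe(struct platform_device *pdev)
{
        struct spi_master *master;

        master = spi_alloc_master(&pdev->dev, 0);
        if (!master)
                return -ENOMEM;

        master->dma_tx = dma_request_slave_channel(&pdev->dev, "tx");
        master->dma_rx = dma_request_slave_channel(&pdev->dev, "rx");
        /* Only advertise can_dma() if both channels exist, since
         * spi_map_msg() dereferences them unconditionally. */
        if (master->dma_tx && master->dma_rx)
                master->can_dma = foo_spi_can_dma;

        /* ... controller-specific setup, transfer_one_message(), ... */
        return devm_spi_register_master(&pdev->dev, master);
}

The driver's own transfer path can then treat xfer->tx_dma and xfer->rx_dma as already-mapped DMA addresses when building its dmaengine descriptors.
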
@@ -752,6 +825,13 @@ static void spi_pump_messages(struct kthread_work *work)
                 master->cur_msg_prepared = true;
         }

+        ret = spi_map_msg(master, master->cur_msg);
+        if (ret) {
+                master->cur_msg->status = ret;
+                spi_finalize_current_message(master);
+                return;
+        }
+
         ret = master->transfer_one_message(master, master->cur_msg);
         if (ret) {
                 dev_err(&master->dev,
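
A practical consequence of the new error path above: if spi_map_msg() fails, the message is finalized immediately with msg->status carrying the error, so a synchronous caller sees it as the return value of spi_sync(). A minimal client-side sketch (device binding and buffer lifetime elided):

static int send_block(struct spi_device *spi, void *buf, size_t len)
{
        struct spi_transfer xfer = {
                .tx_buf = buf,
                .len = len,
        };
        struct spi_message msg;

        spi_message_init(&msg);
        spi_message_add_tail(&xfer, &msg);

        /* Returns msg.status; with this patch that can be -ENOMEM from a
         * failed DMA mapping in spi_map_msg(). */
        return spi_sync(spi, &msg);
}
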
@@ -841,6 +921,8 @@ void spi_finalize_current_message(struct spi_master *master)
         queue_kthread_work(&master->kworker, &master->pump_messages);
         spin_unlock_irqrestore(&master->queue_lock, flags);

+        spi_unmap_msg(master, mesg);
+
         if (master->cur_msg_prepared && master->unprepare_message) {
                 ret = master->unprepare_message(master, mesg);
                 if (ret) {
|