| 1 | +// SPDX-License-Identifier: GPL-2.0 |
| 2 | +/* |
| 3 | + * Copyright (C) STMicroelectronics 2018 - All Rights Reserved |
| 4 | + * Author: Ludovic.barre@st.com for STMicroelectronics. |
| 5 | + */ |
| 6 | +#include <linux/delay.h> |
| 7 | +#include <linux/dma-mapping.h> |
| 8 | +#include <linux/mmc/host.h> |
| 9 | +#include <linux/mmc/card.h> |
| 10 | +#include <linux/reset.h> |
| 11 | +#include <linux/scatterlist.h> |
| 12 | +#include "mmci.h" |
| 13 | + |
| 14 | +#define SDMMC_LLI_BUF_LEN PAGE_SIZE |
| 15 | +#define SDMMC_IDMA_BURST BIT(MMCI_STM32_IDMABNDT_SHIFT) |
| 16 | + |
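| | +/* |
| | + * One IDMA linked-list item, laid out as the hardware expects: |
| | + * idmalar mirrors the link address register (offset of the next |
| | + * descriptor plus the ULA/ULS/ABR control bits), idmabase the |
| | + * buffer address and idmasize the transfer length. |
| | + */ |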
| 17 | +struct sdmmc_lli_desc { |
| 18 | + u32 idmalar; |
| 19 | + u32 idmabase; |
| 20 | + u32 idmasize; |
| 21 | +}; |
| 22 | + |
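| | +/* |
| | + * Per-host IDMA state: sg_cpu/sg_dma are the CPU and bus addresses |
| | + * of the coherent buffer that holds the linked-list descriptors |
| | + * (allocated in sdmmc_idma_setup()). |
| | + */ |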
| 23 | +struct sdmmc_priv { |
| 24 | + dma_addr_t sg_dma; |
| 25 | + void *sg_cpu; |
| 26 | +}; |
| 27 | + |
| 28 | +static int sdmmc_idma_validate_data(struct mmci_host *host, |
| 29 | + struct mmc_data *data) |
| 30 | +{ |
| 31 | + struct scatterlist *sg; |
| 32 | + int i; |
| 33 | + |
| 34 | + /* |
| 35 | + * The IDMA has constraints on idmabase & idmasize for each |
| 36 | + * element except the last, which has no constraint on idmasize. |
| 37 | + */ |
| 38 | + for_each_sg(data->sg, sg, data->sg_len - 1, i) { |
| 39 | + if (!IS_ALIGNED(sg_dma_address(sg), sizeof(u32)) || |
| 40 | + !IS_ALIGNED(sg_dma_len(sg), SDMMC_IDMA_BURST)) { |
| 41 | + dev_err(mmc_dev(host->mmc), |
| 42 | + "unaligned scatterlist: ofst:%x length:%d\n", |
| 43 | + sg->offset, sg->length); |
| 44 | + return -EINVAL; |
| 45 | + } |
| 46 | + } |
| 47 | + |
| 48 | + if (!IS_ALIGNED(sg_dma_address(sg), sizeof(u32))) { |
| 49 | + dev_err(mmc_dev(host->mmc), |
| 50 | + "unaligned last scatterlist: ofst:%x length:%d\n", |
| 51 | + sg->offset, sg->length); |
| 52 | + return -EINVAL; |
| 53 | + } |
| 54 | + |
| 55 | + return 0; |
| 56 | +} |
| 57 | + |
| 58 | +static int _sdmmc_idma_prep_data(struct mmci_host *host, |
| 59 | + struct mmc_data *data) |
| 60 | +{ |
| 61 | + int n_elem; |
| 62 | + |
| 63 | + n_elem = dma_map_sg(mmc_dev(host->mmc), |
| 64 | + data->sg, |
| 65 | + data->sg_len, |
| 66 | + mmc_get_dma_dir(data)); |
| 67 | + |
| 68 | + if (!n_elem) { |
| 69 | + dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n"); |
| 70 | + return -EINVAL; |
| 71 | + } |
| 72 | + |
| 73 | + return 0; |
| 74 | +} |
| 75 | + |
| 76 | +static int sdmmc_idma_prep_data(struct mmci_host *host, |
| 77 | + struct mmc_data *data, bool next) |
| 78 | +{ |
| 79 | + /* Check if job is already prepared. */ |
| 80 | + if (!next && data->host_cookie == host->next_cookie) |
| 81 | + return 0; |
| 82 | + |
| 83 | + return _sdmmc_idma_prep_data(host, data); |
| 84 | +} |
| 85 | + |
| 86 | +static void sdmmc_idma_unprep_data(struct mmci_host *host, |
| 87 | + struct mmc_data *data, int err) |
| 88 | +{ |
| 89 | + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, |
| 90 | + mmc_get_dma_dir(data)); |
| 91 | +} |
| 92 | + |
| 93 | +static int sdmmc_idma_setup(struct mmci_host *host) |
| 94 | +{ |
| 95 | + struct sdmmc_priv *idma; |
| 96 | + |
| 97 | + idma = devm_kzalloc(mmc_dev(host->mmc), sizeof(*idma), GFP_KERNEL); |
| 98 | + if (!idma) |
| 99 | + return -ENOMEM; |
| 100 | + |
| 101 | + host->dma_priv = idma; |
| 102 | + |
| 103 | + if (host->variant->dma_lli) { |
| 104 | + idma->sg_cpu = dmam_alloc_coherent(mmc_dev(host->mmc), |
| 105 | + SDMMC_LLI_BUF_LEN, |
| 106 | + &idma->sg_dma, GFP_KERNEL); |
| 107 | + if (!idma->sg_cpu) { |
| 108 | + dev_err(mmc_dev(host->mmc), |
| 109 | + "Failed to alloc IDMA descriptor\n"); |
| 110 | + return -ENOMEM; |
| 111 | + } |
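| | + /* |
| | + * One descriptor per segment: e.g. with 4 KiB pages and the |
| | + * 12-byte descriptor above, up to 341 segments fit in the buffer. |
| | + */ |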
| 112 | + host->mmc->max_segs = SDMMC_LLI_BUF_LEN / |
| 113 | + sizeof(struct sdmmc_lli_desc); |
| 114 | + host->mmc->max_seg_size = host->variant->stm32_idmabsize_mask; |
| 115 | + } else { |
| 116 | + host->mmc->max_segs = 1; |
| 117 | + host->mmc->max_seg_size = host->mmc->max_req_size; |
| 118 | + } |
| 119 | + |
| 120 | + return 0; |
| 121 | +} |
| 122 | + |
| 123 | +static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl) |
| 124 | +{ |
| 126 | + struct sdmmc_priv *idma = host->dma_priv; |
| 127 | + struct sdmmc_lli_desc *desc = (struct sdmmc_lli_desc *)idma->sg_cpu; |
| 128 | + struct mmc_data *data = host->data; |
| 129 | + struct scatterlist *sg; |
| 130 | + int i; |
| 131 | + |
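| | + /* |
| | + * Without linked-list support, or with a single segment, program |
| | + * the buffer address directly and run the IDMA in single-buffer |
| | + * mode. |
| | + */ |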
| 132 | + if (!host->variant->dma_lli || data->sg_len == 1) { |
| 133 | + writel_relaxed(sg_dma_address(data->sg), |
| 134 | + host->base + MMCI_STM32_IDMABASE0R); |
| 135 | + writel_relaxed(MMCI_STM32_IDMAEN, |
| 136 | + host->base + MMCI_STM32_IDMACTRLR); |
| 137 | + return 0; |
| 138 | + } |
| 139 | + |
| 140 | + for_each_sg(data->sg, sg, data->sg_len, i) { |
| 141 | + desc[i].idmalar = (i + 1) * sizeof(struct sdmmc_lli_desc); |
| 142 | + desc[i].idmalar |= MMCI_STM32_ULA | MMCI_STM32_ULS |
| 143 | + | MMCI_STM32_ABR; |
| 144 | + desc[i].idmabase = sg_dma_address(sg); |
| 145 | + desc[i].idmasize = sg_dma_len(sg); |
| 146 | + } |
| 147 | + |
| 148 | + /* mark the last descriptor as the end of the linked list */ |
| 149 | + desc[data->sg_len - 1].idmalar &= ~MMCI_STM32_ULA; |
| 150 | + |
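| | + /* make the descriptors visible to the device before enabling the IDMA */ |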
| 151 | + dma_wmb(); |
| 152 | + writel_relaxed(idma->sg_dma, host->base + MMCI_STM32_IDMABAR); |
| 153 | + writel_relaxed(desc[0].idmalar, host->base + MMCI_STM32_IDMALAR); |
| 154 | + writel_relaxed(desc[0].idmabase, host->base + MMCI_STM32_IDMABASE0R); |
| 155 | + writel_relaxed(desc[0].idmasize, host->base + MMCI_STM32_IDMABSIZER); |
| 156 | + writel_relaxed(MMCI_STM32_IDMAEN | MMCI_STM32_IDMALLIEN, |
| 157 | + host->base + MMCI_STM32_IDMACTRLR); |
| 158 | + |
| 159 | + return 0; |
| 160 | +} |
| 161 | + |
| 162 | +static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data) |
| 163 | +{ |
| 164 | + writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR); |
| 165 | +} |
| 166 | + |
| 167 | +static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired) |
| 168 | +{ |
| 169 | + unsigned int clk = 0, ddr = 0; |
| 170 | + |
| 171 | + if (host->mmc->ios.timing == MMC_TIMING_MMC_DDR52 || |
| 172 | + host->mmc->ios.timing == MMC_TIMING_UHS_DDR50) |
| 173 | + ddr = MCI_STM32_CLK_DDR; |
| 174 | + |
| 175 | + /* |
| 176 | + * cclk = mclk / (2 * clkdiv) |
| 177 | + * clkdiv 0 => bypass |
| 178 | + * bypass is not possible in DDR mode |
| 179 | + */ |
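| | + /* |
| | + * Worked example (hypothetical rates): mclk = 96 MHz, desired = |
| | + * 25 MHz => clkdiv = DIV_ROUND_UP(96000000, 2 * 25000000) = 2, |
| | + * so cclk = 96000000 / (2 * 2) = 24 MHz, never above the request. |
| | + */ |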
| 180 | + if (desired) { |
| 181 | + if (desired >= host->mclk && !ddr) { |
| 182 | + host->cclk = host->mclk; |
| 183 | + } else { |
| 184 | + clk = DIV_ROUND_UP(host->mclk, 2 * desired); |
| 185 | + if (clk > MCI_STM32_CLK_CLKDIV_MSK) |
| 186 | + clk = MCI_STM32_CLK_CLKDIV_MSK; |
| 187 | + host->cclk = host->mclk / (2 * clk); |
| 188 | + } |
| 189 | + } else { |
| 190 | + /* |
| 191 | + * During the power-on phase the clock can't be set to 0; |
| 192 | + * only power-off and power-cycle deactivate the clock. |
| 193 | + * If the desired clock is 0, set the maximum divider. |
| 194 | + */ |
| 195 | + clk = MCI_STM32_CLK_CLKDIV_MSK; |
| 196 | + host->cclk = host->mclk / (2 * clk); |
| 197 | + } |
| 198 | + |
| 199 | + /* Set actual clock for debug */ |
| 200 | + if (host->mmc->ios.power_mode == MMC_POWER_ON) |
| 201 | + host->mmc->actual_clock = host->cclk; |
| 202 | + else |
| 203 | + host->mmc->actual_clock = 0; |
| 204 | + |
| 205 | + if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) |
| 206 | + clk |= MCI_STM32_CLK_WIDEBUS_4; |
| 207 | + if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) |
| 208 | + clk |= MCI_STM32_CLK_WIDEBUS_8; |
| 209 | + |
| 210 | + clk |= MCI_STM32_CLK_HWFCEN; |
| 211 | + clk |= host->clk_reg_add; |
| 212 | + clk |= ddr; |
| 213 | + |
| 214 | + /* |
| 215 | + * SDMMC_FBCK is selected when an external Delay Block is needed |
| 216 | + * with SDR104. |
| 217 | + */ |
| 218 | + if (host->mmc->ios.timing >= MMC_TIMING_UHS_SDR50) { |
| 219 | + clk |= MCI_STM32_CLK_BUSSPEED; |
| 220 | + if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) { |
| 221 | + clk &= ~MCI_STM32_CLK_SEL_MSK; |
| 222 | + clk |= MCI_STM32_CLK_SELFBCK; |
| 223 | + } |
| 224 | + } |
| 225 | + |
| 226 | + mmci_write_clkreg(host, clk); |
| 227 | +} |
| 228 | + |
| 229 | +static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr) |
| 230 | +{ |
| 231 | + struct mmc_ios ios = host->mmc->ios; |
| 232 | + |
| 233 | + pwr = host->pwr_reg_add; |
| 234 | + |
| 235 | + if (ios.power_mode == MMC_POWER_OFF) { |
| 236 | + /* Only a reset could power-off sdmmc */ |
| 237 | + reset_control_assert(host->rst); |
| 238 | + udelay(2); |
| 239 | + reset_control_deassert(host->rst); |
| 240 | + |
| 241 | + /* |
| 242 | + * Set the SDMMC in power-cycle state. |
| 243 | + * This ensures that SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK |
| 244 | + * are driven low, to prevent the card from being supplied |
| 245 | + * through the signal lines. |
| 246 | + */ |
| 247 | + mmci_write_pwrreg(host, MCI_STM32_PWR_CYC | pwr); |
| 248 | + } else if (ios.power_mode == MMC_POWER_ON) { |
| 249 | + /* |
| 250 | + * After power-off (reset), the irq mask defined in the probe |
| 251 | + * function is lost; the default irq mask (set at probe) must |
| 252 | + * be restored. |
| 253 | + */ |
| 254 | + writel(MCI_IRQENABLE | host->variant->start_err, |
| 255 | + host->base + MMCIMASK0); |
| 256 | + |
| 257 | + /* |
| 258 | + * After a power-cycle state, the SDMMC must first be set to |
| 259 | + * power-off: SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK are then |
| 260 | + * driven high. Only then can the SDMMC be set to power-on. |
| 261 | + */ |
| 262 | + mmci_write_pwrreg(host, MCI_PWR_OFF | pwr); |
| 263 | + mdelay(1); |
| 264 | + mmci_write_pwrreg(host, MCI_PWR_ON | pwr); |
| 265 | + } |
| 266 | +} |
| 267 | + |
| 268 | +static struct mmci_host_ops sdmmc_variant_ops = { |
| 269 | + .validate_data = sdmmc_idma_validate_data, |
| 270 | + .prep_data = sdmmc_idma_prep_data, |
| 271 | + .unprep_data = sdmmc_idma_unprep_data, |
| 272 | + .dma_setup = sdmmc_idma_setup, |
| 273 | + .dma_start = sdmmc_idma_start, |
| 274 | + .dma_finalize = sdmmc_idma_finalize, |
| 275 | + .set_clkreg = mmci_sdmmc_set_clkreg, |
| 276 | + .set_pwrreg = mmci_sdmmc_set_pwrreg, |
| 277 | +}; |
| 278 | + |
| 279 | +void sdmmc_variant_init(struct mmci_host *host) |
| 280 | +{ |
| 281 | + host->ops = &sdmmc_variant_ops; |
| 282 | +} |
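
For context on how these callbacks are reached: the mmci core invokes them indirectly through `host->ops`, which `sdmmc_variant_init()` installs above. A minimal sketch of that dispatch pattern, using a hypothetical wrapper name (the real helpers in mmci.c may differ):

```c
/*
 * Sketch only: how the core might dispatch a DMA start through the
 * ops table. mmci_dma_start_wrapper() is a hypothetical name for
 * illustration, not a function from mmci.c.
 */
static int mmci_dma_start_wrapper(struct mmci_host *host,
				  unsigned int datactrl)
{
	/* Variants without DMA support simply leave the hook unset. */
	if (host->ops && host->ops->dma_start)
		return host->ops->dma_start(host, &datactrl);

	return -EINVAL;
}
```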