Version:  2.0.40 2.2.26 2.4.37 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9 3.10 3.11 3.12 3.13 3.14 3.15 3.16 3.17 3.18

Linux/drivers/spi/spi-sirf.c

  1 /*
  2  * SPI bus driver for CSR SiRFprimaII
  3  *
  4  * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
  5  *
  6  * Licensed under GPLv2 or later.
  7  */
  8 
  9 #include <linux/module.h>
 10 #include <linux/kernel.h>
 11 #include <linux/slab.h>
 12 #include <linux/clk.h>
 13 #include <linux/completion.h>
 14 #include <linux/interrupt.h>
 15 #include <linux/io.h>
 16 #include <linux/of.h>
 17 #include <linux/bitops.h>
 18 #include <linux/err.h>
 19 #include <linux/platform_device.h>
 20 #include <linux/of_gpio.h>
 21 #include <linux/spi/spi.h>
 22 #include <linux/spi/spi_bitbang.h>
 23 #include <linux/dmaengine.h>
 24 #include <linux/dma-direction.h>
 25 #include <linux/dma-mapping.h>
 26 
 27 #define DRIVER_NAME "sirfsoc_spi"
 28 
 29 #define SIRFSOC_SPI_CTRL                0x0000
 30 #define SIRFSOC_SPI_CMD                 0x0004
 31 #define SIRFSOC_SPI_TX_RX_EN            0x0008
 32 #define SIRFSOC_SPI_INT_EN              0x000C
 33 #define SIRFSOC_SPI_INT_STATUS          0x0010
 34 #define SIRFSOC_SPI_TX_DMA_IO_CTRL      0x0100
 35 #define SIRFSOC_SPI_TX_DMA_IO_LEN       0x0104
 36 #define SIRFSOC_SPI_TXFIFO_CTRL         0x0108
 37 #define SIRFSOC_SPI_TXFIFO_LEVEL_CHK    0x010C
 38 #define SIRFSOC_SPI_TXFIFO_OP           0x0110
 39 #define SIRFSOC_SPI_TXFIFO_STATUS       0x0114
 40 #define SIRFSOC_SPI_TXFIFO_DATA         0x0118
 41 #define SIRFSOC_SPI_RX_DMA_IO_CTRL      0x0120
 42 #define SIRFSOC_SPI_RX_DMA_IO_LEN       0x0124
 43 #define SIRFSOC_SPI_RXFIFO_CTRL         0x0128
 44 #define SIRFSOC_SPI_RXFIFO_LEVEL_CHK    0x012C
 45 #define SIRFSOC_SPI_RXFIFO_OP           0x0130
 46 #define SIRFSOC_SPI_RXFIFO_STATUS       0x0134
 47 #define SIRFSOC_SPI_RXFIFO_DATA         0x0138
 48 #define SIRFSOC_SPI_DUMMY_DELAY_CTL     0x0144
 49 
 50 /* SPI CTRL register defines */
 51 #define SIRFSOC_SPI_SLV_MODE            BIT(16)
 52 #define SIRFSOC_SPI_CMD_MODE            BIT(17)
 53 #define SIRFSOC_SPI_CS_IO_OUT           BIT(18)
 54 #define SIRFSOC_SPI_CS_IO_MODE          BIT(19)
 55 #define SIRFSOC_SPI_CLK_IDLE_STAT       BIT(20)
 56 #define SIRFSOC_SPI_CS_IDLE_STAT        BIT(21)
 57 #define SIRFSOC_SPI_TRAN_MSB            BIT(22)
 58 #define SIRFSOC_SPI_DRV_POS_EDGE        BIT(23)
 59 #define SIRFSOC_SPI_CS_HOLD_TIME        BIT(24)
 60 #define SIRFSOC_SPI_CLK_SAMPLE_MODE     BIT(25)
 61 #define SIRFSOC_SPI_TRAN_DAT_FORMAT_8   (0 << 26)
 62 #define SIRFSOC_SPI_TRAN_DAT_FORMAT_12  (1 << 26)
 63 #define SIRFSOC_SPI_TRAN_DAT_FORMAT_16  (2 << 26)
 64 #define SIRFSOC_SPI_TRAN_DAT_FORMAT_32  (3 << 26)
 65 #define SIRFSOC_SPI_CMD_BYTE_NUM(x)     ((x & 3) << 28)
 66 #define SIRFSOC_SPI_ENA_AUTO_CLR        BIT(30)
 67 #define SIRFSOC_SPI_MUL_DAT_MODE        BIT(31)
 68 
 69 /* Interrupt Enable */
 70 #define SIRFSOC_SPI_RX_DONE_INT_EN      BIT(0)
 71 #define SIRFSOC_SPI_TX_DONE_INT_EN      BIT(1)
 72 #define SIRFSOC_SPI_RX_OFLOW_INT_EN     BIT(2)
 73 #define SIRFSOC_SPI_TX_UFLOW_INT_EN     BIT(3)
 74 #define SIRFSOC_SPI_RX_IO_DMA_INT_EN    BIT(4)
 75 #define SIRFSOC_SPI_TX_IO_DMA_INT_EN    BIT(5)
 76 #define SIRFSOC_SPI_RXFIFO_FULL_INT_EN  BIT(6)
 77 #define SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN BIT(7)
 78 #define SIRFSOC_SPI_RXFIFO_THD_INT_EN   BIT(8)
 79 #define SIRFSOC_SPI_TXFIFO_THD_INT_EN   BIT(9)
 80 #define SIRFSOC_SPI_FRM_END_INT_EN      BIT(10)
 81 
 82 #define SIRFSOC_SPI_INT_MASK_ALL        0x1FFF
 83 
 84 /* Interrupt status */
 85 #define SIRFSOC_SPI_RX_DONE             BIT(0)
 86 #define SIRFSOC_SPI_TX_DONE             BIT(1)
 87 #define SIRFSOC_SPI_RX_OFLOW            BIT(2)
 88 #define SIRFSOC_SPI_TX_UFLOW            BIT(3)
 89 #define SIRFSOC_SPI_RX_IO_DMA           BIT(4)
 90 #define SIRFSOC_SPI_RX_FIFO_FULL        BIT(6)
 91 #define SIRFSOC_SPI_TXFIFO_EMPTY        BIT(7)
 92 #define SIRFSOC_SPI_RXFIFO_THD_REACH    BIT(8)
 93 #define SIRFSOC_SPI_TXFIFO_THD_REACH    BIT(9)
 94 #define SIRFSOC_SPI_FRM_END             BIT(10)
 95 
 96 /* TX RX enable */
 97 #define SIRFSOC_SPI_RX_EN               BIT(0)
 98 #define SIRFSOC_SPI_TX_EN               BIT(1)
 99 #define SIRFSOC_SPI_CMD_TX_EN           BIT(2)
100 
101 #define SIRFSOC_SPI_IO_MODE_SEL         BIT(0)
102 #define SIRFSOC_SPI_RX_DMA_FLUSH        BIT(2)
103 
104 /* FIFO OPs */
105 #define SIRFSOC_SPI_FIFO_RESET          BIT(0)
106 #define SIRFSOC_SPI_FIFO_START          BIT(1)
107 
108 /* FIFO CTRL */
109 #define SIRFSOC_SPI_FIFO_WIDTH_BYTE     (0 << 0)
110 #define SIRFSOC_SPI_FIFO_WIDTH_WORD     (1 << 0)
111 #define SIRFSOC_SPI_FIFO_WIDTH_DWORD    (2 << 0)
112 
113 /* FIFO Status */
114 #define SIRFSOC_SPI_FIFO_LEVEL_MASK     0xFF
115 #define SIRFSOC_SPI_FIFO_FULL           BIT(8)
116 #define SIRFSOC_SPI_FIFO_EMPTY          BIT(9)
117 
118 /* 256 bytes rx/tx FIFO */
119 #define SIRFSOC_SPI_FIFO_SIZE           256
120 #define SIRFSOC_SPI_DAT_FRM_LEN_MAX     (64 * 1024)
121 
122 #define SIRFSOC_SPI_FIFO_SC(x)          ((x) & 0x3F)
123 #define SIRFSOC_SPI_FIFO_LC(x)          (((x) & 0x3F) << 10)
124 #define SIRFSOC_SPI_FIFO_HC(x)          (((x) & 0x3F) << 20)
125 #define SIRFSOC_SPI_FIFO_THD(x)         (((x) & 0xFF) << 2)
126 
127 /*
128  * only if the rx/tx buffer and transfer size are 4-bytes aligned, we use dma
129  * due to the limitation of dma controller
130  */
131 
/* Macro arguments fully parenthesized so expressions expand safely. */
#define ALIGNED(x) (!((u32)(x) & 0x3))
#define IS_DMA_VALID(x) ((x) && ALIGNED((x)->tx_buf) && ALIGNED((x)->rx_buf) && \
	ALIGNED((x)->len) && ((x)->len < 2 * PAGE_SIZE))
135 
136 #define SIRFSOC_MAX_CMD_BYTES   4
137 
/* Per-controller driver state, stored as spi_master drvdata. */
struct sirfsoc_spi {
	struct spi_bitbang bitbang;
	struct completion rx_done;	/* signalled by IRQ/DMA when rx finishes */
	struct completion tx_done;	/* signalled by IRQ/DMA when tx finishes */

	void __iomem *base;	/* mapped controller registers */
	u32 ctrl_freq;  /* SPI controller clock speed */
	struct clk *clk;

	/* rx & tx bufs from the spi_transfer */
	const void *tx;
	void *rx;

	/* place received word into rx buffer */
	void (*rx_word) (struct sirfsoc_spi *);
	/* get word from tx buffer for sending */
	void (*tx_word) (struct sirfsoc_spi *);

	/* number of words left to be transmitted/received */
	unsigned int left_tx_word;
	unsigned int left_rx_word;

	/* rx & tx DMA channels */
	struct dma_chan *rx_chan;
	struct dma_chan *tx_chan;
	dma_addr_t src_start;	/* DMA address of the mapped tx buffer */
	dma_addr_t dst_start;	/* DMA address of the mapped rx buffer */
	void *dummypage;	/* scratch buffer used when tx_buf/rx_buf is NULL */
	int word_width; /* in bytes */

	/*
	 * if tx size is not more than 4 and rx size is NULL, use
	 * command model
	 */
	bool	tx_by_cmd;
	bool	hw_cs;	/* true: controller drives CS; false: GPIO CS */
};
175 
176 static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
177 {
178         u32 data;
179         u8 *rx = sspi->rx;
180 
181         data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
182 
183         if (rx) {
184                 *rx++ = (u8) data;
185                 sspi->rx = rx;
186         }
187 
188         sspi->left_rx_word--;
189 }
190 
191 static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
192 {
193         u32 data = 0;
194         const u8 *tx = sspi->tx;
195 
196         if (tx) {
197                 data = *tx++;
198                 sspi->tx = tx;
199         }
200 
201         writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
202         sspi->left_tx_word--;
203 }
204 
205 static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
206 {
207         u32 data;
208         u16 *rx = sspi->rx;
209 
210         data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
211 
212         if (rx) {
213                 *rx++ = (u16) data;
214                 sspi->rx = rx;
215         }
216 
217         sspi->left_rx_word--;
218 }
219 
220 static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
221 {
222         u32 data = 0;
223         const u16 *tx = sspi->tx;
224 
225         if (tx) {
226                 data = *tx++;
227                 sspi->tx = tx;
228         }
229 
230         writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
231         sspi->left_tx_word--;
232 }
233 
234 static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
235 {
236         u32 data;
237         u32 *rx = sspi->rx;
238 
239         data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
240 
241         if (rx) {
242                 *rx++ = (u32) data;
243                 sspi->rx = rx;
244         }
245 
246         sspi->left_rx_word--;
247 
248 }
249 
250 static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
251 {
252         u32 data = 0;
253         const u32 *tx = sspi->tx;
254 
255         if (tx) {
256                 data = *tx++;
257                 sspi->tx = tx;
258         }
259 
260         writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
261         sspi->left_tx_word--;
262 }
263 
/*
 * Interrupt handler: completes the tx/rx completions according to which
 * status bits fired, then masks (INT_EN = 0) and clears (write-1-to-clear
 * INT_STATUS) all SPI interrupts before returning.
 */
static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
{
	struct sirfsoc_spi *sspi = dev_id;
	u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS);
	/* command-mode transfer: frame end means the command word went out */
	if (sspi->tx_by_cmd && (spi_stat & SIRFSOC_SPI_FRM_END)) {
		complete(&sspi->tx_done);
		writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_INT_MASK_ALL,
				sspi->base + SIRFSOC_SPI_INT_STATUS);
		return IRQ_HANDLED;
	}

	/* Error Conditions */
	if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
			spi_stat & SIRFSOC_SPI_TX_UFLOW) {
		/* wake both waiters so the transfer path can bail out */
		complete(&sspi->tx_done);
		complete(&sspi->rx_done);
		writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_INT_MASK_ALL,
				sspi->base + SIRFSOC_SPI_INT_STATUS);
		return IRQ_HANDLED;
	}
	if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
		complete(&sspi->tx_done);
	/*
	 * NOTE(review): this busy-polls RX_IO_DMA inside the hard IRQ
	 * handler. Presumably the RX side finishes shortly after the TX
	 * FIFO empties — confirm the wait is bounded on real hardware.
	 */
	while (!(readl(sspi->base + SIRFSOC_SPI_INT_STATUS) &
		SIRFSOC_SPI_RX_IO_DMA))
		cpu_relax();
	complete(&sspi->rx_done);
	writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
	writel(SIRFSOC_SPI_INT_MASK_ALL,
			sspi->base + SIRFSOC_SPI_INT_STATUS);

	return IRQ_HANDLED;
}
298 
/* dmaengine completion callback: wake whoever waits on the completion. */
static void spi_sirfsoc_dma_fini_callback(void *data)
{
	complete((struct completion *)data);
}
305 
/*
 * Transmit up to SIRFSOC_MAX_CMD_BYTES through the command register
 * instead of the TX FIFO.  Used when tx_by_cmd was selected in
 * spi_sirfsoc_setup_transfer() (tx only, len <= 4 bytes).
 */
static void spi_sirfsoc_cmd_transfer(struct spi_device *spi,
	struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	/*
	 * NOTE(review): passed straight to wait_for_completion_timeout(),
	 * which expects jiffies; "t->len * 10" looks unit-less — confirm.
	 */
	int timeout = t->len * 10;
	u32 cmd;

	sspi = spi_master_get_devdata(spi->master);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	memcpy(&cmd, sspi->tx, t->len);
	/* MSB-first 8-bit frames: left-justify the command bytes in the word */
	if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
		cmd = cpu_to_be32(cmd) >>
			((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
	/* MSB-first 16-bit frames packed two per word: swap the half-words */
	if (sspi->word_width == 2 && t->len == 4 &&
			(!(spi->mode & SPI_LSB_FIRST)))
		cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
	writel(cmd, sspi->base + SIRFSOC_SPI_CMD);
	writel(SIRFSOC_SPI_FRM_END_INT_EN,
		sspi->base + SIRFSOC_SPI_INT_EN);
	writel(SIRFSOC_SPI_CMD_TX_EN,
		sspi->base + SIRFSOC_SPI_TX_RX_EN);
	if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
		dev_err(&spi->dev, "cmd transfer timeout\n");
		return;
	}
	/*
	 * NOTE(review): left_rx_word counts words while t->len is bytes;
	 * these match only for 8-bit frames — confirm for 16-bit command use.
	 */
	sspi->left_rx_word -= t->len;
}
334 
/*
 * Full-duplex transfer using the rx/tx dmaengine channels.  Only called
 * when IS_DMA_VALID(t) holds (buffers and length 4-byte aligned, length
 * under two pages).  Programs the per-frame IO length registers when the
 * word count fits the 64K frame limit, otherwise runs unbounded.
 */
static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
	struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	struct dma_async_tx_descriptor *rx_desc, *tx_desc;
	/*
	 * NOTE(review): used as jiffies by wait_for_completion_timeout()
	 * below — confirm the intended units of "t->len * 10".
	 */
	int timeout = t->len * 10;

	sspi = spi_master_get_devdata(spi->master);
	/* reset/start both FIFOs and clear any stale interrupt state */
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
	writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
	if (sspi->left_tx_word < SIRFSOC_SPI_DAT_FRM_LEN_MAX) {
		/* word count fits one frame: let HW auto-stop after N words */
		writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
			SIRFSOC_SPI_ENA_AUTO_CLR | SIRFSOC_SPI_MUL_DAT_MODE,
			sspi->base + SIRFSOC_SPI_CTRL);
		writel(sspi->left_tx_word - 1,
				sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
		writel(sspi->left_tx_word - 1,
				sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
	} else {
		writel(readl(sspi->base + SIRFSOC_SPI_CTRL),
			sspi->base + SIRFSOC_SPI_CTRL);
		writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
		writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
	}
	sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len,
					(t->tx_buf != t->rx_buf) ?
					DMA_FROM_DEVICE : DMA_BIDIRECTIONAL);
	rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
		sspi->dst_start, t->len, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	rx_desc->callback = spi_sirfsoc_dma_fini_callback;
	rx_desc->callback_param = &sspi->rx_done;

	sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len,
					(t->tx_buf != t->rx_buf) ?
					DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
	tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
		sspi->src_start, t->len, DMA_MEM_TO_DEV,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	tx_desc->callback = spi_sirfsoc_dma_fini_callback;
	tx_desc->callback_param = &sspi->tx_done;

	dmaengine_submit(tx_desc);
	dmaengine_submit(rx_desc);
	dma_async_issue_pending(sspi->tx_chan);
	dma_async_issue_pending(sspi->rx_chan);
	/* kick the controller only after both DMA channels are armed */
	writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
			sspi->base + SIRFSOC_SPI_TX_RX_EN);
	if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
		dev_err(&spi->dev, "transfer timeout\n");
		dmaengine_terminate_all(sspi->rx_chan);
	} else
		sspi->left_rx_word = 0;
	/*
	 * we only wait tx-done event if transferring by DMA. for PIO,
	 * we get rx data by writing tx data, so if rx is done, tx has
	 * done earlier
	 */
	if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
		dev_err(&spi->dev, "transfer timeout\n");
		dmaengine_terminate_all(sspi->tx_chan);
	}
	/*
	 * NOTE(review): when tx_buf == rx_buf the buffers were mapped
	 * DMA_BIDIRECTIONAL above but are unmapped here with TO_DEVICE /
	 * FROM_DEVICE; the DMA-API expects matching directions — confirm.
	 */
	dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
	dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
	/* TX, RX FIFO stop */
	writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	if (sspi->left_tx_word >= SIRFSOC_SPI_DAT_FRM_LEN_MAX)
		writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN);
}
409 
/*
 * PIO transfer: the CPU feeds the TX FIFO, waits for the tx-empty and
 * rx-io-dma interrupts, then drains the RX FIFO.  Work proceeds in
 * FIFO-sized chunks (256 bytes / word_width words) until every word has
 * been moved or a timeout occurs.
 */
static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	/*
	 * NOTE(review): passed to wait_for_completion_timeout(), which
	 * expects jiffies — confirm the intended units of "t->len * 10".
	 */
	int timeout = t->len * 10;

	sspi = spi_master_get_devdata(spi->master);
	do {
		/* reset and start both FIFOs for this chunk */
		writel(SIRFSOC_SPI_FIFO_RESET,
			sspi->base + SIRFSOC_SPI_RXFIFO_OP);
		writel(SIRFSOC_SPI_FIFO_RESET,
			sspi->base + SIRFSOC_SPI_TXFIFO_OP);
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + SIRFSOC_SPI_RXFIFO_OP);
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + SIRFSOC_SPI_TXFIFO_OP);
		/* mask and clear interrupts before programming the chunk */
		writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_INT_MASK_ALL,
			sspi->base + SIRFSOC_SPI_INT_STATUS);
		writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
			SIRFSOC_SPI_MUL_DAT_MODE | SIRFSOC_SPI_ENA_AUTO_CLR,
			sspi->base + SIRFSOC_SPI_CTRL);
		/* IO_LEN registers hold (word count - 1) for this chunk */
		writel(min(sspi->left_tx_word, (u32)(256 / sspi->word_width))
				- 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
		writel(min(sspi->left_rx_word, (u32)(256 / sspi->word_width))
				- 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
		/* pre-fill the TX FIFO before enabling the shift engine */
		while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
			& SIRFSOC_SPI_FIFO_FULL)) && sspi->left_tx_word)
			sspi->tx_word(sspi);
		writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
			SIRFSOC_SPI_TX_UFLOW_INT_EN |
			SIRFSOC_SPI_RX_OFLOW_INT_EN |
			SIRFSOC_SPI_RX_IO_DMA_INT_EN,
			sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
			sspi->base + SIRFSOC_SPI_TX_RX_EN);
		if (!wait_for_completion_timeout(&sspi->tx_done, timeout) ||
			!wait_for_completion_timeout(&sspi->rx_done, timeout)) {
			dev_err(&spi->dev, "transfer timeout\n");
			break;
		}
		/* drain whatever the chunk produced into the rx buffer */
		while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
			& SIRFSOC_SPI_FIFO_EMPTY)) && sspi->left_rx_word)
			sspi->rx_word(sspi);
		writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
		writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	} while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0);
}
458 
459 static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
460 {
461         struct sirfsoc_spi *sspi;
462         sspi = spi_master_get_devdata(spi->master);
463 
464         sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
465         sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
466         sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
467         reinit_completion(&sspi->rx_done);
468         reinit_completion(&sspi->tx_done);
469         /*
470          * in the transfer, if transfer data using command register with rx_buf
471          * null, just fill command data into command register and wait for its
472          * completion.
473          */
474         if (sspi->tx_by_cmd)
475                 spi_sirfsoc_cmd_transfer(spi, t);
476         else if (IS_DMA_VALID(t))
477                 spi_sirfsoc_dma_transfer(spi, t);
478         else
479                 spi_sirfsoc_pio_transfer(spi, t);
480 
481         return t->len - sspi->left_rx_word * sspi->word_width;
482 }
483 
484 static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
485 {
486         struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master);
487 
488         if (sspi->hw_cs) {
489                 u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL);
490                 switch (value) {
491                 case BITBANG_CS_ACTIVE:
492                         if (spi->mode & SPI_CS_HIGH)
493                                 regval |= SIRFSOC_SPI_CS_IO_OUT;
494                         else
495                                 regval &= ~SIRFSOC_SPI_CS_IO_OUT;
496                         break;
497                 case BITBANG_CS_INACTIVE:
498                         if (spi->mode & SPI_CS_HIGH)
499                                 regval &= ~SIRFSOC_SPI_CS_IO_OUT;
500                         else
501                                 regval |= SIRFSOC_SPI_CS_IO_OUT;
502                         break;
503                 }
504                 writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
505         } else {
506                 switch (value) {
507                 case BITBANG_CS_ACTIVE:
508                         gpio_direction_output(spi->cs_gpio,
509                                         spi->mode & SPI_CS_HIGH ? 1 : 0);
510                         break;
511                 case BITBANG_CS_INACTIVE:
512                         gpio_direction_output(spi->cs_gpio,
513                                         spi->mode & SPI_CS_HIGH ? 0 : 1);
514                         break;
515                 }
516         }
517 }
518 
519 static int
520 spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
521 {
522         struct sirfsoc_spi *sspi;
523         u8 bits_per_word = 0;
524         int hz = 0;
525         u32 regval;
526         u32 txfifo_ctrl, rxfifo_ctrl;
527         u32 fifo_size = SIRFSOC_SPI_FIFO_SIZE / 4;
528 
529         sspi = spi_master_get_devdata(spi->master);
530 
531         bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
532         hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;
533 
534         regval = (sspi->ctrl_freq / (2 * hz)) - 1;
535         if (regval > 0xFFFF || regval < 0) {
536                 dev_err(&spi->dev, "Speed %d not supported\n", hz);
537                 return -EINVAL;
538         }
539 
540         switch (bits_per_word) {
541         case 8:
542                 regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
543                 sspi->rx_word = spi_sirfsoc_rx_word_u8;
544                 sspi->tx_word = spi_sirfsoc_tx_word_u8;
545                 break;
546         case 12:
547         case 16:
548                 regval |= (bits_per_word ==  12) ?
549                         SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
550                         SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
551                 sspi->rx_word = spi_sirfsoc_rx_word_u16;
552                 sspi->tx_word = spi_sirfsoc_tx_word_u16;
553                 break;
554         case 32:
555                 regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
556                 sspi->rx_word = spi_sirfsoc_rx_word_u32;
557                 sspi->tx_word = spi_sirfsoc_tx_word_u32;
558                 break;
559         default:
560                 BUG();
561         }
562 
563         sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
564         txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
565                                            (sspi->word_width >> 1);
566         rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
567                                            (sspi->word_width >> 1);
568 
569         if (!(spi->mode & SPI_CS_HIGH))
570                 regval |= SIRFSOC_SPI_CS_IDLE_STAT;
571         if (!(spi->mode & SPI_LSB_FIRST))
572                 regval |= SIRFSOC_SPI_TRAN_MSB;
573         if (spi->mode & SPI_CPOL)
574                 regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
575 
576         /*
577          * Data should be driven at least 1/2 cycle before the fetch edge
578          * to make sure that data gets stable at the fetch edge.
579          */
580         if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
581             (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA)))
582                 regval &= ~SIRFSOC_SPI_DRV_POS_EDGE;
583         else
584                 regval |= SIRFSOC_SPI_DRV_POS_EDGE;
585 
586         writel(SIRFSOC_SPI_FIFO_SC(fifo_size - 2) |
587                         SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
588                         SIRFSOC_SPI_FIFO_HC(2),
589                 sspi->base + SIRFSOC_SPI_TXFIFO_LEVEL_CHK);
590         writel(SIRFSOC_SPI_FIFO_SC(2) |
591                         SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
592                         SIRFSOC_SPI_FIFO_HC(fifo_size - 2),
593                 sspi->base + SIRFSOC_SPI_RXFIFO_LEVEL_CHK);
594         writel(txfifo_ctrl, sspi->base + SIRFSOC_SPI_TXFIFO_CTRL);
595         writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL);
596 
597         if (t && t->tx_buf && !t->rx_buf && (t->len <= SIRFSOC_MAX_CMD_BYTES)) {
598                 regval |= (SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
599                                 SIRFSOC_SPI_CMD_MODE);
600                 sspi->tx_by_cmd = true;
601         } else {
602                 regval &= ~SIRFSOC_SPI_CMD_MODE;
603                 sspi->tx_by_cmd = false;
604         }
605         /*
606          * it should never set to hardware cs mode because in hardware cs mode,
607          * cs signal can't controlled by driver.
608          */
609         regval |= SIRFSOC_SPI_CS_IO_MODE;
610         writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
611 
612         if (IS_DMA_VALID(t)) {
613                 /* Enable DMA mode for RX, TX */
614                 writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
615                 writel(SIRFSOC_SPI_RX_DMA_FLUSH,
616                         sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
617         } else {
618                 /* Enable IO mode for RX, TX */
619                 writel(SIRFSOC_SPI_IO_MODE_SEL,
620                         sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
621                 writel(SIRFSOC_SPI_IO_MODE_SEL,
622                         sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
623         }
624 
625         return 0;
626 }
627 
628 static int spi_sirfsoc_setup(struct spi_device *spi)
629 {
630         struct sirfsoc_spi *sspi;
631 
632         if (!spi->max_speed_hz)
633                 return -EINVAL;
634 
635         sspi = spi_master_get_devdata(spi->master);
636 
637         if (spi->cs_gpio == -ENOENT)
638                 sspi->hw_cs = true;
639         else
640                 sspi->hw_cs = false;
641         return spi_sirfsoc_setup_transfer(spi, NULL);
642 }
643 
644 static int spi_sirfsoc_probe(struct platform_device *pdev)
645 {
646         struct sirfsoc_spi *sspi;
647         struct spi_master *master;
648         struct resource *mem_res;
649         int irq;
650         int i, ret;
651 
652         master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
653         if (!master) {
654                 dev_err(&pdev->dev, "Unable to allocate SPI master\n");
655                 return -ENOMEM;
656         }
657         platform_set_drvdata(pdev, master);
658         sspi = spi_master_get_devdata(master);
659 
660         mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
661         sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
662         if (IS_ERR(sspi->base)) {
663                 ret = PTR_ERR(sspi->base);
664                 goto free_master;
665         }
666 
667         irq = platform_get_irq(pdev, 0);
668         if (irq < 0) {
669                 ret = -ENXIO;
670                 goto free_master;
671         }
672         ret = devm_request_irq(&pdev->dev, irq, spi_sirfsoc_irq, 0,
673                                 DRIVER_NAME, sspi);
674         if (ret)
675                 goto free_master;
676 
677         sspi->bitbang.master = master;
678         sspi->bitbang.chipselect = spi_sirfsoc_chipselect;
679         sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
680         sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
681         sspi->bitbang.master->setup = spi_sirfsoc_setup;
682         master->bus_num = pdev->id;
683         master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
684         master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
685                                         SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
686         sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
687 
688         /* request DMA channels */
689         sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
690         if (!sspi->rx_chan) {
691                 dev_err(&pdev->dev, "can not allocate rx dma channel\n");
692                 ret = -ENODEV;
693                 goto free_master;
694         }
695         sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
696         if (!sspi->tx_chan) {
697                 dev_err(&pdev->dev, "can not allocate tx dma channel\n");
698                 ret = -ENODEV;
699                 goto free_rx_dma;
700         }
701 
702         sspi->clk = clk_get(&pdev->dev, NULL);
703         if (IS_ERR(sspi->clk)) {
704                 ret = PTR_ERR(sspi->clk);
705                 goto free_tx_dma;
706         }
707         clk_prepare_enable(sspi->clk);
708         sspi->ctrl_freq = clk_get_rate(sspi->clk);
709 
710         init_completion(&sspi->rx_done);
711         init_completion(&sspi->tx_done);
712 
713         writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
714         writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
715         writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
716         writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
717         /* We are not using dummy delay between command and data */
718         writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL);
719 
720         sspi->dummypage = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
721         if (!sspi->dummypage) {
722                 ret = -ENOMEM;
723                 goto free_clk;
724         }
725 
726         ret = spi_bitbang_start(&sspi->bitbang);
727         if (ret)
728                 goto free_dummypage;
729         for (i = 0; master->cs_gpios && i < master->num_chipselect; i++) {
730                 if (master->cs_gpios[i] == -ENOENT)
731                         continue;
732                 if (!gpio_is_valid(master->cs_gpios[i])) {
733                         dev_err(&pdev->dev, "no valid gpio\n");
734                         ret = -EINVAL;
735                         goto free_dummypage;
736                 }
737                 ret = devm_gpio_request(&pdev->dev,
738                                 master->cs_gpios[i], DRIVER_NAME);
739                 if (ret) {
740                         dev_err(&pdev->dev, "failed to request gpio\n");
741                         goto free_dummypage;
742                 }
743         }
744         dev_info(&pdev->dev, "registerred, bus number = %d\n", master->bus_num);
745 
746         return 0;
747 free_dummypage:
748         kfree(sspi->dummypage);
749 free_clk:
750         clk_disable_unprepare(sspi->clk);
751         clk_put(sspi->clk);
752 free_tx_dma:
753         dma_release_channel(sspi->tx_chan);
754 free_rx_dma:
755         dma_release_channel(sspi->rx_chan);
756 free_master:
757         spi_master_put(master);
758 
759         return ret;
760 }
761 
762 static int  spi_sirfsoc_remove(struct platform_device *pdev)
763 {
764         struct spi_master *master;
765         struct sirfsoc_spi *sspi;
766 
767         master = platform_get_drvdata(pdev);
768         sspi = spi_master_get_devdata(master);
769 
770         spi_bitbang_stop(&sspi->bitbang);
771         kfree(sspi->dummypage);
772         clk_disable_unprepare(sspi->clk);
773         clk_put(sspi->clk);
774         dma_release_channel(sspi->rx_chan);
775         dma_release_channel(sspi->tx_chan);
776         spi_master_put(master);
777         return 0;
778 }
779 
780 #ifdef CONFIG_PM_SLEEP
/* System-sleep suspend: quiesce the master queue, then gate the clock. */
static int spi_sirfsoc_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	clk_disable(sspi->clk);
	return 0;
}
794 
/* System-sleep resume: ungate the clock, re-arm both FIFOs, restart queue. */
static int spi_sirfsoc_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sirfsoc_spi *sspi = spi_master_get_devdata(master);

	clk_enable(sspi->clk);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);

	return spi_master_resume(master);
}
808 #endif
809 
/* PM callbacks take effect only when CONFIG_PM_SLEEP is enabled. */
static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
			 spi_sirfsoc_resume);

/* devicetree match table: prima2 and marco SoC variants */
static const struct of_device_id spi_sirfsoc_of_match[] = {
	{ .compatible = "sirf,prima2-spi", },
	{ .compatible = "sirf,marco-spi", },
	{}
};
MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);

static struct platform_driver spi_sirfsoc_driver = {
	.driver = {
		.name = DRIVER_NAME,
		/* NOTE(review): .owner is set by module_platform_driver()
		 * on recent kernels — confirm target kernel version */
		.owner = THIS_MODULE,
		.pm	= &spi_sirfsoc_pm_ops,
		.of_match_table = spi_sirfsoc_of_match,
	},
	.probe = spi_sirfsoc_probe,
	.remove = spi_sirfsoc_remove,
};
module_platform_driver(spi_sirfsoc_driver);
MODULE_DESCRIPTION("SiRF SoC SPI master driver");
MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>");
MODULE_LICENSE("GPL v2");
835 

This page was automatically generated by LXR 0.3.1 (source).  •  Linux is a registered trademark of Linus Torvalds  •  Contact us