Linux/drivers/dma/sirf-dma.c

/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/of_dma.h>
#include <linux/sirfsoc_dma.h>

#include "dmaengine.h"

#define SIRFSOC_DMA_DESCRIPTORS                 16
#define SIRFSOC_DMA_CHANNELS                    16

#define SIRFSOC_DMA_CH_ADDR                     0x00
#define SIRFSOC_DMA_CH_XLEN                     0x04
#define SIRFSOC_DMA_CH_YLEN                     0x08
#define SIRFSOC_DMA_CH_CTRL                     0x0C

#define SIRFSOC_DMA_WIDTH_0                     0x100
#define SIRFSOC_DMA_CH_VALID                    0x140
#define SIRFSOC_DMA_CH_INT                      0x144
#define SIRFSOC_DMA_INT_EN                      0x148
#define SIRFSOC_DMA_INT_EN_CLR                  0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL                0x150
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR            0x15C

#define SIRFSOC_DMA_MODE_CTRL_BIT               4
#define SIRFSOC_DMA_DIR_CTRL_BIT                5
/* The xlen and dma_width registers are in units of 4-byte words */
#define SIRFSOC_DMA_WORD_LEN                    4

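/*
 * 2D-DMA geometry, as programmed by the prep routines below: xlen is the
 * length of one line and width the line-to-line pitch, both in 32-bit
 * words; ylen is the number of lines minus one. The channel address
 * register takes a word address, so buffers must be 4-byte aligned.
 */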
struct sirfsoc_dma_desc {
        struct dma_async_tx_descriptor  desc;
        struct list_head                node;

        /* SiRFprimaII 2D-DMA parameters */

        int             xlen;           /* DMA xlen */
        int             ylen;           /* DMA ylen */
        int             width;          /* DMA width */
        int             dir;
        bool            cyclic;         /* is loop DMA? */
        u32             addr;           /* DMA buffer address */
};

struct sirfsoc_dma_chan {
        struct dma_chan                 chan;
        struct list_head                free;
        struct list_head                prepared;
        struct list_head                queued;
        struct list_head                active;
        struct list_head                completed;
        unsigned long                   happened_cyclic;
        unsigned long                   completed_cyclic;

        /* Lock for this structure */
        spinlock_t                      lock;

        int                             mode;
};

struct sirfsoc_dma_regs {
        u32                             ctrl[SIRFSOC_DMA_CHANNELS];
        u32                             interrupt_en;
};

struct sirfsoc_dma {
        struct dma_device               dma;
        struct tasklet_struct           tasklet;
        struct sirfsoc_dma_chan         channels[SIRFSOC_DMA_CHANNELS];
        void __iomem                    *base;
        int                             irq;
        struct clk                      *clk;
        bool                            is_marco;
        struct sirfsoc_dma_regs         regs_save;
};

#define DRV_NAME        "sirfsoc_dma"

static int sirfsoc_dma_runtime_suspend(struct device *dev);

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
        return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}

/* Take the next queued descriptor and start it on the hardware */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
        int cid = schan->chan.chan_id;
        struct sirfsoc_dma_desc *sdesc = NULL;

        /*
         * The channel lock is already held by our callers, so we do not
         * take it again here.
         */

        sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
                node);
        /* Move the first queued descriptor to active list */
        list_move_tail(&sdesc->node, &schan->active);

        /* Start the DMA transfer */
        writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
                cid * 4);
        writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
                (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
                sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
        writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
                SIRFSOC_DMA_CH_XLEN);
        writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
                SIRFSOC_DMA_CH_YLEN);
        writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
                (1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);

        /*
         * writel has an implicit memory write barrier, which makes sure
         * data is flushed into memory before the DMA transfer starts
         */
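        /* SIRFSOC_DMA_CH_ADDR takes a word address, hence the shift by 2 */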
        writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);

        if (sdesc->cyclic) {
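                /*
                 * Loop control: bit cid enables loop mode for this
                 * channel; bit (cid + 16) appears to control the extra
                 * mid-loop (BUFA/BUFB) interrupt described in
                 * sirfsoc_dma_prep_cyclic(). Both bits are cleared
                 * together on pause/terminate.
                 */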
                writel((1 << cid) | 1 << (cid + 16) |
                        readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
                        sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
                schan->happened_cyclic = schan->completed_cyclic = 0;
        }
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
        struct sirfsoc_dma *sdma = data;
        struct sirfsoc_dma_chan *schan;
        struct sirfsoc_dma_desc *sdesc = NULL;
        u32 is;
        int ch;

        is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
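        /* Ack and service pending channels, highest bit first (fls() is 1-based) */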
        while ((ch = fls(is) - 1) >= 0) {
                is &= ~(1 << ch);
                writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
                schan = &sdma->channels[ch];

                spin_lock(&schan->lock);

                sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
                        node);
                if (!sdesc->cyclic) {
                        /* Execute queued descriptors */
                        list_splice_tail_init(&schan->active, &schan->completed);
                        if (!list_empty(&schan->queued))
                                sirfsoc_dma_execute(schan);
                } else
                        schan->happened_cyclic++;

                spin_unlock(&schan->lock);
        }

        /* Schedule tasklet */
        tasklet_schedule(&sdma->tasklet);

        return IRQ_HANDLED;
}

/* Process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
        dma_cookie_t last_cookie = 0;
        struct sirfsoc_dma_chan *schan;
        struct sirfsoc_dma_desc *sdesc;
        struct dma_async_tx_descriptor *desc;
        unsigned long flags;
        unsigned long happened_cyclic;
        LIST_HEAD(list);
        int i;

        for (i = 0; i < sdma->dma.chancnt; i++) {
                schan = &sdma->channels[i];

                /* Get all completed descriptors */
                spin_lock_irqsave(&schan->lock, flags);
                if (!list_empty(&schan->completed)) {
                        list_splice_tail_init(&schan->completed, &list);
                        spin_unlock_irqrestore(&schan->lock, flags);

                        /* Execute callbacks and run dependencies */
                        list_for_each_entry(sdesc, &list, node) {
                                desc = &sdesc->desc;

                                if (desc->callback)
                                        desc->callback(desc->callback_param);

                                last_cookie = desc->cookie;
                                dma_run_dependencies(desc);
                        }

                        /* Free descriptors */
                        spin_lock_irqsave(&schan->lock, flags);
                        list_splice_tail_init(&list, &schan->free);
                        schan->chan.completed_cookie = last_cookie;
                        spin_unlock_irqrestore(&schan->lock, flags);
                } else {
                        /* for cyclic channel, desc is always in active list */
                        sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
                                node);

                        if (!sdesc || (sdesc && !sdesc->cyclic)) {
                                /* without active cyclic DMA */
                                spin_unlock_irqrestore(&schan->lock, flags);
                                continue;
                        }

                        /* cyclic DMA */
                        happened_cyclic = schan->happened_cyclic;
                        spin_unlock_irqrestore(&schan->lock, flags);

                        desc = &sdesc->desc;
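                        /* run the period callback once per elapsed period */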
                        while (happened_cyclic != schan->completed_cyclic) {
                                if (desc->callback)
                                        desc->callback(desc->callback_param);
                                schan->completed_cyclic++;
                        }
                }
        }
}

/* DMA Tasklet */
static void sirfsoc_dma_tasklet(unsigned long data)
{
        struct sirfsoc_dma *sdma = (void *)data;

        sirfsoc_dma_process_completed(sdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
        struct sirfsoc_dma_desc *sdesc;
        unsigned long flags;
        dma_cookie_t cookie;

        sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

        spin_lock_irqsave(&schan->lock, flags);

        /* Move descriptor to queue */
        list_move_tail(&sdesc->node, &schan->queued);

        cookie = dma_cookie_assign(txd);

        spin_unlock_irqrestore(&schan->lock, flags);

        return cookie;
}

static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
        struct dma_slave_config *config)
{
        unsigned long flags;

        if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
                (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
                return -EINVAL;

        spin_lock_irqsave(&schan->lock, flags);
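        /*
         * A burst size of four words selects the alternate transfer mode
         * (SIRFSOC_DMA_MODE_CTRL_BIT in the channel control word); the
         * exact hardware semantics are not documented here.
         */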
        schan->mode = (config->src_maxburst == 4 ? 1 : 0);
        spin_unlock_irqrestore(&schan->lock, flags);

        return 0;
}

static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
{
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
        int cid = schan->chan.chan_id;
        unsigned long flags;

        spin_lock_irqsave(&schan->lock, flags);

        if (!sdma->is_marco) {
                writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
                        ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
                writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
                        & ~((1 << cid) | 1 << (cid + 16)),
                        sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
        } else {
                writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
                writel_relaxed((1 << cid) | 1 << (cid + 16),
                        sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
        }

        writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);

        list_splice_tail_init(&schan->active, &schan->free);
        list_splice_tail_init(&schan->queued, &schan->free);

        spin_unlock_irqrestore(&schan->lock, flags);

        return 0;
}

static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
{
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
        int cid = schan->chan.chan_id;
        unsigned long flags;

        spin_lock_irqsave(&schan->lock, flags);

        if (!sdma->is_marco)
                writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
                        & ~((1 << cid) | 1 << (cid + 16)),
                        sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
        else
                writel_relaxed((1 << cid) | 1 << (cid + 16),
                        sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);

        spin_unlock_irqrestore(&schan->lock, flags);

        return 0;
}

static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
{
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
        int cid = schan->chan.chan_id;
        unsigned long flags;

        spin_lock_irqsave(&schan->lock, flags);

        if (!sdma->is_marco)
                writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
                        | ((1 << cid) | 1 << (cid + 16)),
                        sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
        else
                writel_relaxed((1 << cid) | 1 << (cid + 16),
                        sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);

        spin_unlock_irqrestore(&schan->lock, flags);

        return 0;
}

static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        unsigned long arg)
{
        struct dma_slave_config *config;
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);

        switch (cmd) {
        case DMA_PAUSE:
                return sirfsoc_dma_pause_chan(schan);
        case DMA_RESUME:
                return sirfsoc_dma_resume_chan(schan);
        case DMA_TERMINATE_ALL:
                return sirfsoc_dma_terminate_all(schan);
        case DMA_SLAVE_CONFIG:
                config = (struct dma_slave_config *)arg;
                return sirfsoc_dma_slave_config(schan, config);

        default:
                break;
        }

        return -ENOSYS;
}

/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma_desc *sdesc;
        unsigned long flags;
        LIST_HEAD(descs);
        int i;

        pm_runtime_get_sync(sdma->dma.dev);

        /* Alloc descriptors for this channel */
        for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
                sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
                if (!sdesc) {
                        dev_notice(sdma->dma.dev, "Memory allocation error. "
                                "Allocated only %u descriptors\n", i);
                        break;
                }

                dma_async_tx_descriptor_init(&sdesc->desc, chan);
                sdesc->desc.flags = DMA_CTRL_ACK;
                sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

                list_add_tail(&sdesc->node, &descs);
        }

        /* Return error only if no descriptors were allocated */
        if (i == 0) {
                pm_runtime_put(sdma->dma.dev);
                return -ENOMEM;
        }

        spin_lock_irqsave(&schan->lock, flags);
        list_splice_tail_init(&descs, &schan->free);
        spin_unlock_irqrestore(&schan->lock, flags);

        return i;
}

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
        struct sirfsoc_dma_desc *sdesc, *tmp;
        unsigned long flags;
        LIST_HEAD(descs);

        spin_lock_irqsave(&schan->lock, flags);

        /* Channel must be idle */
        BUG_ON(!list_empty(&schan->prepared));
        BUG_ON(!list_empty(&schan->queued));
        BUG_ON(!list_empty(&schan->active));
        BUG_ON(!list_empty(&schan->completed));

        /* Move data */
        list_splice_tail_init(&schan->free, &descs);

        spin_unlock_irqrestore(&schan->lock, flags);

        /* Free descriptors */
        list_for_each_entry_safe(sdesc, tmp, &descs, node)
                kfree(sdesc);

        pm_runtime_put(sdma->dma.dev);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&schan->lock, flags);

        if (list_empty(&schan->active) && !list_empty(&schan->queued))
                sirfsoc_dma_execute(schan);

        spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        struct dma_tx_state *txstate)
{
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        unsigned long flags;
        enum dma_status ret;
        struct sirfsoc_dma_desc *sdesc;
        int cid = schan->chan.chan_id;
        unsigned long dma_pos;
        unsigned long dma_request_bytes;
        unsigned long residue;

        spin_lock_irqsave(&schan->lock, flags);

        sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
                        node);
        dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
                (sdesc->width * SIRFSOC_DMA_WORD_LEN);

        ret = dma_cookie_status(chan, cookie, txstate);
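        /* CH_ADDR holds the current position as a word address; shift to bytes */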
        dma_pos = readl_relaxed(sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR)
                << 2;
        residue = dma_request_bytes - (dma_pos - sdesc->addr);
        dma_set_residue(txstate, residue);

        spin_unlock_irqrestore(&schan->lock, flags);

        return ret;
}

static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
        struct dma_chan *chan, struct dma_interleaved_template *xt,
        unsigned long flags)
{
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma_desc *sdesc = NULL;
        unsigned long iflags;
        int ret;

        if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
                ret = -EINVAL;
                goto err_dir;
        }

        /* Get free descriptor */
        spin_lock_irqsave(&schan->lock, iflags);
        if (!list_empty(&schan->free)) {
                sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
                        node);
                list_del(&sdesc->node);
        }
        spin_unlock_irqrestore(&schan->lock, iflags);

        if (!sdesc) {
                /* try to free completed descriptors */
                sirfsoc_dma_process_completed(sdma);
                ret = 0;
                goto no_desc;
        }

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&schan->lock, iflags);

        /*
         * The number of chunks in a frame can only be 1 for prima2, and
         * ylen (the number of frames minus 1) must be at least 0
         */
        if ((xt->frame_size == 1) && (xt->numf > 0)) {
                sdesc->cyclic = 0;
                sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
                sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
                                SIRFSOC_DMA_WORD_LEN;
                sdesc->ylen = xt->numf - 1;
                if (xt->dir == DMA_MEM_TO_DEV) {
                        sdesc->addr = xt->src_start;
                        sdesc->dir = 1;
                } else {
                        sdesc->addr = xt->dst_start;
                        sdesc->dir = 0;
                }

                list_add_tail(&sdesc->node, &schan->prepared);
        } else {
                pr_err("sirfsoc DMA Invalid xfer\n");
                ret = -EINVAL;
                goto err_xfer;
        }
        spin_unlock_irqrestore(&schan->lock, iflags);

        return &sdesc->desc;
err_xfer:
        spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
        return ERR_PTR(ret);
}

static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
        size_t buf_len, size_t period_len,
        enum dma_transfer_direction direction, unsigned long flags, void *context)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma_desc *sdesc = NULL;
        unsigned long iflags;

        /*
         * We only support cyclic transfers with two periods.
         * If the X-length is set to 0, the controller runs in loop mode:
         * the DMA address keeps increasing until it reaches the end of a
         * loop area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)),
         * then wraps back to the beginning of that area.
         * In loop mode the DMA data region is divided into two parts, BUFA
         * and BUFB, and the controller raises an interrupt twice per loop:
         * once when the DMA address reaches the end of BUFA and once at
         * the end of BUFB.
         */
        if (buf_len != 2 * period_len)
                return ERR_PTR(-EINVAL);

        /* Get free descriptor */
        spin_lock_irqsave(&schan->lock, iflags);
        if (!list_empty(&schan->free)) {
                sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
                        node);
                list_del(&sdesc->node);
        }
        spin_unlock_irqrestore(&schan->lock, iflags);

        if (!sdesc)
                return NULL;

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&schan->lock, iflags);
        sdesc->addr = addr;
        sdesc->cyclic = 1;
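        /* xlen == 0 selects loop mode; the loop area spans width * (ylen + 1) words */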
        sdesc->xlen = 0;
        sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
        sdesc->width = 1;
        list_add_tail(&sdesc->node, &schan->prepared);
        spin_unlock_irqrestore(&schan->lock, iflags);

        return &sdesc->desc;
}

/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function.
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
        unsigned int ch_nr = (unsigned int) chan_id;

        if (ch_nr == chan->chan_id +
                chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
                return true;

        return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);
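
/*
 * Illustrative client-side sketch (not part of this driver): a slave
 * driver can claim a specific channel by passing a global channel id,
 * dev_id * SIRFSOC_DMA_CHANNELS + channel, to dma_request_channel():
 *
 *      dma_cap_mask_t mask;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      chan = dma_request_channel(mask, sirfsoc_dma_filter_id,
 *                                 (void *)(unsigned long)ch_id);
 *
 * where ch_id is a hypothetical channel number taken from the board or
 * device-tree description.
 */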

#define SIRFSOC_DMA_BUSWIDTHS \
        (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
        BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
        BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
        BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
        BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
        struct dma_slave_caps *caps)
{
        caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
        caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
        caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        caps->cmd_pause = true;
        caps->cmd_terminate = true;

        return 0;
}

static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
        struct of_dma *ofdma)
{
        struct sirfsoc_dma *sdma = ofdma->of_dma_data;
        unsigned int request = dma_spec->args[0];

        if (request >= SIRFSOC_DMA_CHANNELS)
                return NULL;

        return dma_get_slave_channel(&sdma->channels[request].chan);
}

static int sirfsoc_dma_probe(struct platform_device *op)
{
        struct device_node *dn = op->dev.of_node;
        struct device *dev = &op->dev;
        struct dma_device *dma;
        struct sirfsoc_dma *sdma;
        struct sirfsoc_dma_chan *schan;
        struct resource res;
        ulong regs_start, regs_size;
        u32 id;
        int ret, i;

        sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
        if (!sdma) {
                dev_err(dev, "Memory exhausted!\n");
                return -ENOMEM;
        }

        if (of_device_is_compatible(dn, "sirf,marco-dmac"))
                sdma->is_marco = true;

        if (of_property_read_u32(dn, "cell-index", &id)) {
                dev_err(dev, "Failed to get DMAC index\n");
                return -ENODEV;
        }

        sdma->irq = irq_of_parse_and_map(dn, 0);
        if (sdma->irq == NO_IRQ) {
                dev_err(dev, "Error mapping IRQ!\n");
                return -EINVAL;
        }

        sdma->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(sdma->clk)) {
                dev_err(dev, "failed to get a clock.\n");
                return PTR_ERR(sdma->clk);
        }

        ret = of_address_to_resource(dn, 0, &res);
        if (ret) {
                dev_err(dev, "Error parsing memory region!\n");
                goto irq_dispose;
        }

        regs_start = res.start;
        regs_size = resource_size(&res);

        sdma->base = devm_ioremap(dev, regs_start, regs_size);
        if (!sdma->base) {
                dev_err(dev, "Error mapping memory region!\n");
                ret = -ENOMEM;
                goto irq_dispose;
        }

        ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
        if (ret) {
                dev_err(dev, "Error requesting IRQ!\n");
                ret = -EINVAL;
                goto irq_dispose;
        }

        dma = &sdma->dma;
        dma->dev = dev;
        dma->chancnt = SIRFSOC_DMA_CHANNELS;

        dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
        dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
        dma->device_issue_pending = sirfsoc_dma_issue_pending;
        dma->device_control = sirfsoc_dma_control;
        dma->device_tx_status = sirfsoc_dma_tx_status;
        dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
        dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
        dma->device_slave_caps = sirfsoc_dma_device_slave_caps;

        INIT_LIST_HEAD(&dma->channels);
        dma_cap_set(DMA_SLAVE, dma->cap_mask);
        dma_cap_set(DMA_CYCLIC, dma->cap_mask);
        dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
        dma_cap_set(DMA_PRIVATE, dma->cap_mask);

        for (i = 0; i < dma->chancnt; i++) {
                schan = &sdma->channels[i];

                schan->chan.device = dma;
                dma_cookie_init(&schan->chan);

                INIT_LIST_HEAD(&schan->free);
                INIT_LIST_HEAD(&schan->prepared);
                INIT_LIST_HEAD(&schan->queued);
                INIT_LIST_HEAD(&schan->active);
                INIT_LIST_HEAD(&schan->completed);

                spin_lock_init(&schan->lock);
                list_add_tail(&schan->chan.device_node, &dma->channels);
        }

        tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

        /* Register DMA engine */
        dev_set_drvdata(dev, sdma);

        ret = dma_async_device_register(dma);
        if (ret)
                goto free_irq;

        /* Device-tree DMA controller registration */
        ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
        if (ret) {
                dev_err(dev, "failed to register DMA controller\n");
                goto unreg_dma_dev;
        }

        pm_runtime_enable(&op->dev);
        dev_info(dev, "initialized SIRFSOC DMAC driver\n");

        return 0;

unreg_dma_dev:
        dma_async_device_unregister(dma);
free_irq:
        free_irq(sdma->irq, sdma);
irq_dispose:
        irq_dispose_mapping(sdma->irq);
        return ret;
}

static int sirfsoc_dma_remove(struct platform_device *op)
{
        struct device *dev = &op->dev;
        struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

        of_dma_controller_free(op->dev.of_node);
        dma_async_device_unregister(&sdma->dma);
        free_irq(sdma->irq, sdma);
        irq_dispose_mapping(sdma->irq);
        pm_runtime_disable(&op->dev);
        if (!pm_runtime_status_suspended(&op->dev))
                sirfsoc_dma_runtime_suspend(&op->dev);

        return 0;
}

static int sirfsoc_dma_runtime_suspend(struct device *dev)
{
        struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

        clk_disable_unprepare(sdma->clk);
        return 0;
}

static int sirfsoc_dma_runtime_resume(struct device *dev)
{
        struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
        int ret;

        ret = clk_prepare_enable(sdma->clk);
        if (ret < 0) {
                dev_err(dev, "clk_enable failed: %d\n", ret);
                return ret;
        }
        return 0;
}

static int sirfsoc_dma_pm_suspend(struct device *dev)
{
        struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
        struct sirfsoc_dma_regs *save = &sdma->regs_save;
        struct sirfsoc_dma_chan *schan;
        int ch;
        int ret;

        /*
         * If we were runtime-suspended, resume first so the clock is
         * enabled before we access any registers.
         */
        if (pm_runtime_status_suspended(dev)) {
                ret = sirfsoc_dma_runtime_resume(dev);
                if (ret < 0)
                        return ret;
        }

        /*
         * The DMA controller loses its register state across suspend, so
         * we need to save the registers of all active channels.
         */
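        /*
         * Only the per-channel control word and the interrupt-enable mask
         * need saving; width/xlen/ylen/addr are reprogrammed from the
         * active descriptor in sirfsoc_dma_pm_resume().
         */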
        for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
                schan = &sdma->channels[ch];
                if (list_empty(&schan->active))
                        continue;
                save->ctrl[ch] = readl_relaxed(sdma->base +
                        ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
        }
        save->interrupt_en = readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN);

        /* Disable clock */
        sirfsoc_dma_runtime_suspend(dev);

        return 0;
}

static int sirfsoc_dma_pm_resume(struct device *dev)
{
        struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
        struct sirfsoc_dma_regs *save = &sdma->regs_save;
        struct sirfsoc_dma_desc *sdesc;
        struct sirfsoc_dma_chan *schan;
        int ch;
        int ret;

        /* Enable clock before accessing registers */
        ret = sirfsoc_dma_runtime_resume(dev);
        if (ret < 0)
                return ret;

        writel_relaxed(save->interrupt_en, sdma->base + SIRFSOC_DMA_INT_EN);
        for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
                schan = &sdma->channels[ch];
                if (list_empty(&schan->active))
                        continue;
                sdesc = list_first_entry(&schan->active,
                        struct sirfsoc_dma_desc,
                        node);
                writel_relaxed(sdesc->width,
                        sdma->base + SIRFSOC_DMA_WIDTH_0 + ch * 4);
                writel_relaxed(sdesc->xlen,
                        sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
                writel_relaxed(sdesc->ylen,
                        sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
                writel_relaxed(save->ctrl[ch],
                        sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
                writel_relaxed(sdesc->addr >> 2,
                        sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
        }

        /* If we were runtime-suspended before, suspend again */
        if (pm_runtime_status_suspended(dev))
                sirfsoc_dma_runtime_suspend(dev);

        return 0;
}

static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
        SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
};

static struct of_device_id sirfsoc_dma_match[] = {
        { .compatible = "sirf,prima2-dmac", },
        { .compatible = "sirf,marco-dmac", },
        {},
};

static struct platform_driver sirfsoc_dma_driver = {
        .probe          = sirfsoc_dma_probe,
        .remove         = sirfsoc_dma_remove,
        .driver = {
                .name = DRV_NAME,
                .owner = THIS_MODULE,
                .pm = &sirfsoc_dma_pm_ops,
                .of_match_table = sirfsoc_dma_match,
        },
};

static int __init sirfsoc_dma_init(void)
{
        return platform_driver_register(&sirfsoc_dma_driver);
}

static void __exit sirfsoc_dma_exit(void)
{
        platform_driver_unregister(&sirfsoc_dma_driver);
}

subsys_initcall(sirfsoc_dma_init);
module_exit(sirfsoc_dma_exit);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
        "Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");
