Linux/drivers/dma/at_hdmac.c

  1 /*
  2  * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
  3  *
  4  * Copyright (C) 2008 Atmel Corporation
  5  *
  6  * This program is free software; you can redistribute it and/or modify
  7  * it under the terms of the GNU General Public License as published by
  8  * the Free Software Foundation; either version 2 of the License, or
  9  * (at your option) any later version.
 10  *
 11  *
 12  * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 13  * The only Atmel DMA Controller that is not covered by this driver is the one
 14  * found on AT91SAM9263.
 15  */
 16 
 17 #include <dt-bindings/dma/at91.h>
 18 #include <linux/clk.h>
 19 #include <linux/dmaengine.h>
 20 #include <linux/dma-mapping.h>
 21 #include <linux/dmapool.h>
 22 #include <linux/interrupt.h>
 23 #include <linux/module.h>
 24 #include <linux/platform_device.h>
 25 #include <linux/slab.h>
 26 #include <linux/of.h>
 27 #include <linux/of_device.h>
 28 #include <linux/of_dma.h>
 29 
 30 #include "at_hdmac_regs.h"
 31 #include "dmaengine.h"
 32 
 33 /*
 34  * Glossary
 35  * --------
 36  *
 37  * at_hdmac             : Name of the Atmel AHB DMA Controller
 38  * at_dma_ / atdma      : Atmel DMA controller entity related
 39  * atc_ / atchan        : Atmel DMA Channel entity related
 40  */
 41 
 42 #define ATC_DEFAULT_CFG         (ATC_FIFOCFG_HALFFIFO)
 43 #define ATC_DEFAULT_CTRLB       (ATC_SIF(AT_DMA_MEM_IF) \
 44                                 |ATC_DIF(AT_DMA_MEM_IF))
 45 
 46 /*
 47  * Initial number of descriptors to allocate for each channel. This could
 48  * be increased during dma usage.
 49  */
 50 static unsigned int init_nr_desc_per_channel = 64;
 51 module_param(init_nr_desc_per_channel, uint, 0644);
 52 MODULE_PARM_DESC(init_nr_desc_per_channel,
 53                  "initial descriptors per channel (default: 64)");
 54 
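
/*
 * Usage note (illustrative, not part of the original source): since the
 * parameter above is declared with mode 0644, the pool size can be set at
 * load time, e.g. "modprobe at_hdmac init_nr_desc_per_channel=128", or
 * read back at runtime from
 * /sys/module/at_hdmac/parameters/init_nr_desc_per_channel.
 */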
 55 
 56 /* prototypes */
 57 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
 58 static void atc_issue_pending(struct dma_chan *chan);
 59 
 60 
 61 /*----------------------------------------------------------------------*/
 62 
 63 static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
 64 {
 65         return list_first_entry(&atchan->active_list,
 66                                 struct at_desc, desc_node);
 67 }
 68 
 69 static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
 70 {
 71         return list_first_entry(&atchan->queue,
 72                                 struct at_desc, desc_node);
 73 }
 74 
 75 /**
 76  * atc_alloc_descriptor - allocate and return an initialized descriptor
 77  * @chan: the channel to allocate descriptors for
 78  * @gfp_flags: GFP allocation flags
 79  *
 80  * Note: The ack-bit is positioned in the descriptor flag at creation time
 81  *       to make initial allocation more convenient. This bit will be cleared
 82  *       and control will be given to client at usage time (during
 83  *       preparation functions).
 84  */
 85 static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
 86                                             gfp_t gfp_flags)
 87 {
 88         struct at_desc  *desc = NULL;
 89         struct at_dma   *atdma = to_at_dma(chan->device);
 90         dma_addr_t phys;
 91 
 92         desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
 93         if (desc) {
 94                 memset(desc, 0, sizeof(struct at_desc));
 95                 INIT_LIST_HEAD(&desc->tx_list);
 96                 dma_async_tx_descriptor_init(&desc->txd, chan);
 97                 /* txd.flags will be overwritten in prep functions */
 98                 desc->txd.flags = DMA_CTRL_ACK;
 99                 desc->txd.tx_submit = atc_tx_submit;
100                 desc->txd.phys = phys;
101         }
102 
103         return desc;
104 }
105 
106 /**
107  * atc_desc_get - get an unused descriptor from free_list
108  * @atchan: channel we want a new descriptor for
109  */
110 static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
111 {
112         struct at_desc *desc, *_desc;
113         struct at_desc *ret = NULL;
114         unsigned long flags;
115         unsigned int i = 0;
116         LIST_HEAD(tmp_list);
117 
118         spin_lock_irqsave(&atchan->lock, flags);
119         list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
120                 i++;
121                 if (async_tx_test_ack(&desc->txd)) {
122                         list_del(&desc->desc_node);
123                         ret = desc;
124                         break;
125                 }
126                 dev_dbg(chan2dev(&atchan->chan_common),
127                                 "desc %p not ACKed\n", desc);
128         }
129         spin_unlock_irqrestore(&atchan->lock, flags);
130         dev_vdbg(chan2dev(&atchan->chan_common),
131                 "scanned %u descriptors on freelist\n", i);
132 
 133         /* no more descriptors available in initial pool: create one more */
134         if (!ret) {
135                 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
136                 if (ret) {
137                         spin_lock_irqsave(&atchan->lock, flags);
138                         atchan->descs_allocated++;
139                         spin_unlock_irqrestore(&atchan->lock, flags);
140                 } else {
141                         dev_err(chan2dev(&atchan->chan_common),
142                                         "not enough descriptors available\n");
143                 }
144         }
145 
146         return ret;
147 }
148 
149 /**
150  * atc_desc_put - move a descriptor, including any children, to the free list
151  * @atchan: channel we work on
152  * @desc: descriptor, at the head of a chain, to move to free list
153  */
154 static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
155 {
156         if (desc) {
157                 struct at_desc *child;
158                 unsigned long flags;
159 
160                 spin_lock_irqsave(&atchan->lock, flags);
161                 list_for_each_entry(child, &desc->tx_list, desc_node)
162                         dev_vdbg(chan2dev(&atchan->chan_common),
163                                         "moving child desc %p to freelist\n",
164                                         child);
165                 list_splice_init(&desc->tx_list, &atchan->free_list);
166                 dev_vdbg(chan2dev(&atchan->chan_common),
167                          "moving desc %p to freelist\n", desc);
168                 list_add(&desc->desc_node, &atchan->free_list);
169                 spin_unlock_irqrestore(&atchan->lock, flags);
170         }
171 }
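
/*
 * Pairing sketch (illustrative comment, not driver code): a descriptor
 * obtained with atc_desc_get() must eventually return to the free list,
 * either via atc_chain_complete() on success or via atc_desc_put() on an
 * error path, as the prep_* functions below do:
 *
 *	desc = atc_desc_get(atchan);
 *	if (!desc)
 *		goto err_desc_get;		// abort, free what was built
 *	...
 * err_desc_get:
 *	atc_desc_put(atchan, first);		// releases the whole chain
 */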
172 
173 /**
174  * atc_desc_chain - build chain adding a descriptor
175  * @first: address of first descriptor of the chain
176  * @prev: address of previous descriptor of the chain
177  * @desc: descriptor to queue
178  *
179  * Called from prep_* functions
180  */
181 static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
182                            struct at_desc *desc)
183 {
184         if (!(*first)) {
185                 *first = desc;
186         } else {
187                 /* inform the HW lli about chaining */
188                 (*prev)->lli.dscr = desc->txd.phys;
189                 /* insert the link descriptor to the LD ring */
190                 list_add_tail(&desc->desc_node,
191                                 &(*first)->tx_list);
192         }
193         *prev = desc;
194 }
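
/*
 * Chaining sketch (illustrative; "d0" and "d1" are hypothetical descriptors
 * returned by atc_desc_get()). This mirrors how the prep_* functions below
 * build a hardware linked list:
 *
 *	struct at_desc *first = NULL, *prev = NULL;
 *
 *	atc_desc_chain(&first, &prev, d0);	// first = prev = d0
 *	atc_desc_chain(&first, &prev, d1);	// d0->lli.dscr = d1->txd.phys
 *	set_desc_eol(prev);			// terminate the hardware list
 */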
195 
196 /**
197  * atc_dostart - starts the DMA engine for real
198  * @atchan: the channel we want to start
199  * @first: first descriptor in the list we want to begin with
200  *
201  * Called with atchan->lock held and bh disabled
202  */
203 static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
204 {
205         struct at_dma   *atdma = to_at_dma(atchan->chan_common.device);
206 
207         /* ASSERT:  channel is idle */
208         if (atc_chan_is_enabled(atchan)) {
209                 dev_err(chan2dev(&atchan->chan_common),
210                         "BUG: Attempted to start non-idle channel\n");
211                 dev_err(chan2dev(&atchan->chan_common),
212                         "  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
213                         channel_readl(atchan, SADDR),
214                         channel_readl(atchan, DADDR),
215                         channel_readl(atchan, CTRLA),
216                         channel_readl(atchan, CTRLB),
217                         channel_readl(atchan, DSCR));
218 
219                 /* The tasklet will hopefully advance the queue... */
220                 return;
221         }
222 
223         vdbg_dump_regs(atchan);
224 
225         channel_writel(atchan, SADDR, 0);
226         channel_writel(atchan, DADDR, 0);
227         channel_writel(atchan, CTRLA, 0);
228         channel_writel(atchan, CTRLB, 0);
229         channel_writel(atchan, DSCR, first->txd.phys);
230         dma_writel(atdma, CHER, atchan->mask);
231 
232         vdbg_dump_regs(atchan);
233 }
234 
235 /*
236  * atc_get_current_descriptors -
 237  * locate the descriptor whose physical address matches the one in DSCR
 238  * @atchan: the channel we are working on
239  * @dscr_addr: physical descriptor address in DSCR
240  */
241 static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan,
242                                                         u32 dscr_addr)
243 {
244         struct at_desc  *desc, *_desc, *child, *desc_cur = NULL;
245 
246         list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
247                 if (desc->lli.dscr == dscr_addr) {
248                         desc_cur = desc;
249                         break;
250                 }
251 
252                 list_for_each_entry(child, &desc->tx_list, desc_node) {
253                         if (child->lli.dscr == dscr_addr) {
254                                 desc_cur = child;
255                                 break;
256                         }
257                 }
258         }
259 
260         return desc_cur;
261 }
262 
263 /*
264  * atc_get_bytes_left -
 265  * Get the number of residual bytes in the DMA buffer
 266  * @chan: the channel to examine
267  */
268 static int atc_get_bytes_left(struct dma_chan *chan)
269 {
270         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
271         struct at_dma           *atdma = to_at_dma(chan->device);
272         int     chan_id = atchan->chan_common.chan_id;
273         struct at_desc *desc_first = atc_first_active(atchan);
274         struct at_desc *desc_cur;
275         int ret = 0, count = 0;
276 
277         /*
 278          * Initialize necessary values the first time around.
 279          * remain_desc records the remaining descriptor length.
280          */
281         if (atchan->remain_desc == 0)
 282                 /* First descriptor embeds the transaction length */
283                 atchan->remain_desc = desc_first->len;
284 
285         /*
 286          * This happens when the current descriptor transfer is complete.
 287          * The residual buffer size is reduced by the current descriptor length.
288          */
289         if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) {
290                 clear_bit(ATC_IS_BTC, &atchan->status);
291                 desc_cur = atc_get_current_descriptors(atchan,
292                                                 channel_readl(atchan, DSCR));
293                 if (!desc_cur) {
294                         ret = -EINVAL;
295                         goto out;
296                 }
297 
298                 count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX)
299                         << desc_first->tx_width;
300                 if (atchan->remain_desc < count) {
301                         ret = -EINVAL;
302                         goto out;
303                 }
304 
305                 atchan->remain_desc -= count;
306                 ret = atchan->remain_desc;
307         } else {
308                 /*
 309                  * Get residual bytes while the current
 310                  * descriptor transfer is in progress.
311                  */
312                 count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX)
313                                 << (desc_first->tx_width);
314                 ret = atchan->remain_desc - count;
315         }
316         /*
 317          * Check whether the channel FIFO is empty.
318          */
319         if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id)))
320                 atc_issue_pending(chan);
321 
322 out:
323         return ret;
324 }
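
/*
 * Worked example (illustrative): with tx_width == 2 (32-bit transfers) and
 * the CTRLA BTSIZE field reading 0x10, count = 0x10 << 2 = 64 bytes, so the
 * reported residue is remain_desc - 64.
 */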
325 
326 /**
327  * atc_chain_complete - finish work for one transaction chain
328  * @atchan: channel we work on
 329  * @desc: descriptor at the head of the chain we want to complete
330  *
331  * Called with atchan->lock held and bh disabled */
332 static void
333 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
334 {
335         struct dma_async_tx_descriptor  *txd = &desc->txd;
336 
337         dev_vdbg(chan2dev(&atchan->chan_common),
338                 "descriptor %u complete\n", txd->cookie);
339 
 340         /* mark the descriptor as complete for non-cyclic cases only */
341         if (!atc_chan_is_cyclic(atchan))
342                 dma_cookie_complete(txd);
343 
344         /* move children to free_list */
345         list_splice_init(&desc->tx_list, &atchan->free_list);
346         /* move myself to free_list */
347         list_move(&desc->desc_node, &atchan->free_list);
348 
349         dma_descriptor_unmap(txd);
 350         /* for cyclic transfers,
 351          * no need to invoke the callback function while stopping */
352         if (!atc_chan_is_cyclic(atchan)) {
353                 dma_async_tx_callback   callback = txd->callback;
354                 void                    *param = txd->callback_param;
355 
356                 /*
357                  * The API requires that no submissions are done from a
358                  * callback, so we don't need to drop the lock here
359                  */
360                 if (callback)
361                         callback(param);
362         }
363 
364         dma_run_dependencies(txd);
365 }
366 
367 /**
368  * atc_complete_all - finish work for all transactions
369  * @atchan: channel to complete transactions for
370  *
371  * Eventually submit queued descriptors if any
372  *
373  * Assume channel is idle while calling this function
374  * Called with atchan->lock held and bh disabled
375  */
376 static void atc_complete_all(struct at_dma_chan *atchan)
377 {
378         struct at_desc *desc, *_desc;
379         LIST_HEAD(list);
380 
381         dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
382 
383         /*
384          * Submit queued descriptors ASAP, i.e. before we go through
385          * the completed ones.
386          */
387         if (!list_empty(&atchan->queue))
388                 atc_dostart(atchan, atc_first_queued(atchan));
 389         /* empty active_list now that it is completed */
390         list_splice_init(&atchan->active_list, &list);
391         /* empty queue list by moving descriptors (if any) to active_list */
392         list_splice_init(&atchan->queue, &atchan->active_list);
393 
394         list_for_each_entry_safe(desc, _desc, &list, desc_node)
395                 atc_chain_complete(atchan, desc);
396 }
397 
398 /**
399  * atc_advance_work - at the end of a transaction, move forward
400  * @atchan: channel where the transaction ended
401  *
402  * Called with atchan->lock held and bh disabled
403  */
404 static void atc_advance_work(struct at_dma_chan *atchan)
405 {
406         dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
407 
408         if (atc_chan_is_enabled(atchan))
409                 return;
410 
411         if (list_empty(&atchan->active_list) ||
412             list_is_singular(&atchan->active_list)) {
413                 atc_complete_all(atchan);
414         } else {
415                 atc_chain_complete(atchan, atc_first_active(atchan));
416                 /* advance work */
417                 atc_dostart(atchan, atc_first_active(atchan));
418         }
419 }
420 
421 
422 /**
423  * atc_handle_error - handle errors reported by DMA controller
424  * @atchan: channel where error occurs
425  *
426  * Called with atchan->lock held and bh disabled
427  */
428 static void atc_handle_error(struct at_dma_chan *atchan)
429 {
430         struct at_desc *bad_desc;
431         struct at_desc *child;
432 
433         /*
434          * The descriptor currently at the head of the active list is
 435          * broken. Since we don't have any way to report errors, we'll
436          * just have to scream loudly and try to carry on.
437          */
438         bad_desc = atc_first_active(atchan);
439         list_del_init(&bad_desc->desc_node);
440 
 441         /* As we are stopped, take this opportunity to push queued
 442          * descriptors into the active_list */
443         list_splice_init(&atchan->queue, atchan->active_list.prev);
444 
445         /* Try to restart the controller */
446         if (!list_empty(&atchan->active_list))
447                 atc_dostart(atchan, atc_first_active(atchan));
448 
449         /*
 450          * KERN_CRIT may seem harsh, but since this only happens
451          * when someone submits a bad physical address in a
452          * descriptor, we should consider ourselves lucky that the
453          * controller flagged an error instead of scribbling over
454          * random memory locations.
455          */
456         dev_crit(chan2dev(&atchan->chan_common),
457                         "Bad descriptor submitted for DMA!\n");
458         dev_crit(chan2dev(&atchan->chan_common),
459                         "  cookie: %d\n", bad_desc->txd.cookie);
460         atc_dump_lli(atchan, &bad_desc->lli);
461         list_for_each_entry(child, &bad_desc->tx_list, desc_node)
462                 atc_dump_lli(atchan, &child->lli);
463 
464         /* Pretend the descriptor completed successfully */
465         atc_chain_complete(atchan, bad_desc);
466 }
467 
468 /**
469  * atc_handle_cyclic - at the end of a period, run callback function
470  * @atchan: channel used for cyclic operations
471  *
472  * Called with atchan->lock held and bh disabled
473  */
474 static void atc_handle_cyclic(struct at_dma_chan *atchan)
475 {
476         struct at_desc                  *first = atc_first_active(atchan);
477         struct dma_async_tx_descriptor  *txd = &first->txd;
478         dma_async_tx_callback           callback = txd->callback;
479         void                            *param = txd->callback_param;
480 
481         dev_vdbg(chan2dev(&atchan->chan_common),
482                         "new cyclic period llp 0x%08x\n",
483                         channel_readl(atchan, DSCR));
484 
485         if (callback)
486                 callback(param);
487 }
488 
489 /*--  IRQ & Tasklet  ---------------------------------------------------*/
490 
491 static void atc_tasklet(unsigned long data)
492 {
493         struct at_dma_chan *atchan = (struct at_dma_chan *)data;
494         unsigned long flags;
495 
496         spin_lock_irqsave(&atchan->lock, flags);
497         if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
498                 atc_handle_error(atchan);
499         else if (atc_chan_is_cyclic(atchan))
500                 atc_handle_cyclic(atchan);
501         else
502                 atc_advance_work(atchan);
503 
504         spin_unlock_irqrestore(&atchan->lock, flags);
505 }
506 
507 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
508 {
509         struct at_dma           *atdma = (struct at_dma *)dev_id;
510         struct at_dma_chan      *atchan;
511         int                     i;
512         u32                     status, pending, imr;
513         int                     ret = IRQ_NONE;
514 
515         do {
516                 imr = dma_readl(atdma, EBCIMR);
517                 status = dma_readl(atdma, EBCISR);
518                 pending = status & imr;
519 
520                 if (!pending)
521                         break;
522 
523                 dev_vdbg(atdma->dma_common.dev,
524                         "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
525                          status, imr, pending);
526 
527                 for (i = 0; i < atdma->dma_common.chancnt; i++) {
528                         atchan = &atdma->chan[i];
529                         if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
530                                 if (pending & AT_DMA_ERR(i)) {
531                                         /* Disable channel on AHB error */
532                                         dma_writel(atdma, CHDR,
533                                                 AT_DMA_RES(i) | atchan->mask);
534                                         /* Give information to tasklet */
535                                         set_bit(ATC_IS_ERROR, &atchan->status);
536                                 }
537                                 if (pending & AT_DMA_BTC(i))
538                                         set_bit(ATC_IS_BTC, &atchan->status);
539                                 tasklet_schedule(&atchan->tasklet);
540                                 ret = IRQ_HANDLED;
541                         }
542                 }
543 
544         } while (pending);
545 
546         return ret;
547 }
548 
549 
550 /*--  DMA Engine API  --------------------------------------------------*/
551 
552 /**
553  * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 554  * @tx: descriptor at the head of the transaction chain
555  *
556  * Queue chain if DMA engine is working already
557  *
558  * Cookie increment and adding to active_list or queue must be atomic
559  */
560 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
561 {
562         struct at_desc          *desc = txd_to_at_desc(tx);
563         struct at_dma_chan      *atchan = to_at_dma_chan(tx->chan);
564         dma_cookie_t            cookie;
565         unsigned long           flags;
566 
567         spin_lock_irqsave(&atchan->lock, flags);
568         cookie = dma_cookie_assign(tx);
569 
570         if (list_empty(&atchan->active_list)) {
571                 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
572                                 desc->txd.cookie);
573                 atc_dostart(atchan, desc);
574                 list_add_tail(&desc->desc_node, &atchan->active_list);
575         } else {
576                 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
577                                 desc->txd.cookie);
578                 list_add_tail(&desc->desc_node, &atchan->queue);
579         }
580 
581         spin_unlock_irqrestore(&atchan->lock, flags);
582 
583         return cookie;
584 }
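
/*
 * Client-side note (illustrative): tx_submit is normally reached through
 * the generic dmaengine helpers rather than being called directly:
 *
 *	cookie = dmaengine_submit(tx);		// lands in atc_tx_submit()
 *	dma_async_issue_pending(chan);		// kicks atc_issue_pending()
 */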
585 
586 /**
587  * atc_prep_dma_memcpy - prepare a memcpy operation
588  * @chan: the channel to prepare operation on
 589  * @dest: operation DMA destination address
 590  * @src: operation DMA source address
591  * @len: operation length
592  * @flags: tx descriptor status flags
593  */
594 static struct dma_async_tx_descriptor *
595 atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
596                 size_t len, unsigned long flags)
597 {
598         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
599         struct at_desc          *desc = NULL;
600         struct at_desc          *first = NULL;
601         struct at_desc          *prev = NULL;
602         size_t                  xfer_count;
603         size_t                  offset;
604         unsigned int            src_width;
605         unsigned int            dst_width;
606         u32                     ctrla;
607         u32                     ctrlb;
608 
609         dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
610                         dest, src, len, flags);
611 
612         if (unlikely(!len)) {
613                 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
614                 return NULL;
615         }
616 
617         ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
618                 | ATC_SRC_ADDR_MODE_INCR
619                 | ATC_DST_ADDR_MODE_INCR
620                 | ATC_FC_MEM2MEM;
621 
622         /*
623          * We can be a lot more clever here, but this should take care
624          * of the most common optimization.
625          */
 626         if (!((src | dest | len) & 3)) {
627                 ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
628                 src_width = dst_width = 2;
629         } else if (!((src | dest | len) & 1)) {
630                 ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
631                 src_width = dst_width = 1;
632         } else {
633                 ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
634                 src_width = dst_width = 0;
635         }
636 
637         for (offset = 0; offset < len; offset += xfer_count << src_width) {
638                 xfer_count = min_t(size_t, (len - offset) >> src_width,
639                                 ATC_BTSIZE_MAX);
640 
641                 desc = atc_desc_get(atchan);
642                 if (!desc)
643                         goto err_desc_get;
644 
645                 desc->lli.saddr = src + offset;
646                 desc->lli.daddr = dest + offset;
647                 desc->lli.ctrla = ctrla | xfer_count;
648                 desc->lli.ctrlb = ctrlb;
649 
650                 desc->txd.cookie = 0;
651 
652                 atc_desc_chain(&first, &prev, desc);
653         }
654 
 655         /* First descriptor of the chain embeds additional information */
656         first->txd.cookie = -EBUSY;
657         first->len = len;
658         first->tx_width = src_width;
659 
 660         /* set end-of-link to the last link descriptor of the list */
661         set_desc_eol(desc);
662 
663         first->txd.flags = flags; /* client is in control of this ack */
664 
665         return &first->txd;
666 
667 err_desc_get:
668         atc_desc_put(atchan, first);
669         return NULL;
670 }
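
/*
 * Usage sketch (illustrative; "chan", "dst" and "src" are hypothetical, the
 * channel coming from dma_request_channel() and both addresses being
 * DMA-mapped):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!tx)
 *		return -ENOMEM;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */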
671 
672 
673 /**
674  * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
675  * @chan: DMA channel
676  * @sgl: scatterlist to transfer to/from
 677  * @sg_len: number of entries in @sgl
678  * @direction: DMA direction
679  * @flags: tx descriptor status flags
680  * @context: transaction context (ignored)
681  */
682 static struct dma_async_tx_descriptor *
683 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
684                 unsigned int sg_len, enum dma_transfer_direction direction,
685                 unsigned long flags, void *context)
686 {
687         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
688         struct at_dma_slave     *atslave = chan->private;
689         struct dma_slave_config *sconfig = &atchan->dma_sconfig;
690         struct at_desc          *first = NULL;
691         struct at_desc          *prev = NULL;
692         u32                     ctrla;
693         u32                     ctrlb;
694         dma_addr_t              reg;
695         unsigned int            reg_width;
696         unsigned int            mem_width;
697         unsigned int            i;
698         struct scatterlist      *sg;
699         size_t                  total_len = 0;
700 
701         dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
702                         sg_len,
703                         direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
704                         flags);
705 
706         if (unlikely(!atslave || !sg_len)) {
 707                 dev_dbg(chan2dev(chan), "prep_slave_sg: no slave data or zero sg length!\n");
708                 return NULL;
709         }
710 
711         ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
712                 | ATC_DCSIZE(sconfig->dst_maxburst);
713         ctrlb = ATC_IEN;
714 
715         switch (direction) {
716         case DMA_MEM_TO_DEV:
717                 reg_width = convert_buswidth(sconfig->dst_addr_width);
718                 ctrla |=  ATC_DST_WIDTH(reg_width);
719                 ctrlb |=  ATC_DST_ADDR_MODE_FIXED
720                         | ATC_SRC_ADDR_MODE_INCR
721                         | ATC_FC_MEM2PER
722                         | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
723                 reg = sconfig->dst_addr;
724                 for_each_sg(sgl, sg, sg_len, i) {
725                         struct at_desc  *desc;
726                         u32             len;
727                         u32             mem;
728 
729                         desc = atc_desc_get(atchan);
730                         if (!desc)
731                                 goto err_desc_get;
732 
733                         mem = sg_dma_address(sg);
734                         len = sg_dma_len(sg);
735                         if (unlikely(!len)) {
736                                 dev_dbg(chan2dev(chan),
737                                         "prep_slave_sg: sg(%d) data length is zero\n", i);
738                                 goto err;
739                         }
740                         mem_width = 2;
741                         if (unlikely(mem & 3 || len & 3))
742                                 mem_width = 0;
743 
744                         desc->lli.saddr = mem;
745                         desc->lli.daddr = reg;
746                         desc->lli.ctrla = ctrla
747                                         | ATC_SRC_WIDTH(mem_width)
748                                         | len >> mem_width;
749                         desc->lli.ctrlb = ctrlb;
750 
751                         atc_desc_chain(&first, &prev, desc);
752                         total_len += len;
753                 }
754                 break;
755         case DMA_DEV_TO_MEM:
756                 reg_width = convert_buswidth(sconfig->src_addr_width);
757                 ctrla |=  ATC_SRC_WIDTH(reg_width);
758                 ctrlb |=  ATC_DST_ADDR_MODE_INCR
759                         | ATC_SRC_ADDR_MODE_FIXED
760                         | ATC_FC_PER2MEM
761                         | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);
762 
763                 reg = sconfig->src_addr;
764                 for_each_sg(sgl, sg, sg_len, i) {
765                         struct at_desc  *desc;
766                         u32             len;
767                         u32             mem;
768 
769                         desc = atc_desc_get(atchan);
770                         if (!desc)
771                                 goto err_desc_get;
772 
773                         mem = sg_dma_address(sg);
774                         len = sg_dma_len(sg);
775                         if (unlikely(!len)) {
776                                 dev_dbg(chan2dev(chan),
777                                         "prep_slave_sg: sg(%d) data length is zero\n", i);
778                                 goto err;
779                         }
780                         mem_width = 2;
781                         if (unlikely(mem & 3 || len & 3))
782                                 mem_width = 0;
783 
784                         desc->lli.saddr = reg;
785                         desc->lli.daddr = mem;
786                         desc->lli.ctrla = ctrla
787                                         | ATC_DST_WIDTH(mem_width)
788                                         | len >> reg_width;
789                         desc->lli.ctrlb = ctrlb;
790 
791                         atc_desc_chain(&first, &prev, desc);
792                         total_len += len;
793                 }
794                 break;
795         default:
796                 return NULL;
797         }
798 
 799         /* set end-of-link to the last link descriptor of the list */
800         set_desc_eol(prev);
801 
 802         /* First descriptor of the chain embeds additional information */
803         first->txd.cookie = -EBUSY;
804         first->len = total_len;
805         first->tx_width = reg_width;
806 
 807         /* first link descriptor of the list is responsible for the flags */
808         first->txd.flags = flags; /* client is in control of this ack */
809 
810         return &first->txd;
811 
812 err_desc_get:
813         dev_err(chan2dev(chan), "not enough descriptors available\n");
814 err:
815         atc_desc_put(atchan, first);
816         return NULL;
817 }
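
/*
 * Slave usage sketch (illustrative; "chan", "sgl", "sg_len" and
 * "fifo_phys_addr" are hypothetical). The channel must be configured before
 * any prep call, since ctrla/ctrlb above are derived from dma_sconfig:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 4,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 */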
818 
819 /**
 820  * atc_dma_cyclic_check_values - check for too-big/unaligned periods
 821  * and an unaligned DMA buffer
822  */
823 static int
824 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
825                 size_t period_len)
826 {
827         if (period_len > (ATC_BTSIZE_MAX << reg_width))
828                 goto err_out;
829         if (unlikely(period_len & ((1 << reg_width) - 1)))
830                 goto err_out;
831         if (unlikely(buf_addr & ((1 << reg_width) - 1)))
832                 goto err_out;
833 
834         return 0;
835 
836 err_out:
837         return -EINVAL;
838 }
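
/*
 * Example (illustrative): with reg_width == 2 (32-bit register accesses) a
 * period may span at most ATC_BTSIZE_MAX << 2 bytes, and both buf_addr and
 * period_len must be multiples of 4; anything else yields -EINVAL.
 */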
839 
840 /**
841  * atc_dma_cyclic_fill_desc - Fill one period descriptor
842  */
843 static int
844 atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
845                 unsigned int period_index, dma_addr_t buf_addr,
846                 unsigned int reg_width, size_t period_len,
847                 enum dma_transfer_direction direction)
848 {
849         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
850         struct dma_slave_config *sconfig = &atchan->dma_sconfig;
851         u32                     ctrla;
852 
 853         /* prepare common CTRLA value */
854         ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
855                 | ATC_DCSIZE(sconfig->dst_maxburst)
856                 | ATC_DST_WIDTH(reg_width)
857                 | ATC_SRC_WIDTH(reg_width)
858                 | period_len >> reg_width;
859 
860         switch (direction) {
861         case DMA_MEM_TO_DEV:
862                 desc->lli.saddr = buf_addr + (period_len * period_index);
863                 desc->lli.daddr = sconfig->dst_addr;
864                 desc->lli.ctrla = ctrla;
865                 desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
866                                 | ATC_SRC_ADDR_MODE_INCR
867                                 | ATC_FC_MEM2PER
868                                 | ATC_SIF(atchan->mem_if)
869                                 | ATC_DIF(atchan->per_if);
870                 break;
871 
872         case DMA_DEV_TO_MEM:
873                 desc->lli.saddr = sconfig->src_addr;
874                 desc->lli.daddr = buf_addr + (period_len * period_index);
875                 desc->lli.ctrla = ctrla;
876                 desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
877                                 | ATC_SRC_ADDR_MODE_FIXED
878                                 | ATC_FC_PER2MEM
879                                 | ATC_SIF(atchan->per_if)
880                                 | ATC_DIF(atchan->mem_if);
881                 break;
882 
883         default:
884                 return -EINVAL;
885         }
886 
887         return 0;
888 }
889 
890 /**
891  * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
892  * @chan: the DMA channel to prepare
893  * @buf_addr: physical DMA address where the buffer starts
894  * @buf_len: total number of bytes for the entire buffer
895  * @period_len: number of bytes for each period
896  * @direction: transfer direction, to or from device
897  * @flags: tx descriptor status flags
898  */
899 static struct dma_async_tx_descriptor *
900 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
901                 size_t period_len, enum dma_transfer_direction direction,
902                 unsigned long flags)
903 {
904         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
905         struct at_dma_slave     *atslave = chan->private;
906         struct dma_slave_config *sconfig = &atchan->dma_sconfig;
907         struct at_desc          *first = NULL;
908         struct at_desc          *prev = NULL;
909         unsigned long           was_cyclic;
910         unsigned int            reg_width;
911         unsigned int            periods = buf_len / period_len;
912         unsigned int            i;
913 
 914         dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%zu/%zu)\n",
915                         direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
916                         buf_addr,
917                         periods, buf_len, period_len);
918 
919         if (unlikely(!atslave || !buf_len || !period_len)) {
920                 dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
921                 return NULL;
922         }
923 
924         was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
925         if (was_cyclic) {
926                 dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
927                 return NULL;
928         }
929 
930         if (unlikely(!is_slave_direction(direction)))
931                 goto err_out;
932 
 933         if (direction == DMA_MEM_TO_DEV)
934                 reg_width = convert_buswidth(sconfig->dst_addr_width);
935         else
936                 reg_width = convert_buswidth(sconfig->src_addr_width);
937 
938         /* Check for too big/unaligned periods and unaligned DMA buffer */
939         if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
940                 goto err_out;
941 
942         /* build cyclic linked list */
943         for (i = 0; i < periods; i++) {
944                 struct at_desc  *desc;
945 
946                 desc = atc_desc_get(atchan);
947                 if (!desc)
948                         goto err_desc_get;
949 
950                 if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
951                                              reg_width, period_len, direction))
952                         goto err_desc_get;
953 
954                 atc_desc_chain(&first, &prev, desc);
955         }
956 
 957         /* let's make a cyclic list */
958         prev->lli.dscr = first->txd.phys;
959 
 960         /* First descriptor of the chain embeds additional information */
961         first->txd.cookie = -EBUSY;
962         first->len = buf_len;
963         first->tx_width = reg_width;
964 
965         return &first->txd;
966 
967 err_desc_get:
968         dev_err(chan2dev(chan), "not enough descriptors available\n");
969         atc_desc_put(atchan, first);
970 err_out:
971         clear_bit(ATC_IS_CYCLIC, &atchan->status);
972         return NULL;
973 }
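
/*
 * Cyclic usage sketch (illustrative; typical for audio ring buffers, with
 * "chan", "buf_phys" and "period_done" hypothetical):
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	tx->callback = period_done;	// run by atc_handle_cyclic() each period
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */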
974 
975 static int set_runtime_config(struct dma_chan *chan,
976                               struct dma_slave_config *sconfig)
977 {
978         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
979 
 980         /* Check if the channel is configured for slave transfers */
981         if (!chan->private)
982                 return -EINVAL;
983 
984         memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
985 
986         convert_burst(&atchan->dma_sconfig.src_maxburst);
987         convert_burst(&atchan->dma_sconfig.dst_maxburst);
988 
989         return 0;
990 }
991 
992 
993 static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
994                        unsigned long arg)
995 {
996         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
997         struct at_dma           *atdma = to_at_dma(chan->device);
998         int                     chan_id = atchan->chan_common.chan_id;
999         unsigned long           flags;
1000 
1001         LIST_HEAD(list);
1002 
1003         dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
1004 
1005         if (cmd == DMA_PAUSE) {
1006                 spin_lock_irqsave(&atchan->lock, flags);
1007 
1008                 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
1009                 set_bit(ATC_IS_PAUSED, &atchan->status);
1010 
1011                 spin_unlock_irqrestore(&atchan->lock, flags);
1012         } else if (cmd == DMA_RESUME) {
1013                 if (!atc_chan_is_paused(atchan))
1014                         return 0;
1015 
1016                 spin_lock_irqsave(&atchan->lock, flags);
1017 
1018                 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
1019                 clear_bit(ATC_IS_PAUSED, &atchan->status);
1020 
1021                 spin_unlock_irqrestore(&atchan->lock, flags);
1022         } else if (cmd == DMA_TERMINATE_ALL) {
1023                 struct at_desc  *desc, *_desc;
1024                 /*
1025                  * This is only called when something went wrong elsewhere, so
1026                  * we don't really care about the data. Just disable the
1027                  * channel. We still have to poll the channel enable bit due
1028                  * to AHB/HSB limitations.
1029                  */
1030                 spin_lock_irqsave(&atchan->lock, flags);
1031 
1032                 /* disabling channel: must also remove suspend state */
1033                 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
1034 
1035                 /* confirm that this channel is disabled */
1036                 while (dma_readl(atdma, CHSR) & atchan->mask)
1037                         cpu_relax();
1038 
1039                 /* active_list entries will end up before queued entries */
1040                 list_splice_init(&atchan->queue, &list);
1041                 list_splice_init(&atchan->active_list, &list);
1042 
1043                 /* Flush all pending and queued descriptors */
1044                 list_for_each_entry_safe(desc, _desc, &list, desc_node)
1045                         atc_chain_complete(atchan, desc);
1046 
1047                 clear_bit(ATC_IS_PAUSED, &atchan->status);
1048                 /* if channel dedicated to cyclic operations, free it */
1049                 clear_bit(ATC_IS_CYCLIC, &atchan->status);
1050 
1051                 spin_unlock_irqrestore(&atchan->lock, flags);
1052         } else if (cmd == DMA_SLAVE_CONFIG) {
1053                 return set_runtime_config(chan, (struct dma_slave_config *)arg);
1054         } else {
1055                 return -ENXIO;
1056         }
1057 
1058         return 0;
1059 }
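
/*
 * Control usage sketch (illustrative): clients normally reach atc_control()
 * through the dmaengine wrappers, which map onto the commands handled above:
 *
 *	dmaengine_pause(chan);			// DMA_PAUSE
 *	dmaengine_resume(chan);			// DMA_RESUME
 *	dmaengine_terminate_all(chan);		// DMA_TERMINATE_ALL
 *	dmaengine_slave_config(chan, &cfg);	// DMA_SLAVE_CONFIG
 */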
1060 
1061 /**
1062  * atc_tx_status - poll for transaction completion
1063  * @chan: DMA channel
1064  * @cookie: transaction identifier to check status of
1065  * @txstate: if not %NULL updated with transaction state
1066  *
1067  * If @txstate is passed in, upon return it reflects the driver
1068  * internal state and can be used with dma_async_is_complete() to check
1069  * the status of multiple cookies without re-checking hardware state.
1070  */
1071 static enum dma_status
1072 atc_tx_status(struct dma_chan *chan,
1073                 dma_cookie_t cookie,
1074                 struct dma_tx_state *txstate)
1075 {
1076         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1077         unsigned long           flags;
1078         enum dma_status         ret;
1079         int bytes = 0;
1080 
1081         ret = dma_cookie_status(chan, cookie, txstate);
1082         if (ret == DMA_COMPLETE)
1083                 return ret;
1084         /*
1085          * There's no point calculating the residue if there's
1086          * no txstate to store the value.
1087          */
1088         if (!txstate)
1089                 return DMA_ERROR;
1090 
1091         spin_lock_irqsave(&atchan->lock, flags);
1092 
1093         /*  Get number of bytes left in the active transactions */
1094         bytes = atc_get_bytes_left(chan);
1095 
1096         spin_unlock_irqrestore(&atchan->lock, flags);
1097 
1098         if (unlikely(bytes < 0)) {
1099                 dev_vdbg(chan2dev(chan), "get residual bytes error\n");
1100                 return DMA_ERROR;
1101         } else {
1102                 dma_set_residue(txstate, bytes);
1103         }
1104 
1105         dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
1106                  ret, cookie, bytes);
1107 
1108         return ret;
1109 }
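
/*
 * Polling sketch (illustrative): the residue is only computed when a
 * dma_tx_state is supplied, so pass one when it matters:
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS)
 *		pr_info("bytes left: %u\n", state.residue);
 */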
1110 
1111 /**
1112  * atc_issue_pending - try to finish work
1113  * @chan: target DMA channel
1114  */
1115 static void atc_issue_pending(struct dma_chan *chan)
1116 {
1117         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1118         unsigned long           flags;
1119 
1120         dev_vdbg(chan2dev(chan), "issue_pending\n");
1121 
1122         /* Not needed for cyclic transfers */
1123         if (atc_chan_is_cyclic(atchan))
1124                 return;
1125 
1126         spin_lock_irqsave(&atchan->lock, flags);
1127         atc_advance_work(atchan);
1128         spin_unlock_irqrestore(&atchan->lock, flags);
1129 }
1130 
1131 /**
1132  * atc_alloc_chan_resources - allocate resources for DMA channel
1133  * @chan: allocate descriptor resources for this channel
1135  *
1136  * return - the number of allocated descriptors
1137  */
1138 static int atc_alloc_chan_resources(struct dma_chan *chan)
1139 {
1140         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1141         struct at_dma           *atdma = to_at_dma(chan->device);
1142         struct at_desc          *desc;
1143         struct at_dma_slave     *atslave;
1144         unsigned long           flags;
1145         int                     i;
1146         u32                     cfg;
1147         LIST_HEAD(tmp_list);
1148 
1149         dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1150 
1151         /* ASSERT:  channel is idle */
1152         if (atc_chan_is_enabled(atchan)) {
1153                 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1154                 return -EIO;
1155         }
1156 
1157         cfg = ATC_DEFAULT_CFG;
1158 
1159         atslave = chan->private;
1160         if (atslave) {
1161                 /*
1162                  * We need controller-specific data to set up slave
1163                  * transfers.
1164                  */
1165                 BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
1166 
1167                 /* if a cfg configuration is specified, use it instead of the default */
1168                 if (atslave->cfg)
1169                         cfg = atslave->cfg;
1170         }
1171 
1172         /* have we already been set up?
1173          * Reconfigure the channel, but no need to reallocate descriptors. */
1174         if (!list_empty(&atchan->free_list))
1175                 return atchan->descs_allocated;
1176 
1177         /* Allocate initial pool of descriptors */
1178         for (i = 0; i < init_nr_desc_per_channel; i++) {
1179                 desc = atc_alloc_descriptor(chan, GFP_KERNEL);
1180                 if (!desc) {
1181                         dev_err(atdma->dma_common.dev,
1182                                 "Only %d initial descriptors\n", i);
1183                         break;
1184                 }
1185                 list_add_tail(&desc->desc_node, &tmp_list);
1186         }
1187 
1188         spin_lock_irqsave(&atchan->lock, flags);
1189         atchan->descs_allocated = i;
1190         atchan->remain_desc = 0;
1191         list_splice(&tmp_list, &atchan->free_list);
1192         dma_cookie_init(chan);
1193         spin_unlock_irqrestore(&atchan->lock, flags);
1194 
1195         /* channel parameters */
1196         channel_writel(atchan, CFG, cfg);
1197 
1198         dev_dbg(chan2dev(chan),
1199                 "alloc_chan_resources: allocated %d descriptors\n",
1200                 atchan->descs_allocated);
1201 
1202         return atchan->descs_allocated;
1203 }
1204 
1205 /**
1206  * atc_free_chan_resources - free all channel resources
1207  * @chan: DMA channel
1208  */
1209 static void atc_free_chan_resources(struct dma_chan *chan)
1210 {
1211         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1212         struct at_dma           *atdma = to_at_dma(chan->device);
1213         struct at_desc          *desc, *_desc;
1214         LIST_HEAD(list);
1215 
1216         dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
1217                 atchan->descs_allocated);
1218 
1219         /* ASSERT:  channel is idle */
1220         BUG_ON(!list_empty(&atchan->active_list));
1221         BUG_ON(!list_empty(&atchan->queue));
1222         BUG_ON(atc_chan_is_enabled(atchan));
1223 
1224         list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
1225                 dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
1226                 list_del(&desc->desc_node);
1227                 /* free link descriptor */
1228                 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
1229         }
1230         list_splice_init(&atchan->free_list, &list);
1231         atchan->descs_allocated = 0;
1232         atchan->status = 0;
1233         atchan->remain_desc = 0;
1234 
1235         dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1236 }
1237 
1238 #ifdef CONFIG_OF
1239 static bool at_dma_filter(struct dma_chan *chan, void *slave)
1240 {
1241         struct at_dma_slave *atslave = slave;
1242 
1243         if (atslave->dma_dev == chan->device->dev) {
1244                 chan->private = atslave;
1245                 return true;
1246         } else {
1247                 return false;
1248         }
1249 }
1250 
1251 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1252                                      struct of_dma *of_dma)
1253 {
1254         struct dma_chan *chan;
1255         struct at_dma_chan *atchan;
1256         struct at_dma_slave *atslave;
1257         dma_cap_mask_t mask;
1258         unsigned int per_id;
1259         struct platform_device *dmac_pdev;
1260 
1261         if (dma_spec->args_count != 2)
1262                 return NULL;
1263 
1264         dmac_pdev = of_find_device_by_node(dma_spec->np);
1265 
1266         dma_cap_zero(mask);
1267         dma_cap_set(DMA_SLAVE, mask);
1268 
1269         atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
1270         if (!atslave)
1271                 return NULL;
1272 
1273         atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
1274         /*
1275          * We can fill both SRC_PER and DST_PER, one of these fields will be
1276          * ignored depending on DMA transfer direction.
1277          */
1278         per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
1279         atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
1280                      | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
1281         /*
1282          * We have to translate the value we get from the device tree since
1283          * the half FIFO configuration value had to be 0 to keep backward
1284          * compatibility.
1285          */
1286         switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
1287         case AT91_DMA_CFG_FIFOCFG_ALAP:
1288                 atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
1289                 break;
1290         case AT91_DMA_CFG_FIFOCFG_ASAP:
1291                 atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
1292                 break;
1293         case AT91_DMA_CFG_FIFOCFG_HALF:
1294         default:
1295                 atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
1296         }
1297         atslave->dma_dev = &dmac_pdev->dev;
1298 
1299         chan = dma_request_channel(mask, at_dma_filter, atslave);
1300         if (!chan)
1301                 return NULL;
1302 
1303         atchan = to_at_dma_chan(chan);
1304         atchan->per_if = dma_spec->args[0] & 0xff;
1305         atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;
1306 
1307         return chan;
1308 }
1309 #else
1310 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1311                                      struct of_dma *of_dma)
1312 {
1313         return NULL;
1314 }
1315 #endif
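
/*
 * Device-tree note (illustrative, derived from at_dma_xlate() above): the
 * binding uses two cells after the controller phandle. Cell 0 packs the AHB
 * interfaces (bits 0-7: peripheral interface, bits 16-23: memory interface);
 * cell 1 packs the peripheral ID plus an optional FIFO configuration. A
 * hypothetical client node could thus use:
 *
 *	dmas = <&dma0 2 (AT91_DMA_CFG_PER_ID(3) | AT91_DMA_CFG_FIFOCFG_ASAP)>;
 */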
1316 
1317 /*--  Module Management  -----------------------------------------------*/
1318 
1319 /* cap_mask is a multi-u32 bitfield; it is filled with dma_cap_set() in at_dma_probe(). */
1320 static struct at_dma_platform_data at91sam9rl_config = {
1321         .nr_channels = 2,
1322 };
1323 static struct at_dma_platform_data at91sam9g45_config = {
1324         .nr_channels = 8,
1325 };
1326 
1327 #if defined(CONFIG_OF)
1328 static const struct of_device_id atmel_dma_dt_ids[] = {
1329         {
1330                 .compatible = "atmel,at91sam9rl-dma",
1331                 .data = &at91sam9rl_config,
1332         }, {
1333                 .compatible = "atmel,at91sam9g45-dma",
1334                 .data = &at91sam9g45_config,
1335         }, {
1336                 /* sentinel */
1337         }
1338 };
1339 
1340 MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
1341 #endif
1342 
1343 static const struct platform_device_id atdma_devtypes[] = {
1344         {
1345                 .name = "at91sam9rl_dma",
1346                 .driver_data = (unsigned long) &at91sam9rl_config,
1347         }, {
1348                 .name = "at91sam9g45_dma",
1349                 .driver_data = (unsigned long) &at91sam9g45_config,
1350         }, {
1351                 /* sentinel */
1352         }
1353 };
1354 
1355 static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
1356                                                 struct platform_device *pdev)
1357 {
1358         if (pdev->dev.of_node) {
1359                 const struct of_device_id *match;
1360                 match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
1361                 if (match == NULL)
1362                         return NULL;
1363                 return match->data;
1364         }
1365         return (struct at_dma_platform_data *)
1366                         platform_get_device_id(pdev)->driver_data;
1367 }
1368 
1369 /**
1370  * at_dma_off - disable DMA controller
1371  * @atdma: the Atmel HDMAC device
1372  */
1373 static void at_dma_off(struct at_dma *atdma)
1374 {
1375         dma_writel(atdma, EN, 0);
1376 
1377         /* disable all interrupts */
1378         dma_writel(atdma, EBCIDR, -1L);
1379 
1380         /* confirm that all channels are disabled */
1381         while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
1382                 cpu_relax();
1383 }
1384 
1385 static int __init at_dma_probe(struct platform_device *pdev)
1386 {
1387         struct resource         *io;
1388         struct at_dma           *atdma;
1389         size_t                  size;
1390         int                     irq;
1391         int                     err;
1392         int                     i;
1393         const struct at_dma_platform_data *plat_dat;
1394 
1395         /* setup platform data for each SoC */
1396         dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1397         dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
1398         dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1399 
1400         /* get DMA parameters from controller type */
1401         plat_dat = at_dma_get_driver_data(pdev);
1402         if (!plat_dat)
1403                 return -ENODEV;
1404 
1405         io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1406         if (!io)
1407                 return -EINVAL;
1408 
1409         irq = platform_get_irq(pdev, 0);
1410         if (irq < 0)
1411                 return irq;
1412 
1413         size = sizeof(struct at_dma);
1414         size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
1415         atdma = kzalloc(size, GFP_KERNEL);
1416         if (!atdma)
1417                 return -ENOMEM;
1418 
1419         /* discover transaction capabilities */
1420         atdma->dma_common.cap_mask = plat_dat->cap_mask;
1421         atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
1422 
1423         size = resource_size(io);
1424         if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1425                 err = -EBUSY;
1426                 goto err_kfree;
1427         }
1428 
1429         atdma->regs = ioremap(io->start, size);
1430         if (!atdma->regs) {
1431                 err = -ENOMEM;
1432                 goto err_release_r;
1433         }
1434 
1435         atdma->clk = clk_get(&pdev->dev, "dma_clk");
1436         if (IS_ERR(atdma->clk)) {
1437                 err = PTR_ERR(atdma->clk);
1438                 goto err_clk;
1439         }
1440         err = clk_prepare_enable(atdma->clk);
1441         if (err)
1442                 goto err_clk_prepare;
1443 
1444         /* force dma off, just in case */
1445         at_dma_off(atdma);
1446 
1447         err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1448         if (err)
1449                 goto err_irq;
1450 
1451         platform_set_drvdata(pdev, atdma);
1452 
1453         /* create a pool of consistent memory blocks for hardware descriptors */
1454         atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
1455                         &pdev->dev, sizeof(struct at_desc),
1456                         4 /* word alignment */, 0);
1457         if (!atdma->dma_desc_pool) {
1458                 dev_err(&pdev->dev, "No memory for descriptor DMA pool\n");
1459                 err = -ENOMEM;
1460                 goto err_pool_create;
1461         }
1462 
1463         /* clear any pending interrupt */
1464         while (dma_readl(atdma, EBCISR))
1465                 cpu_relax();
1466 
1467         /* initialize channel-related values */
1468         INIT_LIST_HEAD(&atdma->dma_common.channels);
1469         for (i = 0; i < plat_dat->nr_channels; i++) {
1470                 struct at_dma_chan      *atchan = &atdma->chan[i];
1471 
1472                 atchan->mem_if = AT_DMA_MEM_IF;
1473                 atchan->per_if = AT_DMA_PER_IF;
1474                 atchan->chan_common.device = &atdma->dma_common;
1475                 dma_cookie_init(&atchan->chan_common);
1476                 list_add_tail(&atchan->chan_common.device_node,
1477                                 &atdma->dma_common.channels);
1478 
1479                 atchan->ch_regs = atdma->regs + ch_regs(i);
1480                 spin_lock_init(&atchan->lock);
1481                 atchan->mask = 1 << i;
1482 
1483                 INIT_LIST_HEAD(&atchan->active_list);
1484                 INIT_LIST_HEAD(&atchan->queue);
1485                 INIT_LIST_HEAD(&atchan->free_list);
1486 
1487                 tasklet_init(&atchan->tasklet, atc_tasklet,
1488                                 (unsigned long)atchan);
1489                 atc_enable_chan_irq(atdma, i);
1490         }
1491 
1492         /* set base routines */
1493         atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
1494         atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
1495         atdma->dma_common.device_tx_status = atc_tx_status;
1496         atdma->dma_common.device_issue_pending = atc_issue_pending;
1497         atdma->dma_common.dev = &pdev->dev;
1498 
1499         /* set prep routines based on capability */
1500         if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1501                 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1502 
1503         if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1504                 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1505                 /* controller can do slave DMA: can trigger cyclic transfers */
1506                 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
1507                 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1508                 atdma->dma_common.device_control = atc_control;
1509         }
1510 
1511         dma_writel(atdma, EN, AT_DMA_ENABLE);
1512 
1513         dev_info(&pdev->dev, "Atmel AHB DMA Controller (%s%s), %d channels\n",
1514           dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1515           dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
1516           plat_dat->nr_channels);
1517 
1518         dma_async_device_register(&atdma->dma_common);
1519 
1520         /*
1521          * Do not return an error if the dmac node is not present, so as not
1522          * to break the existing way of requesting a channel with
1523          * dma_request_channel().
1524          */
1525         if (pdev->dev.of_node) {
1526                 err = of_dma_controller_register(pdev->dev.of_node,
1527                                                  at_dma_xlate, atdma);
1528                 if (err) {
1529                         dev_err(&pdev->dev, "could not register of_dma_controller\n");
1530                         goto err_of_dma_controller_register;
1531                 }
1532         }
1533 
1534         return 0;
1535 
1536 err_of_dma_controller_register:
1537         dma_async_device_unregister(&atdma->dma_common);
1538         dma_pool_destroy(atdma->dma_desc_pool);
1539 err_pool_create:
1540         free_irq(irq, atdma);
1541 err_irq:
1542         clk_disable_unprepare(atdma->clk);
1543 err_clk_prepare:
1544         clk_put(atdma->clk);
1545 err_clk:
1546         iounmap(atdma->regs);
1547         atdma->regs = NULL;
1548 err_release_r:
1549         release_mem_region(io->start, size);
1550 err_kfree:
1551         kfree(atdma);
1552         return err;
1553 }
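/*
 * Once dma_async_device_register() has run, any generic dmaengine client
 * can claim one of these channels.  A minimal sketch of a memcpy client in
 * the API style of this kernel generation follows; 'foo_do_memcpy' is
 * hypothetical, and 'dst'/'src' must already be DMA-mapped.
 */
#if 0
static int foo_do_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *chan;
        dma_cap_mask_t mask;
        dma_cookie_t cookie;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
        chan = dma_request_channel(mask, NULL, NULL);   /* any memcpy channel */
        if (!chan)
                return -ENODEV;

        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                                                  DMA_PREP_INTERRUPT);
        if (!tx) {
                dma_release_channel(chan);
                return -ENOMEM;
        }

        cookie = dmaengine_submit(tx);          /* queue the descriptor */
        dma_async_issue_pending(chan);          /* kick the channel */
        /* ... wait for completion, then dma_release_channel(chan) ... */
        return dma_submit_error(cookie) ? -EIO : 0;
}
#endif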
1554 
1555 static int at_dma_remove(struct platform_device *pdev)
1556 {
1557         struct at_dma           *atdma = platform_get_drvdata(pdev);
1558         struct dma_chan         *chan, *_chan;
1559         struct resource         *io;
1560 
1561         at_dma_off(atdma);
1562         dma_async_device_unregister(&atdma->dma_common);
1563 
1564         dma_pool_destroy(atdma->dma_desc_pool);
1565         free_irq(platform_get_irq(pdev, 0), atdma);
1566 
1567         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1568                         device_node) {
1569                 struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1570 
1571                 /* Disable interrupts */
1572                 atc_disable_chan_irq(atdma, chan->chan_id);
1573 
1574                 tasklet_kill(&atchan->tasklet);
1575                 list_del(&chan->device_node);
1576         }
1577 
1578         clk_disable_unprepare(atdma->clk);
1579         clk_put(atdma->clk);
1580 
1581         iounmap(atdma->regs);
1582         atdma->regs = NULL;
1583 
1584         io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1585         release_mem_region(io->start, resource_size(io));
1586 
1587         kfree(atdma);
1588 
1589         return 0;
1590 }
1591 
1592 static void at_dma_shutdown(struct platform_device *pdev)
1593 {
1594         struct at_dma   *atdma = platform_get_drvdata(pdev);
1595 
1596         at_dma_off(atdma);
1597         clk_disable_unprepare(atdma->clk);
1598 }
1599 
1600 static int at_dma_prepare(struct device *dev)
1601 {
1602         struct platform_device *pdev = to_platform_device(dev);
1603         struct at_dma *atdma = platform_get_drvdata(pdev);
1604         struct dma_chan *chan, *_chan;
1605 
1606         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1607                         device_node) {
1608                 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1609                 /* refuse to suspend while a non-cyclic transfer is active */
1610                 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
1611                         return -EAGAIN;
1612         }
1613         return 0;
1614 }
1615 
1616 static void atc_suspend_cyclic(struct at_dma_chan *atchan)
1617 {
1618         struct dma_chan *chan = &atchan->chan_common;
1619 
1620         /* The channel should already be paused by its user;
1621          * pause it here anyway if that has not been done. */
1622         if (!atc_chan_is_paused(atchan)) {
1623                 dev_warn(chan2dev(chan),
1624                 "cyclic channel not paused, should be done by channel user\n");
1625                 atc_control(chan, DMA_PAUSE, 0);
1626         }
1627 
1628         /* now preserve additional data for cyclic operations:
1629          * the next descriptor address in the cyclic list */
1630         atchan->save_dscr = channel_readl(atchan, DSCR);
1631 
1632         vdbg_dump_regs(atchan);
1633 }
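/*
 * The warning above encodes a contract: the peripheral driver that owns a
 * cyclic channel is expected to pause it before system suspend and
 * un-pause it after resume.  From the client side that is simply the
 * dmaengine pause helpers; a sketch follows (the 'foo' driver and its
 * dma_chan field are hypothetical):
 */
#if 0
static int foo_suspend(struct device *dev)
{
        struct foo_dev *foo = dev_get_drvdata(dev);

        dmaengine_pause(foo->dma_chan);         /* honour the contract */
        return 0;
}

static int foo_resume(struct device *dev)
{
        struct foo_dev *foo = dev_get_drvdata(dev);

        dmaengine_resume(foo->dma_chan);        /* un-pause here, as
                                                 * atc_resume_cyclic() expects */
        return 0;
}
#endif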
1634 
1635 static int at_dma_suspend_noirq(struct device *dev)
1636 {
1637         struct platform_device *pdev = to_platform_device(dev);
1638         struct at_dma *atdma = platform_get_drvdata(pdev);
1639         struct dma_chan *chan, *_chan;
1640 
1641         /* preserve data */
1642         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1643                         device_node) {
1644                 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1645 
1646                 if (atc_chan_is_cyclic(atchan))
1647                         atc_suspend_cyclic(atchan);
1648                 atchan->save_cfg = channel_readl(atchan, CFG);
1649         }
1650         atdma->save_imr = dma_readl(atdma, EBCIMR);
1651 
1652         /* disable DMA controller */
1653         at_dma_off(atdma);
1654         clk_disable_unprepare(atdma->clk);
1655         return 0;
1656 }
1657 
1658 static void atc_resume_cyclic(struct at_dma_chan *atchan)
1659 {
1660         struct at_dma   *atdma = to_at_dma(atchan->chan_common.device);
1661 
1662         /* restore channel status for the cyclic descriptor list:
1663          * reload the next descriptor saved at suspend time */
1664         channel_writel(atchan, SADDR, 0);
1665         channel_writel(atchan, DADDR, 0);
1666         channel_writel(atchan, CTRLA, 0);
1667         channel_writel(atchan, CTRLB, 0);
1668         channel_writel(atchan, DSCR, atchan->save_dscr);
1669         dma_writel(atdma, CHER, atchan->mask);
1670 
1671         /* The pause status must be cleared by the channel user;
1672          * we cannot take the initiative to do it here. */
1673 
1674         vdbg_dump_regs(atchan);
1675 }
1676 
1677 static int at_dma_resume_noirq(struct device *dev)
1678 {
1679         struct platform_device *pdev = to_platform_device(dev);
1680         struct at_dma *atdma = platform_get_drvdata(pdev);
1681         struct dma_chan *chan, *_chan;
1682 
1683         /* bring back DMA controller */
1684         clk_prepare_enable(atdma->clk);
1685         dma_writel(atdma, EN, AT_DMA_ENABLE);
1686 
1687         /* clear any pending interrupt */
1688         while (dma_readl(atdma, EBCISR))
1689                 cpu_relax();
1690 
1691         /* restore saved data */
1692         dma_writel(atdma, EBCIER, atdma->save_imr);
1693         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1694                         device_node) {
1695                 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1696 
1697                 channel_writel(atchan, CFG, atchan->save_cfg);
1698                 if (atc_chan_is_cyclic(atchan))
1699                         atc_resume_cyclic(atchan);
1700         }
1701         return 0;
1702 }
1703 
1704 static const struct dev_pm_ops at_dma_dev_pm_ops = {
1705         .prepare = at_dma_prepare,
1706         .suspend_noirq = at_dma_suspend_noirq,
1707         .resume_noirq = at_dma_resume_noirq,
1708 };
1709 
1710 static struct platform_driver at_dma_driver = {
1711         .remove         = at_dma_remove,
1712         .shutdown       = at_dma_shutdown,
1713         .id_table       = atdma_devtypes,
1714         .driver = {
1715                 .name   = "at_hdmac",
1716                 .pm     = &at_dma_dev_pm_ops,
1717                 .of_match_table = of_match_ptr(atmel_dma_dt_ids),
1718         },
1719 };
1720 
1721 static int __init at_dma_init(void)
1722 {
1723         return platform_driver_probe(&at_dma_driver, at_dma_probe);
1724 }
1725 subsys_initcall(at_dma_init);
1726 
1727 static void __exit at_dma_exit(void)
1728 {
1729         platform_driver_unregister(&at_dma_driver);
1730 }
1731 module_exit(at_dma_exit);
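/*
 * at_dma_probe() is marked __init, so the driver registers with
 * platform_driver_probe(), which probes once at boot and lets the probe
 * code be discarded afterwards.  For comparison, a driver whose probe
 * stays resident could use the module_platform_driver() helper, which
 * expands to an init/exit pair like the one above ('foo' names are
 * hypothetical):
 */
#if 0
static struct platform_driver foo_driver = {
        .probe  = foo_probe,
        .remove = foo_remove,
        .driver = {
                .name = "foo_dma",
        },
};
module_platform_driver(foo_driver);
#endif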
1732 
1733 MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
1734 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
1735 MODULE_LICENSE("GPL");
1736 MODULE_ALIAS("platform:at_hdmac");
1737 
