Version:  2.0.40 2.2.26 2.4.37 3.5 3.6 3.7 3.8 3.9 3.10 3.11 3.12 3.13 3.14 3.15 3.16 3.17 3.18 3.19 4.0 4.1

Linux/drivers/dma/at_hdmac.c

  1 /*
  2  * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
  3  *
  4  * Copyright (C) 2008 Atmel Corporation
  5  *
  6  * This program is free software; you can redistribute it and/or modify
  7  * it under the terms of the GNU General Public License as published by
  8  * the Free Software Foundation; either version 2 of the License, or
  9  * (at your option) any later version.
 10  *
 11  *
 12  * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 13  * The only Atmel DMA Controller that is not covered by this driver is the one
 14  * found on AT91SAM9263.
 15  */
 16 
 17 #include <dt-bindings/dma/at91.h>
 18 #include <linux/clk.h>
 19 #include <linux/dmaengine.h>
 20 #include <linux/dma-mapping.h>
 21 #include <linux/dmapool.h>
 22 #include <linux/interrupt.h>
 23 #include <linux/module.h>
 24 #include <linux/platform_device.h>
 25 #include <linux/slab.h>
 26 #include <linux/of.h>
 27 #include <linux/of_device.h>
 28 #include <linux/of_dma.h>
 29 
 30 #include "at_hdmac_regs.h"
 31 #include "dmaengine.h"
 32 
 33 /*
 34  * Glossary
 35  * --------
 36  *
 37  * at_hdmac             : Name of the ATmel AHB DMA Controller
 38  * at_dma_ / atdma      : ATmel DMA controller entity related
 39  * atc_ / atchan        : ATmel DMA Channel entity related
 40  */
 41 
 42 #define ATC_DEFAULT_CFG         (ATC_FIFOCFG_HALFFIFO)
 43 #define ATC_DEFAULT_CTRLB       (ATC_SIF(AT_DMA_MEM_IF) \
 44                                 |ATC_DIF(AT_DMA_MEM_IF))
 45 #define ATC_DMA_BUSWIDTHS\
 46         (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
 47         BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
 48         BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
 49         BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 50 
 51 /*
 52  * Initial number of descriptors to allocate for each channel. This could
 53  * be increased during dma usage.
 54  */
 55 static unsigned int init_nr_desc_per_channel = 64;
 56 module_param(init_nr_desc_per_channel, uint, 0644);
 57 MODULE_PARM_DESC(init_nr_desc_per_channel,
 58                  "initial descriptors per channel (default: 64)");
 59 
 60 
 61 /* prototypes */
 62 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
 63 static void atc_issue_pending(struct dma_chan *chan);
 64 
 65 
 66 /*----------------------------------------------------------------------*/
 67 
 68 static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
 69                                                 size_t len)
 70 {
 71         unsigned int width;
 72 
 73         if (!((src | dst  | len) & 3))
 74                 width = 2;
 75         else if (!((src | dst | len) & 1))
 76                 width = 1;
 77         else
 78                 width = 0;
 79 
 80         return width;
 81 }
 82 
 83 static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
 84 {
 85         return list_first_entry(&atchan->active_list,
 86                                 struct at_desc, desc_node);
 87 }
 88 
 89 static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
 90 {
 91         return list_first_entry(&atchan->queue,
 92                                 struct at_desc, desc_node);
 93 }
 94 
 95 /**
 96  * atc_alloc_descriptor - allocate and return an initialized descriptor
 97  * @chan: the channel to allocate descriptors for
 98  * @gfp_flags: GFP allocation flags
 99  *
100  * Note: The ack-bit is positioned in the descriptor flag at creation time
101  *       to make initial allocation more convenient. This bit will be cleared
102  *       and control will be given to client at usage time (during
103  *       preparation functions).
104  */
105 static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
106                                             gfp_t gfp_flags)
107 {
108         struct at_desc  *desc = NULL;
109         struct at_dma   *atdma = to_at_dma(chan->device);
110         dma_addr_t phys;
111 
112         desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
113         if (desc) {
114                 memset(desc, 0, sizeof(struct at_desc));
115                 INIT_LIST_HEAD(&desc->tx_list);
116                 dma_async_tx_descriptor_init(&desc->txd, chan);
117                 /* txd.flags will be overwritten in prep functions */
118                 desc->txd.flags = DMA_CTRL_ACK;
119                 desc->txd.tx_submit = atc_tx_submit;
120                 desc->txd.phys = phys;
121         }
122 
123         return desc;
124 }
125 
126 /**
127  * atc_desc_get - get an unused descriptor from free_list
128  * @atchan: channel we want a new descriptor for
129  */
130 static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
131 {
132         struct at_desc *desc, *_desc;
133         struct at_desc *ret = NULL;
134         unsigned long flags;
135         unsigned int i = 0;
136         LIST_HEAD(tmp_list);
137 
138         spin_lock_irqsave(&atchan->lock, flags);
139         list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
140                 i++;
141                 if (async_tx_test_ack(&desc->txd)) {
142                         list_del(&desc->desc_node);
143                         ret = desc;
144                         break;
145                 }
146                 dev_dbg(chan2dev(&atchan->chan_common),
147                                 "desc %p not ACKed\n", desc);
148         }
149         spin_unlock_irqrestore(&atchan->lock, flags);
150         dev_vdbg(chan2dev(&atchan->chan_common),
151                 "scanned %u descriptors on freelist\n", i);
152 
153         /* no more descriptor available in initial pool: create one more */
154         if (!ret) {
155                 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
156                 if (ret) {
157                         spin_lock_irqsave(&atchan->lock, flags);
158                         atchan->descs_allocated++;
159                         spin_unlock_irqrestore(&atchan->lock, flags);
160                 } else {
161                         dev_err(chan2dev(&atchan->chan_common),
162                                         "not enough descriptors available\n");
163                 }
164         }
165 
166         return ret;
167 }
168 
169 /**
170  * atc_desc_put - move a descriptor, including any children, to the free list
171  * @atchan: channel we work on
172  * @desc: descriptor, at the head of a chain, to move to free list
173  */
174 static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
175 {
176         if (desc) {
177                 struct at_desc *child;
178                 unsigned long flags;
179 
180                 spin_lock_irqsave(&atchan->lock, flags);
181                 list_for_each_entry(child, &desc->tx_list, desc_node)
182                         dev_vdbg(chan2dev(&atchan->chan_common),
183                                         "moving child desc %p to freelist\n",
184                                         child);
185                 list_splice_init(&desc->tx_list, &atchan->free_list);
186                 dev_vdbg(chan2dev(&atchan->chan_common),
187                          "moving desc %p to freelist\n", desc);
188                 list_add(&desc->desc_node, &atchan->free_list);
189                 spin_unlock_irqrestore(&atchan->lock, flags);
190         }
191 }
192 
193 /**
194  * atc_desc_chain - build chain adding a descriptor
195  * @first: address of first descriptor of the chain
196  * @prev: address of previous descriptor of the chain
197  * @desc: descriptor to queue
198  *
199  * Called from prep_* functions
200  */
201 static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
202                            struct at_desc *desc)
203 {
204         if (!(*first)) {
205                 *first = desc;
206         } else {
207                 /* inform the HW lli about chaining */
208                 (*prev)->lli.dscr = desc->txd.phys;
209                 /* insert the link descriptor to the LD ring */
210                 list_add_tail(&desc->desc_node,
211                                 &(*first)->tx_list);
212         }
213         *prev = desc;
214 }
215 
216 /**
217  * atc_dostart - starts the DMA engine for real
218  * @atchan: the channel we want to start
219  * @first: first descriptor in the list we want to begin with
220  *
221  * Called with atchan->lock held and bh disabled
222  */
223 static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
224 {
225         struct at_dma   *atdma = to_at_dma(atchan->chan_common.device);
226 
227         /* ASSERT:  channel is idle */
228         if (atc_chan_is_enabled(atchan)) {
229                 dev_err(chan2dev(&atchan->chan_common),
230                         "BUG: Attempted to start non-idle channel\n");
231                 dev_err(chan2dev(&atchan->chan_common),
232                         "  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
233                         channel_readl(atchan, SADDR),
234                         channel_readl(atchan, DADDR),
235                         channel_readl(atchan, CTRLA),
236                         channel_readl(atchan, CTRLB),
237                         channel_readl(atchan, DSCR));
238 
239                 /* The tasklet will hopefully advance the queue... */
240                 return;
241         }
242 
243         vdbg_dump_regs(atchan);
244 
245         channel_writel(atchan, SADDR, 0);
246         channel_writel(atchan, DADDR, 0);
247         channel_writel(atchan, CTRLA, 0);
248         channel_writel(atchan, CTRLB, 0);
249         channel_writel(atchan, DSCR, first->txd.phys);
250         dma_writel(atdma, CHER, atchan->mask);
251 
252         vdbg_dump_regs(atchan);
253 }
254 
255 /*
256  * atc_get_desc_by_cookie - get the descriptor of a cookie
257  * @atchan: the DMA channel
258  * @cookie: the cookie to get the descriptor for
259  */
260 static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
261                                                 dma_cookie_t cookie)
262 {
263         struct at_desc *desc, *_desc;
264 
265         list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
266                 if (desc->txd.cookie == cookie)
267                         return desc;
268         }
269 
270         list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
271                 if (desc->txd.cookie == cookie)
272                         return desc;
273         }
274 
275         return NULL;
276 }
277 
278 /**
279  * atc_calc_bytes_left - calculates the number of bytes left according to the
280  * value read from CTRLA.
281  *
282  * @current_len: the number of bytes left before reading CTRLA
283  * @ctrla: the value of CTRLA
284  * @desc: the descriptor containing the transfer width
285  */
286 static inline int atc_calc_bytes_left(int current_len, u32 ctrla,
287                                         struct at_desc *desc)
288 {
289         return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width);
290 }
291 
292 /**
293  * atc_calc_bytes_left_from_reg - calculates the number of bytes left according
294  * to the current value of CTRLA.
295  *
296  * @current_len: the number of bytes left before reading CTRLA
297  * @atchan: the channel to read CTRLA for
298  * @desc: the descriptor containing the transfer width
299  */
300 static inline int atc_calc_bytes_left_from_reg(int current_len,
301                         struct at_dma_chan *atchan, struct at_desc *desc)
302 {
303         u32 ctrla = channel_readl(atchan, CTRLA);
304 
305         return atc_calc_bytes_left(current_len, ctrla, desc);
306 }
307 
308 /**
309  * atc_get_bytes_left - get the number of bytes residue for a cookie
310  * @chan: DMA channel
311  * @cookie: transaction identifier to check status of
312  */
313 static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
314 {
315         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
316         struct at_desc *desc_first = atc_first_active(atchan);
317         struct at_desc *desc;
318         int ret;
319         u32 ctrla, dscr;
320 
321         /*
322          * If the cookie doesn't match to the currently running transfer then
323          * we can return the total length of the associated DMA transfer,
324          * because it is still queued.
325          */
326         desc = atc_get_desc_by_cookie(atchan, cookie);
327         if (desc == NULL)
328                 return -EINVAL;
329         else if (desc != desc_first)
330                 return desc->total_len;
331 
332         /* cookie matches to the currently running transfer */
333         ret = desc_first->total_len;
334 
335         if (desc_first->lli.dscr) {
336                 /* hardware linked list transfer */
337 
338                 /*
339                  * Calculate the residue by removing the length of the child
340                  * descriptors already transferred from the total length.
341                  * To get the current child descriptor we can use the value of
342                  * the channel's DSCR register and compare it against the value
343                  * of the hardware linked list structure of each child
344                  * descriptor.
345                  */
346 
347                 ctrla = channel_readl(atchan, CTRLA);
348                 rmb(); /* ensure CTRLA is read before DSCR */
349                 dscr = channel_readl(atchan, DSCR);
350 
351                 /* for the first descriptor we can be more accurate */
352                 if (desc_first->lli.dscr == dscr)
353                         return atc_calc_bytes_left(ret, ctrla, desc_first);
354 
355                 ret -= desc_first->len;
356                 list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
357                         if (desc->lli.dscr == dscr)
358                                 break;
359 
360                         ret -= desc->len;
361                 }
362 
363                 /*
364                  * For the last descriptor in the chain we can calculate
365                  * the remaining bytes using the channel's register.
366                  * Note that the transfer width of the first and last
367                  * descriptor may differ.
368                  */
369                 if (!desc->lli.dscr)
370                         ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
371         } else {
372                 /* single transfer */
373                 ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first);
374         }
375 
376         return ret;
377 }
378 
379 /**
380  * atc_chain_complete - finish work for one transaction chain
381  * @atchan: channel we work on
382  * @desc: descriptor at the head of the chain we want do complete
383  *
384  * Called with atchan->lock held and bh disabled */
385 static void
386 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
387 {
388         struct dma_async_tx_descriptor  *txd = &desc->txd;
389 
390         dev_vdbg(chan2dev(&atchan->chan_common),
391                 "descriptor %u complete\n", txd->cookie);
392 
393         /* mark the descriptor as complete for non cyclic cases only */
394         if (!atc_chan_is_cyclic(atchan))
395                 dma_cookie_complete(txd);
396 
397         /* move children to free_list */
398         list_splice_init(&desc->tx_list, &atchan->free_list);
399         /* move myself to free_list */
400         list_move(&desc->desc_node, &atchan->free_list);
401 
402         dma_descriptor_unmap(txd);
403         /* for cyclic transfers,
404          * no need to replay callback function while stopping */
405         if (!atc_chan_is_cyclic(atchan)) {
406                 dma_async_tx_callback   callback = txd->callback;
407                 void                    *param = txd->callback_param;
408 
409                 /*
410                  * The API requires that no submissions are done from a
411                  * callback, so we don't need to drop the lock here
412                  */
413                 if (callback)
414                         callback(param);
415         }
416 
417         dma_run_dependencies(txd);
418 }
419 
420 /**
421  * atc_complete_all - finish work for all transactions
422  * @atchan: channel to complete transactions for
423  *
424  * Eventually submit queued descriptors if any
425  *
426  * Assume channel is idle while calling this function
427  * Called with atchan->lock held and bh disabled
428  */
429 static void atc_complete_all(struct at_dma_chan *atchan)
430 {
431         struct at_desc *desc, *_desc;
432         LIST_HEAD(list);
433 
434         dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
435 
436         /*
437          * Submit queued descriptors ASAP, i.e. before we go through
438          * the completed ones.
439          */
440         if (!list_empty(&atchan->queue))
441                 atc_dostart(atchan, atc_first_queued(atchan));
442         /* empty active_list now it is completed */
443         list_splice_init(&atchan->active_list, &list);
444         /* empty queue list by moving descriptors (if any) to active_list */
445         list_splice_init(&atchan->queue, &atchan->active_list);
446 
447         list_for_each_entry_safe(desc, _desc, &list, desc_node)
448                 atc_chain_complete(atchan, desc);
449 }
450 
451 /**
452  * atc_advance_work - at the end of a transaction, move forward
453  * @atchan: channel where the transaction ended
454  *
455  * Called with atchan->lock held and bh disabled
456  */
457 static void atc_advance_work(struct at_dma_chan *atchan)
458 {
459         dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
460 
461         if (atc_chan_is_enabled(atchan))
462                 return;
463 
464         if (list_empty(&atchan->active_list) ||
465             list_is_singular(&atchan->active_list)) {
466                 atc_complete_all(atchan);
467         } else {
468                 atc_chain_complete(atchan, atc_first_active(atchan));
469                 /* advance work */
470                 atc_dostart(atchan, atc_first_active(atchan));
471         }
472 }
473 
474 
475 /**
476  * atc_handle_error - handle errors reported by DMA controller
477  * @atchan: channel where error occurs
478  *
479  * Called with atchan->lock held and bh disabled
480  */
481 static void atc_handle_error(struct at_dma_chan *atchan)
482 {
483         struct at_desc *bad_desc;
484         struct at_desc *child;
485 
486         /*
487          * The descriptor currently at the head of the active list is
488          * broked. Since we don't have any way to report errors, we'll
489          * just have to scream loudly and try to carry on.
490          */
491         bad_desc = atc_first_active(atchan);
492         list_del_init(&bad_desc->desc_node);
493 
494         /* As we are stopped, take advantage to push queued descriptors
495          * in active_list */
496         list_splice_init(&atchan->queue, atchan->active_list.prev);
497 
498         /* Try to restart the controller */
499         if (!list_empty(&atchan->active_list))
500                 atc_dostart(atchan, atc_first_active(atchan));
501 
502         /*
503          * KERN_CRITICAL may seem harsh, but since this only happens
504          * when someone submits a bad physical address in a
505          * descriptor, we should consider ourselves lucky that the
506          * controller flagged an error instead of scribbling over
507          * random memory locations.
508          */
509         dev_crit(chan2dev(&atchan->chan_common),
510                         "Bad descriptor submitted for DMA!\n");
511         dev_crit(chan2dev(&atchan->chan_common),
512                         "  cookie: %d\n", bad_desc->txd.cookie);
513         atc_dump_lli(atchan, &bad_desc->lli);
514         list_for_each_entry(child, &bad_desc->tx_list, desc_node)
515                 atc_dump_lli(atchan, &child->lli);
516 
517         /* Pretend the descriptor completed successfully */
518         atc_chain_complete(atchan, bad_desc);
519 }
520 
521 /**
522  * atc_handle_cyclic - at the end of a period, run callback function
523  * @atchan: channel used for cyclic operations
524  *
525  * Called with atchan->lock held and bh disabled
526  */
527 static void atc_handle_cyclic(struct at_dma_chan *atchan)
528 {
529         struct at_desc                  *first = atc_first_active(atchan);
530         struct dma_async_tx_descriptor  *txd = &first->txd;
531         dma_async_tx_callback           callback = txd->callback;
532         void                            *param = txd->callback_param;
533 
534         dev_vdbg(chan2dev(&atchan->chan_common),
535                         "new cyclic period llp 0x%08x\n",
536                         channel_readl(atchan, DSCR));
537 
538         if (callback)
539                 callback(param);
540 }
541 
542 /*--  IRQ & Tasklet  ---------------------------------------------------*/
543 
544 static void atc_tasklet(unsigned long data)
545 {
546         struct at_dma_chan *atchan = (struct at_dma_chan *)data;
547         unsigned long flags;
548 
549         spin_lock_irqsave(&atchan->lock, flags);
550         if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
551                 atc_handle_error(atchan);
552         else if (atc_chan_is_cyclic(atchan))
553                 atc_handle_cyclic(atchan);
554         else
555                 atc_advance_work(atchan);
556 
557         spin_unlock_irqrestore(&atchan->lock, flags);
558 }
559 
560 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
561 {
562         struct at_dma           *atdma = (struct at_dma *)dev_id;
563         struct at_dma_chan      *atchan;
564         int                     i;
565         u32                     status, pending, imr;
566         int                     ret = IRQ_NONE;
567 
568         do {
569                 imr = dma_readl(atdma, EBCIMR);
570                 status = dma_readl(atdma, EBCISR);
571                 pending = status & imr;
572 
573                 if (!pending)
574                         break;
575 
576                 dev_vdbg(atdma->dma_common.dev,
577                         "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
578                          status, imr, pending);
579 
580                 for (i = 0; i < atdma->dma_common.chancnt; i++) {
581                         atchan = &atdma->chan[i];
582                         if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
583                                 if (pending & AT_DMA_ERR(i)) {
584                                         /* Disable channel on AHB error */
585                                         dma_writel(atdma, CHDR,
586                                                 AT_DMA_RES(i) | atchan->mask);
587                                         /* Give information to tasklet */
588                                         set_bit(ATC_IS_ERROR, &atchan->status);
589                                 }
590                                 tasklet_schedule(&atchan->tasklet);
591                                 ret = IRQ_HANDLED;
592                         }
593                 }
594 
595         } while (pending);
596 
597         return ret;
598 }
599 
600 
601 /*--  DMA Engine API  --------------------------------------------------*/
602 
603 /**
604  * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
605  * @desc: descriptor at the head of the transaction chain
606  *
607  * Queue chain if DMA engine is working already
608  *
609  * Cookie increment and adding to active_list or queue must be atomic
610  */
611 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
612 {
613         struct at_desc          *desc = txd_to_at_desc(tx);
614         struct at_dma_chan      *atchan = to_at_dma_chan(tx->chan);
615         dma_cookie_t            cookie;
616         unsigned long           flags;
617 
618         spin_lock_irqsave(&atchan->lock, flags);
619         cookie = dma_cookie_assign(tx);
620 
621         if (list_empty(&atchan->active_list)) {
622                 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
623                                 desc->txd.cookie);
624                 atc_dostart(atchan, desc);
625                 list_add_tail(&desc->desc_node, &atchan->active_list);
626         } else {
627                 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
628                                 desc->txd.cookie);
629                 list_add_tail(&desc->desc_node, &atchan->queue);
630         }
631 
632         spin_unlock_irqrestore(&atchan->lock, flags);
633 
634         return cookie;
635 }
636 
637 /**
638  * atc_prep_dma_memcpy - prepare a memcpy operation
639  * @chan: the channel to prepare operation on
640  * @dest: operation virtual destination address
641  * @src: operation virtual source address
642  * @len: operation length
643  * @flags: tx descriptor status flags
644  */
645 static struct dma_async_tx_descriptor *
646 atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
647                 size_t len, unsigned long flags)
648 {
649         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
650         struct at_desc          *desc = NULL;
651         struct at_desc          *first = NULL;
652         struct at_desc          *prev = NULL;
653         size_t                  xfer_count;
654         size_t                  offset;
655         unsigned int            src_width;
656         unsigned int            dst_width;
657         u32                     ctrla;
658         u32                     ctrlb;
659 
660         dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
661                         dest, src, len, flags);
662 
663         if (unlikely(!len)) {
664                 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
665                 return NULL;
666         }
667 
668         ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
669                 | ATC_SRC_ADDR_MODE_INCR
670                 | ATC_DST_ADDR_MODE_INCR
671                 | ATC_FC_MEM2MEM;
672 
673         /*
674          * We can be a lot more clever here, but this should take care
675          * of the most common optimization.
676          */
677         src_width = dst_width = atc_get_xfer_width(src, dest, len);
678 
679         ctrla = ATC_SRC_WIDTH(src_width) |
680                 ATC_DST_WIDTH(dst_width);
681 
682         for (offset = 0; offset < len; offset += xfer_count << src_width) {
683                 xfer_count = min_t(size_t, (len - offset) >> src_width,
684                                 ATC_BTSIZE_MAX);
685 
686                 desc = atc_desc_get(atchan);
687                 if (!desc)
688                         goto err_desc_get;
689 
690                 desc->lli.saddr = src + offset;
691                 desc->lli.daddr = dest + offset;
692                 desc->lli.ctrla = ctrla | xfer_count;
693                 desc->lli.ctrlb = ctrlb;
694 
695                 desc->txd.cookie = 0;
696                 desc->len = xfer_count << src_width;
697 
698                 atc_desc_chain(&first, &prev, desc);
699         }
700 
701         /* First descriptor of the chain embedds additional information */
702         first->txd.cookie = -EBUSY;
703         first->total_len = len;
704 
705         /* set transfer width for the calculation of the residue */
706         first->tx_width = src_width;
707         prev->tx_width = src_width;
708 
709         /* set end-of-link to the last link descriptor of list*/
710         set_desc_eol(desc);
711 
712         first->txd.flags = flags; /* client is in control of this ack */
713 
714         return &first->txd;
715 
716 err_desc_get:
717         atc_desc_put(atchan, first);
718         return NULL;
719 }
720 
721 
722 /**
723  * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
724  * @chan: DMA channel
725  * @sgl: scatterlist to transfer to/from
726  * @sg_len: number of entries in @scatterlist
727  * @direction: DMA direction
728  * @flags: tx descriptor status flags
729  * @context: transaction context (ignored)
730  */
731 static struct dma_async_tx_descriptor *
732 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
733                 unsigned int sg_len, enum dma_transfer_direction direction,
734                 unsigned long flags, void *context)
735 {
736         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
737         struct at_dma_slave     *atslave = chan->private;
738         struct dma_slave_config *sconfig = &atchan->dma_sconfig;
739         struct at_desc          *first = NULL;
740         struct at_desc          *prev = NULL;
741         u32                     ctrla;
742         u32                     ctrlb;
743         dma_addr_t              reg;
744         unsigned int            reg_width;
745         unsigned int            mem_width;
746         unsigned int            i;
747         struct scatterlist      *sg;
748         size_t                  total_len = 0;
749 
750         dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
751                         sg_len,
752                         direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
753                         flags);
754 
755         if (unlikely(!atslave || !sg_len)) {
756                 dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
757                 return NULL;
758         }
759 
760         ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
761                 | ATC_DCSIZE(sconfig->dst_maxburst);
762         ctrlb = ATC_IEN;
763 
764         switch (direction) {
765         case DMA_MEM_TO_DEV:
766                 reg_width = convert_buswidth(sconfig->dst_addr_width);
767                 ctrla |=  ATC_DST_WIDTH(reg_width);
768                 ctrlb |=  ATC_DST_ADDR_MODE_FIXED
769                         | ATC_SRC_ADDR_MODE_INCR
770                         | ATC_FC_MEM2PER
771                         | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
772                 reg = sconfig->dst_addr;
773                 for_each_sg(sgl, sg, sg_len, i) {
774                         struct at_desc  *desc;
775                         u32             len;
776                         u32             mem;
777 
778                         desc = atc_desc_get(atchan);
779                         if (!desc)
780                                 goto err_desc_get;
781 
782                         mem = sg_dma_address(sg);
783                         len = sg_dma_len(sg);
784                         if (unlikely(!len)) {
785                                 dev_dbg(chan2dev(chan),
786                                         "prep_slave_sg: sg(%d) data length is zero\n", i);
787                                 goto err;
788                         }
789                         mem_width = 2;
790                         if (unlikely(mem & 3 || len & 3))
791                                 mem_width = 0;
792 
793                         desc->lli.saddr = mem;
794                         desc->lli.daddr = reg;
795                         desc->lli.ctrla = ctrla
796                                         | ATC_SRC_WIDTH(mem_width)
797                                         | len >> mem_width;
798                         desc->lli.ctrlb = ctrlb;
799                         desc->len = len;
800 
801                         atc_desc_chain(&first, &prev, desc);
802                         total_len += len;
803                 }
804                 break;
805         case DMA_DEV_TO_MEM:
806                 reg_width = convert_buswidth(sconfig->src_addr_width);
807                 ctrla |=  ATC_SRC_WIDTH(reg_width);
808                 ctrlb |=  ATC_DST_ADDR_MODE_INCR
809                         | ATC_SRC_ADDR_MODE_FIXED
810                         | ATC_FC_PER2MEM
811                         | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);
812 
813                 reg = sconfig->src_addr;
814                 for_each_sg(sgl, sg, sg_len, i) {
815                         struct at_desc  *desc;
816                         u32             len;
817                         u32             mem;
818 
819                         desc = atc_desc_get(atchan);
820                         if (!desc)
821                                 goto err_desc_get;
822 
823                         mem = sg_dma_address(sg);
824                         len = sg_dma_len(sg);
825                         if (unlikely(!len)) {
826                                 dev_dbg(chan2dev(chan),
827                                         "prep_slave_sg: sg(%d) data length is zero\n", i);
828                                 goto err;
829                         }
830                         mem_width = 2;
831                         if (unlikely(mem & 3 || len & 3))
832                                 mem_width = 0;
833 
834                         desc->lli.saddr = reg;
835                         desc->lli.daddr = mem;
836                         desc->lli.ctrla = ctrla
837                                         | ATC_DST_WIDTH(mem_width)
838                                         | len >> reg_width;
839                         desc->lli.ctrlb = ctrlb;
840                         desc->len = len;
841 
842                         atc_desc_chain(&first, &prev, desc);
843                         total_len += len;
844                 }
845                 break;
846         default:
847                 return NULL;
848         }
849 
850         /* set end-of-link to the last link descriptor of list*/
851         set_desc_eol(prev);
852 
853         /* First descriptor of the chain embedds additional information */
854         first->txd.cookie = -EBUSY;
855         first->total_len = total_len;
856 
857         /* set transfer width for the calculation of the residue */
858         first->tx_width = reg_width;
859         prev->tx_width = reg_width;
860 
861         /* first link descriptor of list is responsible of flags */
862         first->txd.flags = flags; /* client is in control of this ack */
863 
864         return &first->txd;
865 
866 err_desc_get:
867         dev_err(chan2dev(chan), "not enough descriptors available\n");
868 err:
869         atc_desc_put(atchan, first);
870         return NULL;
871 }
872 
873 /**
874  * atc_prep_dma_sg - prepare memory to memory scather-gather operation
875  * @chan: the channel to prepare operation on
876  * @dst_sg: destination scatterlist
877  * @dst_nents: number of destination scatterlist entries
878  * @src_sg: source scatterlist
879  * @src_nents: number of source scatterlist entries
880  * @flags: tx descriptor status flags
881  */
882 static struct dma_async_tx_descriptor *
883 atc_prep_dma_sg(struct dma_chan *chan,
884                 struct scatterlist *dst_sg, unsigned int dst_nents,
885                 struct scatterlist *src_sg, unsigned int src_nents,
886                 unsigned long flags)
887 {
888         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
889         struct at_desc          *desc = NULL;
890         struct at_desc          *first = NULL;
891         struct at_desc          *prev = NULL;
892         unsigned int            src_width;
893         unsigned int            dst_width;
894         size_t                  xfer_count;
895         u32                     ctrla;
896         u32                     ctrlb;
897         size_t                  dst_len = 0, src_len = 0;
898         dma_addr_t              dst = 0, src = 0;
899         size_t                  len = 0, total_len = 0;
900 
901         if (unlikely(dst_nents == 0 || src_nents == 0))
902                 return NULL;
903 
904         if (unlikely(dst_sg == NULL || src_sg == NULL))
905                 return NULL;
906 
907         ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
908                 | ATC_SRC_ADDR_MODE_INCR
909                 | ATC_DST_ADDR_MODE_INCR
910                 | ATC_FC_MEM2MEM;
911 
912         /*
913          * loop until there is either no more source or no more destination
914          * scatterlist entry
915          */
916         while (true) {
917 
918                 /* prepare the next transfer */
919                 if (dst_len == 0) {
920 
921                         /* no more destination scatterlist entries */
922                         if (!dst_sg || !dst_nents)
923                                 break;
924 
925                         dst = sg_dma_address(dst_sg);
926                         dst_len = sg_dma_len(dst_sg);
927 
928                         dst_sg = sg_next(dst_sg);
929                         dst_nents--;
930                 }
931 
932                 if (src_len == 0) {
933 
934                         /* no more source scatterlist entries */
935                         if (!src_sg || !src_nents)
936                                 break;
937 
938                         src = sg_dma_address(src_sg);
939                         src_len = sg_dma_len(src_sg);
940 
941                         src_sg = sg_next(src_sg);
942                         src_nents--;
943                 }
944 
945                 len = min_t(size_t, src_len, dst_len);
946                 if (len == 0)
947                         continue;
948 
949                 /* take care for the alignment */
950                 src_width = dst_width = atc_get_xfer_width(src, dst, len);
951 
952                 ctrla = ATC_SRC_WIDTH(src_width) |
953                         ATC_DST_WIDTH(dst_width);
954 
955                 /*
956                  * The number of transfers to set up refer to the source width
957                  * that depends on the alignment.
958                  */
959                 xfer_count = len >> src_width;
960                 if (xfer_count > ATC_BTSIZE_MAX) {
961                         xfer_count = ATC_BTSIZE_MAX;
962                         len = ATC_BTSIZE_MAX << src_width;
963                 }
964 
965                 /* create the transfer */
966                 desc = atc_desc_get(atchan);
967                 if (!desc)
968                         goto err_desc_get;
969 
970                 desc->lli.saddr = src;
971                 desc->lli.daddr = dst;
972                 desc->lli.ctrla = ctrla | xfer_count;
973                 desc->lli.ctrlb = ctrlb;
974 
975                 desc->txd.cookie = 0;
976                 desc->len = len;
977 
978                 /*
979                  * Although we only need the transfer width for the first and
980                  * the last descriptor, its easier to set it to all descriptors.
981                  */
982                 desc->tx_width = src_width;
983 
984                 atc_desc_chain(&first, &prev, desc);
985 
986                 /* update the lengths and addresses for the next loop cycle */
987                 dst_len -= len;
988                 src_len -= len;
989                 dst += len;
990                 src += len;
991 
992                 total_len += len;
993         }
994 
995         /* First descriptor of the chain embedds additional information */
996         first->txd.cookie = -EBUSY;
997         first->total_len = total_len;
998 
999         /* set end-of-link to the last link descriptor of list*/
1000         set_desc_eol(desc);
1001 
1002         first->txd.flags = flags; /* client is in control of this ack */
1003 
1004         return &first->txd;
1005 
1006 err_desc_get:
1007         atc_desc_put(atchan, first);
1008         return NULL;
1009 }
1010 
1011 /**
1012  * atc_dma_cyclic_check_values
1013  * Check for too big/unaligned periods and unaligned DMA buffer
1014  */
1015 static int
1016 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
1017                 size_t period_len)
1018 {
1019         if (period_len > (ATC_BTSIZE_MAX << reg_width))
1020                 goto err_out;
1021         if (unlikely(period_len & ((1 << reg_width) - 1)))
1022                 goto err_out;
1023         if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1024                 goto err_out;
1025 
1026         return 0;
1027 
1028 err_out:
1029         return -EINVAL;
1030 }
1031 
1032 /**
1033  * atc_dma_cyclic_fill_desc - Fill one period descriptor
1034  */
1035 static int
1036 atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
1037                 unsigned int period_index, dma_addr_t buf_addr,
1038                 unsigned int reg_width, size_t period_len,
1039                 enum dma_transfer_direction direction)
1040 {
1041         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1042         struct dma_slave_config *sconfig = &atchan->dma_sconfig;
1043         u32                     ctrla;
1044 
1045         /* prepare common CRTLA value */
1046         ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
1047                 | ATC_DCSIZE(sconfig->dst_maxburst)
1048                 | ATC_DST_WIDTH(reg_width)
1049                 | ATC_SRC_WIDTH(reg_width)
1050                 | period_len >> reg_width;
1051 
1052         switch (direction) {
1053         case DMA_MEM_TO_DEV:
1054                 desc->lli.saddr = buf_addr + (period_len * period_index);
1055                 desc->lli.daddr = sconfig->dst_addr;
1056                 desc->lli.ctrla = ctrla;
1057                 desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
1058                                 | ATC_SRC_ADDR_MODE_INCR
1059                                 | ATC_FC_MEM2PER
1060                                 | ATC_SIF(atchan->mem_if)
1061                                 | ATC_DIF(atchan->per_if);
1062                 desc->len = period_len;
1063                 break;
1064 
1065         case DMA_DEV_TO_MEM:
1066                 desc->lli.saddr = sconfig->src_addr;
1067                 desc->lli.daddr = buf_addr + (period_len * period_index);
1068                 desc->lli.ctrla = ctrla;
1069                 desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
1070                                 | ATC_SRC_ADDR_MODE_FIXED
1071                                 | ATC_FC_PER2MEM
1072                                 | ATC_SIF(atchan->per_if)
1073                                 | ATC_DIF(atchan->mem_if);
1074                 desc->len = period_len;
1075                 break;
1076 
1077         default:
1078                 return -EINVAL;
1079         }
1080 
1081         return 0;
1082 }
1083 
1084 /**
1085  * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
1086  * @chan: the DMA channel to prepare
1087  * @buf_addr: physical DMA address where the buffer starts
1088  * @buf_len: total number of bytes for the entire buffer
1089  * @period_len: number of bytes for each period
1090  * @direction: transfer direction, to or from device
1091  * @flags: tx descriptor status flags
1092  */
1093 static struct dma_async_tx_descriptor *
1094 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1095                 size_t period_len, enum dma_transfer_direction direction,
1096                 unsigned long flags)
1097 {
1098         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1099         struct at_dma_slave     *atslave = chan->private;
1100         struct dma_slave_config *sconfig = &atchan->dma_sconfig;
1101         struct at_desc          *first = NULL;
1102         struct at_desc          *prev = NULL;
1103         unsigned long           was_cyclic;
1104         unsigned int            reg_width;
1105         unsigned int            periods = buf_len / period_len;
1106         unsigned int            i;
1107 
1108         dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
1109                         direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1110                         buf_addr,
1111                         periods, buf_len, period_len);
1112 
1113         if (unlikely(!atslave || !buf_len || !period_len)) {
1114                 dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
1115                 return NULL;
1116         }
1117 
1118         was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
1119         if (was_cyclic) {
1120                 dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
1121                 return NULL;
1122         }
1123 
1124         if (unlikely(!is_slave_direction(direction)))
1125                 goto err_out;
1126 
1127         if (sconfig->direction == DMA_MEM_TO_DEV)
1128                 reg_width = convert_buswidth(sconfig->dst_addr_width);
1129         else
1130                 reg_width = convert_buswidth(sconfig->src_addr_width);
1131 
1132         /* Check for too big/unaligned periods and unaligned DMA buffer */
1133         if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
1134                 goto err_out;
1135 
1136         /* build cyclic linked list */
1137         for (i = 0; i < periods; i++) {
1138                 struct at_desc  *desc;
1139 
1140                 desc = atc_desc_get(atchan);
1141                 if (!desc)
1142                         goto err_desc_get;
1143 
1144                 if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
1145                                              reg_width, period_len, direction))
1146                         goto err_desc_get;
1147 
1148                 atc_desc_chain(&first, &prev, desc);
1149         }
1150 
1151         /* lets make a cyclic list */
1152         prev->lli.dscr = first->txd.phys;
1153 
1154         /* First descriptor of the chain embedds additional information */
1155         first->txd.cookie = -EBUSY;
1156         first->total_len = buf_len;
1157         first->tx_width = reg_width;
1158 
1159         return &first->txd;
1160 
1161 err_desc_get:
1162         dev_err(chan2dev(chan), "not enough descriptors available\n");
1163         atc_desc_put(atchan, first);
1164 err_out:
1165         clear_bit(ATC_IS_CYCLIC, &atchan->status);
1166         return NULL;
1167 }
1168 
1169 static int atc_config(struct dma_chan *chan,
1170                       struct dma_slave_config *sconfig)
1171 {
1172         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1173 
1174         dev_vdbg(chan2dev(chan), "%s\n", __func__);
1175 
1176         /* Check if it is chan is configured for slave transfers */
1177         if (!chan->private)
1178                 return -EINVAL;
1179 
1180         memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
1181 
1182         convert_burst(&atchan->dma_sconfig.src_maxburst);
1183         convert_burst(&atchan->dma_sconfig.dst_maxburst);
1184 
1185         return 0;
1186 }
1187 
1188 static int atc_pause(struct dma_chan *chan)
1189 {
1190         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1191         struct at_dma           *atdma = to_at_dma(chan->device);
1192         int                     chan_id = atchan->chan_common.chan_id;
1193         unsigned long           flags;
1194 
1195         LIST_HEAD(list);
1196 
1197         dev_vdbg(chan2dev(chan), "%s\n", __func__);
1198 
1199         spin_lock_irqsave(&atchan->lock, flags);
1200 
1201         dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
1202         set_bit(ATC_IS_PAUSED, &atchan->status);
1203 
1204         spin_unlock_irqrestore(&atchan->lock, flags);
1205 
1206         return 0;
1207 }
1208 
1209 static int atc_resume(struct dma_chan *chan)
1210 {
1211         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1212         struct at_dma           *atdma = to_at_dma(chan->device);
1213         int                     chan_id = atchan->chan_common.chan_id;
1214         unsigned long           flags;
1215 
1216         LIST_HEAD(list);
1217 
1218         dev_vdbg(chan2dev(chan), "%s\n", __func__);
1219 
1220         if (!atc_chan_is_paused(atchan))
1221                 return 0;
1222 
1223         spin_lock_irqsave(&atchan->lock, flags);
1224 
1225         dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
1226         clear_bit(ATC_IS_PAUSED, &atchan->status);
1227 
1228         spin_unlock_irqrestore(&atchan->lock, flags);
1229 
1230         return 0;
1231 }
1232 
1233 static int atc_terminate_all(struct dma_chan *chan)
1234 {
1235         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1236         struct at_dma           *atdma = to_at_dma(chan->device);
1237         int                     chan_id = atchan->chan_common.chan_id;
1238         struct at_desc          *desc, *_desc;
1239         unsigned long           flags;
1240 
1241         LIST_HEAD(list);
1242 
1243         dev_vdbg(chan2dev(chan), "%s\n", __func__);
1244 
1245         /*
1246          * This is only called when something went wrong elsewhere, so
1247          * we don't really care about the data. Just disable the
1248          * channel. We still have to poll the channel enable bit due
1249          * to AHB/HSB limitations.
1250          */
1251         spin_lock_irqsave(&atchan->lock, flags);
1252 
1253         /* disabling channel: must also remove suspend state */
1254         dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
1255 
1256         /* confirm that this channel is disabled */
1257         while (dma_readl(atdma, CHSR) & atchan->mask)
1258                 cpu_relax();
1259 
1260         /* active_list entries will end up before queued entries */
1261         list_splice_init(&atchan->queue, &list);
1262         list_splice_init(&atchan->active_list, &list);
1263 
1264         /* Flush all pending and queued descriptors */
1265         list_for_each_entry_safe(desc, _desc, &list, desc_node)
1266                 atc_chain_complete(atchan, desc);
1267 
1268         clear_bit(ATC_IS_PAUSED, &atchan->status);
1269         /* if channel dedicated to cyclic operations, free it */
1270         clear_bit(ATC_IS_CYCLIC, &atchan->status);
1271 
1272         spin_unlock_irqrestore(&atchan->lock, flags);
1273 
1274         return 0;
1275 }
1276 
1277 /**
1278  * atc_tx_status - poll for transaction completion
1279  * @chan: DMA channel
1280  * @cookie: transaction identifier to check status of
1281  * @txstate: if not %NULL updated with transaction state
1282  *
1283  * If @txstate is passed in, upon return it reflect the driver
1284  * internal state and can be used with dma_async_is_complete() to check
1285  * the status of multiple cookies without re-checking hardware state.
1286  */
1287 static enum dma_status
1288 atc_tx_status(struct dma_chan *chan,
1289                 dma_cookie_t cookie,
1290                 struct dma_tx_state *txstate)
1291 {
1292         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1293         unsigned long           flags;
1294         enum dma_status         ret;
1295         int bytes = 0;
1296 
1297         ret = dma_cookie_status(chan, cookie, txstate);
1298         if (ret == DMA_COMPLETE)
1299                 return ret;
1300         /*
1301          * There's no point calculating the residue if there's
1302          * no txstate to store the value.
1303          */
1304         if (!txstate)
1305                 return DMA_ERROR;
1306 
1307         spin_lock_irqsave(&atchan->lock, flags);
1308 
1309         /*  Get number of bytes left in the active transactions */
1310         bytes = atc_get_bytes_left(chan, cookie);
1311 
1312         spin_unlock_irqrestore(&atchan->lock, flags);
1313 
1314         if (unlikely(bytes < 0)) {
1315                 dev_vdbg(chan2dev(chan), "get residual bytes error\n");
1316                 return DMA_ERROR;
1317         } else {
1318                 dma_set_residue(txstate, bytes);
1319         }
1320 
1321         dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
1322                  ret, cookie, bytes);
1323 
1324         return ret;
1325 }
1326 
1327 /**
1328  * atc_issue_pending - try to finish work
1329  * @chan: target DMA channel
1330  */
1331 static void atc_issue_pending(struct dma_chan *chan)
1332 {
1333         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1334         unsigned long           flags;
1335 
1336         dev_vdbg(chan2dev(chan), "issue_pending\n");
1337 
1338         /* Not needed for cyclic transfers */
1339         if (atc_chan_is_cyclic(atchan))
1340                 return;
1341 
1342         spin_lock_irqsave(&atchan->lock, flags);
1343         atc_advance_work(atchan);
1344         spin_unlock_irqrestore(&atchan->lock, flags);
1345 }
1346 
1347 /**
1348  * atc_alloc_chan_resources - allocate resources for DMA channel
1349  * @chan: allocate descriptor resources for this channel
1350  * @client: current client requesting the channel be ready for requests
1351  *
1352  * return - the number of allocated descriptors
1353  */
1354 static int atc_alloc_chan_resources(struct dma_chan *chan)
1355 {
1356         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1357         struct at_dma           *atdma = to_at_dma(chan->device);
1358         struct at_desc          *desc;
1359         struct at_dma_slave     *atslave;
1360         unsigned long           flags;
1361         int                     i;
1362         u32                     cfg;
1363         LIST_HEAD(tmp_list);
1364 
1365         dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1366 
1367         /* ASSERT:  channel is idle */
1368         if (atc_chan_is_enabled(atchan)) {
1369                 dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
1370                 return -EIO;
1371         }
1372 
1373         cfg = ATC_DEFAULT_CFG;
1374 
1375         atslave = chan->private;
1376         if (atslave) {
1377                 /*
1378                  * We need controller-specific data to set up slave
1379                  * transfers.
1380                  */
1381                 BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
1382 
1383                 /* if cfg configuration specified take it instead of default */
1384                 if (atslave->cfg)
1385                         cfg = atslave->cfg;
1386         }
1387 
1388         /* have we already been set up?
1389          * reconfigure channel but no need to reallocate descriptors */
1390         if (!list_empty(&atchan->free_list))
1391                 return atchan->descs_allocated;
1392 
1393         /* Allocate initial pool of descriptors */
1394         for (i = 0; i < init_nr_desc_per_channel; i++) {
1395                 desc = atc_alloc_descriptor(chan, GFP_KERNEL);
1396                 if (!desc) {
1397                         dev_err(atdma->dma_common.dev,
1398                                 "Only %d initial descriptors\n", i);
1399                         break;
1400                 }
1401                 list_add_tail(&desc->desc_node, &tmp_list);
1402         }
1403 
1404         spin_lock_irqsave(&atchan->lock, flags);
1405         atchan->descs_allocated = i;
1406         list_splice(&tmp_list, &atchan->free_list);
1407         dma_cookie_init(chan);
1408         spin_unlock_irqrestore(&atchan->lock, flags);
1409 
1410         /* channel parameters */
1411         channel_writel(atchan, CFG, cfg);
1412 
1413         dev_dbg(chan2dev(chan),
1414                 "alloc_chan_resources: allocated %d descriptors\n",
1415                 atchan->descs_allocated);
1416 
1417         return atchan->descs_allocated;
1418 }
1419 
1420 /**
1421  * atc_free_chan_resources - free all channel resources
1422  * @chan: DMA channel
1423  */
1424 static void atc_free_chan_resources(struct dma_chan *chan)
1425 {
1426         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1427         struct at_dma           *atdma = to_at_dma(chan->device);
1428         struct at_desc          *desc, *_desc;
1429         LIST_HEAD(list);
1430 
1431         dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
1432                 atchan->descs_allocated);
1433 
1434         /* ASSERT:  channel is idle */
1435         BUG_ON(!list_empty(&atchan->active_list));
1436         BUG_ON(!list_empty(&atchan->queue));
1437         BUG_ON(atc_chan_is_enabled(atchan));
1438 
1439         list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
1440                 dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
1441                 list_del(&desc->desc_node);
1442                 /* free link descriptor */
1443                 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
1444         }
1445         list_splice_init(&atchan->free_list, &list);
1446         atchan->descs_allocated = 0;
1447         atchan->status = 0;
1448 
1449         dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1450 }
1451 
1452 #ifdef CONFIG_OF
1453 static bool at_dma_filter(struct dma_chan *chan, void *slave)
1454 {
1455         struct at_dma_slave *atslave = slave;
1456 
1457         if (atslave->dma_dev == chan->device->dev) {
1458                 chan->private = atslave;
1459                 return true;
1460         } else {
1461                 return false;
1462         }
1463 }
1464 
1465 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1466                                      struct of_dma *of_dma)
1467 {
1468         struct dma_chan *chan;
1469         struct at_dma_chan *atchan;
1470         struct at_dma_slave *atslave;
1471         dma_cap_mask_t mask;
1472         unsigned int per_id;
1473         struct platform_device *dmac_pdev;
1474 
1475         if (dma_spec->args_count != 2)
1476                 return NULL;
1477 
1478         dmac_pdev = of_find_device_by_node(dma_spec->np);
1479 
1480         dma_cap_zero(mask);
1481         dma_cap_set(DMA_SLAVE, mask);
1482 
1483         atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
1484         if (!atslave)
1485                 return NULL;
1486 
1487         atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
1488         /*
1489          * We can fill both SRC_PER and DST_PER, one of these fields will be
1490          * ignored depending on DMA transfer direction.
1491          */
1492         per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
1493         atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
1494                      | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
1495         /*
1496          * We have to translate the value we get from the device tree since
1497          * the half FIFO configuration value had to be 0 to keep backward
1498          * compatibility.
1499          */
1500         switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
1501         case AT91_DMA_CFG_FIFOCFG_ALAP:
1502                 atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
1503                 break;
1504         case AT91_DMA_CFG_FIFOCFG_ASAP:
1505                 atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
1506                 break;
1507         case AT91_DMA_CFG_FIFOCFG_HALF:
1508         default:
1509                 atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
1510         }
1511         atslave->dma_dev = &dmac_pdev->dev;
1512 
1513         chan = dma_request_channel(mask, at_dma_filter, atslave);
1514         if (!chan)
1515                 return NULL;
1516 
1517         atchan = to_at_dma_chan(chan);
1518         atchan->per_if = dma_spec->args[0] & 0xff;
1519         atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;
1520 
1521         return chan;
1522 }
1523 #else
1524 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1525                                      struct of_dma *of_dma)
1526 {
1527         return NULL;
1528 }
1529 #endif
1530 
1531 /*--  Module Management  -----------------------------------------------*/
1532 
1533 /* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
1534 static struct at_dma_platform_data at91sam9rl_config = {
1535         .nr_channels = 2,
1536 };
1537 static struct at_dma_platform_data at91sam9g45_config = {
1538         .nr_channels = 8,
1539 };
1540 
1541 #if defined(CONFIG_OF)
1542 static const struct of_device_id atmel_dma_dt_ids[] = {
1543         {
1544                 .compatible = "atmel,at91sam9rl-dma",
1545                 .data = &at91sam9rl_config,
1546         }, {
1547                 .compatible = "atmel,at91sam9g45-dma",
1548                 .data = &at91sam9g45_config,
1549         }, {
1550                 /* sentinel */
1551         }
1552 };
1553 
1554 MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
1555 #endif
1556 
1557 static const struct platform_device_id atdma_devtypes[] = {
1558         {
1559                 .name = "at91sam9rl_dma",
1560                 .driver_data = (unsigned long) &at91sam9rl_config,
1561         }, {
1562                 .name = "at91sam9g45_dma",
1563                 .driver_data = (unsigned long) &at91sam9g45_config,
1564         }, {
1565                 /* sentinel */
1566         }
1567 };
1568 
1569 static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
1570                                                 struct platform_device *pdev)
1571 {
1572         if (pdev->dev.of_node) {
1573                 const struct of_device_id *match;
1574                 match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
1575                 if (match == NULL)
1576                         return NULL;
1577                 return match->data;
1578         }
1579         return (struct at_dma_platform_data *)
1580                         platform_get_device_id(pdev)->driver_data;
1581 }
1582 
1583 /**
1584  * at_dma_off - disable DMA controller
1585  * @atdma: the Atmel HDAMC device
1586  */
1587 static void at_dma_off(struct at_dma *atdma)
1588 {
1589         dma_writel(atdma, EN, 0);
1590 
1591         /* disable all interrupts */
1592         dma_writel(atdma, EBCIDR, -1L);
1593 
1594         /* confirm that all channels are disabled */
1595         while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
1596                 cpu_relax();
1597 }
1598 
1599 static int __init at_dma_probe(struct platform_device *pdev)
1600 {
1601         struct resource         *io;
1602         struct at_dma           *atdma;
1603         size_t                  size;
1604         int                     irq;
1605         int                     err;
1606         int                     i;
1607         const struct at_dma_platform_data *plat_dat;
1608 
1609         /* setup platform data for each SoC */
1610         dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1611         dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
1612         dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
1613         dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1614         dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
1615 
1616         /* get DMA parameters from controller type */
1617         plat_dat = at_dma_get_driver_data(pdev);
1618         if (!plat_dat)
1619                 return -ENODEV;
1620 
1621         io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1622         if (!io)
1623                 return -EINVAL;
1624 
1625         irq = platform_get_irq(pdev, 0);
1626         if (irq < 0)
1627                 return irq;
1628 
1629         size = sizeof(struct at_dma);
1630         size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
1631         atdma = kzalloc(size, GFP_KERNEL);
1632         if (!atdma)
1633                 return -ENOMEM;
1634 
1635         /* discover transaction capabilities */
1636         atdma->dma_common.cap_mask = plat_dat->cap_mask;
1637         atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
1638 
1639         size = resource_size(io);
1640         if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1641                 err = -EBUSY;
1642                 goto err_kfree;
1643         }
1644 
1645         atdma->regs = ioremap(io->start, size);
1646         if (!atdma->regs) {
1647                 err = -ENOMEM;
1648                 goto err_release_r;
1649         }
1650 
1651         atdma->clk = clk_get(&pdev->dev, "dma_clk");
1652         if (IS_ERR(atdma->clk)) {
1653                 err = PTR_ERR(atdma->clk);
1654                 goto err_clk;
1655         }
1656         err = clk_prepare_enable(atdma->clk);
1657         if (err)
1658                 goto err_clk_prepare;
1659 
1660         /* force dma off, just in case */
1661         at_dma_off(atdma);
1662 
1663         err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1664         if (err)
1665                 goto err_irq;
1666 
1667         platform_set_drvdata(pdev, atdma);
1668 
1669         /* create a pool of consistent memory blocks for hardware descriptors */
1670         atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
1671                         &pdev->dev, sizeof(struct at_desc),
1672                         4 /* word alignment */, 0);
1673         if (!atdma->dma_desc_pool) {
1674                 dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
1675                 err = -ENOMEM;
1676                 goto err_pool_create;
1677         }
1678 
1679         /* clear any pending interrupt */
1680         while (dma_readl(atdma, EBCISR))
1681                 cpu_relax();
1682 
1683         /* initialize channels related values */
1684         INIT_LIST_HEAD(&atdma->dma_common.channels);
1685         for (i = 0; i < plat_dat->nr_channels; i++) {
1686                 struct at_dma_chan      *atchan = &atdma->chan[i];
1687 
1688                 atchan->mem_if = AT_DMA_MEM_IF;
1689                 atchan->per_if = AT_DMA_PER_IF;
1690                 atchan->chan_common.device = &atdma->dma_common;
1691                 dma_cookie_init(&atchan->chan_common);
1692                 list_add_tail(&atchan->chan_common.device_node,
1693                                 &atdma->dma_common.channels);
1694 
1695                 atchan->ch_regs = atdma->regs + ch_regs(i);
1696                 spin_lock_init(&atchan->lock);
1697                 atchan->mask = 1 << i;
1698 
1699                 INIT_LIST_HEAD(&atchan->active_list);
1700                 INIT_LIST_HEAD(&atchan->queue);
1701                 INIT_LIST_HEAD(&atchan->free_list);
1702 
1703                 tasklet_init(&atchan->tasklet, atc_tasklet,
1704                                 (unsigned long)atchan);
1705                 atc_enable_chan_irq(atdma, i);
1706         }
1707 
1708         /* set base routines */
1709         atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
1710         atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
1711         atdma->dma_common.device_tx_status = atc_tx_status;
1712         atdma->dma_common.device_issue_pending = atc_issue_pending;
1713         atdma->dma_common.dev = &pdev->dev;
1714 
1715         /* set prep routines based on capability */
1716         if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1717                 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1718 
1719         if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1720                 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1721                 /* controller can do slave DMA: can trigger cyclic transfers */
1722                 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
1723                 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1724                 atdma->dma_common.device_config = atc_config;
1725                 atdma->dma_common.device_pause = atc_pause;
1726                 atdma->dma_common.device_resume = atc_resume;
1727                 atdma->dma_common.device_terminate_all = atc_terminate_all;
1728                 atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
1729                 atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
1730                 atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1731                 atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1732         }
1733 
1734         if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
1735                 atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;
1736 
1737         dma_writel(atdma, EN, AT_DMA_ENABLE);
1738 
1739         dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
1740           dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1741           dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
1742           dma_has_cap(DMA_SG, atdma->dma_common.cap_mask)  ? "sg-cpy " : "",
1743           plat_dat->nr_channels);
1744 
1745         dma_async_device_register(&atdma->dma_common);
1746 
1747         /*
1748          * Do not return an error if the dmac node is not present in order to
1749          * not break the existing way of requesting channel with
1750          * dma_request_channel().
1751          */
1752         if (pdev->dev.of_node) {
1753                 err = of_dma_controller_register(pdev->dev.of_node,
1754                                                  at_dma_xlate, atdma);
1755                 if (err) {
1756                         dev_err(&pdev->dev, "could not register of_dma_controller\n");
1757                         goto err_of_dma_controller_register;
1758                 }
1759         }
1760 
1761         return 0;
1762 
1763 err_of_dma_controller_register:
1764         dma_async_device_unregister(&atdma->dma_common);
1765         dma_pool_destroy(atdma->dma_desc_pool);
1766 err_pool_create:
1767         free_irq(platform_get_irq(pdev, 0), atdma);
1768 err_irq:
1769         clk_disable_unprepare(atdma->clk);
1770 err_clk_prepare:
1771         clk_put(atdma->clk);
1772 err_clk:
1773         iounmap(atdma->regs);
1774         atdma->regs = NULL;
1775 err_release_r:
1776         release_mem_region(io->start, size);
1777 err_kfree:
1778         kfree(atdma);
1779         return err;
1780 }
1781 
1782 static int at_dma_remove(struct platform_device *pdev)
1783 {
1784         struct at_dma           *atdma = platform_get_drvdata(pdev);
1785         struct dma_chan         *chan, *_chan;
1786         struct resource         *io;
1787 
1788         at_dma_off(atdma);
1789         dma_async_device_unregister(&atdma->dma_common);
1790 
1791         dma_pool_destroy(atdma->dma_desc_pool);
1792         free_irq(platform_get_irq(pdev, 0), atdma);
1793 
1794         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1795                         device_node) {
1796                 struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1797 
1798                 /* Disable interrupts */
1799                 atc_disable_chan_irq(atdma, chan->chan_id);
1800 
1801                 tasklet_kill(&atchan->tasklet);
1802                 list_del(&chan->device_node);
1803         }
1804 
1805         clk_disable_unprepare(atdma->clk);
1806         clk_put(atdma->clk);
1807 
1808         iounmap(atdma->regs);
1809         atdma->regs = NULL;
1810 
1811         io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1812         release_mem_region(io->start, resource_size(io));
1813 
1814         kfree(atdma);
1815 
1816         return 0;
1817 }
1818 
1819 static void at_dma_shutdown(struct platform_device *pdev)
1820 {
1821         struct at_dma   *atdma = platform_get_drvdata(pdev);
1822 
1823         at_dma_off(platform_get_drvdata(pdev));
1824         clk_disable_unprepare(atdma->clk);
1825 }
1826 
1827 static int at_dma_prepare(struct device *dev)
1828 {
1829         struct platform_device *pdev = to_platform_device(dev);
1830         struct at_dma *atdma = platform_get_drvdata(pdev);
1831         struct dma_chan *chan, *_chan;
1832 
1833         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1834                         device_node) {
1835                 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1836                 /* wait for transaction completion (except in cyclic case) */
1837                 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
1838                         return -EAGAIN;
1839         }
1840         return 0;
1841 }
1842 
1843 static void atc_suspend_cyclic(struct at_dma_chan *atchan)
1844 {
1845         struct dma_chan *chan = &atchan->chan_common;
1846 
1847         /* Channel should be paused by user
1848          * do it anyway even if it is not done already */
1849         if (!atc_chan_is_paused(atchan)) {
1850                 dev_warn(chan2dev(chan),
1851                 "cyclic channel not paused, should be done by channel user\n");
1852                 atc_pause(chan);
1853         }
1854 
1855         /* now preserve additional data for cyclic operations */
1856         /* next descriptor address in the cyclic list */
1857         atchan->save_dscr = channel_readl(atchan, DSCR);
1858 
1859         vdbg_dump_regs(atchan);
1860 }
1861 
1862 static int at_dma_suspend_noirq(struct device *dev)
1863 {
1864         struct platform_device *pdev = to_platform_device(dev);
1865         struct at_dma *atdma = platform_get_drvdata(pdev);
1866         struct dma_chan *chan, *_chan;
1867 
1868         /* preserve data */
1869         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1870                         device_node) {
1871                 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1872 
1873                 if (atc_chan_is_cyclic(atchan))
1874                         atc_suspend_cyclic(atchan);
1875                 atchan->save_cfg = channel_readl(atchan, CFG);
1876         }
1877         atdma->save_imr = dma_readl(atdma, EBCIMR);
1878 
1879         /* disable DMA controller */
1880         at_dma_off(atdma);
1881         clk_disable_unprepare(atdma->clk);
1882         return 0;
1883 }
1884 
1885 static void atc_resume_cyclic(struct at_dma_chan *atchan)
1886 {
1887         struct at_dma   *atdma = to_at_dma(atchan->chan_common.device);
1888 
1889         /* restore channel status for cyclic descriptors list:
1890          * next descriptor in the cyclic list at the time of suspend */
1891         channel_writel(atchan, SADDR, 0);
1892         channel_writel(atchan, DADDR, 0);
1893         channel_writel(atchan, CTRLA, 0);
1894         channel_writel(atchan, CTRLB, 0);
1895         channel_writel(atchan, DSCR, atchan->save_dscr);
1896         dma_writel(atdma, CHER, atchan->mask);
1897 
1898         /* channel pause status should be removed by channel user
1899          * We cannot take the initiative to do it here */
1900 
1901         vdbg_dump_regs(atchan);
1902 }
1903 
1904 static int at_dma_resume_noirq(struct device *dev)
1905 {
1906         struct platform_device *pdev = to_platform_device(dev);
1907         struct at_dma *atdma = platform_get_drvdata(pdev);
1908         struct dma_chan *chan, *_chan;
1909 
1910         /* bring back DMA controller */
1911         clk_prepare_enable(atdma->clk);
1912         dma_writel(atdma, EN, AT_DMA_ENABLE);
1913 
1914         /* clear any pending interrupt */
1915         while (dma_readl(atdma, EBCISR))
1916                 cpu_relax();
1917 
1918         /* restore saved data */
1919         dma_writel(atdma, EBCIER, atdma->save_imr);
1920         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1921                         device_node) {
1922                 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1923 
1924                 channel_writel(atchan, CFG, atchan->save_cfg);
1925                 if (atc_chan_is_cyclic(atchan))
1926                         atc_resume_cyclic(atchan);
1927         }
1928         return 0;
1929 }
1930 
1931 static const struct dev_pm_ops at_dma_dev_pm_ops = {
1932         .prepare = at_dma_prepare,
1933         .suspend_noirq = at_dma_suspend_noirq,
1934         .resume_noirq = at_dma_resume_noirq,
1935 };
1936 
1937 static struct platform_driver at_dma_driver = {
1938         .remove         = at_dma_remove,
1939         .shutdown       = at_dma_shutdown,
1940         .id_table       = atdma_devtypes,
1941         .driver = {
1942                 .name   = "at_hdmac",
1943                 .pm     = &at_dma_dev_pm_ops,
1944                 .of_match_table = of_match_ptr(atmel_dma_dt_ids),
1945         },
1946 };
1947 
1948 static int __init at_dma_init(void)
1949 {
1950         return platform_driver_probe(&at_dma_driver, at_dma_probe);
1951 }
1952 subsys_initcall(at_dma_init);
1953 
1954 static void __exit at_dma_exit(void)
1955 {
1956         platform_driver_unregister(&at_dma_driver);
1957 }
1958 module_exit(at_dma_exit);
1959 
1960 MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
1961 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
1962 MODULE_LICENSE("GPL");
1963 MODULE_ALIAS("platform:at_hdmac");
1964 

This page was automatically generated by LXR 0.3.1 (source).  •  Linux is a registered trademark of Linus Torvalds  •  Contact us