Linux/drivers/dma/at_hdmac.c

  1 /*
  2  * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
  3  *
  4  * Copyright (C) 2008 Atmel Corporation
  5  *
  6  * This program is free software; you can redistribute it and/or modify
  7  * it under the terms of the GNU General Public License as published by
  8  * the Free Software Foundation; either version 2 of the License, or
  9  * (at your option) any later version.
 10  *
 11  *
 12  * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 13  * The only Atmel DMA Controller that is not covered by this driver is the one
 14  * found on AT91SAM9263.
 15  */
 16 
 17 #include <dt-bindings/dma/at91.h>
 18 #include <linux/clk.h>
 19 #include <linux/dmaengine.h>
 20 #include <linux/dma-mapping.h>
 21 #include <linux/dmapool.h>
 22 #include <linux/interrupt.h>
 23 #include <linux/module.h>
 24 #include <linux/platform_device.h>
 25 #include <linux/slab.h>
 26 #include <linux/of.h>
 27 #include <linux/of_device.h>
 28 #include <linux/of_dma.h>
 29 
 30 #include "at_hdmac_regs.h"
 31 #include "dmaengine.h"
 32 
 33 /*
 34  * Glossary
 35  * --------
 36  *
  37  * at_hdmac             : Name of the Atmel AHB DMA Controller
  38  * at_dma_ / atdma      : Atmel DMA controller entity related
  39  * atc_ / atchan        : Atmel DMA Channel entity related
 40  */
 41 
 42 #define ATC_DEFAULT_CFG         (ATC_FIFOCFG_HALFFIFO)
 43 #define ATC_DEFAULT_CTRLB       (ATC_SIF(AT_DMA_MEM_IF) \
 44                                 |ATC_DIF(AT_DMA_MEM_IF))
 45 #define ATC_DMA_BUSWIDTHS\
 46         (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
 47         BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
 48         BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
 49         BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 50 
 51 #define ATC_MAX_DSCR_TRIALS     10
 52 
 53 /*
  54  * Initial number of descriptors to allocate for each channel. This can
  55  * be increased during DMA usage.
 56  */
 57 static unsigned int init_nr_desc_per_channel = 64;
 58 module_param(init_nr_desc_per_channel, uint, 0644);
 59 MODULE_PARM_DESC(init_nr_desc_per_channel,
 60                  "initial descriptors per channel (default: 64)");
 61 
 62 
 63 /* prototypes */
 64 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
 65 static void atc_issue_pending(struct dma_chan *chan);
 66 
 67 
 68 /*----------------------------------------------------------------------*/
 69 
 70 static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
 71                                                 size_t len)
 72 {
 73         unsigned int width;
 74 
  75         if (!((src | dst | len) & 3))
 76                 width = 2;
 77         else if (!((src | dst | len) & 1))
 78                 width = 1;
 79         else
 80                 width = 0;
 81 
 82         return width;
 83 }
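
/*
 * Editor's sketch (not part of the driver): the value returned above is the
 * bus-width exponent fed to ATC_SRC_WIDTH()/ATC_DST_WIDTH(), i.e. the widest
 * common alignment of src, dst and len. Hypothetical calls:
 *
 *        atc_get_xfer_width(0x1000, 0x2004, 0x40);  // word aligned     -> 2
 *        atc_get_xfer_width(0x1000, 0x2002, 0x40);  // halfword aligned -> 1
 *        atc_get_xfer_width(0x1001, 0x2002, 0x40);  // byte aligned     -> 0
 */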
 84 
 85 static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
 86 {
 87         return list_first_entry(&atchan->active_list,
 88                                 struct at_desc, desc_node);
 89 }
 90 
 91 static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
 92 {
 93         return list_first_entry(&atchan->queue,
 94                                 struct at_desc, desc_node);
 95 }
 96 
 97 /**
 98  * atc_alloc_descriptor - allocate and return an initialized descriptor
 99  * @chan: the channel to allocate descriptors for
100  * @gfp_flags: GFP allocation flags
101  *
 102  * Note: The ack-bit is set in the descriptor flags at creation time
 103  *       to make initial allocation more convenient. This bit will be cleared
 104  *       and control will be given to the client at usage time (during
 105  *       preparation functions).
106  */
107 static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
108                                             gfp_t gfp_flags)
109 {
110         struct at_desc  *desc = NULL;
111         struct at_dma   *atdma = to_at_dma(chan->device);
112         dma_addr_t phys;
113 
114         desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys);
115         if (desc) {
116                 INIT_LIST_HEAD(&desc->tx_list);
117                 dma_async_tx_descriptor_init(&desc->txd, chan);
118                 /* txd.flags will be overwritten in prep functions */
119                 desc->txd.flags = DMA_CTRL_ACK;
120                 desc->txd.tx_submit = atc_tx_submit;
121                 desc->txd.phys = phys;
122         }
123 
124         return desc;
125 }
126 
127 /**
128  * atc_desc_get - get an unused descriptor from free_list
129  * @atchan: channel we want a new descriptor for
130  */
131 static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
132 {
133         struct at_desc *desc, *_desc;
134         struct at_desc *ret = NULL;
135         unsigned long flags;
136         unsigned int i = 0;
137         LIST_HEAD(tmp_list);
138 
139         spin_lock_irqsave(&atchan->lock, flags);
140         list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
141                 i++;
142                 if (async_tx_test_ack(&desc->txd)) {
143                         list_del(&desc->desc_node);
144                         ret = desc;
145                         break;
146                 }
147                 dev_dbg(chan2dev(&atchan->chan_common),
148                                 "desc %p not ACKed\n", desc);
149         }
150         spin_unlock_irqrestore(&atchan->lock, flags);
151         dev_vdbg(chan2dev(&atchan->chan_common),
152                 "scanned %u descriptors on freelist\n", i);
153 
 154         /* no more descriptors available in the initial pool: create one more */
155         if (!ret) {
156                 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
157                 if (ret) {
158                         spin_lock_irqsave(&atchan->lock, flags);
159                         atchan->descs_allocated++;
160                         spin_unlock_irqrestore(&atchan->lock, flags);
161                 } else {
162                         dev_err(chan2dev(&atchan->chan_common),
163                                         "not enough descriptors available\n");
164                 }
165         }
166 
167         return ret;
168 }
169 
170 /**
171  * atc_desc_put - move a descriptor, including any children, to the free list
172  * @atchan: channel we work on
173  * @desc: descriptor, at the head of a chain, to move to free list
174  */
175 static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
176 {
177         if (desc) {
178                 struct at_desc *child;
179                 unsigned long flags;
180 
181                 spin_lock_irqsave(&atchan->lock, flags);
182                 list_for_each_entry(child, &desc->tx_list, desc_node)
183                         dev_vdbg(chan2dev(&atchan->chan_common),
184                                         "moving child desc %p to freelist\n",
185                                         child);
186                 list_splice_init(&desc->tx_list, &atchan->free_list);
187                 dev_vdbg(chan2dev(&atchan->chan_common),
188                          "moving desc %p to freelist\n", desc);
189                 list_add(&desc->desc_node, &atchan->free_list);
190                 spin_unlock_irqrestore(&atchan->lock, flags);
191         }
192 }
193 
194 /**
 195  * atc_desc_chain - build a chain by adding a descriptor
196  * @first: address of first descriptor of the chain
197  * @prev: address of previous descriptor of the chain
198  * @desc: descriptor to queue
199  *
200  * Called from prep_* functions
201  */
202 static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
203                            struct at_desc *desc)
204 {
205         if (!(*first)) {
206                 *first = desc;
207         } else {
208                 /* inform the HW lli about chaining */
209                 (*prev)->lli.dscr = desc->txd.phys;
210                 /* insert the link descriptor to the LD ring */
211                 list_add_tail(&desc->desc_node,
212                                 &(*first)->tx_list);
213         }
214         *prev = desc;
215 }
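
/*
 * Editor's sketch of the calling pattern used by the prep_* functions below
 * (pseudo-loop, hypothetical chunk iteration):
 *
 *        struct at_desc *first = NULL, *prev = NULL, *desc;
 *
 *        for each chunk {
 *                desc = atc_desc_get(atchan);
 *                // ...fill desc->lli...
 *                atc_desc_chain(&first, &prev, desc);
 *        }
 *        set_desc_eol(desc);        // terminate the hardware list
 *        return &first->txd;
 */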
216 
217 /**
218  * atc_dostart - starts the DMA engine for real
219  * @atchan: the channel we want to start
220  * @first: first descriptor in the list we want to begin with
221  *
222  * Called with atchan->lock held and bh disabled
223  */
224 static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
225 {
226         struct at_dma   *atdma = to_at_dma(atchan->chan_common.device);
227 
228         /* ASSERT:  channel is idle */
229         if (atc_chan_is_enabled(atchan)) {
230                 dev_err(chan2dev(&atchan->chan_common),
231                         "BUG: Attempted to start non-idle channel\n");
232                 dev_err(chan2dev(&atchan->chan_common),
233                         "  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
234                         channel_readl(atchan, SADDR),
235                         channel_readl(atchan, DADDR),
236                         channel_readl(atchan, CTRLA),
237                         channel_readl(atchan, CTRLB),
238                         channel_readl(atchan, DSCR));
239 
240                 /* The tasklet will hopefully advance the queue... */
241                 return;
242         }
243 
244         vdbg_dump_regs(atchan);
245 
246         channel_writel(atchan, SADDR, 0);
247         channel_writel(atchan, DADDR, 0);
248         channel_writel(atchan, CTRLA, 0);
249         channel_writel(atchan, CTRLB, 0);
250         channel_writel(atchan, DSCR, first->txd.phys);
251         channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) |
252                        ATC_SPIP_BOUNDARY(first->boundary));
253         channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
254                        ATC_DPIP_BOUNDARY(first->boundary));
255         dma_writel(atdma, CHER, atchan->mask);
256 
257         vdbg_dump_regs(atchan);
258 }
259 
 260 /**
261  * atc_get_desc_by_cookie - get the descriptor of a cookie
262  * @atchan: the DMA channel
263  * @cookie: the cookie to get the descriptor for
264  */
265 static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
266                                                 dma_cookie_t cookie)
267 {
268         struct at_desc *desc, *_desc;
269 
270         list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
271                 if (desc->txd.cookie == cookie)
272                         return desc;
273         }
274 
275         list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
276                 if (desc->txd.cookie == cookie)
277                         return desc;
278         }
279 
280         return NULL;
281 }
282 
283 /**
284  * atc_calc_bytes_left - calculates the number of bytes left according to the
285  * value read from CTRLA.
286  *
287  * @current_len: the number of bytes left before reading CTRLA
288  * @ctrla: the value of CTRLA
289  */
290 static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
291 {
292         u32 btsize = (ctrla & ATC_BTSIZE_MAX);
293         u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
294 
295         /*
296          * According to the datasheet, when reading the Control A Register
297          * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
298          * number of transfers completed on the Source Interface.
299          * So btsize is always a number of source width transfers.
300          */
301         return current_len - (btsize << src_width);
302 }
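
/*
 * Editor's sketch, hypothetical numbers: if the current descriptor was set
 * up for current_len = 4096 bytes and CTRLA reads back btsize = 256 source
 * transfers at src_width = 2 (32-bit words), then 256 << 2 = 1024 bytes have
 * already been read from the source, so atc_calc_bytes_left() returns
 * 4096 - 1024 = 3072 bytes left.
 */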
303 
304 /**
305  * atc_get_bytes_left - get the number of bytes residue for a cookie
306  * @chan: DMA channel
307  * @cookie: transaction identifier to check status of
308  */
309 static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
310 {
311         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
312         struct at_desc *desc_first = atc_first_active(atchan);
313         struct at_desc *desc;
314         int ret;
315         u32 ctrla, dscr, trials;
316 
317         /*
 318          * If the cookie doesn't match the currently running transfer, then
319          * we can return the total length of the associated DMA transfer,
320          * because it is still queued.
321          */
322         desc = atc_get_desc_by_cookie(atchan, cookie);
323         if (desc == NULL)
324                 return -EINVAL;
325         else if (desc != desc_first)
326                 return desc->total_len;
327 
328         /* cookie matches to the currently running transfer */
329         ret = desc_first->total_len;
330 
331         if (desc_first->lli.dscr) {
332                 /* hardware linked list transfer */
333 
334                 /*
335                  * Calculate the residue by removing the length of the child
336                  * descriptors already transferred from the total length.
337                  * To get the current child descriptor we can use the value of
338                  * the channel's DSCR register and compare it against the value
339                  * of the hardware linked list structure of each child
340                  * descriptor.
341                  *
342                  * The CTRLA register provides us with the amount of data
343                  * already read from the source for the current child
344                  * descriptor. So we can compute a more accurate residue by also
345                  * removing the number of bytes corresponding to this amount of
346                  * data.
347                  *
 348                  * However, the DSCR and CTRLA registers cannot both be
 349                  * read atomically. Hence a race condition may occur: the
 350                  * first read may refer to one child descriptor whereas the
 351                  * second read may refer to a later child descriptor in the
 352                  * list, because the DMA transfer progresses in between the
 353                  * two reads.
 354                  *
 355                  * One solution could have been to pause the DMA transfer,
 356                  * read the DSCR and CTRLA, then resume the DMA transfer.
 357                  * Nonetheless, this approach presents some drawbacks:
 358                  * - If the DMA transfer is paused, RX overruns or TX
 359                  *   underruns are more likely to occur depending on the
 360                  *   system latency. Taking the USART driver as an example,
 361                  *   it uses a cyclic DMA transfer to read data from the
 362                  *   Receive Holding Register (RHR) to avoid RX overruns,
 363                  *   since the RHR is not protected by any FIFO on most
 364                  *   Atmel SoCs. Pausing the DMA transfer to compute the
 365                  *   residue would break the USART driver design.
 366                  * - The atc_pause() function masks interrupts, which we'd
 367                  *   rather avoid for system latency reasons.
 368                  *
 369                  * Instead we use another approach: the DSCR is read a
 370                  * first time, then the CTRLA, and then the DSCR a second
 371                  * time. If the two consecutive DSCR values are equal, we
 372                  * assume that both refer to the very same child
 373                  * descriptor, and that so does the CTRLA value read in
 374                  * between. For cyclic transfers, the assumption is that a
 375                  * full loop is "not so fast".
 376                  * If the two DSCR values differ, we read the CTRLA and
 377                  * then the DSCR again, until two consecutive reads of the
 378                  * DSCR are equal or the maximum number of trials is
 379                  * reached; this algorithm is very unlikely to fail.
 380                  */
381 
382                 dscr = channel_readl(atchan, DSCR);
383                 rmb(); /* ensure DSCR is read before CTRLA */
384                 ctrla = channel_readl(atchan, CTRLA);
385                 for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
386                         u32 new_dscr;
387 
388                         rmb(); /* ensure DSCR is read after CTRLA */
389                         new_dscr = channel_readl(atchan, DSCR);
390 
391                         /*
392                          * If the DSCR register value has not changed inside the
393                          * DMA controller since the previous read, we assume
 394                          * that both the dscr and ctrla values refer to the
395                          * very same descriptor.
396                          */
397                         if (likely(new_dscr == dscr))
398                                 break;
399 
400                         /*
401                          * DSCR has changed inside the DMA controller, so the
 402                          * previously read value of CTRLA may refer to an already
403                          * processed descriptor hence could be outdated.
404                          * We need to update ctrla to match the current
405                          * descriptor.
406                          */
407                         dscr = new_dscr;
408                         rmb(); /* ensure DSCR is read before CTRLA */
409                         ctrla = channel_readl(atchan, CTRLA);
410                 }
411                 if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
412                         return -ETIMEDOUT;
413 
414                 /* for the first descriptor we can be more accurate */
415                 if (desc_first->lli.dscr == dscr)
416                         return atc_calc_bytes_left(ret, ctrla);
417 
418                 ret -= desc_first->len;
419                 list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
420                         if (desc->lli.dscr == dscr)
421                                 break;
422 
423                         ret -= desc->len;
424                 }
425 
426                 /*
427                  * For the current descriptor in the chain we can calculate
428                  * the remaining bytes using the channel's register.
429                  */
430                 ret = atc_calc_bytes_left(ret, ctrla);
431         } else {
432                 /* single transfer */
433                 ctrla = channel_readl(atchan, CTRLA);
434                 ret = atc_calc_bytes_left(ret, ctrla);
435         }
436 
437         return ret;
438 }
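
/*
 * Editor's sketch: atc_get_bytes_left() backs the generic dmaengine residue
 * reporting. A client would typically query it through the standard API
 * (hypothetical helper, assumes <linux/dmaengine.h>):
 */
static void example_query_residue(struct dma_chan *chan, dma_cookie_t cookie)
{
        struct dma_tx_state state;
        enum dma_status status;

        status = dmaengine_tx_status(chan, cookie, &state);
        if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
                pr_info("transfer ongoing, %u bytes left\n", state.residue);
}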
439 
440 /**
441  * atc_chain_complete - finish work for one transaction chain
442  * @atchan: channel we work on
 443  * @desc: descriptor at the head of the chain we want to complete
444  *
445  * Called with atchan->lock held and bh disabled */
446 static void
447 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
448 {
449         struct dma_async_tx_descriptor  *txd = &desc->txd;
450         struct at_dma                   *atdma = to_at_dma(atchan->chan_common.device);
451 
452         dev_vdbg(chan2dev(&atchan->chan_common),
453                 "descriptor %u complete\n", txd->cookie);
454 
 455         /* mark the descriptor as complete for non-cyclic cases only */
456         if (!atc_chan_is_cyclic(atchan))
457                 dma_cookie_complete(txd);
458 
459         /* If the transfer was a memset, free our temporary buffer */
460         if (desc->memset_buffer) {
461                 dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
462                               desc->memset_paddr);
463                 desc->memset_buffer = false;
464         }
465 
466         /* move children to free_list */
467         list_splice_init(&desc->tx_list, &atchan->free_list);
468         /* move myself to free_list */
469         list_move(&desc->desc_node, &atchan->free_list);
470 
471         dma_descriptor_unmap(txd);
472         /* for cyclic transfers,
473          * no need to replay callback function while stopping */
474         if (!atc_chan_is_cyclic(atchan)) {
475                 /*
476                  * The API requires that no submissions are done from a
477                  * callback, so we don't need to drop the lock here
478                  */
479                 dmaengine_desc_get_callback_invoke(txd, NULL);
480         }
481 
482         dma_run_dependencies(txd);
483 }
484 
485 /**
486  * atc_complete_all - finish work for all transactions
487  * @atchan: channel to complete transactions for
488  *
 489  * Also submit queued descriptors, if any
490  *
491  * Assume channel is idle while calling this function
492  * Called with atchan->lock held and bh disabled
493  */
494 static void atc_complete_all(struct at_dma_chan *atchan)
495 {
496         struct at_desc *desc, *_desc;
497         LIST_HEAD(list);
498 
499         dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
500 
501         /*
502          * Submit queued descriptors ASAP, i.e. before we go through
503          * the completed ones.
504          */
505         if (!list_empty(&atchan->queue))
506                 atc_dostart(atchan, atc_first_queued(atchan));
 507         /* empty active_list now that it is completed */
508         list_splice_init(&atchan->active_list, &list);
509         /* empty queue list by moving descriptors (if any) to active_list */
510         list_splice_init(&atchan->queue, &atchan->active_list);
511 
512         list_for_each_entry_safe(desc, _desc, &list, desc_node)
513                 atc_chain_complete(atchan, desc);
514 }
515 
516 /**
517  * atc_advance_work - at the end of a transaction, move forward
518  * @atchan: channel where the transaction ended
519  *
520  * Called with atchan->lock held and bh disabled
521  */
522 static void atc_advance_work(struct at_dma_chan *atchan)
523 {
524         dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
525 
526         if (atc_chan_is_enabled(atchan))
527                 return;
528 
529         if (list_empty(&atchan->active_list) ||
530             list_is_singular(&atchan->active_list)) {
531                 atc_complete_all(atchan);
532         } else {
533                 atc_chain_complete(atchan, atc_first_active(atchan));
534                 /* advance work */
535                 atc_dostart(atchan, atc_first_active(atchan));
536         }
537 }
538 
539 
540 /**
541  * atc_handle_error - handle errors reported by DMA controller
542  * @atchan: channel where error occurs
543  *
544  * Called with atchan->lock held and bh disabled
545  */
546 static void atc_handle_error(struct at_dma_chan *atchan)
547 {
548         struct at_desc *bad_desc;
549         struct at_desc *child;
550 
551         /*
552          * The descriptor currently at the head of the active list is
 553          * broken. Since we don't have any way to report errors, we'll
554          * just have to scream loudly and try to carry on.
555          */
556         bad_desc = atc_first_active(atchan);
557         list_del_init(&bad_desc->desc_node);
558 
 559         /* As we are stopped, take the opportunity to push queued
 560          * descriptors into the active_list */
561         list_splice_init(&atchan->queue, atchan->active_list.prev);
562 
563         /* Try to restart the controller */
564         if (!list_empty(&atchan->active_list))
565                 atc_dostart(atchan, atc_first_active(atchan));
566 
567         /*
 568          * KERN_CRIT may seem harsh, but since this only happens
569          * when someone submits a bad physical address in a
570          * descriptor, we should consider ourselves lucky that the
571          * controller flagged an error instead of scribbling over
572          * random memory locations.
573          */
574         dev_crit(chan2dev(&atchan->chan_common),
575                         "Bad descriptor submitted for DMA!\n");
576         dev_crit(chan2dev(&atchan->chan_common),
577                         "  cookie: %d\n", bad_desc->txd.cookie);
578         atc_dump_lli(atchan, &bad_desc->lli);
579         list_for_each_entry(child, &bad_desc->tx_list, desc_node)
580                 atc_dump_lli(atchan, &child->lli);
581 
582         /* Pretend the descriptor completed successfully */
583         atc_chain_complete(atchan, bad_desc);
584 }
585 
586 /**
587  * atc_handle_cyclic - at the end of a period, run callback function
588  * @atchan: channel used for cyclic operations
589  *
590  * Called with atchan->lock held and bh disabled
591  */
592 static void atc_handle_cyclic(struct at_dma_chan *atchan)
593 {
594         struct at_desc                  *first = atc_first_active(atchan);
595         struct dma_async_tx_descriptor  *txd = &first->txd;
596 
597         dev_vdbg(chan2dev(&atchan->chan_common),
598                         "new cyclic period llp 0x%08x\n",
599                         channel_readl(atchan, DSCR));
600 
601         dmaengine_desc_get_callback_invoke(txd, NULL);
602 }
603 
604 /*--  IRQ & Tasklet  ---------------------------------------------------*/
605 
606 static void atc_tasklet(unsigned long data)
607 {
608         struct at_dma_chan *atchan = (struct at_dma_chan *)data;
609         unsigned long flags;
610 
611         spin_lock_irqsave(&atchan->lock, flags);
612         if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
613                 atc_handle_error(atchan);
614         else if (atc_chan_is_cyclic(atchan))
615                 atc_handle_cyclic(atchan);
616         else
617                 atc_advance_work(atchan);
618 
619         spin_unlock_irqrestore(&atchan->lock, flags);
620 }
621 
622 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
623 {
624         struct at_dma           *atdma = (struct at_dma *)dev_id;
625         struct at_dma_chan      *atchan;
626         int                     i;
627         u32                     status, pending, imr;
628         int                     ret = IRQ_NONE;
629 
630         do {
631                 imr = dma_readl(atdma, EBCIMR);
632                 status = dma_readl(atdma, EBCISR);
633                 pending = status & imr;
634 
635                 if (!pending)
636                         break;
637 
638                 dev_vdbg(atdma->dma_common.dev,
639                         "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
640                          status, imr, pending);
641 
642                 for (i = 0; i < atdma->dma_common.chancnt; i++) {
643                         atchan = &atdma->chan[i];
644                         if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
645                                 if (pending & AT_DMA_ERR(i)) {
646                                         /* Disable channel on AHB error */
647                                         dma_writel(atdma, CHDR,
648                                                 AT_DMA_RES(i) | atchan->mask);
649                                         /* Give information to tasklet */
650                                         set_bit(ATC_IS_ERROR, &atchan->status);
651                                 }
652                                 tasklet_schedule(&atchan->tasklet);
653                                 ret = IRQ_HANDLED;
654                         }
655                 }
656 
657         } while (pending);
658 
659         return ret;
660 }
661 
662 
663 /*--  DMA Engine API  --------------------------------------------------*/
664 
665 /**
666  * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 667  * @tx: descriptor at the head of the transaction chain
668  *
669  * Queue chain if DMA engine is working already
670  *
671  * Cookie increment and adding to active_list or queue must be atomic
672  */
673 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
674 {
675         struct at_desc          *desc = txd_to_at_desc(tx);
676         struct at_dma_chan      *atchan = to_at_dma_chan(tx->chan);
677         dma_cookie_t            cookie;
678         unsigned long           flags;
679 
680         spin_lock_irqsave(&atchan->lock, flags);
681         cookie = dma_cookie_assign(tx);
682 
683         if (list_empty(&atchan->active_list)) {
684                 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
685                                 desc->txd.cookie);
686                 atc_dostart(atchan, desc);
687                 list_add_tail(&desc->desc_node, &atchan->active_list);
688         } else {
689                 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
690                                 desc->txd.cookie);
691                 list_add_tail(&desc->desc_node, &atchan->queue);
692         }
693 
694         spin_unlock_irqrestore(&atchan->lock, flags);
695 
696         return cookie;
697 }
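
/*
 * Editor's sketch: clients do not call atc_tx_submit() directly; it is
 * reached through the standard dmaengine submit path (hypothetical helper):
 */
static int example_submit(struct dma_chan *chan,
                          struct dma_async_tx_descriptor *desc)
{
        dma_cookie_t cookie;

        cookie = dmaengine_submit(desc);        /* calls desc->tx_submit() */
        if (dma_submit_error(cookie))
                return -EINVAL;

        dma_async_issue_pending(chan);          /* make the engine run */
        return 0;
}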
698 
699 /**
700  * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
701  * @chan: the channel to prepare operation on
702  * @xt: Interleaved transfer template
703  * @flags: tx descriptor status flags
704  */
705 static struct dma_async_tx_descriptor *
706 atc_prep_dma_interleaved(struct dma_chan *chan,
707                          struct dma_interleaved_template *xt,
708                          unsigned long flags)
709 {
710         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
711         struct data_chunk       *first = xt->sgl;
712         struct at_desc          *desc = NULL;
713         size_t                  xfer_count;
714         unsigned int            dwidth;
715         u32                     ctrla;
716         u32                     ctrlb;
717         size_t                  len = 0;
718         int                     i;
719 
720         if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
721                 return NULL;
722 
723         dev_info(chan2dev(chan),
 724                  "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
725                 __func__, &xt->src_start, &xt->dst_start, xt->numf,
726                 xt->frame_size, flags);
727 
728         /*
729          * The controller can only "skip" X bytes every Y bytes, so we
 730          * need to make sure we are given a template that fits that
 731          * description, i.e. a template with chunks that always have the
 732          * same size and the same ICGs.
733          */
734         for (i = 0; i < xt->frame_size; i++) {
735                 struct data_chunk *chunk = xt->sgl + i;
736 
737                 if ((chunk->size != xt->sgl->size) ||
738                     (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
739                     (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
740                         dev_err(chan2dev(chan),
741                                 "%s: the controller can transfer only identical chunks\n",
742                                 __func__);
743                         return NULL;
744                 }
745 
746                 len += chunk->size;
747         }
748 
749         dwidth = atc_get_xfer_width(xt->src_start,
750                                     xt->dst_start, len);
751 
752         xfer_count = len >> dwidth;
753         if (xfer_count > ATC_BTSIZE_MAX) {
754                 dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
755                 return NULL;
756         }
757 
758         ctrla = ATC_SRC_WIDTH(dwidth) |
759                 ATC_DST_WIDTH(dwidth);
760 
761         ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
762                 | ATC_SRC_ADDR_MODE_INCR
763                 | ATC_DST_ADDR_MODE_INCR
764                 | ATC_SRC_PIP
765                 | ATC_DST_PIP
766                 | ATC_FC_MEM2MEM;
767 
768         /* create the transfer */
769         desc = atc_desc_get(atchan);
770         if (!desc) {
771                 dev_err(chan2dev(chan),
772                         "%s: couldn't allocate our descriptor\n", __func__);
773                 return NULL;
774         }
775 
776         desc->lli.saddr = xt->src_start;
777         desc->lli.daddr = xt->dst_start;
778         desc->lli.ctrla = ctrla | xfer_count;
779         desc->lli.ctrlb = ctrlb;
780 
781         desc->boundary = first->size >> dwidth;
782         desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
783         desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;
784 
785         desc->txd.cookie = -EBUSY;
786         desc->total_len = desc->len = len;
787 
 788         /* set end-of-link on the last link descriptor of the list */
789         set_desc_eol(desc);
790 
791         desc->txd.flags = flags; /* client is in control of this ack */
792 
793         return &desc->txd;
794 }
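
/*
 * Editor's sketch: a hypothetical client request matching the constraints
 * above (numf == 1, identical chunks with identical gaps), built with the
 * standard dmaengine interleaved API:
 */
static struct dma_async_tx_descriptor *
example_prep_interleaved(struct dma_chan *chan, dma_addr_t src, dma_addr_t dst)
{
        struct dma_interleaved_template *xt;
        struct dma_async_tx_descriptor *desc;

        xt = kzalloc(sizeof(*xt) + 2 * sizeof(struct data_chunk), GFP_KERNEL);
        if (!xt)
                return NULL;

        xt->src_start = src;
        xt->dst_start = dst;
        xt->dir = DMA_MEM_TO_MEM;
        xt->src_inc = true;
        xt->dst_inc = true;
        xt->numf = 1;                   /* this driver requires numf == 1 */
        xt->frame_size = 2;
        xt->sgl[0].size = 64;           /* all chunks identical ...       */
        xt->sgl[0].icg = 16;            /* ... with identical gaps        */
        xt->sgl[1] = xt->sgl[0];

        desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
        kfree(xt);
        return desc;
}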
795 
796 /**
797  * atc_prep_dma_memcpy - prepare a memcpy operation
798  * @chan: the channel to prepare operation on
799  * @dest: operation virtual destination address
800  * @src: operation virtual source address
801  * @len: operation length
802  * @flags: tx descriptor status flags
803  */
804 static struct dma_async_tx_descriptor *
805 atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
806                 size_t len, unsigned long flags)
807 {
808         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
809         struct at_desc          *desc = NULL;
810         struct at_desc          *first = NULL;
811         struct at_desc          *prev = NULL;
812         size_t                  xfer_count;
813         size_t                  offset;
814         unsigned int            src_width;
815         unsigned int            dst_width;
816         u32                     ctrla;
817         u32                     ctrlb;
818 
819         dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
820                         &dest, &src, len, flags);
821 
822         if (unlikely(!len)) {
823                 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
824                 return NULL;
825         }
826 
827         ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
828                 | ATC_SRC_ADDR_MODE_INCR
829                 | ATC_DST_ADDR_MODE_INCR
830                 | ATC_FC_MEM2MEM;
831 
832         /*
833          * We can be a lot more clever here, but this should take care
834          * of the most common optimization.
835          */
836         src_width = dst_width = atc_get_xfer_width(src, dest, len);
837 
838         ctrla = ATC_SRC_WIDTH(src_width) |
839                 ATC_DST_WIDTH(dst_width);
840 
841         for (offset = 0; offset < len; offset += xfer_count << src_width) {
842                 xfer_count = min_t(size_t, (len - offset) >> src_width,
843                                 ATC_BTSIZE_MAX);
844 
845                 desc = atc_desc_get(atchan);
846                 if (!desc)
847                         goto err_desc_get;
848 
849                 desc->lli.saddr = src + offset;
850                 desc->lli.daddr = dest + offset;
851                 desc->lli.ctrla = ctrla | xfer_count;
852                 desc->lli.ctrlb = ctrlb;
853 
854                 desc->txd.cookie = 0;
855                 desc->len = xfer_count << src_width;
856 
857                 atc_desc_chain(&first, &prev, desc);
858         }
859 
 860         /* First descriptor of the chain embeds additional information */
 861         first->txd.cookie = -EBUSY;
 862         first->total_len = len;
 863 
 864         /* set end-of-link on the last link descriptor of the list */
865         set_desc_eol(desc);
866 
867         first->txd.flags = flags; /* client is in control of this ack */
868 
869         return &first->txd;
870 
871 err_desc_get:
872         atc_desc_put(atchan, first);
873         return NULL;
874 }
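
/*
 * Editor's sketch: typical client usage of the memcpy path above, through
 * the generic dmaengine API (hypothetical helper, pre-mapped addresses):
 */
static int example_memcpy(struct dma_chan *chan, dma_addr_t dst,
                          dma_addr_t src, size_t len)
{
        struct dma_async_tx_descriptor *desc;

        desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -ENOMEM;

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
}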
875 
876 static struct at_desc *atc_create_memset_desc(struct dma_chan *chan,
877                                               dma_addr_t psrc,
878                                               dma_addr_t pdst,
879                                               size_t len)
880 {
881         struct at_dma_chan *atchan = to_at_dma_chan(chan);
882         struct at_desc *desc;
883         size_t xfer_count;
884 
885         u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2);
886         u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
887                 ATC_SRC_ADDR_MODE_FIXED |
888                 ATC_DST_ADDR_MODE_INCR |
889                 ATC_FC_MEM2MEM;
890 
891         xfer_count = len >> 2;
892         if (xfer_count > ATC_BTSIZE_MAX) {
893                 dev_err(chan2dev(chan), "%s: buffer is too big\n",
894                         __func__);
895                 return NULL;
896         }
897 
898         desc = atc_desc_get(atchan);
899         if (!desc) {
900                 dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
901                         __func__);
902                 return NULL;
903         }
904 
905         desc->lli.saddr = psrc;
906         desc->lli.daddr = pdst;
907         desc->lli.ctrla = ctrla | xfer_count;
908         desc->lli.ctrlb = ctrlb;
909 
910         desc->txd.cookie = 0;
911         desc->len = len;
912 
913         return desc;
914 }
915 
916 /**
 917  * atc_prep_dma_memset - prepare a memset operation
918  * @chan: the channel to prepare operation on
919  * @dest: operation virtual destination address
920  * @value: value to set memory buffer to
921  * @len: operation length
922  * @flags: tx descriptor status flags
923  */
924 static struct dma_async_tx_descriptor *
925 atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
926                     size_t len, unsigned long flags)
927 {
928         struct at_dma           *atdma = to_at_dma(chan->device);
929         struct at_desc          *desc;
930         void __iomem            *vaddr;
931         dma_addr_t              paddr;
932 
933         dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
934                 &dest, value, len, flags);
935 
936         if (unlikely(!len)) {
937                 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
938                 return NULL;
939         }
940 
941         if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
942                 dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
943                         __func__);
944                 return NULL;
945         }
946 
947         vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
948         if (!vaddr) {
949                 dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
950                         __func__);
951                 return NULL;
952         }
 953         *(u32 *)vaddr = value;
954 
955         desc = atc_create_memset_desc(chan, paddr, dest, len);
956         if (!desc) {
957                 dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n",
958                         __func__);
959                 goto err_free_buffer;
960         }
961 
962         desc->memset_paddr = paddr;
963         desc->memset_vaddr = vaddr;
964         desc->memset_buffer = true;
965 
966         desc->txd.cookie = -EBUSY;
967         desc->total_len = len;
968 
969         /* set end-of-link on the descriptor */
970         set_desc_eol(desc);
971 
972         desc->txd.flags = flags;
973 
974         return &desc->txd;
975 
976 err_free_buffer:
977         dma_pool_free(atdma->memset_pool, vaddr, paddr);
978         return NULL;
979 }
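
/*
 * Editor's sketch: the memset path is reached through the generic
 * dmaengine_prep_dma_memset() helper of this era; the driver fills a
 * one-word bounce buffer with @value and streams it out with a fixed
 * source address (hypothetical helper):
 */
static int example_memset(struct dma_chan *chan, dma_addr_t dst, int value,
                          size_t len)
{
        struct dma_async_tx_descriptor *desc;

        desc = dmaengine_prep_dma_memset(chan, dst, value, len,
                                         DMA_PREP_INTERRUPT);
        if (!desc)
                return -ENOMEM;

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
}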
980 
981 static struct dma_async_tx_descriptor *
982 atc_prep_dma_memset_sg(struct dma_chan *chan,
983                        struct scatterlist *sgl,
984                        unsigned int sg_len, int value,
985                        unsigned long flags)
986 {
987         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
988         struct at_dma           *atdma = to_at_dma(chan->device);
989         struct at_desc          *desc = NULL, *first = NULL, *prev = NULL;
990         struct scatterlist      *sg;
991         void __iomem            *vaddr;
992         dma_addr_t              paddr;
993         size_t                  total_len = 0;
994         int                     i;
995 
 996         dev_vdbg(chan2dev(chan), "%s: v0x%x n%u f0x%lx\n", __func__,
997                  value, sg_len, flags);
998 
999         if (unlikely(!sgl || !sg_len)) {
1000                 dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n",
1001                         __func__);
1002                 return NULL;
1003         }
1004 
1005         vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
1006         if (!vaddr) {
1007                 dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
1008                         __func__);
1009                 return NULL;
1010         }
1011         *(u32 *)vaddr = value;
1012 
1013         for_each_sg(sgl, sg, sg_len, i) {
1014                 dma_addr_t dest = sg_dma_address(sg);
1015                 size_t len = sg_dma_len(sg);
1016 
1017                 dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
1018                          __func__, &dest, len);
1019 
1020                 if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
1021                         dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
1022                                 __func__);
1023                         goto err_put_desc;
1024                 }
1025 
1026                 desc = atc_create_memset_desc(chan, paddr, dest, len);
1027                 if (!desc)
1028                         goto err_put_desc;
1029 
1030                 atc_desc_chain(&first, &prev, desc);
1031 
1032                 total_len += len;
1033         }
1034 
1035         /*
1036          * Only set the buffer pointers on the last descriptor to
1037          * avoid freeing it while the transfer is still going
1038          */
1039         desc->memset_paddr = paddr;
1040         desc->memset_vaddr = vaddr;
1041         desc->memset_buffer = true;
1042 
1043         first->txd.cookie = -EBUSY;
1044         first->total_len = total_len;
1045 
1046         /* set end-of-link on the descriptor */
1047         set_desc_eol(desc);
1048 
1049         first->txd.flags = flags;
1050 
1051         return &first->txd;
1052 
1053 err_put_desc:
1054         atc_desc_put(atchan, first);
1055         return NULL;
1056 }
1057 
1058 /**
1059  * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1060  * @chan: DMA channel
1061  * @sgl: scatterlist to transfer to/from
1062  * @sg_len: number of entries in @sgl
1063  * @direction: DMA direction
1064  * @flags: tx descriptor status flags
1065  * @context: transaction context (ignored)
1066  */
1067 static struct dma_async_tx_descriptor *
1068 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1069                 unsigned int sg_len, enum dma_transfer_direction direction,
1070                 unsigned long flags, void *context)
1071 {
1072         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1073         struct at_dma_slave     *atslave = chan->private;
1074         struct dma_slave_config *sconfig = &atchan->dma_sconfig;
1075         struct at_desc          *first = NULL;
1076         struct at_desc          *prev = NULL;
1077         u32                     ctrla;
1078         u32                     ctrlb;
1079         dma_addr_t              reg;
1080         unsigned int            reg_width;
1081         unsigned int            mem_width;
1082         unsigned int            i;
1083         struct scatterlist      *sg;
1084         size_t                  total_len = 0;
1085 
1086         dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
1087                         sg_len,
1088                         direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1089                         flags);
1090 
1091         if (unlikely(!atslave || !sg_len)) {
1092                 dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
1093                 return NULL;
1094         }
1095 
1096         ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
1097                 | ATC_DCSIZE(sconfig->dst_maxburst);
1098         ctrlb = ATC_IEN;
1099 
1100         switch (direction) {
1101         case DMA_MEM_TO_DEV:
1102                 reg_width = convert_buswidth(sconfig->dst_addr_width);
1103                 ctrla |=  ATC_DST_WIDTH(reg_width);
1104                 ctrlb |=  ATC_DST_ADDR_MODE_FIXED
1105                         | ATC_SRC_ADDR_MODE_INCR
1106                         | ATC_FC_MEM2PER
1107                         | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
1108                 reg = sconfig->dst_addr;
1109                 for_each_sg(sgl, sg, sg_len, i) {
1110                         struct at_desc  *desc;
1111                         u32             len;
1112                         u32             mem;
1113 
1114                         desc = atc_desc_get(atchan);
1115                         if (!desc)
1116                                 goto err_desc_get;
1117 
1118                         mem = sg_dma_address(sg);
1119                         len = sg_dma_len(sg);
1120                         if (unlikely(!len)) {
1121                                 dev_dbg(chan2dev(chan),
1122                                         "prep_slave_sg: sg(%d) data length is zero\n", i);
1123                                 goto err;
1124                         }
1125                         mem_width = 2;
1126                         if (unlikely(mem & 3 || len & 3))
1127                                 mem_width = 0;
1128 
1129                         desc->lli.saddr = mem;
1130                         desc->lli.daddr = reg;
1131                         desc->lli.ctrla = ctrla
1132                                         | ATC_SRC_WIDTH(mem_width)
1133                                         | len >> mem_width;
1134                         desc->lli.ctrlb = ctrlb;
1135                         desc->len = len;
1136 
1137                         atc_desc_chain(&first, &prev, desc);
1138                         total_len += len;
1139                 }
1140                 break;
1141         case DMA_DEV_TO_MEM:
1142                 reg_width = convert_buswidth(sconfig->src_addr_width);
1143                 ctrla |=  ATC_SRC_WIDTH(reg_width);
1144                 ctrlb |=  ATC_DST_ADDR_MODE_INCR
1145                         | ATC_SRC_ADDR_MODE_FIXED
1146                         | ATC_FC_PER2MEM
1147                         | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);
1148 
1149                 reg = sconfig->src_addr;
1150                 for_each_sg(sgl, sg, sg_len, i) {
1151                         struct at_desc  *desc;
1152                         u32             len;
1153                         u32             mem;
1154 
1155                         desc = atc_desc_get(atchan);
1156                         if (!desc)
1157                                 goto err_desc_get;
1158 
1159                         mem = sg_dma_address(sg);
1160                         len = sg_dma_len(sg);
1161                         if (unlikely(!len)) {
1162                                 dev_dbg(chan2dev(chan),
1163                                         "prep_slave_sg: sg(%d) data length is zero\n", i);
1164                                 goto err;
1165                         }
1166                         mem_width = 2;
1167                         if (unlikely(mem & 3 || len & 3))
1168                                 mem_width = 0;
1169 
1170                         desc->lli.saddr = reg;
1171                         desc->lli.daddr = mem;
1172                         desc->lli.ctrla = ctrla
1173                                         | ATC_DST_WIDTH(mem_width)
1174                                         | len >> reg_width;
1175                         desc->lli.ctrlb = ctrlb;
1176                         desc->len = len;
1177 
1178                         atc_desc_chain(&first, &prev, desc);
1179                         total_len += len;
1180                 }
1181                 break;
1182         default:
1183                 return NULL;
1184         }
1185 
1186         /* set end-of-link on the last link descriptor of the list */
1187         set_desc_eol(prev);
1188 
1189         /* First descriptor of the chain embeds additional information */
1190         first->txd.cookie = -EBUSY;
1191         first->total_len = total_len;
1192 
1193         /* first link descriptor of the list is responsible for the flags */
1194         first->txd.flags = flags; /* client is in control of this ack */
1195 
1196         return &first->txd;
1197 
1198 err_desc_get:
1199         dev_err(chan2dev(chan), "not enough descriptors available\n");
1200 err:
1201         atc_desc_put(atchan, first);
1202         return NULL;
1203 }
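
/*
 * Editor's sketch: before calling the slave_sg path above, a client first
 * describes the peripheral side with dmaengine_slave_config(); hypothetical
 * FIFO address and pre-mapped scatterlist assumed:
 */
static int example_slave_tx(struct dma_chan *chan, dma_addr_t fifo,
                            struct scatterlist *sgl, unsigned int sg_len)
{
        struct dma_slave_config cfg = {
                .direction = DMA_MEM_TO_DEV,
                .dst_addr = fifo,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst = 16,
        };
        struct dma_async_tx_descriptor *desc;

        if (dmaengine_slave_config(chan, &cfg))
                return -EINVAL;

        desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT);
        if (!desc)
                return -ENOMEM;

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
}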
1204 
1205 /**
1206  * atc_prep_dma_sg - prepare memory to memory scatter-gather operation
1207  * @chan: the channel to prepare operation on
1208  * @dst_sg: destination scatterlist
1209  * @dst_nents: number of destination scatterlist entries
1210  * @src_sg: source scatterlist
1211  * @src_nents: number of source scatterlist entries
1212  * @flags: tx descriptor status flags
1213  */
1214 static struct dma_async_tx_descriptor *
1215 atc_prep_dma_sg(struct dma_chan *chan,
1216                 struct scatterlist *dst_sg, unsigned int dst_nents,
1217                 struct scatterlist *src_sg, unsigned int src_nents,
1218                 unsigned long flags)
1219 {
1220         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1221         struct at_desc          *desc = NULL;
1222         struct at_desc          *first = NULL;
1223         struct at_desc          *prev = NULL;
1224         unsigned int            src_width;
1225         unsigned int            dst_width;
1226         size_t                  xfer_count;
1227         u32                     ctrla;
1228         u32                     ctrlb;
1229         size_t                  dst_len = 0, src_len = 0;
1230         dma_addr_t              dst = 0, src = 0;
1231         size_t                  len = 0, total_len = 0;
1232 
1233         if (unlikely(dst_nents == 0 || src_nents == 0))
1234                 return NULL;
1235 
1236         if (unlikely(dst_sg == NULL || src_sg == NULL))
1237                 return NULL;
1238 
1239         ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
1240                 | ATC_SRC_ADDR_MODE_INCR
1241                 | ATC_DST_ADDR_MODE_INCR
1242                 | ATC_FC_MEM2MEM;
1243 
1244         /*
1245          * loop until there are either no more source or no more
1246          * destination scatterlist entries
1247          */
1248         while (true) {
1249 
1250                 /* prepare the next transfer */
1251                 if (dst_len == 0) {
1252 
1253                         /* no more destination scatterlist entries */
1254                         if (!dst_sg || !dst_nents)
1255                                 break;
1256 
1257                         dst = sg_dma_address(dst_sg);
1258                         dst_len = sg_dma_len(dst_sg);
1259 
1260                         dst_sg = sg_next(dst_sg);
1261                         dst_nents--;
1262                 }
1263 
1264                 if (src_len == 0) {
1265 
1266                         /* no more source scatterlist entries */
1267                         if (!src_sg || !src_nents)
1268                                 break;
1269 
1270                         src = sg_dma_address(src_sg);
1271                         src_len = sg_dma_len(src_sg);
1272 
1273                         src_sg = sg_next(src_sg);
1274                         src_nents--;
1275                 }
1276 
1277                 len = min_t(size_t, src_len, dst_len);
1278                 if (len == 0)
1279                         continue;
1280 
1281                 /* take care of the alignment */
1282                 src_width = dst_width = atc_get_xfer_width(src, dst, len);
1283 
1284                 ctrla = ATC_SRC_WIDTH(src_width) |
1285                         ATC_DST_WIDTH(dst_width);
1286 
1287                 /*
1288                  * The number of transfers to set up refers to the source
1289                  * width, which depends on the alignment.
1290                  */
1291                 xfer_count = len >> src_width;
1292                 if (xfer_count > ATC_BTSIZE_MAX) {
1293                         xfer_count = ATC_BTSIZE_MAX;
1294                         len = ATC_BTSIZE_MAX << src_width;
1295                 }
1296 
1297                 /* create the transfer */
1298                 desc = atc_desc_get(atchan);
1299                 if (!desc)
1300                         goto err_desc_get;
1301 
1302                 desc->lli.saddr = src;
1303                 desc->lli.daddr = dst;
1304                 desc->lli.ctrla = ctrla | xfer_count;
1305                 desc->lli.ctrlb = ctrlb;
1306 
1307                 desc->txd.cookie = 0;
1308                 desc->len = len;
1309 
1310                 atc_desc_chain(&first, &prev, desc);
1311 
1312                 /* update the lengths and addresses for the next loop cycle */
1313                 dst_len -= len;
1314                 src_len -= len;
1315                 dst += len;
1316                 src += len;
1317 
1318                 total_len += len;
1319         }
1320 
1321         /* First descriptor of the chain embeds additional information */
1322         first->txd.cookie = -EBUSY;
1323         first->total_len = total_len;
1324 
1325         /* set end-of-link on the last link descriptor of the list */
1326         set_desc_eol(desc);
1327 
1328         first->txd.flags = flags; /* client is in control of this ack */
1329 
1330         return &first->txd;
1331 
1332 err_desc_get:
1333         atc_desc_put(atchan, first);
1334         return NULL;
1335 }
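
/*
 * Editor's sketch: kernels of this era exposed this operation through the
 * dmaengine_prep_dma_sg() wrapper (since removed upstream); a hypothetical
 * client with pre-mapped scatterlists:
 */
static int example_sg_copy(struct dma_chan *chan,
                           struct scatterlist *dst_sg, unsigned int dst_nents,
                           struct scatterlist *src_sg, unsigned int src_nents)
{
        struct dma_async_tx_descriptor *desc;

        desc = dmaengine_prep_dma_sg(chan, dst_sg, dst_nents,
                                     src_sg, src_nents, DMA_PREP_INTERRUPT);
        if (!desc)
                return -ENOMEM;

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
}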
1336 
1337 /**
1338  * atc_dma_cyclic_check_values
1339  * Check for too big/unaligned periods and unaligned DMA buffer
1340  */
1341 static int
1342 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
1343                 size_t period_len)
1344 {
1345         if (period_len > (ATC_BTSIZE_MAX << reg_width))
1346                 goto err_out;
1347         if (unlikely(period_len & ((1 << reg_width) - 1)))
1348                 goto err_out;
1349         if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1350                 goto err_out;
1351 
1352         return 0;
1353 
1354 err_out:
1355         return -EINVAL;
1356 }
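
/*
 * Editor's sketch, hypothetical numbers: with reg_width = 2 (32-bit register
 * accesses), each period may carry at most ATC_BTSIZE_MAX words, period_len
 * must be a multiple of 4 and buf_addr must be word aligned. So
 * buf_addr = 0x20001000 with period_len = 4096 passes, while
 * period_len = 4095 or buf_addr = 0x20001002 fails with -EINVAL.
 */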
1357 
1358 /**
1359  * atc_dma_cyclic_fill_desc - Fill one period descriptor
1360  */
1361 static int
1362 atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
1363                 unsigned int period_index, dma_addr_t buf_addr,
1364                 unsigned int reg_width, size_t period_len,
1365                 enum dma_transfer_direction direction)
1366 {
1367         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1368         struct dma_slave_config *sconfig = &atchan->dma_sconfig;
1369         u32                     ctrla;
1370 
1371         /* prepare common CTRLA value */
1372         ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
1373                 | ATC_DCSIZE(sconfig->dst_maxburst)
1374                 | ATC_DST_WIDTH(reg_width)
1375                 | ATC_SRC_WIDTH(reg_width)
1376                 | period_len >> reg_width;
1377 
1378         switch (direction) {
1379         case DMA_MEM_TO_DEV:
1380                 desc->lli.saddr = buf_addr + (period_len * period_index);
1381                 desc->lli.daddr = sconfig->dst_addr;
1382                 desc->lli.ctrla = ctrla;
1383                 desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
1384                                 | ATC_SRC_ADDR_MODE_INCR
1385                                 | ATC_FC_MEM2PER
1386                                 | ATC_SIF(atchan->mem_if)
1387                                 | ATC_DIF(atchan->per_if);
1388                 desc->len = period_len;
1389                 break;
1390 
1391         case DMA_DEV_TO_MEM:
1392                 desc->lli.saddr = sconfig->src_addr;
1393                 desc->lli.daddr = buf_addr + (period_len * period_index);
1394                 desc->lli.ctrla = ctrla;
1395                 desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
1396                                 | ATC_SRC_ADDR_MODE_FIXED
1397                                 | ATC_FC_PER2MEM
1398                                 | ATC_SIF(atchan->per_if)
1399                                 | ATC_DIF(atchan->mem_if);
1400                 desc->len = period_len;
1401                 break;
1402 
1403         default:
1404                 return -EINVAL;
1405         }
1406 
1407         return 0;
1408 }
1409 
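/*
 * Worked example (editorial): for a 2-byte-wide peripheral (reg_width == 1)
 * and period_len == 2400, period i starts at buf_addr + i * 2400 and the
 * CTRLA transfer size programmed above is 2400 >> 1 == 1200 bus-width
 * units; the SRC/DST address modes select which side increments depending
 * on the transfer direction.
 */
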
1410 /**
1411  * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
1412  * @chan: the DMA channel to prepare
1413  * @buf_addr: physical DMA address where the buffer starts
1414  * @buf_len: total number of bytes for the entire buffer
1415  * @period_len: number of bytes for each period
1416  * @direction: transfer direction, to or from device
1417  * @flags: tx descriptor status flags
1418  */
1419 static struct dma_async_tx_descriptor *
1420 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1421                 size_t period_len, enum dma_transfer_direction direction,
1422                 unsigned long flags)
1423 {
1424         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1425         struct at_dma_slave     *atslave = chan->private;
1426         struct dma_slave_config *sconfig = &atchan->dma_sconfig;
1427         struct at_desc          *first = NULL;
1428         struct at_desc          *prev = NULL;
1429         unsigned long           was_cyclic;
1430         unsigned int            reg_width;
1431         unsigned int            periods = period_len ? buf_len / period_len : 0;
1432         unsigned int            i;
1433 
1434         dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%zu/%zu)\n",
1435                         direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1436                         &buf_addr,
1437                         periods, buf_len, period_len);
1438 
1439         if (unlikely(!atslave || !buf_len || !period_len)) {
1440                 dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
1441                 return NULL;
1442         }
1443 
1444         was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
1445         if (was_cyclic) {
1446                 dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
1447                 return NULL;
1448         }
1449 
1450         if (unlikely(!is_slave_direction(direction)))
1451                 goto err_out;
1452 
1453         if (sconfig->direction == DMA_MEM_TO_DEV)
1454                 reg_width = convert_buswidth(sconfig->dst_addr_width);
1455         else
1456                 reg_width = convert_buswidth(sconfig->src_addr_width);
1457 
1458         /* Check for too big/unaligned periods and unaligned DMA buffer */
1459         if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
1460                 goto err_out;
1461 
1462         /* build cyclic linked list */
1463         for (i = 0; i < periods; i++) {
1464                 struct at_desc  *desc;
1465 
1466                 desc = atc_desc_get(atchan);
1467                 if (!desc)
1468                         goto err_desc_get;
1469 
1470                 if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
1471                                              reg_width, period_len, direction))
1472                         goto err_desc_get;
1473 
1474                 atc_desc_chain(&first, &prev, desc);
1475         }
1476 
1477         /* let's make a cyclic list */
1478         prev->lli.dscr = first->txd.phys;
1479 
1480         /* First descriptor of the chain embeds additional information */
1481         first->txd.cookie = -EBUSY;
1482         first->total_len = buf_len;
1483 
1484         return &first->txd;
1485 
1486 err_desc_get:
1487         dev_err(chan2dev(chan), "not enough descriptors available\n");
1488         atc_desc_put(atchan, first);
1489 err_out:
1490         clear_bit(ATC_IS_CYCLIC, &atchan->status);
1491         return NULL;
1492 }
1493 
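/*
 * Editorial sketch, not part of the driver: a typical client flow for the
 * cyclic API above, e.g. feeding a peripheral FIFO from a ring buffer.
 * The channel, buffer and FIFO address come from the caller;
 * example_period_done() is a hypothetical completion callback.
 */
static void example_period_done(void *param)
{
	/* hypothetical: runs once per elapsed period (DMA_PREP_INTERRUPT) */
}

static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len,
				dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		.dst_maxburst	= 1,
	};
	struct dma_async_tx_descriptor *desc;

	if (dmaengine_slave_config(chan, &cfg))
		return -EINVAL;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = example_period_done;
	desc->callback_param = NULL;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}
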
1494 static int atc_config(struct dma_chan *chan,
1495                       struct dma_slave_config *sconfig)
1496 {
1497         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1498 
1499         dev_vdbg(chan2dev(chan), "%s\n", __func__);
1500 
1501         /* Check if chan is configured for slave transfers */
1502         if (!chan->private)
1503                 return -EINVAL;
1504 
1505         memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
1506 
1507         convert_burst(&atchan->dma_sconfig.src_maxburst);
1508         convert_burst(&atchan->dma_sconfig.dst_maxburst);
1509 
1510         return 0;
1511 }
1512 
1513 static int atc_pause(struct dma_chan *chan)
1514 {
1515         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1516         struct at_dma           *atdma = to_at_dma(chan->device);
1517         int                     chan_id = atchan->chan_common.chan_id;
1518         unsigned long           flags;
1519 
1520         LIST_HEAD(list);
1521 
1522         dev_vdbg(chan2dev(chan), "%s\n", __func__);
1523 
1524         spin_lock_irqsave(&atchan->lock, flags);
1525 
1526         dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
1527         set_bit(ATC_IS_PAUSED, &atchan->status);
1528 
1529         spin_unlock_irqrestore(&atchan->lock, flags);
1530 
1531         return 0;
1532 }
1533 
1534 static int atc_resume(struct dma_chan *chan)
1535 {
1536         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1537         struct at_dma           *atdma = to_at_dma(chan->device);
1538         int                     chan_id = atchan->chan_common.chan_id;
1539         unsigned long           flags;
1540 
1541         LIST_HEAD(list);
1542 
1543         dev_vdbg(chan2dev(chan), "%s\n", __func__);
1544 
1545         if (!atc_chan_is_paused(atchan))
1546                 return 0;
1547 
1548         spin_lock_irqsave(&atchan->lock, flags);
1549 
1550         dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
1551         clear_bit(ATC_IS_PAUSED, &atchan->status);
1552 
1553         spin_unlock_irqrestore(&atchan->lock, flags);
1554 
1555         return 0;
1556 }
1557 
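/*
 * Editorial sketch: clients reach atc_pause()/atc_resume() through the
 * generic dmaengine wrappers, typically to freeze a cyclic channel while
 * the peripheral is reconfigured.
 */
static void example_pause_window(struct dma_chan *chan)
{
	dmaengine_pause(chan);		/* suspends the channel via AT_DMA_SUSP */
	/* ... safely touch the peripheral while transfers are frozen ... */
	dmaengine_resume(chan);		/* clears the suspend bit via AT_DMA_RES */
}
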
1558 static int atc_terminate_all(struct dma_chan *chan)
1559 {
1560         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1561         struct at_dma           *atdma = to_at_dma(chan->device);
1562         int                     chan_id = atchan->chan_common.chan_id;
1563         struct at_desc          *desc, *_desc;
1564         unsigned long           flags;
1565 
1566         LIST_HEAD(list);
1567 
1568         dev_vdbg(chan2dev(chan), "%s\n", __func__);
1569 
1570         /*
1571          * This is only called when something went wrong elsewhere, so
1572          * we don't really care about the data. Just disable the
1573          * channel. We still have to poll the channel enable bit due
1574          * to AHB/HSB limitations.
1575          */
1576         spin_lock_irqsave(&atchan->lock, flags);
1577 
1578         /* disabling channel: must also remove suspend state */
1579         dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
1580 
1581         /* confirm that this channel is disabled */
1582         while (dma_readl(atdma, CHSR) & atchan->mask)
1583                 cpu_relax();
1584 
1585         /* active_list entries will end up before queued entries */
1586         list_splice_init(&atchan->queue, &list);
1587         list_splice_init(&atchan->active_list, &list);
1588 
1589         /* Flush all pending and queued descriptors */
1590         list_for_each_entry_safe(desc, _desc, &list, desc_node)
1591                 atc_chain_complete(atchan, desc);
1592 
1593         clear_bit(ATC_IS_PAUSED, &atchan->status);
1594         /* if channel dedicated to cyclic operations, free it */
1595         clear_bit(ATC_IS_CYCLIC, &atchan->status);
1596 
1597         spin_unlock_irqrestore(&atchan->lock, flags);
1598 
1599         return 0;
1600 }
1601 
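/*
 * Editorial sketch: the usual way into atc_terminate_all() is the dmaengine
 * wrapper; as the comment above notes, in-flight data is simply dropped.
 */
static void example_abort_all(struct dma_chan *chan)
{
	/* disables the channel and completes every queued descriptor */
	dmaengine_terminate_all(chan);
}
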
1602 /**
1603  * atc_tx_status - poll for transaction completion
1604  * @chan: DMA channel
1605  * @cookie: transaction identifier to check status of
1606  * @txstate: if not %NULL updated with transaction state
1607  *
1608  * If @txstate is passed in, upon return it reflects the driver's
1609  * internal state and can be used with dma_async_is_complete() to check
1610  * the status of multiple cookies without re-checking hardware state.
1611  */
1612 static enum dma_status
1613 atc_tx_status(struct dma_chan *chan,
1614                 dma_cookie_t cookie,
1615                 struct dma_tx_state *txstate)
1616 {
1617         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1618         unsigned long           flags;
1619         enum dma_status         ret;
1620         int bytes = 0;
1621 
1622         ret = dma_cookie_status(chan, cookie, txstate);
1623         if (ret == DMA_COMPLETE)
1624                 return ret;
1625         /*
1626          * There's no point calculating the residue if there's
1627          * no txstate to store the value.
1628          */
1629         if (!txstate)
1630                 return DMA_ERROR;
1631 
1632         spin_lock_irqsave(&atchan->lock, flags);
1633 
1634         /* Get number of bytes left in the active transactions */
1635         bytes = atc_get_bytes_left(chan, cookie);
1636 
1637         spin_unlock_irqrestore(&atchan->lock, flags);
1638 
1639         if (unlikely(bytes < 0)) {
1640                 dev_vdbg(chan2dev(chan), "get residual bytes error\n");
1641                 return DMA_ERROR;
1642         } else {
1643                 dma_set_residue(txstate, bytes);
1644         }
1645 
1646         dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
1647                  ret, cookie, bytes);
1648 
1649         return ret;
1650 }
1651 
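/*
 * Editorial sketch: polling a cookie and reading back the residue that
 * atc_tx_status() stores through dma_set_residue().
 */
static bool example_poll_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_ERROR)
		return false;	/* e.g. the residue could not be computed */

	/* state.residue: bytes still to transfer, at burst granularity */
	return status == DMA_COMPLETE;
}
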
1652 /**
1653  * atc_issue_pending - try to finish work
1654  * @chan: target DMA channel
1655  */
1656 static void atc_issue_pending(struct dma_chan *chan)
1657 {
1658         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1659         unsigned long           flags;
1660 
1661         dev_vdbg(chan2dev(chan), "issue_pending\n");
1662 
1663         /* Not needed for cyclic transfers */
1664         if (atc_chan_is_cyclic(atchan))
1665                 return;
1666 
1667         spin_lock_irqsave(&atchan->lock, flags);
1668         atc_advance_work(atchan);
1669         spin_unlock_irqrestore(&atchan->lock, flags);
1670 }
1671 
1672 /**
1673  * atc_alloc_chan_resources - allocate resources for DMA channel
1674  * @chan: allocate descriptor resources for this channel
1675  *
1676  * Return: the number of allocated descriptors, or -EIO if the channel
1677  * is not idle
1678  */
1679 static int atc_alloc_chan_resources(struct dma_chan *chan)
1680 {
1681         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1682         struct at_dma           *atdma = to_at_dma(chan->device);
1683         struct at_desc          *desc;
1684         struct at_dma_slave     *atslave;
1685         unsigned long           flags;
1686         int                     i;
1687         u32                     cfg;
1688         LIST_HEAD(tmp_list);
1689 
1690         dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1691 
1692         /* ASSERT:  channel is idle */
1693         if (atc_chan_is_enabled(atchan)) {
1694                 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1695                 return -EIO;
1696         }
1697 
1698         cfg = ATC_DEFAULT_CFG;
1699 
1700         atslave = chan->private;
1701         if (atslave) {
1702                 /*
1703                  * We need controller-specific data to set up slave
1704                  * transfers.
1705                  */
1706                 BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
1707 
1708                 /* if cfg configuration specified take it instead of default */
1709                 if (atslave->cfg)
1710                         cfg = atslave->cfg;
1711         }
1712 
1713         /* have we already been set up? If so, reconfigure the channel
1714          * but do not reallocate descriptors */
1715         if (!list_empty(&atchan->free_list))
1716                 return atchan->descs_allocated;
1717 
1718         /* Allocate initial pool of descriptors */
1719         for (i = 0; i < init_nr_desc_per_channel; i++) {
1720                 desc = atc_alloc_descriptor(chan, GFP_KERNEL);
1721                 if (!desc) {
1722                         dev_err(atdma->dma_common.dev,
1723                                 "Only %d initial descriptors\n", i);
1724                         break;
1725                 }
1726                 list_add_tail(&desc->desc_node, &tmp_list);
1727         }
1728 
1729         spin_lock_irqsave(&atchan->lock, flags);
1730         atchan->descs_allocated = i;
1731         list_splice(&tmp_list, &atchan->free_list);
1732         dma_cookie_init(chan);
1733         spin_unlock_irqrestore(&atchan->lock, flags);
1734 
1735         /* channel parameters */
1736         channel_writel(atchan, CFG, cfg);
1737 
1738         dev_dbg(chan2dev(chan),
1739                 "alloc_chan_resources: allocated %d descriptors\n",
1740                 atchan->descs_allocated);
1741 
1742         return atchan->descs_allocated;
1743 }
1744 
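/*
 * Editorial sketch: atc_alloc_chan_resources() runs when a client grabs a
 * channel, and atc_free_chan_resources() below runs when it is released
 * with dma_release_channel().
 */
static struct dma_chan *example_get_memcpy_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	/* no filter needed for plain memcpy; any capable channel will do */
	return dma_request_channel(mask, NULL, NULL);
}
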
1745 /**
1746  * atc_free_chan_resources - free all channel resources
1747  * @chan: DMA channel
1748  */
1749 static void atc_free_chan_resources(struct dma_chan *chan)
1750 {
1751         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1752         struct at_dma           *atdma = to_at_dma(chan->device);
1753         struct at_desc          *desc, *_desc;
1754         LIST_HEAD(list);
1755 
1756         dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
1757                 atchan->descs_allocated);
1758 
1759         /* ASSERT:  channel is idle */
1760         BUG_ON(!list_empty(&atchan->active_list));
1761         BUG_ON(!list_empty(&atchan->queue));
1762         BUG_ON(atc_chan_is_enabled(atchan));
1763 
1764         list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
1765                 dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
1766                 list_del(&desc->desc_node);
1767                 /* free link descriptor */
1768                 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
1769         }
1770         list_splice_init(&atchan->free_list, &list);
1771         atchan->descs_allocated = 0;
1772         atchan->status = 0;
1773 
1774         dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1775 }
1776 
1777 #ifdef CONFIG_OF
1778 static bool at_dma_filter(struct dma_chan *chan, void *slave)
1779 {
1780         struct at_dma_slave *atslave = slave;
1781 
1782         if (atslave->dma_dev == chan->device->dev) {
1783                 chan->private = atslave;
1784                 return true;
1785         } else {
1786                 return false;
1787         }
1788 }
1789 
1790 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1791                                      struct of_dma *of_dma)
1792 {
1793         struct dma_chan *chan;
1794         struct at_dma_chan *atchan;
1795         struct at_dma_slave *atslave;
1796         dma_cap_mask_t mask;
1797         unsigned int per_id;
1798         struct platform_device *dmac_pdev;
1799 
1800         if (dma_spec->args_count != 2)
1801                 return NULL;
1802 
1803         dmac_pdev = of_find_device_by_node(dma_spec->np);
1804 
1805         dma_cap_zero(mask);
1806         dma_cap_set(DMA_SLAVE, mask);
1807 
1808         atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
1809         if (!atslave)
1810                 return NULL;
1811 
1812         atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
1813         /*
1814          * We can fill both SRC_PER and DST_PER, one of these fields will be
1815          * ignored depending on DMA transfer direction.
1816          */
1817         per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
1818         atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
1819                      | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
1820         /*
1821          * We have to translate the value we get from the device tree since
1822          * the half FIFO configuration value had to be 0 to keep backward
1823          * compatibility.
1824          */
1825         switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
1826         case AT91_DMA_CFG_FIFOCFG_ALAP:
1827                 atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
1828                 break;
1829         case AT91_DMA_CFG_FIFOCFG_ASAP:
1830                 atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
1831                 break;
1832         case AT91_DMA_CFG_FIFOCFG_HALF:
1833         default:
1834                 atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
1835         }
1836         atslave->dma_dev = &dmac_pdev->dev;
1837 
1838         chan = dma_request_channel(mask, at_dma_filter, atslave);
1839         if (!chan)
1840                 return NULL;
1841 
1842         atchan = to_at_dma_chan(chan);
1843         atchan->per_if = dma_spec->args[0] & 0xff;
1844         atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;
1845 
1846         return chan;
1847 }
1848 #else
1849 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1850                                      struct of_dma *of_dma)
1851 {
1852         return NULL;
1853 }
1854 #endif
1855 
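/*
 * Editorial sketch (values hypothetical): the two extra DT cells decoded by
 * at_dma_xlate() above.  args[0] packs the AHB interfaces as
 * (mem_if << 16) | per_if, and args[1] carries the peripheral ID plus the
 * FIFO configuration, so a consumer node could look like:
 *
 *	dmas = <&dma0
 *		(0 << 16 | 1)
 *		(AT91_DMA_CFG_PER_ID(3) | AT91_DMA_CFG_FIFOCFG_ALAP)>;
 *	dma-names = "tx";
 */
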
1856 /*--  Module Management  -----------------------------------------------*/
1857 
1858 /* cap_mask is a multi-u32 bitfield; it cannot be initialized statically, so it is filled at runtime in at_dma_probe() */
1859 static struct at_dma_platform_data at91sam9rl_config = {
1860         .nr_channels = 2,
1861 };
1862 static struct at_dma_platform_data at91sam9g45_config = {
1863         .nr_channels = 8,
1864 };
1865 
1866 #if defined(CONFIG_OF)
1867 static const struct of_device_id atmel_dma_dt_ids[] = {
1868         {
1869                 .compatible = "atmel,at91sam9rl-dma",
1870                 .data = &at91sam9rl_config,
1871         }, {
1872                 .compatible = "atmel,at91sam9g45-dma",
1873                 .data = &at91sam9g45_config,
1874         }, {
1875                 /* sentinel */
1876         }
1877 };
1878 
1879 MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
1880 #endif
1881 
1882 static const struct platform_device_id atdma_devtypes[] = {
1883         {
1884                 .name = "at91sam9rl_dma",
1885                 .driver_data = (unsigned long) &at91sam9rl_config,
1886         }, {
1887                 .name = "at91sam9g45_dma",
1888                 .driver_data = (unsigned long) &at91sam9g45_config,
1889         }, {
1890                 /* sentinel */
1891         }
1892 };
1893 
1894 static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
1895                                                 struct platform_device *pdev)
1896 {
1897         if (pdev->dev.of_node) {
1898                 const struct of_device_id *match;
1899                 match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
1900                 if (match == NULL)
1901                         return NULL;
1902                 return match->data;
1903         }
1904         return (struct at_dma_platform_data *)
1905                         platform_get_device_id(pdev)->driver_data;
1906 }
1907 
1908 /**
1909  * at_dma_off - disable DMA controller
1910  * @atdma: the Atmel HDMAC device
1911  */
1912 static void at_dma_off(struct at_dma *atdma)
1913 {
1914         dma_writel(atdma, EN, 0);
1915 
1916         /* disable all interrupts */
1917         dma_writel(atdma, EBCIDR, -1L);
1918 
1919         /* confirm that all channels are disabled */
1920         while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
1921                 cpu_relax();
1922 }
1923 
1924 static int __init at_dma_probe(struct platform_device *pdev)
1925 {
1926         struct resource         *io;
1927         struct at_dma           *atdma;
1928         size_t                  size;
1929         int                     irq;
1930         int                     err;
1931         int                     i;
1932         const struct at_dma_platform_data *plat_dat;
1933 
1934         /* setup platform data for each SoC */
1935         dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1936         dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
1937         dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
1938         dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
1939         dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
1940         dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
1941         dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
1942         dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1943         dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
1944 
1945         /* get DMA parameters from controller type */
1946         plat_dat = at_dma_get_driver_data(pdev);
1947         if (!plat_dat)
1948                 return -ENODEV;
1949 
1950         io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1951         if (!io)
1952                 return -EINVAL;
1953 
1954         irq = platform_get_irq(pdev, 0);
1955         if (irq < 0)
1956                 return irq;
1957 
1958         size = sizeof(struct at_dma);
1959         size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
1960         atdma = kzalloc(size, GFP_KERNEL);
1961         if (!atdma)
1962                 return -ENOMEM;
1963 
1964         /* discover transaction capabilities */
1965         atdma->dma_common.cap_mask = plat_dat->cap_mask;
1966         atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
1967 
1968         size = resource_size(io);
1969         if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1970                 err = -EBUSY;
1971                 goto err_kfree;
1972         }
1973 
1974         atdma->regs = ioremap(io->start, size);
1975         if (!atdma->regs) {
1976                 err = -ENOMEM;
1977                 goto err_release_r;
1978         }
1979 
1980         atdma->clk = clk_get(&pdev->dev, "dma_clk");
1981         if (IS_ERR(atdma->clk)) {
1982                 err = PTR_ERR(atdma->clk);
1983                 goto err_clk;
1984         }
1985         err = clk_prepare_enable(atdma->clk);
1986         if (err)
1987                 goto err_clk_prepare;
1988 
1989         /* force dma off, just in case */
1990         at_dma_off(atdma);
1991 
1992         err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1993         if (err)
1994                 goto err_irq;
1995 
1996         platform_set_drvdata(pdev, atdma);
1997 
1998         /* create a pool of consistent memory blocks for hardware descriptors */
1999         atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
2000                         &pdev->dev, sizeof(struct at_desc),
2001                         4 /* word alignment */, 0);
2002         if (!atdma->dma_desc_pool) {
2003                 dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
2004                 err = -ENOMEM;
2005                 goto err_desc_pool_create;
2006         }
2007 
2008         /* create a pool of consistent memory blocks for memset blocks */
2009         atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
2010                                              &pdev->dev, sizeof(int), 4, 0);
2011         if (!atdma->memset_pool) {
2012                 dev_err(&pdev->dev, "No memory for memset dma pool\n");
2013                 err = -ENOMEM;
2014                 goto err_memset_pool_create;
2015         }
2016 
2017         /* clear any pending interrupt */
2018         while (dma_readl(atdma, EBCISR))
2019                 cpu_relax();
2020 
2021         /* initialize channels related values */
2022         INIT_LIST_HEAD(&atdma->dma_common.channels);
2023         for (i = 0; i < plat_dat->nr_channels; i++) {
2024                 struct at_dma_chan      *atchan = &atdma->chan[i];
2025 
2026                 atchan->mem_if = AT_DMA_MEM_IF;
2027                 atchan->per_if = AT_DMA_PER_IF;
2028                 atchan->chan_common.device = &atdma->dma_common;
2029                 dma_cookie_init(&atchan->chan_common);
2030                 list_add_tail(&atchan->chan_common.device_node,
2031                                 &atdma->dma_common.channels);
2032 
2033                 atchan->ch_regs = atdma->regs + ch_regs(i);
2034                 spin_lock_init(&atchan->lock);
2035                 atchan->mask = 1 << i;
2036 
2037                 INIT_LIST_HEAD(&atchan->active_list);
2038                 INIT_LIST_HEAD(&atchan->queue);
2039                 INIT_LIST_HEAD(&atchan->free_list);
2040 
2041                 tasklet_init(&atchan->tasklet, atc_tasklet,
2042                                 (unsigned long)atchan);
2043                 atc_enable_chan_irq(atdma, i);
2044         }
2045 
2046         /* set base routines */
2047         atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
2048         atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
2049         atdma->dma_common.device_tx_status = atc_tx_status;
2050         atdma->dma_common.device_issue_pending = atc_issue_pending;
2051         atdma->dma_common.dev = &pdev->dev;
2052 
2053         /* set prep routines based on capability */
2054         if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask))
2055                 atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved;
2056 
2057         if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
2058                 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
2059 
2060         if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
2061                 atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
2062                 atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
2063                 atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
2064         }
2065 
2066         if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
2067                 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
2068                 /* controller can do slave DMA: can trigger cyclic transfers */
2069                 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
2070                 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
2071                 atdma->dma_common.device_config = atc_config;
2072                 atdma->dma_common.device_pause = atc_pause;
2073                 atdma->dma_common.device_resume = atc_resume;
2074                 atdma->dma_common.device_terminate_all = atc_terminate_all;
2075                 atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
2076                 atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
2077                 atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2078                 atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2079         }
2080 
2081         if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
2082                 atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;
2083 
2084         dma_writel(atdma, EN, AT_DMA_ENABLE);
2085 
2086         dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
2087           dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
2088           dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
2089           dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
2090           dma_has_cap(DMA_SG, atdma->dma_common.cap_mask)  ? "sg-cpy " : "",
2091           plat_dat->nr_channels);
2092 
2093         dma_async_device_register(&atdma->dma_common);
2094 
2095         /*
2096          * Do not return an error if the dmac node is not present, in order
2097          * not to break the existing way of requesting a channel with
2098          * dma_request_channel().
2099          */
2100         if (pdev->dev.of_node) {
2101                 err = of_dma_controller_register(pdev->dev.of_node,
2102                                                  at_dma_xlate, atdma);
2103                 if (err) {
2104                         dev_err(&pdev->dev, "could not register of_dma_controller\n");
2105                         goto err_of_dma_controller_register;
2106                 }
2107         }
2108 
2109         return 0;
2110 
2111 err_of_dma_controller_register:
2112         dma_async_device_unregister(&atdma->dma_common);
2113         dma_pool_destroy(atdma->memset_pool);
2114 err_memset_pool_create:
2115         dma_pool_destroy(atdma->dma_desc_pool);
2116 err_desc_pool_create:
2117         free_irq(platform_get_irq(pdev, 0), atdma);
2118 err_irq:
2119         clk_disable_unprepare(atdma->clk);
2120 err_clk_prepare:
2121         clk_put(atdma->clk);
2122 err_clk:
2123         iounmap(atdma->regs);
2124         atdma->regs = NULL;
2125 err_release_r:
2126         release_mem_region(io->start, size);
2127 err_kfree:
2128         kfree(atdma);
2129         return err;
2130 }
2131 
2132 static int at_dma_remove(struct platform_device *pdev)
2133 {
2134         struct at_dma           *atdma = platform_get_drvdata(pdev);
2135         struct dma_chan         *chan, *_chan;
2136         struct resource         *io;
2137 
2138         at_dma_off(atdma);
2139         dma_async_device_unregister(&atdma->dma_common);
2140 
2141         dma_pool_destroy(atdma->memset_pool);
2142         dma_pool_destroy(atdma->dma_desc_pool);
2143         free_irq(platform_get_irq(pdev, 0), atdma);
2144 
2145         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2146                         device_node) {
2147                 struct at_dma_chan      *atchan = to_at_dma_chan(chan);
2148 
2149                 /* Disable interrupts */
2150                 atc_disable_chan_irq(atdma, chan->chan_id);
2151 
2152                 tasklet_kill(&atchan->tasklet);
2153                 list_del(&chan->device_node);
2154         }
2155 
2156         clk_disable_unprepare(atdma->clk);
2157         clk_put(atdma->clk);
2158 
2159         iounmap(atdma->regs);
2160         atdma->regs = NULL;
2161 
2162         io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2163         release_mem_region(io->start, resource_size(io));
2164 
2165         kfree(atdma);
2166 
2167         return 0;
2168 }
2169 
2170 static void at_dma_shutdown(struct platform_device *pdev)
2171 {
2172         struct at_dma   *atdma = platform_get_drvdata(pdev);
2173 
2174         at_dma_off(atdma);
2175         clk_disable_unprepare(atdma->clk);
2176 }
2177 
2178 static int at_dma_prepare(struct device *dev)
2179 {
2180         struct platform_device *pdev = to_platform_device(dev);
2181         struct at_dma *atdma = platform_get_drvdata(pdev);
2182         struct dma_chan *chan, *_chan;
2183 
2184         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2185                         device_node) {
2186                 struct at_dma_chan *atchan = to_at_dma_chan(chan);
2187                 /* wait for transaction completion (except in cyclic case) */
2188                 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
2189                         return -EAGAIN;
2190         }
2191         return 0;
2192 }
2193 
2194 static void atc_suspend_cyclic(struct at_dma_chan *atchan)
2195 {
2196         struct dma_chan *chan = &atchan->chan_common;
2197 
2198         /* The channel should already be paused by its user;
2199          * do it anyway if that has not been done */
2200         if (!atc_chan_is_paused(atchan)) {
2201                 dev_warn(chan2dev(chan),
2202                 "cyclic channel not paused, should be done by channel user\n");
2203                 atc_pause(chan);
2204         }
2205 
2206         /* now preserve additional data for cyclic operations */
2207         /* next descriptor address in the cyclic list */
2208         atchan->save_dscr = channel_readl(atchan, DSCR);
2209 
2210         vdbg_dump_regs(atchan);
2211 }
2212 
2213 static int at_dma_suspend_noirq(struct device *dev)
2214 {
2215         struct platform_device *pdev = to_platform_device(dev);
2216         struct at_dma *atdma = platform_get_drvdata(pdev);
2217         struct dma_chan *chan, *_chan;
2218 
2219         /* preserve data */
2220         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2221                         device_node) {
2222                 struct at_dma_chan *atchan = to_at_dma_chan(chan);
2223 
2224                 if (atc_chan_is_cyclic(atchan))
2225                         atc_suspend_cyclic(atchan);
2226                 atchan->save_cfg = channel_readl(atchan, CFG);
2227         }
2228         atdma->save_imr = dma_readl(atdma, EBCIMR);
2229 
2230         /* disable DMA controller */
2231         at_dma_off(atdma);
2232         clk_disable_unprepare(atdma->clk);
2233         return 0;
2234 }
2235 
2236 static void atc_resume_cyclic(struct at_dma_chan *atchan)
2237 {
2238         struct at_dma   *atdma = to_at_dma(atchan->chan_common.device);
2239 
2240         /* restore channel status for the cyclic descriptor list:
2241          * resume from the next descriptor saved at suspend time */
2242         channel_writel(atchan, SADDR, 0);
2243         channel_writel(atchan, DADDR, 0);
2244         channel_writel(atchan, CTRLA, 0);
2245         channel_writel(atchan, CTRLB, 0);
2246         channel_writel(atchan, DSCR, atchan->save_dscr);
2247         dma_writel(atdma, CHER, atchan->mask);
2248 
2249         /* the channel pause status should be cleared by the channel user;
2250          * we cannot take the initiative to do it here */
2251 
2252         vdbg_dump_regs(atchan);
2253 }
2254 
2255 static int at_dma_resume_noirq(struct device *dev)
2256 {
2257         struct platform_device *pdev = to_platform_device(dev);
2258         struct at_dma *atdma = platform_get_drvdata(pdev);
2259         struct dma_chan *chan, *_chan;
2260 
2261         /* bring back DMA controller */
2262         clk_prepare_enable(atdma->clk);
2263         dma_writel(atdma, EN, AT_DMA_ENABLE);
2264 
2265         /* clear any pending interrupt */
2266         while (dma_readl(atdma, EBCISR))
2267                 cpu_relax();
2268 
2269         /* restore saved data */
2270         dma_writel(atdma, EBCIER, atdma->save_imr);
2271         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2272                         device_node) {
2273                 struct at_dma_chan *atchan = to_at_dma_chan(chan);
2274 
2275                 channel_writel(atchan, CFG, atchan->save_cfg);
2276                 if (atc_chan_is_cyclic(atchan))
2277                         atc_resume_cyclic(atchan);
2278         }
2279         return 0;
2280 }
2281 
2282 static const struct dev_pm_ops at_dma_dev_pm_ops = {
2283         .prepare = at_dma_prepare,
2284         .suspend_noirq = at_dma_suspend_noirq,
2285         .resume_noirq = at_dma_resume_noirq,
2286 };
2287 
2288 static struct platform_driver at_dma_driver = {
2289         .remove         = at_dma_remove,
2290         .shutdown       = at_dma_shutdown,
2291         .id_table       = atdma_devtypes,
2292         .driver = {
2293                 .name   = "at_hdmac",
2294                 .pm     = &at_dma_dev_pm_ops,
2295                 .of_match_table = of_match_ptr(atmel_dma_dt_ids),
2296         },
2297 };
2298 
2299 static int __init at_dma_init(void)
2300 {
2301         return platform_driver_probe(&at_dma_driver, at_dma_probe);
2302 }
2303 subsys_initcall(at_dma_init);
2304 
2305 static void __exit at_dma_exit(void)
2306 {
2307         platform_driver_unregister(&at_dma_driver);
2308 }
2309 module_exit(at_dma_exit);
2310 
2311 MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
2312 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
2313 MODULE_LICENSE("GPL");
2314 MODULE_ALIAS("platform:at_hdmac");
2315 
