
Linux/drivers/dma/k3dma.c

/*
 * Copyright (c) 2013 - 2015 Linaro Ltd.
 * Copyright (c) 2013 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
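
/*
 * Driver overview: virtual DMA channels (struct k3_dma_chan, one per
 * request line) are built on the dmaengine virt-dma helpers and bound
 * on demand, by a tasklet, to the controller's physical channels
 * (struct k3_dma_phy).  Transfers are described as chains of 32-byte
 * hardware link-list items (struct k3_desc_hw) allocated from a
 * dma_pool.
 */
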
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME             "k3-dma"
#define DMA_MAX_SIZE            0x1ffc
#define DMA_CYCLIC_MAX_PERIOD   0x1000
#define LLI_BLOCK_SIZE          (4 * PAGE_SIZE)

#define INT_STAT                0x00
#define INT_TC1                 0x04
#define INT_TC2                 0x08
#define INT_ERR1                0x0c
#define INT_ERR2                0x10
#define INT_TC1_MASK            0x18
#define INT_TC2_MASK            0x1c
#define INT_ERR1_MASK           0x20
#define INT_ERR2_MASK           0x24
#define INT_TC1_RAW             0x600
#define INT_TC2_RAW             0x608
#define INT_ERR1_RAW            0x610
#define INT_ERR2_RAW            0x618
#define CH_PRI                  0x688
#define CH_STAT                 0x690
#define CX_CUR_CNT              0x704
#define CX_LLI                  0x800
#define CX_CNT1                 0x80c
#define CX_CNT0                 0x810
#define CX_SRC                  0x814
#define CX_DST                  0x818
#define CX_CFG                  0x81c
#define AXI_CFG                 0x820
#define AXI_CFG_DEFAULT         0x201201

#define CX_LLI_CHAIN_EN         0x2
#define CX_CFG_EN               0x1
#define CX_CFG_NODEIRQ          BIT(1)
#define CX_CFG_MEM2PER          (0x1 << 2)
#define CX_CFG_PER2MEM          (0x2 << 2)
#define CX_CFG_SRCINCR          (0x1 << 31)
#define CX_CFG_DSTINCR          (0x1 << 30)

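/*
 * One hardware link-list item (LLI).  The field layout mirrors the
 * per-channel registers above (CX_LLI, CX_CNT0, CX_SRC, CX_DST,
 * CX_CFG), from which the controller apparently reloads itself when a
 * node completes.  The 32-byte alignment keeps the low bits of 'lli'
 * clear so that CX_LLI_CHAIN_EN can be or'ed into the address of the
 * next node.
 */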
struct k3_desc_hw {
        u32 lli;
        u32 reserved[3];
        u32 count;
        u32 saddr;
        u32 daddr;
        u32 config;
} __aligned(32);

struct k3_dma_desc_sw {
        struct virt_dma_desc    vd;
        dma_addr_t              desc_hw_lli;
        size_t                  desc_num;
        size_t                  size;
        struct k3_desc_hw       *desc_hw;
};

struct k3_dma_phy;

struct k3_dma_chan {
        u32                     ccfg;
        struct virt_dma_chan    vc;
        struct k3_dma_phy       *phy;
        struct list_head        node;
        enum dma_transfer_direction dir;
        dma_addr_t              dev_addr;
        enum dma_status         status;
        bool                    cyclic;
};

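/*
 * Per physical channel state.  Each channel owns a 0x40-byte register
 * window at d->base + idx * 0x40 (mapped in probe); ds_run is the
 * descriptor currently in flight, ds_done the one waiting for the
 * tasklet to clean up and reschedule.
 */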
struct k3_dma_phy {
        u32                     idx;
        void __iomem            *base;
        struct k3_dma_chan      *vchan;
        struct k3_dma_desc_sw   *ds_run;
        struct k3_dma_desc_sw   *ds_done;
};

struct k3_dma_dev {
        struct dma_device       slave;
        void __iomem            *base;
        struct tasklet_struct   task;
        spinlock_t              lock;
        struct list_head        chan_pending;
        struct k3_dma_phy       *phy;
        struct k3_dma_chan      *chans;
        struct clk              *clk;
        struct dma_pool         *pool;
        u32                     dma_channels;
        u32                     dma_requests;
        unsigned int            irq;
};

#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)

static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
        return container_of(chan, struct k3_dma_chan, vc.chan);
}

static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
{
        u32 val = 0;

        if (on) {
                val = readl_relaxed(phy->base + CX_CFG);
                val |= CX_CFG_EN;
                writel_relaxed(val, phy->base + CX_CFG);
        } else {
                val = readl_relaxed(phy->base + CX_CFG);
                val &= ~CX_CFG_EN;
                writel_relaxed(val, phy->base + CX_CFG);
        }
}

static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
{
        u32 val = 0;

        k3_dma_pause_dma(phy, false);

        val = 0x1 << phy->idx;
        writel_relaxed(val, d->base + INT_TC1_RAW);
        writel_relaxed(val, d->base + INT_TC2_RAW);
        writel_relaxed(val, d->base + INT_ERR1_RAW);
        writel_relaxed(val, d->base + INT_ERR2_RAW);
}

static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
{
        writel_relaxed(hw->lli, phy->base + CX_LLI);
        writel_relaxed(hw->count, phy->base + CX_CNT0);
        writel_relaxed(hw->saddr, phy->base + CX_SRC);
        writel_relaxed(hw->daddr, phy->base + CX_DST);
        writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
        writel_relaxed(hw->config, phy->base + CX_CFG);
}

static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
{
        u32 cnt = 0;

        cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
        cnt &= 0xffff;
        return cnt;
}

static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
{
        return readl_relaxed(phy->base + CX_LLI);
}

static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
{
        return readl_relaxed(d->base + CH_STAT);
}

static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
{
        if (on) {
                /* set same priority */
                writel_relaxed(0x0, d->base + CH_PRI);

                /* unmask irq */
                writel_relaxed(0xffff, d->base + INT_TC1_MASK);
                writel_relaxed(0xffff, d->base + INT_TC2_MASK);
                writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
                writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
        } else {
                /* mask irq */
                writel_relaxed(0x0, d->base + INT_TC1_MASK);
                writel_relaxed(0x0, d->base + INT_TC2_MASK);
                writel_relaxed(0x0, d->base + INT_ERR1_MASK);
                writel_relaxed(0x0, d->base + INT_ERR2_MASK);
        }
}

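/*
 * Interrupt handler.  TC1 fires when a whole link-list chain has
 * finished and completes the cookie; TC2 fires on nodes flagged with
 * CX_CFG_NODEIRQ and drives the cyclic period callback.  Serviced
 * channels are acked in the raw-status registers and the tasklet is
 * scheduled to redistribute the freed physical channels.
 */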
static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
{
        struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
        struct k3_dma_phy *p;
        struct k3_dma_chan *c;
        u32 stat = readl_relaxed(d->base + INT_STAT);
        u32 tc1  = readl_relaxed(d->base + INT_TC1);
        u32 tc2  = readl_relaxed(d->base + INT_TC2);
        u32 err1 = readl_relaxed(d->base + INT_ERR1);
        u32 err2 = readl_relaxed(d->base + INT_ERR2);
        u32 i, irq_chan = 0;

        while (stat) {
                i = __ffs(stat);
                stat &= ~BIT(i);
                if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {
                        unsigned long flags;

                        p = &d->phy[i];
                        c = p->vchan;
                        if (c && (tc1 & BIT(i))) {
                                spin_lock_irqsave(&c->vc.lock, flags);
                                vchan_cookie_complete(&p->ds_run->vd);
                                WARN_ON_ONCE(p->ds_done);
                                p->ds_done = p->ds_run;
                                p->ds_run = NULL;
                                spin_unlock_irqrestore(&c->vc.lock, flags);
                        }
                        if (c && (tc2 & BIT(i))) {
                                spin_lock_irqsave(&c->vc.lock, flags);
                                if (p->ds_run != NULL)
                                        vchan_cyclic_callback(&p->ds_run->vd);
                                spin_unlock_irqrestore(&c->vc.lock, flags);
                        }
                        irq_chan |= BIT(i);
                }
                if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
                        dev_warn(d->slave.dev, "DMA ERR\n");
        }

        writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
        writel_relaxed(irq_chan, d->base + INT_TC2_RAW);
        writel_relaxed(err1, d->base + INT_ERR1_RAW);
        writel_relaxed(err2, d->base + INT_ERR2_RAW);

        if (irq_chan)
                tasklet_schedule(&d->task);

        if (irq_chan || err1 || err2)
                return IRQ_HANDLED;

        return IRQ_NONE;
}

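/*
 * Push the next issued descriptor into the physical channel bound to
 * this virtual channel.  Must be called with c->vc.lock held; returns
 * -EAGAIN if there is no pchan, the pchan is still busy, or nothing
 * has been issued.
 */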
static int k3_dma_start_txd(struct k3_dma_chan *c)
{
        struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

        if (!c->phy)
                return -EAGAIN;

        if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
                return -EAGAIN;

        if (vd) {
                struct k3_dma_desc_sw *ds =
                        container_of(vd, struct k3_dma_desc_sw, vd);
                /*
                 * fetch and remove request from vc->desc_issued
                 * so vc->desc_issued only contains desc pending
                 */
                list_del(&ds->vd.node);

                WARN_ON_ONCE(c->phy->ds_run);
                WARN_ON_ONCE(c->phy->ds_done);
                c->phy->ds_run = ds;
                /* start dma */
                k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
                return 0;
        }
        return -EAGAIN;
}

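/*
 * Deferred scheduler.  Pass one restarts channels whose descriptor
 * just completed, releasing the pchan when the vchan has nothing left
 * to run; pass two hands every free pchan to the oldest waiter on
 * d->chan_pending and kicks the freshly bound pairs.
 */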
static void k3_dma_tasklet(unsigned long arg)
{
        struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
        struct k3_dma_phy *p;
        struct k3_dma_chan *c, *cn;
        unsigned pch, pch_alloc = 0;

        /* check new dma request of running channel in vc->desc_issued */
        list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
                spin_lock_irq(&c->vc.lock);
                p = c->phy;
                if (p && p->ds_done) {
                        if (k3_dma_start_txd(c)) {
                                /* No current txd associated with this channel */
                                dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
                                /* Mark this channel free */
                                c->phy = NULL;
                                p->vchan = NULL;
                        }
                }
                spin_unlock_irq(&c->vc.lock);
        }

        /* check new channel request in d->chan_pending */
        spin_lock_irq(&d->lock);
        for (pch = 0; pch < d->dma_channels; pch++) {
                p = &d->phy[pch];

                if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
                        c = list_first_entry(&d->chan_pending,
                                struct k3_dma_chan, node);
                        /* remove from d->chan_pending */
                        list_del_init(&c->node);
                        pch_alloc |= 1 << pch;
                        /* Mark this channel allocated */
                        p->vchan = c;
                        c->phy = p;
                        dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
                }
        }
        spin_unlock_irq(&d->lock);

        for (pch = 0; pch < d->dma_channels; pch++) {
                if (pch_alloc & (1 << pch)) {
                        p = &d->phy[pch];
                        c = p->vchan;
                        if (c) {
                                spin_lock_irq(&c->vc.lock);
                                k3_dma_start_txd(c);
                                spin_unlock_irq(&c->vc.lock);
                        }
                }
        }
}

static void k3_dma_free_chan_resources(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        unsigned long flags;

        spin_lock_irqsave(&d->lock, flags);
        list_del_init(&c->node);
        spin_unlock_irqrestore(&d->lock, flags);

        vchan_free_chan_resources(&c->vc);
        c->ccfg = 0;
}

static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *state)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p;
        struct virt_dma_desc *vd;
        unsigned long flags;
        enum dma_status ret;
        size_t bytes = 0;

        ret = dma_cookie_status(&c->vc.chan, cookie, state);
        if (ret == DMA_COMPLETE)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        p = c->phy;
        ret = c->status;

        /*
         * If the cookie is on our issue queue, then the residue is
         * its total size.
         */
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd && !c->cyclic) {
                bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
        } else if ((!p) || (!p->ds_run)) {
                bytes = 0;
        } else {
                struct k3_dma_desc_sw *ds = p->ds_run;
                u32 clli = 0, index = 0;

                bytes = k3_dma_get_curr_cnt(d, p);
                clli = k3_dma_get_curr_lli(p);
                index = ((clli - ds->desc_hw_lli) /
                                sizeof(struct k3_desc_hw)) + 1;
                for (; index < ds->desc_num; index++) {
                        bytes += ds->desc_hw[index].count;
                        /* end of lli */
                        if (!ds->desc_hw[index].lli)
                                break;
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
        dma_set_residue(state, bytes);
        return ret;
}

static void k3_dma_issue_pending(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        /* add request to vc->desc_issued */
        if (vchan_issue_pending(&c->vc)) {
                spin_lock(&d->lock);
                if (!c->phy) {
                        if (list_empty(&c->node)) {
                                /* if new channel, add chan_pending */
                                list_add_tail(&c->node, &d->chan_pending);
                                /* check in tasklet */
                                tasklet_schedule(&d->task);
                                dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
                        }
                }
                spin_unlock(&d->lock);
        } else
                dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

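/*
 * Write one LLI node.  Every node except the last is chained to its
 * successor in the pool block, with CX_LLI_CHAIN_EN set in the link
 * address; callers finish the chain themselves by zeroing the final
 * 'lli' (one-shot) or pointing it back at the first node (cyclic).
 */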
static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
                        dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
        if (num != ds->desc_num - 1)
                ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
                        sizeof(struct k3_desc_hw);

        ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
        ds->desc_hw[num].count = len;
        ds->desc_hw[num].saddr = src;
        ds->desc_hw[num].daddr = dst;
        ds->desc_hw[num].config = ccfg;
}

static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
                                                        struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_desc_sw *ds;
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        int lli_limit = LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw);

        if (num > lli_limit) {
                dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
                        &c->vc, num, lli_limit);
                return NULL;
        }

        ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
        if (!ds)
                return NULL;

        ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
        if (!ds->desc_hw) {
                dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
                kfree(ds);
                return NULL;
        }
        ds->desc_num = num;
        return ds;
}

static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
        struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
        size_t len, unsigned long flags)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_desc_sw *ds;
        size_t copy = 0;
        int num = 0;

        if (!len)
                return NULL;

        num = DIV_ROUND_UP(len, DMA_MAX_SIZE);

        ds = k3_dma_alloc_desc_resource(num, chan);
        if (!ds)
                return NULL;

        c->cyclic = 0;
        ds->size = len;
        num = 0;

        if (!c->ccfg) {
                /* default is memtomem, without calling device_config */
                c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
                c->ccfg |= (0xf << 20) | (0xf << 24);   /* burst = 16 */
                c->ccfg |= (0x3 << 12) | (0x3 << 16);   /* width = 64 bit */
        }

        do {
                copy = min_t(size_t, len, DMA_MAX_SIZE);
                k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

                if (c->dir == DMA_MEM_TO_DEV) {
                        src += copy;
                } else if (c->dir == DMA_DEV_TO_MEM) {
                        dst += copy;
                } else {
                        src += copy;
                        dst += copy;
                }
                len -= copy;
        } while (len);

        ds->desc_hw[num-1].lli = 0;     /* end of link */
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

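/*
 * Client-side sketch (hypothetical, not part of this driver): a
 * memcpy user reaches the routine above through the generic
 * dmaengine API, roughly:
 *
 *	chan = dma_request_channel(mask, NULL, NULL);  // DMA_MEMCPY in mask
 *	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					 DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);  // tasklet then grabs a pchan
 *
 * where dst/src are dma_addr_t handles from the DMA mapping API.
 */
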
static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
        enum dma_transfer_direction dir, unsigned long flags, void *context)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_desc_sw *ds;
        size_t len, avail, total = 0;
        struct scatterlist *sg;
        dma_addr_t addr, src = 0, dst = 0;
        int num = sglen, i;

        if (sgl == NULL)
                return NULL;

        c->cyclic = 0;

        for_each_sg(sgl, sg, sglen, i) {
                avail = sg_dma_len(sg);
                if (avail > DMA_MAX_SIZE)
                        num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
        }

        ds = k3_dma_alloc_desc_resource(num, chan);
        if (!ds)
                return NULL;
        num = 0;

        for_each_sg(sgl, sg, sglen, i) {
                addr = sg_dma_address(sg);
                avail = sg_dma_len(sg);
                total += avail;

                do {
                        len = min_t(size_t, avail, DMA_MAX_SIZE);

                        if (dir == DMA_MEM_TO_DEV) {
                                src = addr;
                                dst = c->dev_addr;
                        } else if (dir == DMA_DEV_TO_MEM) {
                                src = c->dev_addr;
                                dst = addr;
                        }

                        k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

                        addr += len;
                        avail -= len;
                } while (avail);
        }

        ds->desc_hw[num-1].lli = 0;     /* end of link */
        ds->size = total;
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

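/*
 * Client-side sketch (hypothetical, not part of this driver): a slave
 * (peripheral) user first describes the device FIFO, then preps the
 * scatterlist:
 *
 *	struct dma_slave_config cfg = {
 *		.direction      = DMA_MEM_TO_DEV,
 *		.dst_addr       = fifo_phys,	// device FIFO bus address
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst   = 16,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sglen, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *
 * which lands in k3_dma_prep_slave_sg() above and k3_dma_config()
 * below.
 */
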
static struct dma_async_tx_descriptor *
k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
                       size_t buf_len, size_t period_len,
                       enum dma_transfer_direction dir,
                       unsigned long flags)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_desc_sw *ds;
        size_t len, avail, total = 0;
        dma_addr_t addr, src = 0, dst = 0;
        int num = 1, since = 0;
        size_t modulo = DMA_CYCLIC_MAX_PERIOD;
        u32 en_tc2 = 0;

        dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n",
               __func__, &buf_addr, &to_k3_chan(chan)->dev_addr,
               buf_len, period_len, (int)dir);

        avail = buf_len;
        if (avail > modulo)
                num += DIV_ROUND_UP(avail, modulo) - 1;

        ds = k3_dma_alloc_desc_resource(num, chan);
        if (!ds)
                return NULL;

        c->cyclic = 1;
        addr = buf_addr;
        avail = buf_len;
        total = avail;
        num = 0;

        if (period_len < modulo)
                modulo = period_len;

        do {
                len = min_t(size_t, avail, modulo);

                if (dir == DMA_MEM_TO_DEV) {
                        src = addr;
                        dst = c->dev_addr;
                } else if (dir == DMA_DEV_TO_MEM) {
                        src = c->dev_addr;
                        dst = addr;
                }
                since += len;
                if (since >= period_len) {
                        /* descriptor asks for TC2 interrupt on completion */
                        en_tc2 = CX_CFG_NODEIRQ;
                        since -= period_len;
                } else
                        en_tc2 = 0;

                k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2);

                addr += len;
                avail -= len;
        } while (avail);

        /* "Cyclic" == end of link points back to start of link */
        ds->desc_hw[num - 1].lli |= ds->desc_hw_lli;

        ds->size = total;

        return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

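/*
 * Build the channel configuration word: bus width is encoded as
 * log2(bytes) at bits 12 (source) and 16 (destination), burst length
 * minus one at bits 20 and 24, and the request line number at bit 4.
 */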
static int k3_dma_config(struct dma_chan *chan,
                         struct dma_slave_config *cfg)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        u32 maxburst = 0, val = 0;
        enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

        if (cfg == NULL)
                return -EINVAL;
        c->dir = cfg->direction;
        if (c->dir == DMA_DEV_TO_MEM) {
                c->ccfg = CX_CFG_DSTINCR;
                c->dev_addr = cfg->src_addr;
                maxburst = cfg->src_maxburst;
                width = cfg->src_addr_width;
        } else if (c->dir == DMA_MEM_TO_DEV) {
                c->ccfg = CX_CFG_SRCINCR;
                c->dev_addr = cfg->dst_addr;
                maxburst = cfg->dst_maxburst;
                width = cfg->dst_addr_width;
        }
        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                val = __ffs(width);
                break;
        default:
                val = 3;
                break;
        }
        c->ccfg |= (val << 12) | (val << 16);

        if ((maxburst == 0) || (maxburst > 16))
                val = 15;
        else
                val = maxburst - 1;
        c->ccfg |= (val << 20) | (val << 24);
        c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;

        /* specific request line */
        c->ccfg |= c->vc.chan.chan_id << 4;

        return 0;
}

static void k3_dma_free_desc(struct virt_dma_desc *vd)
{
        struct k3_dma_desc_sw *ds =
                container_of(vd, struct k3_dma_desc_sw, vd);
        struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);

        dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
        kfree(ds);
}

static int k3_dma_terminate_all(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p = c->phy;
        unsigned long flags;
        LIST_HEAD(head);

        dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

        /* Prevent this channel being scheduled */
        spin_lock(&d->lock);
        list_del_init(&c->node);
        spin_unlock(&d->lock);

        /* Clear the tx descriptor lists */
        spin_lock_irqsave(&c->vc.lock, flags);
        vchan_get_all_descriptors(&c->vc, &head);
        if (p) {
                /* vchan is assigned to a pchan - stop the channel */
                k3_dma_terminate_chan(p, d);
                c->phy = NULL;
                p->vchan = NULL;
                if (p->ds_run) {
                        k3_dma_free_desc(&p->ds_run->vd);
                        p->ds_run = NULL;
                }
                if (p->ds_done) {
                        k3_dma_free_desc(&p->ds_done->vd);
                        p->ds_done = NULL;
                }

        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

static int k3_dma_transfer_pause(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p = c->phy;

        dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
        if (c->status == DMA_IN_PROGRESS) {
                c->status = DMA_PAUSED;
                if (p) {
                        k3_dma_pause_dma(p, false);
                } else {
                        spin_lock(&d->lock);
                        list_del_init(&c->node);
                        spin_unlock(&d->lock);
                }
        }

        return 0;
}

static int k3_dma_transfer_resume(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p = c->phy;
        unsigned long flags;

        dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
        spin_lock_irqsave(&c->vc.lock, flags);
        if (c->status == DMA_PAUSED) {
                c->status = DMA_IN_PROGRESS;
                if (p) {
                        k3_dma_pause_dma(p, true);
                } else if (!list_empty(&c->vc.desc_issued)) {
                        spin_lock(&d->lock);
                        list_add_tail(&c->node, &d->chan_pending);
                        spin_unlock(&d->lock);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return 0;
}

static const struct of_device_id k3_pdma_dt_ids[] = {
        { .compatible = "hisilicon,k3-dma-1.0", },
        {}
};
MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);

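/*
 * Translate a single-cell dma-spec into a channel: the cell is the
 * request line number, which indexes directly into d->chans[].
 */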
static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
                                                struct of_dma *ofdma)
{
        struct k3_dma_dev *d = ofdma->of_dma_data;
        unsigned int request = dma_spec->args[0];

        /* d->chans[] holds dma_requests entries, so the index must be below it */
        if (request >= d->dma_requests)
                return NULL;

        return dma_get_slave_channel(&(d->chans[request].vc.chan));
}

static int k3_dma_probe(struct platform_device *op)
{
        struct k3_dma_dev *d;
        const struct of_device_id *of_id;
        struct resource *iores;
        int i, ret, irq = 0;

        iores = platform_get_resource(op, IORESOURCE_MEM, 0);
        if (!iores)
                return -EINVAL;

        d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        d->base = devm_ioremap_resource(&op->dev, iores);
        if (IS_ERR(d->base))
                return PTR_ERR(d->base);

        of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
        if (of_id) {
                of_property_read_u32((&op->dev)->of_node,
                                "dma-channels", &d->dma_channels);
                of_property_read_u32((&op->dev)->of_node,
                                "dma-requests", &d->dma_requests);
        }

        d->clk = devm_clk_get(&op->dev, NULL);
        if (IS_ERR(d->clk)) {
                dev_err(&op->dev, "no dma clk\n");
                return PTR_ERR(d->clk);
        }

        irq = platform_get_irq(op, 0);
        if (irq < 0)
                return irq;

        ret = devm_request_irq(&op->dev, irq,
                        k3_dma_int_handler, 0, DRIVER_NAME, d);
        if (ret)
                return ret;

        d->irq = irq;

        /* A DMA memory pool for LLIs, align on 32-byte boundary */
        d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
                                        LLI_BLOCK_SIZE, 32, 0);
        if (!d->pool)
                return -ENOMEM;

        /* init phy channel */
        d->phy = devm_kzalloc(&op->dev,
                d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
        if (d->phy == NULL)
                return -ENOMEM;

        for (i = 0; i < d->dma_channels; i++) {
                struct k3_dma_phy *p = &d->phy[i];

                p->idx = i;
                p->base = d->base + i * 0x40;
        }

        INIT_LIST_HEAD(&d->slave.channels);
        dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
        dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
        dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
        d->slave.dev = &op->dev;
        d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
        d->slave.device_tx_status = k3_dma_tx_status;
        d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
        d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
        d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic;
        d->slave.device_issue_pending = k3_dma_issue_pending;
        d->slave.device_config = k3_dma_config;
        d->slave.device_pause = k3_dma_transfer_pause;
        d->slave.device_resume = k3_dma_transfer_resume;
        d->slave.device_terminate_all = k3_dma_terminate_all;
        d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;

        /* init virtual channel */
        d->chans = devm_kzalloc(&op->dev,
                d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL);
        if (d->chans == NULL)
                return -ENOMEM;

        for (i = 0; i < d->dma_requests; i++) {
                struct k3_dma_chan *c = &d->chans[i];

                c->status = DMA_IN_PROGRESS;
                INIT_LIST_HEAD(&c->node);
                c->vc.desc_free = k3_dma_free_desc;
                vchan_init(&c->vc, &d->slave);
        }

        /* Enable clock before accessing registers */
        ret = clk_prepare_enable(d->clk);
        if (ret < 0) {
                dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
                return ret;
        }

        k3_dma_enable_dma(d, true);

        ret = dma_async_device_register(&d->slave);
        if (ret)
                goto dma_async_register_fail;

        ret = of_dma_controller_register((&op->dev)->of_node,
                                        k3_of_dma_simple_xlate, d);
        if (ret)
                goto of_dma_register_fail;

        spin_lock_init(&d->lock);
        INIT_LIST_HEAD(&d->chan_pending);
        tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
        platform_set_drvdata(op, d);
        dev_info(&op->dev, "initialized\n");

        return 0;

of_dma_register_fail:
        dma_async_device_unregister(&d->slave);
dma_async_register_fail:
        clk_disable_unprepare(d->clk);
        return ret;
}

static int k3_dma_remove(struct platform_device *op)
{
        struct k3_dma_chan *c, *cn;
        struct k3_dma_dev *d = platform_get_drvdata(op);

        dma_async_device_unregister(&d->slave);
        of_dma_controller_free((&op->dev)->of_node);

        devm_free_irq(&op->dev, d->irq, d);

        list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
        }
        tasklet_kill(&d->task);
        clk_disable_unprepare(d->clk);
        return 0;
}

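/*
 * System PM: suspend is refused while any physical channel is still
 * running; otherwise interrupts are masked and the clock is gated,
 * both undone on resume.
 */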
#ifdef CONFIG_PM_SLEEP
static int k3_dma_suspend_dev(struct device *dev)
{
        struct k3_dma_dev *d = dev_get_drvdata(dev);
        u32 stat = 0;

        stat = k3_dma_get_chan_stat(d);
        if (stat) {
                dev_warn(d->slave.dev,
                        "chan 0x%x is running, fail to suspend\n", stat);
                return -1;
        }
        k3_dma_enable_dma(d, false);
        clk_disable_unprepare(d->clk);
        return 0;
}

static int k3_dma_resume_dev(struct device *dev)
{
        struct k3_dma_dev *d = dev_get_drvdata(dev);
        int ret = 0;

        ret = clk_prepare_enable(d->clk);
        if (ret < 0) {
                dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
                return ret;
        }
        k3_dma_enable_dma(d, true);
        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);

static struct platform_driver k3_pdma_driver = {
        .driver         = {
                .name   = DRIVER_NAME,
                .pm     = &k3_dma_pmops,
                .of_match_table = k3_pdma_dt_ids,
        },
        .probe          = k3_dma_probe,
        .remove         = k3_dma_remove,
};

module_platform_driver(k3_pdma_driver);

MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
MODULE_ALIAS("platform:k3dma");
MODULE_LICENSE("GPL v2");
