Version:  2.0.40 2.2.26 2.4.37 3.8 3.9 3.10 3.11 3.12 3.13 3.14 3.15 3.16 3.17 3.18 3.19 4.0 4.1 4.2 4.3 4.4 4.5

Linux/drivers/dma/mv_xor.c

  1 /*
  2  * offload engine driver for the Marvell XOR engine
  3  * Copyright (C) 2007, 2008, Marvell International Ltd.
  4  *
  5  * This program is free software; you can redistribute it and/or modify it
  6  * under the terms and conditions of the GNU General Public License,
  7  * version 2, as published by the Free Software Foundation.
  8  *
  9  * This program is distributed in the hope it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 12  * more details.
 13  */
 14 
 15 #include <linux/init.h>
 16 #include <linux/slab.h>
 17 #include <linux/delay.h>
 18 #include <linux/dma-mapping.h>
 19 #include <linux/spinlock.h>
 20 #include <linux/interrupt.h>
 21 #include <linux/of_device.h>
 22 #include <linux/platform_device.h>
 23 #include <linux/memory.h>
 24 #include <linux/clk.h>
 25 #include <linux/of.h>
 26 #include <linux/of_irq.h>
 27 #include <linux/irqdomain.h>
 28 #include <linux/cpumask.h>
 29 #include <linux/platform_data/dma-mv_xor.h>
 30 
 31 #include "dmaengine.h"
 32 #include "mv_xor.h"
 33 
/*
 * Where the operation type (XOR vs MEMCPY) is programmed: once per
 * channel in the config register, or per hardware descriptor.
 */
enum mv_xor_mode {
	XOR_MODE_IN_REG,
	XOR_MODE_IN_DESC,
};

static void mv_xor_issue_pending(struct dma_chan *chan);

/* Convert a generic dma_chan into the driver's per-channel state. */
#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

/* Convert an async_tx descriptor into its enclosing slot. */
#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

/* Shortcut to the struct device used for diagnostics. */
#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)
 49 
/*
 * mv_desc_init - program the common fields of a hardware descriptor.
 * @desc:	software slot whose hw_desc is being set up
 * @addr:	DMA address of the destination buffer
 * @byte_count:	transfer length in bytes
 * @flags:	dmaengine flags; only DMA_PREP_INTERRUPT is honoured here
 *
 * Hands ownership of the descriptor to the engine and clears its
 * hardware next-descriptor link.  Source addresses and (in
 * XOR_MODE_IN_DESC) the operation type are programmed separately.
 */
static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}
 64 
 65 static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
 66 {
 67         struct mv_xor_desc *hw_desc = desc->hw_desc;
 68 
 69         switch (desc->type) {
 70         case DMA_XOR:
 71         case DMA_INTERRUPT:
 72                 hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
 73                 break;
 74         case DMA_MEMCPY:
 75                 hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
 76                 break;
 77         default:
 78                 BUG();
 79                 return;
 80         }
 81 }
 82 
/*
 * Link @desc's hardware descriptor to the one at @next_desc_addr.
 * The slot must not already be linked; chaining onto a live link
 * would let the engine follow a half-updated chain.
 */
static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}
 90 
/*
 * Program source @index of the descriptor; mv_phy_src_idx() maps the
 * logical index onto the hardware's source-slot layout.  For XOR
 * operations each used source must also have its enable bit set in
 * the command word.
 */
static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}
 99 
100 static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
101 {
102         return readl_relaxed(XOR_CURR_DESC(chan));
103 }
104 
/* Point the engine's next-descriptor register at @next_desc_addr. */
static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}
110 
111 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
112 {
113         u32 val = readl_relaxed(XOR_INTR_MASK(chan));
114         val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
115         writel_relaxed(val, XOR_INTR_MASK(chan));
116 }
117 
118 static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
119 {
120         u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
121         intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
122         return intr_cause;
123 }
124 
/*
 * Acknowledge the end-of-descriptor, end-of-chain and stopped causes
 * for this channel.  The value written has zeroes exactly on the bits
 * being acknowledged and ones everywhere else; together with
 * mv_chan_clear_err_status() this looks like write-0-to-clear
 * semantics for the cause register - TODO confirm against the XOR
 * engine datasheet.
 */
static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
134 
/*
 * Clear every cause bit belonging to this channel: the written word
 * has zeroes in this channel's 16-bit field and ones in the other
 * channel's field (same write-0-to-clear convention used by
 * mv_chan_clear_eoc_cause()).
 */
static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
140 
/*
 * Program the channel-wide operation mode (low 3 bits of XOR_CONFIG)
 * and set descriptor byte-swapping to match the CPU endianness so the
 * engine reads in-memory descriptors correctly.
 */
static void mv_chan_set_mode(struct mv_xor_chan *chan,
			     u32 op_mode)
{
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
}
157 
/* Kick the engine so it starts (or resumes) fetching descriptors. */
static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}
165 
166 static char mv_chan_is_busy(struct mv_xor_chan *chan)
167 {
168         u32 state = readl_relaxed(XOR_ACTIVATION(chan));
169 
170         state = (state >> 4) & 0x3;
171 
172         return (state == 1) ? 1 : 0;
173 }
174 
175 /*
176  * mv_chan_start_new_chain - program the engine to operate on new
177  * chain headed by sw_desc
178  * Caller must hold &mv_chan->lock while calling this function
179  */
180 static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
181                                     struct mv_xor_desc_slot *sw_desc)
182 {
183         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
184                 __func__, __LINE__, sw_desc);
185 
186         /* set the hardware chain */
187         mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
188 
189         mv_chan->pending++;
190         mv_xor_issue_pending(&mv_chan->dmachan);
191 }
192 
/*
 * Run the dmaengine completion actions for one finished descriptor:
 * invoke the client callback, unmap buffers and start any dependent
 * transactions.  Returns the descriptor's cookie if it has one,
 * otherwise the @cookie passed in (callers thread the latest
 * completed cookie through successive invocations).
 */
static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
				dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	/* cookie == 0 means "no client-visible transaction" - skip */
	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}
218 
219 static int
220 mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
221 {
222         struct mv_xor_desc_slot *iter, *_iter;
223 
224         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
225         list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
226                                  node) {
227 
228                 if (async_tx_test_ack(&iter->async_tx))
229                         list_move_tail(&iter->node, &mv_chan->free_slots);
230         }
231         return 0;
232 }
233 
/*
 * Retire a finished descriptor: if the client has already acked it,
 * the slot goes straight back to the free list; otherwise it parks on
 * completed_slots until mv_chan_clean_completed_slots() sees the ack.
 * Always returns 0.
 */
static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);

	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx))
		/* move this slot to the completed_slots */
		list_move_tail(&desc->node, &mv_chan->completed_slots);
	else
		list_move_tail(&desc->node, &mv_chan->free_slots);

	return 0;
}
252 
/*
 * Reap descriptors the engine has finished, run their completion
 * actions, and restart the hardware chain if the engine stalled with
 * work still queued.  Must be called with the mv_xor_chan spinlock
 * held.
 */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_chan_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
								 cookie);

			/* done processing desc, clean slot */
			mv_desc_clean_slot(iter, mv_chan);

			/* break if we did cleaned the current */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			/* hit an unfinished descriptor; stop at the
			 * engine's current position
			 */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

	/* engine idle but descriptors remain: restart it */
	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  node);
			mv_chan_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->node.next,
						  struct mv_xor_desc_slot,
						  node);
				mv_chan_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	/* publish the newest completed cookie to the dmaengine core */
	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}
329 
330 static void mv_xor_tasklet(unsigned long data)
331 {
332         struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
333 
334         spin_lock_bh(&chan->lock);
335         mv_chan_slot_cleanup(chan);
336         spin_unlock_bh(&chan->lock);
337 }
338 
/*
 * Take a descriptor slot from the channel's free list and move it to
 * allocated_slots.  The slot is pre-acked and its cookie set to
 * -EBUSY so the dmaengine core treats it as in-flight.  On an empty
 * free list, schedule the cleanup tasklet (which may recycle slots)
 * and return NULL.
 */
static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter;

	spin_lock_bh(&mv_chan->lock);

	if (!list_empty(&mv_chan->free_slots)) {
		iter = list_first_entry(&mv_chan->free_slots,
					struct mv_xor_desc_slot,
					node);

		list_move_tail(&iter->node, &mv_chan->allocated_slots);

		spin_unlock_bh(&mv_chan->lock);

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);
		iter->async_tx.cookie = -EBUSY;

		return iter;

	}

	spin_unlock_bh(&mv_chan->lock);

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}
370 
/************************ DMA engine API functions ****************************/

/*
 * ->tx_submit hook: assign a cookie and append the descriptor to the
 * channel's software chain.  If the chain was empty, or the engine
 * had already consumed the old tail before we linked onto it, a new
 * hardware chain is started from this descriptor.
 */
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_move_tail(&sw_desc->node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    node);
		list_move_tail(&sw_desc->node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_chan_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}
423 
424 /* returns the number of allocated descriptors */
425 static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
426 {
427         void *virt_desc;
428         dma_addr_t dma_desc;
429         int idx;
430         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
431         struct mv_xor_desc_slot *slot = NULL;
432         int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
433 
434         /* Allocate descriptor slots */
435         idx = mv_chan->slots_allocated;
436         while (idx < num_descs_in_pool) {
437                 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
438                 if (!slot) {
439                         dev_info(mv_chan_to_devp(mv_chan),
440                                  "channel only initialized %d descriptor slots",
441                                  idx);
442                         break;
443                 }
444                 virt_desc = mv_chan->dma_desc_pool_virt;
445                 slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
446 
447                 dma_async_tx_descriptor_init(&slot->async_tx, chan);
448                 slot->async_tx.tx_submit = mv_xor_tx_submit;
449                 INIT_LIST_HEAD(&slot->node);
450                 dma_desc = mv_chan->dma_desc_pool;
451                 slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
452                 slot->idx = idx++;
453 
454                 spin_lock_bh(&mv_chan->lock);
455                 mv_chan->slots_allocated = idx;
456                 list_add_tail(&slot->node, &mv_chan->free_slots);
457                 spin_unlock_bh(&mv_chan->lock);
458         }
459 
460         dev_dbg(mv_chan_to_devp(mv_chan),
461                 "allocated %d descriptor slots\n",
462                 mv_chan->slots_allocated);
463 
464         return mv_chan->slots_allocated ? : -ENOMEM;
465 }
466 
467 static struct dma_async_tx_descriptor *
468 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
469                     unsigned int src_cnt, size_t len, unsigned long flags)
470 {
471         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
472         struct mv_xor_desc_slot *sw_desc;
473 
474         if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
475                 return NULL;
476 
477         BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
478 
479         dev_dbg(mv_chan_to_devp(mv_chan),
480                 "%s src_cnt: %d len: %u dest %pad flags: %ld\n",
481                 __func__, src_cnt, len, &dest, flags);
482 
483         sw_desc = mv_chan_alloc_slot(mv_chan);
484         if (sw_desc) {
485                 sw_desc->type = DMA_XOR;
486                 sw_desc->async_tx.flags = flags;
487                 mv_desc_init(sw_desc, dest, len, flags);
488                 if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
489                         mv_desc_set_mode(sw_desc);
490                 while (src_cnt--)
491                         mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
492         }
493 
494         dev_dbg(mv_chan_to_devp(mv_chan),
495                 "%s sw_desc %p async_tx %p \n",
496                 __func__, sw_desc, &sw_desc->async_tx);
497         return sw_desc ? &sw_desc->async_tx : NULL;
498 }
499 
500 static struct dma_async_tx_descriptor *
501 mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
502                 size_t len, unsigned long flags)
503 {
504         /*
505          * A MEMCPY operation is identical to an XOR operation with only
506          * a single source address.
507          */
508         return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
509 }
510 
511 static struct dma_async_tx_descriptor *
512 mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
513 {
514         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
515         dma_addr_t src, dest;
516         size_t len;
517 
518         src = mv_chan->dummy_src_addr;
519         dest = mv_chan->dummy_dst_addr;
520         len = MV_XOR_MIN_BYTE_COUNT;
521 
522         /*
523          * We implement the DMA_INTERRUPT operation as a minimum sized
524          * XOR operation with a single dummy source address.
525          */
526         return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
527 }
528 
/*
 * ->device_free_chan_resources hook: run a final cleanup pass, drain
 * every list (chain, completed, allocated) back onto the free list,
 * then kfree all software descriptors.  Descriptors found on the
 * non-free lists are counted and reported as still in use.
 */
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_chan_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
					node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	/* free in reverse (newest-first) order */
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->free_slots, node) {
		list_del(&iter->node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}
569 
570 /**
571  * mv_xor_status - poll the status of an XOR transaction
572  * @chan: XOR channel handle
573  * @cookie: XOR transaction identifier
574  * @txstate: XOR transactions state holder (or NULL)
575  */
576 static enum dma_status mv_xor_status(struct dma_chan *chan,
577                                           dma_cookie_t cookie,
578                                           struct dma_tx_state *txstate)
579 {
580         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
581         enum dma_status ret;
582 
583         ret = dma_cookie_status(chan, cookie, txstate);
584         if (ret == DMA_COMPLETE)
585                 return ret;
586 
587         spin_lock_bh(&mv_chan->lock);
588         mv_chan_slot_cleanup(mv_chan);
589         spin_unlock_bh(&mv_chan->lock);
590 
591         return dma_cookie_status(chan, cookie, txstate);
592 }
593 
/* Dump the channel registers to the log for error diagnosis. */
static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
	dev_err(mv_chan_to_devp(chan), "config       0x%08x\n",
		readl_relaxed(XOR_CONFIG(chan)));

	dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n",
		readl_relaxed(XOR_ACTIVATION(chan)));

	dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n",
		readl_relaxed(XOR_INTR_CAUSE(chan)));

	dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n",
		readl_relaxed(XOR_INTR_MASK(chan)));

	dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n",
		readl_relaxed(XOR_ERROR_CAUSE(chan)));

	dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n",
		readl_relaxed(XOR_ERROR_ADDR(chan)));
}
616 
/*
 * Handle an error interrupt.  Address decode errors are deliberately
 * ignored (debug log only); any other error dumps the channel
 * registers and triggers a WARN so the failure is visible.
 */
static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
					  u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_chan_dump_regs(chan);
	WARN_ON(1);
}
631 
/*
 * Top-half interrupt handler: report errors, defer descriptor
 * cleanup to the tasklet, and acknowledge the end-of-chain causes.
 */
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_chan_err_interrupt_handler(chan, intr_cause);

	/* actual reaping happens in mv_xor_tasklet() */
	tasklet_schedule(&chan->irq_tasklet);

	mv_chan_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}
648 
/*
 * ->device_issue_pending hook.  Submissions are batched: the engine
 * is only (re)activated once MV_XOR_THRESHOLD descriptors have been
 * queued since the last activation.
 */
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}
658 
659 /*
660  * Perform a transaction to verify the HW works.
661  */
662 
663 static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
664 {
665         int i, ret;
666         void *src, *dest;
667         dma_addr_t src_dma, dest_dma;
668         struct dma_chan *dma_chan;
669         dma_cookie_t cookie;
670         struct dma_async_tx_descriptor *tx;
671         struct dmaengine_unmap_data *unmap;
672         int err = 0;
673 
674         src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
675         if (!src)
676                 return -ENOMEM;
677 
678         dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
679         if (!dest) {
680                 kfree(src);
681                 return -ENOMEM;
682         }
683 
684         /* Fill in src buffer */
685         for (i = 0; i < PAGE_SIZE; i++)
686                 ((u8 *) src)[i] = (u8)i;
687 
688         dma_chan = &mv_chan->dmachan;
689         if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
690                 err = -ENODEV;
691                 goto out;
692         }
693 
694         unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
695         if (!unmap) {
696                 err = -ENOMEM;
697                 goto free_resources;
698         }
699 
700         src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
701                                  PAGE_SIZE, DMA_TO_DEVICE);
702         unmap->addr[0] = src_dma;
703 
704         ret = dma_mapping_error(dma_chan->device->dev, src_dma);
705         if (ret) {
706                 err = -ENOMEM;
707                 goto free_resources;
708         }
709         unmap->to_cnt = 1;
710 
711         dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
712                                   PAGE_SIZE, DMA_FROM_DEVICE);
713         unmap->addr[1] = dest_dma;
714 
715         ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
716         if (ret) {
717                 err = -ENOMEM;
718                 goto free_resources;
719         }
720         unmap->from_cnt = 1;
721         unmap->len = PAGE_SIZE;
722 
723         tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
724                                     PAGE_SIZE, 0);
725         if (!tx) {
726                 dev_err(dma_chan->device->dev,
727                         "Self-test cannot prepare operation, disabling\n");
728                 err = -ENODEV;
729                 goto free_resources;
730         }
731 
732         cookie = mv_xor_tx_submit(tx);
733         if (dma_submit_error(cookie)) {
734                 dev_err(dma_chan->device->dev,
735                         "Self-test submit error, disabling\n");
736                 err = -ENODEV;
737                 goto free_resources;
738         }
739 
740         mv_xor_issue_pending(dma_chan);
741         async_tx_ack(tx);
742         msleep(1);
743 
744         if (mv_xor_status(dma_chan, cookie, NULL) !=
745             DMA_COMPLETE) {
746                 dev_err(dma_chan->device->dev,
747                         "Self-test copy timed out, disabling\n");
748                 err = -ENODEV;
749                 goto free_resources;
750         }
751 
752         dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
753                                 PAGE_SIZE, DMA_FROM_DEVICE);
754         if (memcmp(src, dest, PAGE_SIZE)) {
755                 dev_err(dma_chan->device->dev,
756                         "Self-test copy failed compare, disabling\n");
757                 err = -ENODEV;
758                 goto free_resources;
759         }
760 
761 free_resources:
762         dmaengine_unmap_put(unmap);
763         mv_xor_free_chan_resources(dma_chan);
764 out:
765         kfree(src);
766         kfree(dest);
767         return err;
768 }
769 
#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
/*
 * Probe-time XOR self-test: XOR MV_XOR_NUM_SRC_TEST pages (each
 * filled with a distinct single-bit pattern) into a zeroed
 * destination and check every 32-bit word of the result.  Returns 0
 * on success, -ENOMEM/-ENODEV on failure; all pages and channel
 * resources are freed before returning.
 */
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			/* unwind the pages allocated so far */
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		/* src_idx == src_count here, so this frees all sources */
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	/* expected result: XOR of all the single-bit patterns */
	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* kick the engine and give it time to finish */
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* hand the destination back to the CPU before comparing */
	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}
908 
/*
 * mv_xor_channel_remove - tear down one XOR channel.
 * @mv_chan: channel previously set up by mv_xor_channel_add()
 *
 * Undoes everything mv_xor_channel_add() set up, in reverse dependency
 * order: unregister from the dmaengine core first so no new work can
 * arrive, then release the descriptor pool and the long-lived dummy
 * buffer mappings, unlink the channel, and finally release the IRQ.
 *
 * Always returns 0.  The mv_chan structure itself is devm-allocated and
 * freed by the driver core.
 */
static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	/* Stop exposing the channel to dmaengine clients. */
	dma_async_device_unregister(&mv_chan->dmadev);

	/* Release the coherent hardware-descriptor pool. */
	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	/*
	 * Undo the initialization-time mappings of the dummy buffers used
	 * to emulate DMA_INTERRUPT as a minimal XOR (see channel_add).
	 */
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* Unlink every channel hanging off this dma_device. */
	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}
932 
933 static struct mv_xor_chan *
934 mv_xor_channel_add(struct mv_xor_device *xordev,
935                    struct platform_device *pdev,
936                    int idx, dma_cap_mask_t cap_mask, int irq, int op_in_desc)
937 {
938         int ret = 0;
939         struct mv_xor_chan *mv_chan;
940         struct dma_device *dma_dev;
941 
942         mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
943         if (!mv_chan)
944                 return ERR_PTR(-ENOMEM);
945 
946         mv_chan->idx = idx;
947         mv_chan->irq = irq;
948         mv_chan->op_in_desc = op_in_desc;
949 
950         dma_dev = &mv_chan->dmadev;
951 
952         /*
953          * These source and destination dummy buffers are used to implement
954          * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
955          * Hence, we only need to map the buffers at initialization-time.
956          */
957         mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
958                 mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
959         mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
960                 mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
961 
962         /* allocate coherent memory for hardware descriptors
963          * note: writecombine gives slightly better performance, but
964          * requires that we explicitly flush the writes
965          */
966         mv_chan->dma_desc_pool_virt =
967           dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
968                                  &mv_chan->dma_desc_pool, GFP_KERNEL);
969         if (!mv_chan->dma_desc_pool_virt)
970                 return ERR_PTR(-ENOMEM);
971 
972         /* discover transaction capabilites from the platform data */
973         dma_dev->cap_mask = cap_mask;
974 
975         INIT_LIST_HEAD(&dma_dev->channels);
976 
977         /* set base routines */
978         dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
979         dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
980         dma_dev->device_tx_status = mv_xor_status;
981         dma_dev->device_issue_pending = mv_xor_issue_pending;
982         dma_dev->dev = &pdev->dev;
983 
984         /* set prep routines based on capability */
985         if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
986                 dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
987         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
988                 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
989         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
990                 dma_dev->max_xor = 8;
991                 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
992         }
993 
994         mv_chan->mmr_base = xordev->xor_base;
995         mv_chan->mmr_high_base = xordev->xor_high_base;
996         tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
997                      mv_chan);
998 
999         /* clear errors before enabling interrupts */
1000         mv_chan_clear_err_status(mv_chan);
1001 
1002         ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
1003                           0, dev_name(&pdev->dev), mv_chan);
1004         if (ret)
1005                 goto err_free_dma;
1006 
1007         mv_chan_unmask_interrupts(mv_chan);
1008 
1009         if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
1010                 mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
1011         else
1012                 mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);
1013 
1014         spin_lock_init(&mv_chan->lock);
1015         INIT_LIST_HEAD(&mv_chan->chain);
1016         INIT_LIST_HEAD(&mv_chan->completed_slots);
1017         INIT_LIST_HEAD(&mv_chan->free_slots);
1018         INIT_LIST_HEAD(&mv_chan->allocated_slots);
1019         mv_chan->dmachan.device = dma_dev;
1020         dma_cookie_init(&mv_chan->dmachan);
1021 
1022         list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
1023 
1024         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1025                 ret = mv_chan_memcpy_self_test(mv_chan);
1026                 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1027                 if (ret)
1028                         goto err_free_irq;
1029         }
1030 
1031         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1032                 ret = mv_chan_xor_self_test(mv_chan);
1033                 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1034                 if (ret)
1035                         goto err_free_irq;
1036         }
1037 
1038         dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
1039                  mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
1040                  dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1041                  dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1042                  dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1043 
1044         dma_async_device_register(dma_dev);
1045         return mv_chan;
1046 
1047 err_free_irq:
1048         free_irq(mv_chan->irq, mv_chan);
1049  err_free_dma:
1050         dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
1051                           mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1052         return ERR_PTR(ret);
1053 }
1054 
1055 static void
1056 mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
1057                          const struct mbus_dram_target_info *dram)
1058 {
1059         void __iomem *base = xordev->xor_high_base;
1060         u32 win_enable = 0;
1061         int i;
1062 
1063         for (i = 0; i < 8; i++) {
1064                 writel(0, base + WINDOW_BASE(i));
1065                 writel(0, base + WINDOW_SIZE(i));
1066                 if (i < 4)
1067                         writel(0, base + WINDOW_REMAP_HIGH(i));
1068         }
1069 
1070         for (i = 0; i < dram->num_cs; i++) {
1071                 const struct mbus_dram_window *cs = dram->cs + i;
1072 
1073                 writel((cs->base & 0xffff0000) |
1074                        (cs->mbus_attr << 8) |
1075                        dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1076                 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1077 
1078                 win_enable |= (1 << i);
1079                 win_enable |= 3 << (16 + (2 * i));
1080         }
1081 
1082         writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1083         writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1084         writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1085         writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1086 }
1087 
1088 /*
1089  * Since this XOR driver is basically used only for RAID5, we don't
1090  * need to care about synchronizing ->suspend with DMA activity,
1091  * because the DMA engine will naturally be quiet due to the block
1092  * devices being suspended.
1093  */
1094 static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
1095 {
1096         struct mv_xor_device *xordev = platform_get_drvdata(pdev);
1097         int i;
1098 
1099         for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1100                 struct mv_xor_chan *mv_chan = xordev->channels[i];
1101 
1102                 if (!mv_chan)
1103                         continue;
1104 
1105                 mv_chan->saved_config_reg =
1106                         readl_relaxed(XOR_CONFIG(mv_chan));
1107                 mv_chan->saved_int_mask_reg =
1108                         readl_relaxed(XOR_INTR_MASK(mv_chan));
1109         }
1110 
1111         return 0;
1112 }
1113 
1114 static int mv_xor_resume(struct platform_device *dev)
1115 {
1116         struct mv_xor_device *xordev = platform_get_drvdata(dev);
1117         const struct mbus_dram_target_info *dram;
1118         int i;
1119 
1120         for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1121                 struct mv_xor_chan *mv_chan = xordev->channels[i];
1122 
1123                 if (!mv_chan)
1124                         continue;
1125 
1126                 writel_relaxed(mv_chan->saved_config_reg,
1127                                XOR_CONFIG(mv_chan));
1128                 writel_relaxed(mv_chan->saved_int_mask_reg,
1129                                XOR_INTR_MASK(mv_chan));
1130         }
1131 
1132         dram = mv_mbus_dram_info();
1133         if (dram)
1134                 mv_xor_conf_mbus_windows(xordev, dram);
1135 
1136         return 0;
1137 }
1138 
/*
 * DT match table.  The .data field carries an enum mv_xor_mode value
 * selecting whether the operation type is encoded per descriptor
 * (armada-380) or in the channel configuration register (orion).
 */
static const struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG },
	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC },
	{},
};
1144 
1145 static unsigned int mv_xor_engine_count;
1146 
1147 static int mv_xor_probe(struct platform_device *pdev)
1148 {
1149         const struct mbus_dram_target_info *dram;
1150         struct mv_xor_device *xordev;
1151         struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
1152         struct resource *res;
1153         unsigned int max_engines, max_channels;
1154         int i, ret;
1155         int op_in_desc;
1156 
1157         dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
1158 
1159         xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
1160         if (!xordev)
1161                 return -ENOMEM;
1162 
1163         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1164         if (!res)
1165                 return -ENODEV;
1166 
1167         xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
1168                                         resource_size(res));
1169         if (!xordev->xor_base)
1170                 return -EBUSY;
1171 
1172         res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1173         if (!res)
1174                 return -ENODEV;
1175 
1176         xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1177                                              resource_size(res));
1178         if (!xordev->xor_high_base)
1179                 return -EBUSY;
1180 
1181         platform_set_drvdata(pdev, xordev);
1182 
1183         /*
1184          * (Re-)program MBUS remapping windows if we are asked to.
1185          */
1186         dram = mv_mbus_dram_info();
1187         if (dram)
1188                 mv_xor_conf_mbus_windows(xordev, dram);
1189 
1190         /* Not all platforms can gate the clock, so it is not
1191          * an error if the clock does not exists.
1192          */
1193         xordev->clk = clk_get(&pdev->dev, NULL);
1194         if (!IS_ERR(xordev->clk))
1195                 clk_prepare_enable(xordev->clk);
1196 
1197         /*
1198          * We don't want to have more than one channel per CPU in
1199          * order for async_tx to perform well. So we limit the number
1200          * of engines and channels so that we take into account this
1201          * constraint. Note that we also want to use channels from
1202          * separate engines when possible.
1203          */
1204         max_engines = num_present_cpus();
1205         max_channels = min_t(unsigned int,
1206                              MV_XOR_MAX_CHANNELS,
1207                              DIV_ROUND_UP(num_present_cpus(), 2));
1208 
1209         if (mv_xor_engine_count >= max_engines)
1210                 return 0;
1211 
1212         if (pdev->dev.of_node) {
1213                 struct device_node *np;
1214                 int i = 0;
1215                 const struct of_device_id *of_id =
1216                         of_match_device(mv_xor_dt_ids,
1217                                         &pdev->dev);
1218 
1219                 for_each_child_of_node(pdev->dev.of_node, np) {
1220                         struct mv_xor_chan *chan;
1221                         dma_cap_mask_t cap_mask;
1222                         int irq;
1223                         op_in_desc = (int)of_id->data;
1224 
1225                         if (i >= max_channels)
1226                                 continue;
1227 
1228                         dma_cap_zero(cap_mask);
1229                         dma_cap_set(DMA_MEMCPY, cap_mask);
1230                         dma_cap_set(DMA_XOR, cap_mask);
1231                         dma_cap_set(DMA_INTERRUPT, cap_mask);
1232 
1233                         irq = irq_of_parse_and_map(np, 0);
1234                         if (!irq) {
1235                                 ret = -ENODEV;
1236                                 goto err_channel_add;
1237                         }
1238 
1239                         chan = mv_xor_channel_add(xordev, pdev, i,
1240                                                   cap_mask, irq, op_in_desc);
1241                         if (IS_ERR(chan)) {
1242                                 ret = PTR_ERR(chan);
1243                                 irq_dispose_mapping(irq);
1244                                 goto err_channel_add;
1245                         }
1246 
1247                         xordev->channels[i] = chan;
1248                         i++;
1249                 }
1250         } else if (pdata && pdata->channels) {
1251                 for (i = 0; i < max_channels; i++) {
1252                         struct mv_xor_channel_data *cd;
1253                         struct mv_xor_chan *chan;
1254                         int irq;
1255 
1256                         cd = &pdata->channels[i];
1257                         if (!cd) {
1258                                 ret = -ENODEV;
1259                                 goto err_channel_add;
1260                         }
1261 
1262                         irq = platform_get_irq(pdev, i);
1263                         if (irq < 0) {
1264                                 ret = irq;
1265                                 goto err_channel_add;
1266                         }
1267 
1268                         chan = mv_xor_channel_add(xordev, pdev, i,
1269                                                   cd->cap_mask, irq,
1270                                                   XOR_MODE_IN_REG);
1271                         if (IS_ERR(chan)) {
1272                                 ret = PTR_ERR(chan);
1273                                 goto err_channel_add;
1274                         }
1275 
1276                         xordev->channels[i] = chan;
1277                 }
1278         }
1279 
1280         return 0;
1281 
1282 err_channel_add:
1283         for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1284                 if (xordev->channels[i]) {
1285                         mv_xor_channel_remove(xordev->channels[i]);
1286                         if (pdev->dev.of_node)
1287                                 irq_dispose_mapping(xordev->channels[i]->irq);
1288                 }
1289 
1290         if (!IS_ERR(xordev->clk)) {
1291                 clk_disable_unprepare(xordev->clk);
1292                 clk_put(xordev->clk);
1293         }
1294 
1295         return ret;
1296 }
1297 
/*
 * Platform driver glue.  Uses the legacy platform suspend/resume hooks
 * (not dev_pm_ops); matches by name or by the DT table above.
 */
static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.suspend	= mv_xor_suspend,
	.resume		= mv_xor_resume,
	.driver		= {
		.name	        = MV_XOR_NAME,
		.of_match_table = of_match_ptr(mv_xor_dt_ids),
	},
};
1307 
1308 
1309 static int __init mv_xor_init(void)
1310 {
1311         return platform_driver_register(&mv_xor_driver);
1312 }
1313 device_initcall(mv_xor_init);
1314 
1315 /*
1316 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1317 MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1318 MODULE_LICENSE("GPL");
1319 */
1320 

This page was automatically generated by LXR 0.3.1 (source).  •  Linux is a registered trademark of Linus Torvalds  •  Contact us