
Linux/drivers/infiniband/hw/amso1100/c2.c

/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/ib_smi.h>
#include "c2.h"
#include "c2_provider.h"

MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;          /* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int c2_up(struct net_device *netdev);
static int c2_down(struct net_device *netdev);
static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static void c2_tx_interrupt(struct net_device *netdev);
static void c2_rx_interrupt(struct net_device *netdev);
static irqreturn_t c2_interrupt(int irq, void *dev_id);
static void c2_tx_timeout(struct net_device *netdev);
static int c2_change_mtu(struct net_device *netdev, int new_mtu);
static void c2_reset(struct c2_port *c2_port);

static struct pci_device_id c2_pci_table[] = {
        { PCI_DEVICE(0x18b8, 0xb001) },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, c2_pci_table);

static void c2_print_macaddr(struct net_device *netdev)
{
        pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq);
}

static void c2_set_rxbufsize(struct c2_port *c2_port)
{
        struct net_device *netdev = c2_port->netdev;

        if (netdev->mtu > RX_BUF_SIZE)
                c2_port->rx_buf_size =
                    netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
                    NET_IP_ALIGN;
        else
                c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
}
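
/*
 * Note: the adapter prepends a struct c2_rxp_hdr to every received
 * frame, so for MTUs above RX_BUF_SIZE the buffer must cover the MTU
 * payload plus the Ethernet header, the RXP header and NET_IP_ALIGN
 * padding; for standard MTUs a fixed RX_BUF_SIZE buffer plus the
 * header suffices.
 */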

/*
 * Allocate TX ring elements and chain them together.
 * One-to-one association of adapter descriptors with ring elements.
 */
static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
                            dma_addr_t base, void __iomem * mmio_txp_ring)
{
        struct c2_tx_desc *tx_desc;
        struct c2_txp_desc __iomem *txp_desc;
        struct c2_element *elem;
        int i;

        tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL);
        if (!tx_ring->start)
                return -ENOMEM;

        elem = tx_ring->start;
        tx_desc = vaddr;
        txp_desc = mmio_txp_ring;
        for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
                tx_desc->len = 0;
                tx_desc->status = 0;

                /* Set TXP_HTXD_UNINIT */
                __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
                             (void __iomem *) txp_desc + C2_TXP_ADDR);
                __raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
                __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
                             (void __iomem *) txp_desc + C2_TXP_FLAGS);

                elem->skb = NULL;
                elem->ht_desc = tx_desc;
                elem->hw_desc = txp_desc;

                if (i == tx_ring->count - 1) {
                        elem->next = tx_ring->start;
                        tx_desc->next_offset = base;
                } else {
                        elem->next = elem + 1;
                        tx_desc->next_offset =
                            base + (i + 1) * sizeof(*tx_desc);
                }
        }

        tx_ring->to_use = tx_ring->to_clean = tx_ring->start;

        return 0;
}
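
/*
 * Note: each ring element pairs a host descriptor (ht_desc, in DMA
 * memory, chained through next_offset bus addresses) with a shadow
 * descriptor in adapter MMIO space (hw_desc). The 0x1122334455667788
 * pattern written above appears to be a recognizable sentinel marking
 * a TX descriptor whose address field was never initialized.
 */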

/*
 * Allocate RX ring elements and chain them together.
 * One-to-one association of adapter descriptors with ring elements.
 */
static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
                            dma_addr_t base, void __iomem * mmio_rxp_ring)
{
        struct c2_rx_desc *rx_desc;
        struct c2_rxp_desc __iomem *rxp_desc;
        struct c2_element *elem;
        int i;

        rx_ring->start = kmalloc(sizeof(*elem) * rx_ring->count, GFP_KERNEL);
        if (!rx_ring->start)
                return -ENOMEM;

        elem = rx_ring->start;
        rx_desc = vaddr;
        rxp_desc = mmio_rxp_ring;
        for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
                rx_desc->len = 0;
                rx_desc->status = 0;

                /* Set RXP_HRXD_UNINIT */
                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
                       (void __iomem *) rxp_desc + C2_RXP_STATUS);
                __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
                __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
                __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
                             (void __iomem *) rxp_desc + C2_RXP_ADDR);
                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
                             (void __iomem *) rxp_desc + C2_RXP_FLAGS);

                elem->skb = NULL;
                elem->ht_desc = rx_desc;
                elem->hw_desc = rxp_desc;

                if (i == rx_ring->count - 1) {
                        elem->next = rx_ring->start;
                        rx_desc->next_offset = base;
                } else {
                        elem->next = elem + 1;
                        rx_desc->next_offset =
                            base + (i + 1) * sizeof(*rx_desc);
                }
        }

        rx_ring->to_use = rx_ring->to_clean = rx_ring->start;

        return 0;
}
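
/*
 * Note: the RX ring mirrors the TX ring layout; 0x99aabbccddeeff
 * appears to play the same uninitialized-address sentinel role here
 * that 0x1122334455667788 plays for TX descriptors.
 */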

/* Setup buffer for receiving */
static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
{
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_rx_desc *rx_desc = elem->ht_desc;
        struct sk_buff *skb;
        dma_addr_t mapaddr;
        u32 maplen;
        struct c2_rxp_hdr *rxp_hdr;

        skb = dev_alloc_skb(c2_port->rx_buf_size);
        if (unlikely(!skb)) {
                pr_debug("%s: out of memory for receive\n",
                        c2_port->netdev->name);
                return -ENOMEM;
        }

        /* Zero out the rxp hdr in the sk_buff */
        memset(skb->data, 0, sizeof(*rxp_hdr));

        skb->dev = c2_port->netdev;

        maplen = c2_port->rx_buf_size;
        mapaddr =
            pci_map_single(c2dev->pcidev, skb->data, maplen,
                           PCI_DMA_FROMDEVICE);

        /* Set the sk_buff RXP_header to RXP_HRXD_READY */
        rxp_hdr = (struct c2_rxp_hdr *) skb->data;
        rxp_hdr->flags = RXP_HRXD_READY;

        __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
        __raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
                     elem->hw_desc + C2_RXP_LEN);
        __raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
        __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
                     elem->hw_desc + C2_RXP_FLAGS);

        elem->skb = skb;
        elem->mapaddr = mapaddr;
        elem->maplen = maplen;
        rx_desc->len = maplen;

        return 0;
}
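
/*
 * Note: the length programmed into C2_RXP_LEN excludes the RXP header;
 * the adapter fills in that header at the front of the mapped buffer,
 * and the RX interrupt handler later reads rxp_hdr->flags from the skb
 * data to detect completion.
 */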

/*
 * Allocate buffers for the Rx ring
 * For receive:  rx_ring.to_clean is next received frame
 */
static int c2_rx_fill(struct c2_port *c2_port)
{
        struct c2_ring *rx_ring = &c2_port->rx_ring;
        struct c2_element *elem;
        int ret = 0;

        elem = rx_ring->start;
        do {
                if (c2_rx_alloc(c2_port, elem)) {
                        ret = 1;
                        break;
                }
        } while ((elem = elem->next) != rx_ring->start);

        rx_ring->to_clean = rx_ring->start;
        return ret;
}

/* Free all buffers in RX ring, assumes receiver stopped */
static void c2_rx_clean(struct c2_port *c2_port)
{
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_ring *rx_ring = &c2_port->rx_ring;
        struct c2_element *elem;
        struct c2_rx_desc *rx_desc;

        elem = rx_ring->start;
        do {
                rx_desc = elem->ht_desc;
                rx_desc->len = 0;

                __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
                __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
                __raw_writew(0, elem->hw_desc + C2_RXP_LEN);
                __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
                             elem->hw_desc + C2_RXP_ADDR);
                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
                             elem->hw_desc + C2_RXP_FLAGS);

                if (elem->skb) {
                        pci_unmap_single(c2dev->pcidev, elem->mapaddr,
                                         elem->maplen, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(elem->skb);
                        elem->skb = NULL;
                }
        } while ((elem = elem->next) != rx_ring->start);
}

static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)
{
        struct c2_tx_desc *tx_desc = elem->ht_desc;

        tx_desc->len = 0;

        pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,
                         PCI_DMA_TODEVICE);

        if (elem->skb) {
                dev_kfree_skb_any(elem->skb);
                elem->skb = NULL;
        }

        return 0;
}

/* Free all buffers in TX ring, assumes transmitter stopped */
static void c2_tx_clean(struct c2_port *c2_port)
{
        struct c2_ring *tx_ring = &c2_port->tx_ring;
        struct c2_element *elem;
        struct c2_txp_desc txp_htxd;
        int retry;
        unsigned long flags;

        spin_lock_irqsave(&c2_port->tx_lock, flags);

        elem = tx_ring->start;

        do {
                retry = 0;
                do {
                        txp_htxd.flags =
                            readw(elem->hw_desc + C2_TXP_FLAGS);

                        if (txp_htxd.flags == TXP_HTXD_READY) {
                                retry = 1;
                                __raw_writew(0,
                                             elem->hw_desc + C2_TXP_LEN);
                                __raw_writeq(0,
                                             elem->hw_desc + C2_TXP_ADDR);
                                __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
                                             elem->hw_desc + C2_TXP_FLAGS);
                                c2_port->netdev->stats.tx_dropped++;
                                break;
                        } else {
                                __raw_writew(0,
                                             elem->hw_desc + C2_TXP_LEN);
                                __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
                                             elem->hw_desc + C2_TXP_ADDR);
                                __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
                                             elem->hw_desc + C2_TXP_FLAGS);
                        }

                        c2_tx_free(c2_port->c2dev, elem);

                } while ((elem = elem->next) != tx_ring->start);
        } while (retry);

        c2_port->tx_avail = c2_port->tx_ring.count - 1;
        c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;

        if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
                netif_wake_queue(c2_port->netdev);

        spin_unlock_irqrestore(&c2_port->tx_lock, flags);
}
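
/*
 * Note: a descriptor still flagged TXP_HTXD_READY above was posted but
 * never consumed by the adapter. It is forced to DONE and counted as a
 * drop, and the outer loop then rescans the entire ring until no READY
 * descriptors remain.
 */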

/*
 * Process transmit descriptors marked 'DONE' by the firmware,
 * freeing up their unneeded sk_buffs.
 */
static void c2_tx_interrupt(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_ring *tx_ring = &c2_port->tx_ring;
        struct c2_element *elem;
        struct c2_txp_desc txp_htxd;

        spin_lock(&c2_port->tx_lock);

        for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
             elem = elem->next) {
                txp_htxd.flags =
                    be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));

                if (txp_htxd.flags != TXP_HTXD_DONE)
                        break;

                if (netif_msg_tx_done(c2_port)) {
                        /* PCI reads are expensive in fast path */
                        txp_htxd.len =
                            be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
                        pr_debug("%s: tx done slot %3zu status 0x%x len "
                                "%5u bytes\n",
                                netdev->name, elem - tx_ring->start,
                                txp_htxd.flags, txp_htxd.len);
                }

                c2_tx_free(c2dev, elem);
                ++(c2_port->tx_avail);
        }

        tx_ring->to_clean = elem;

        if (netif_queue_stopped(netdev)
            && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
                netif_wake_queue(netdev);

        spin_unlock(&c2_port->tx_lock);
}
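
/*
 * Note: the queue is only woken once at least MAX_SKB_FRAGS + 2
 * descriptors are free, i.e. enough for a maximally fragmented skb
 * (one head fragment plus MAX_SKB_FRAGS page fragments) with one
 * descriptor to spare.
 */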

static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
{
        struct c2_rx_desc *rx_desc = elem->ht_desc;
        struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;

        if (rxp_hdr->status != RXP_HRXD_OK ||
            rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
                pr_debug("BAD RXP_HRXD\n");
                pr_debug("  rx_desc : %p\n", rx_desc);
                pr_debug("    index : %zu\n",
                        elem - c2_port->rx_ring.start);
                pr_debug("    len   : %u\n", rx_desc->len);
                pr_debug("  rxp_hdr : %p [PA %p]\n", rxp_hdr,
                        (void *) __pa((unsigned long) rxp_hdr));
                pr_debug("    flags : 0x%x\n", rxp_hdr->flags);
                pr_debug("    status: 0x%x\n", rxp_hdr->status);
                pr_debug("    len   : %u\n", rxp_hdr->len);
                pr_debug("    rsvd  : 0x%x\n", rxp_hdr->rsvd);
        }

        /* Setup the skb for reuse since we're dropping this pkt */
        elem->skb->data = elem->skb->head;
        skb_reset_tail_pointer(elem->skb);

        /* Zero out the rxp hdr in the sk_buff */
        memset(elem->skb->data, 0, sizeof(*rxp_hdr));

        /* Write the descriptor to the adapter's rx ring */
        __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
        __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
        __raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
                     elem->hw_desc + C2_RXP_LEN);
        __raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
                     elem->hw_desc + C2_RXP_ADDR);
        __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
                     elem->hw_desc + C2_RXP_FLAGS);

        pr_debug("packet dropped\n");
        c2_port->netdev->stats.rx_dropped++;
}

static void c2_rx_interrupt(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_ring *rx_ring = &c2_port->rx_ring;
        struct c2_element *elem;
        struct c2_rx_desc *rx_desc;
        struct c2_rxp_hdr *rxp_hdr;
        struct sk_buff *skb;
        dma_addr_t mapaddr;
        u32 maplen, buflen;
        unsigned long flags;

        spin_lock_irqsave(&c2dev->lock, flags);

        /* Begin where we left off */
        rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;

        for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
             elem = elem->next) {
                rx_desc = elem->ht_desc;
                mapaddr = elem->mapaddr;
                maplen = elem->maplen;
                skb = elem->skb;
                rxp_hdr = (struct c2_rxp_hdr *) skb->data;

                if (rxp_hdr->flags != RXP_HRXD_DONE)
                        break;
                buflen = rxp_hdr->len;

                /* Sanity check the RXP header */
                if (rxp_hdr->status != RXP_HRXD_OK ||
                    buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
                        c2_rx_error(c2_port, elem);
                        continue;
                }

                /*
                 * Allocate and map a new skb for replenishing the host
                 * RX desc
                 */
                if (c2_rx_alloc(c2_port, elem)) {
                        c2_rx_error(c2_port, elem);
                        continue;
                }

                /* Unmap the old skb */
                pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
                                 PCI_DMA_FROMDEVICE);

                prefetch(skb->data);
                /*
                 * Skip past the leading 8 bytes comprising the
                 * "struct c2_rxp_hdr", prepended by the adapter
                 * to the usual Ethernet header ("struct ethhdr"),
                 * to the start of the raw Ethernet packet.
                 *
                 * Fix up the various fields in the sk_buff before
                 * passing it up to netif_rx(). The transfer size
                 * (in bytes) specified by the adapter len field of
                 * the "struct c2_rxp_hdr" does NOT include the
                 * "sizeof(struct c2_rxp_hdr)".
                 */
                skb->data += sizeof(*rxp_hdr);
                skb_set_tail_pointer(skb, buflen);
                skb->len = buflen;
                skb->protocol = eth_type_trans(skb, netdev);

                netif_rx(skb);

                netdev->stats.rx_packets++;
                netdev->stats.rx_bytes += buflen;
        }

        /* Save where we left off */
        rx_ring->to_clean = elem;
        c2dev->cur_rx = elem - rx_ring->start;
        C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

        spin_unlock_irqrestore(&c2dev->lock, flags);
}
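
/*
 * Note: cur_rx (the index of the next descriptor to clean) is mirrored
 * to the adapter via C2_SET_CUR_RX(), and the scan stops one element
 * short of a full lap (elem->next != to_clean), so a single pass never
 * wraps onto its own starting point.
 */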

/*
 * Handle netisr0 TX & RX interrupts.
 */
static irqreturn_t c2_interrupt(int irq, void *dev_id)
{
        unsigned int netisr0, dmaisr;
        int handled = 0;
        struct c2_dev *c2dev = (struct c2_dev *) dev_id;

        /* Process CCILNET interrupts */
        netisr0 = readl(c2dev->regs + C2_NISR0);
        if (netisr0) {

                /*
                 * There is an issue with the firmware that always
                 * provides the status of RX for both TX & RX
                 * interrupts.  So process both queues here.
                 */
                c2_rx_interrupt(c2dev->netdev);
                c2_tx_interrupt(c2dev->netdev);

                /* Clear the interrupt */
                writel(netisr0, c2dev->regs + C2_NISR0);
                handled++;
        }

        /* Process RNIC interrupts */
        dmaisr = readl(c2dev->regs + C2_DISR);
        if (dmaisr) {
                writel(dmaisr, c2dev->regs + C2_DISR);
                c2_rnic_interrupt(c2dev);
                handled++;
        }

        if (handled) {
                return IRQ_HANDLED;
        } else {
                return IRQ_NONE;
        }
}

static int c2_up(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_element *elem;
        struct c2_rxp_hdr *rxp_hdr;
        struct in_device *in_dev;
        size_t rx_size, tx_size;
        int ret, i;
        unsigned int netimr0;

        if (netif_msg_ifup(c2_port))
                pr_debug("%s: enabling interface\n", netdev->name);

        /* Set the Rx buffer size based on MTU */
        c2_set_rxbufsize(c2_port);

        /* Allocate DMA'able memory for Tx/Rx host descriptor rings */
        rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
        tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);

        c2_port->mem_size = tx_size + rx_size;
        c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
                                            &c2_port->dma);
        if (c2_port->mem == NULL) {
                pr_debug("Unable to allocate memory for "
                        "host descriptor rings\n");
                return -ENOMEM;
        }

        memset(c2_port->mem, 0, c2_port->mem_size);

        /* Create the Rx host descriptor ring */
        if ((ret =
             c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
                              c2dev->mmio_rxp_ring))) {
                pr_debug("Unable to create RX ring\n");
                goto bail0;
        }

        /* Allocate Rx buffers for the host descriptor ring */
        if (c2_rx_fill(c2_port)) {
                pr_debug("Unable to fill RX ring\n");
                ret = -ENOMEM;
                goto bail1;
        }

        /* Create the Tx host descriptor ring */
        if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
                                    c2_port->dma + rx_size,
                                    c2dev->mmio_txp_ring))) {
                pr_debug("Unable to create TX ring\n");
                goto bail1;
        }

        /* Set the TX pointer to where we left off */
        c2_port->tx_avail = c2_port->tx_ring.count - 1;
        c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
            c2_port->tx_ring.start + c2dev->cur_tx;

        /* missing: Initialize MAC */

        BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);

        /* Reset the adapter, ensures the driver is in sync with the RXP */
        c2_reset(c2_port);

        /* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
        for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
             i++, elem++) {
                rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
                rxp_hdr->flags = 0;
                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
                             elem->hw_desc + C2_RXP_FLAGS);
        }

        /* Enable network packets */
        netif_start_queue(netdev);

        /* Enable IRQ */
        writel(0, c2dev->regs + C2_IDIS);
        netimr0 = readl(c2dev->regs + C2_NIMR0);
        netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
        writel(netimr0, c2dev->regs + C2_NIMR0);

        /* Tell the stack to ignore arp requests for ipaddrs bound to
         * other interfaces.  This is needed to prevent the host stack
         * from responding to arp requests to the ipaddr bound on the
         * rdma interface.
         */
        in_dev = in_dev_get(netdev);
        IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1);
        in_dev_put(in_dev);

        return 0;

      bail1:
        c2_rx_clean(c2_port);
        kfree(c2_port->rx_ring.start);

      bail0:
        pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
                            c2_port->dma);

        return ret;
}

static int c2_down(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;

        if (netif_msg_ifdown(c2_port))
                pr_debug("%s: disabling interface\n",
                        netdev->name);

        /* Wait for all the queued packets to get sent */
        c2_tx_interrupt(netdev);

        /* Disable network packets */
        netif_stop_queue(netdev);

        /* Disable IRQs by clearing the interrupt mask */
        writel(1, c2dev->regs + C2_IDIS);
        writel(0, c2dev->regs + C2_NIMR0);

        /* missing: Stop transmitter */

        /* missing: Stop receiver */

        /* Reset the adapter, ensures the driver is in sync with the RXP */
        c2_reset(c2_port);

        /* missing: Turn off LEDs here */

        /* Free all buffers in the host descriptor rings */
        c2_tx_clean(c2_port);
        c2_rx_clean(c2_port);

        /* Free the host descriptor rings */
        kfree(c2_port->rx_ring.start);
        kfree(c2_port->tx_ring.start);
        pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
                            c2_port->dma);

        return 0;
}

static void c2_reset(struct c2_port *c2_port)
{
        struct c2_dev *c2dev = c2_port->c2dev;
        unsigned int cur_rx = c2dev->cur_rx;

        /* Tell the hardware to quiesce */
        C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI);

        /*
         * The hardware will reset the C2_PCI_HRX_QUI bit once
         * the RXP is quiesced.  Wait 2 seconds for this.
         */
        ssleep(2);

        cur_rx = C2_GET_CUR_RX(c2dev);

        if (cur_rx & C2_PCI_HRX_QUI)
                pr_debug("c2_reset: failed to quiesce the hardware!\n");

        cur_rx &= ~C2_PCI_HRX_QUI;

        c2dev->cur_rx = cur_rx;

        pr_debug("Current RX: %u\n", c2dev->cur_rx);
}

static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_ring *tx_ring = &c2_port->tx_ring;
        struct c2_element *elem;
        dma_addr_t mapaddr;
        u32 maplen;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&c2_port->tx_lock, flags);

        if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
                netif_stop_queue(netdev);
                spin_unlock_irqrestore(&c2_port->tx_lock, flags);

                pr_debug("%s: Tx ring full when queue awake!\n",
                        netdev->name);
                return NETDEV_TX_BUSY;
        }

        maplen = skb_headlen(skb);
        mapaddr =
            pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);

        elem = tx_ring->to_use;
        elem->skb = skb;
        elem->mapaddr = mapaddr;
        elem->maplen = maplen;

        /* Tell HW to xmit */
        __raw_writeq((__force u64) cpu_to_be64(mapaddr),
                     elem->hw_desc + C2_TXP_ADDR);
        __raw_writew((__force u16) cpu_to_be16(maplen),
                     elem->hw_desc + C2_TXP_LEN);
        __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
                     elem->hw_desc + C2_TXP_FLAGS);

        netdev->stats.tx_packets++;
        netdev->stats.tx_bytes += maplen;

        /* Loop thru additional data fragments and queue them */
        if (skb_shinfo(skb)->nr_frags) {
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        maplen = skb_frag_size(frag);
                        mapaddr = skb_frag_dma_map(&c2dev->pcidev->dev, frag,
                                                   0, maplen, DMA_TO_DEVICE);
                        elem = elem->next;
                        elem->skb = NULL;
                        elem->mapaddr = mapaddr;
                        elem->maplen = maplen;

                        /* Tell HW to xmit */
                        __raw_writeq((__force u64) cpu_to_be64(mapaddr),
                                     elem->hw_desc + C2_TXP_ADDR);
                        __raw_writew((__force u16) cpu_to_be16(maplen),
                                     elem->hw_desc + C2_TXP_LEN);
                        __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
                                     elem->hw_desc + C2_TXP_FLAGS);

                        netdev->stats.tx_packets++;
                        netdev->stats.tx_bytes += maplen;
                }
        }

        tx_ring->to_use = elem->next;
        c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);

        if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
                netif_stop_queue(netdev);
                if (netif_msg_tx_queued(c2_port))
                        pr_debug("%s: transmit queue full\n",
                                netdev->name);
        }

        spin_unlock_irqrestore(&c2_port->tx_lock, flags);

        netdev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
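
/*
 * Note: only the head element records the skb pointer; fragment
 * elements leave skb NULL, so when the descriptors are reaped,
 * c2_tx_free() unmaps every fragment but frees the skb exactly once.
 */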

static void c2_tx_timeout(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);

        if (netif_msg_timer(c2_port))
                pr_debug("%s: tx timeout\n", netdev->name);

        c2_tx_clean(c2_port);
}

static int c2_change_mtu(struct net_device *netdev, int new_mtu)
{
        int ret = 0;

        if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
                return -EINVAL;

        netdev->mtu = new_mtu;

        if (netif_running(netdev)) {
                c2_down(netdev);

                c2_up(netdev);
        }

        return ret;
}
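
/*
 * Note: an MTU change bounces the interface because the RX buffer size
 * is derived from the MTU in c2_set_rxbufsize(), which only runs from
 * c2_up(); the rings must be torn down and refilled with buffers of
 * the new size.
 */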

static const struct net_device_ops c2_netdev = {
        .ndo_open               = c2_up,
        .ndo_stop               = c2_down,
        .ndo_start_xmit         = c2_xmit_frame,
        .ndo_tx_timeout         = c2_tx_timeout,
        .ndo_change_mtu         = c2_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
};

/* Initialize network device */
static struct net_device *c2_devinit(struct c2_dev *c2dev,
                                     void __iomem * mmio_addr)
{
        struct c2_port *c2_port = NULL;
        struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));

        if (!netdev) {
                pr_debug("c2_port etherdev alloc failed");
                return NULL;
        }

        SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);

        netdev->netdev_ops = &c2_netdev;
        netdev->watchdog_timeo = C2_TX_TIMEOUT;
        netdev->irq = c2dev->pcidev->irq;

        c2_port = netdev_priv(netdev);
        c2_port->netdev = netdev;
        c2_port->c2dev = c2dev;
        c2_port->msg_enable = netif_msg_init(debug, default_msg);
        c2_port->tx_ring.count = C2_NUM_TX_DESC;
        c2_port->rx_ring.count = C2_NUM_RX_DESC;

        spin_lock_init(&c2_port->tx_lock);

        /* Copy our 48-bit ethernet hardware address */
        memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6);

        /* Validate the MAC address */
        if (!is_valid_ether_addr(netdev->dev_addr)) {
                pr_debug("Invalid MAC Address\n");
                c2_print_macaddr(netdev);
                free_netdev(netdev);
                return NULL;
        }

        c2dev->netdev = netdev;

        return netdev;
}

static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
{
        int ret = 0, i;
        unsigned long reg0_start, reg0_flags, reg0_len;
        unsigned long reg2_start, reg2_flags, reg2_len;
        unsigned long reg4_start, reg4_flags, reg4_len;
        unsigned kva_map_size;
        struct net_device *netdev = NULL;
        struct c2_dev *c2dev = NULL;
        void __iomem *mmio_regs = NULL;

        printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
                DRV_VERSION);

        /* Enable PCI device */
        ret = pci_enable_device(pcidev);
        if (ret) {
                printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
                        pci_name(pcidev));
                goto bail0;
        }

        reg0_start = pci_resource_start(pcidev, BAR_0);
        reg0_len = pci_resource_len(pcidev, BAR_0);
        reg0_flags = pci_resource_flags(pcidev, BAR_0);

        reg2_start = pci_resource_start(pcidev, BAR_2);
        reg2_len = pci_resource_len(pcidev, BAR_2);
        reg2_flags = pci_resource_flags(pcidev, BAR_2);

        reg4_start = pci_resource_start(pcidev, BAR_4);
        reg4_len = pci_resource_len(pcidev, BAR_4);
        reg4_flags = pci_resource_flags(pcidev, BAR_4);

        pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
        pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
        pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);

        /* Make sure PCI base addr are MMIO */
        if (!(reg0_flags & IORESOURCE_MEM) ||
            !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
                printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
                ret = -ENODEV;
                goto bail1;
        }

        /* Check for weird/broken PCI region reporting */
        if ((reg0_len < C2_REG0_SIZE) ||
            (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) {
                printk(KERN_ERR PFX "Invalid PCI region sizes\n");
                ret = -ENODEV;
                goto bail1;
        }

        /* Reserve PCI I/O and memory resources */
        ret = pci_request_regions(pcidev, DRV_NAME);
        if (ret) {
                printk(KERN_ERR PFX "%s: Unable to request regions\n",
                        pci_name(pcidev));
                goto bail1;
        }

        if (sizeof(dma_addr_t) > 4) {
                ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
                if (ret < 0) {
                        printk(KERN_ERR PFX "64b DMA configuration failed\n");
                        goto bail2;
                }
        } else {
                ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
                if (ret < 0) {
                        printk(KERN_ERR PFX "32b DMA configuration failed\n");
                        goto bail2;
                }
        }

        /* Enables bus-mastering on the device */
        pci_set_master(pcidev);

        /* Remap the adapter PCI registers in BAR4 */
        mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
                                    sizeof(struct c2_adapter_pci_regs));
        if (!mmio_regs) {
                printk(KERN_ERR PFX
                        "Unable to remap adapter PCI registers in BAR4\n");
                ret = -EIO;
                goto bail2;
        }

        /* Validate PCI regs magic */
        for (i = 0; i < sizeof(c2_magic); i++) {
                if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
                        printk(KERN_ERR PFX "Downlevel Firmware boot loader "
                                "[%d/%zd: got 0x%x, exp 0x%x]. Use the cc_flash "
                               "utility to update your boot loader\n",
                                i + 1, sizeof(c2_magic),
                                readb(mmio_regs + C2_REGS_MAGIC + i),
                                c2_magic[i]);
                        printk(KERN_ERR PFX "Adapter not claimed\n");
                        iounmap(mmio_regs);
                        ret = -EIO;
                        goto bail2;
                }
        }

        /* Validate the adapter version */
        if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
                printk(KERN_ERR PFX "Version mismatch "
                        "[fw=%u, c2=%u], Adapter not claimed\n",
                        be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
                        C2_VERSION);
                ret = -EINVAL;
                iounmap(mmio_regs);
                goto bail2;
        }

        /* Validate the adapter IVN */
        if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
                printk(KERN_ERR PFX "Downlevel firmware level. You should be using "
                       "the OpenIB device support kit. "
                       "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
                       C2_IVN);
                ret = -EINVAL;
                iounmap(mmio_regs);
                goto bail2;
        }

        /* Allocate hardware structure */
        c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
        if (!c2dev) {
                printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
                        pci_name(pcidev));
                ret = -ENOMEM;
                iounmap(mmio_regs);
                goto bail2;
        }

        memset(c2dev, 0, sizeof(*c2dev));
        spin_lock_init(&c2dev->lock);
        c2dev->pcidev = pcidev;
        c2dev->cur_tx = 0;

        /* Get the last RX index */
        c2dev->cur_rx =
            (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
             0xffffc000) / sizeof(struct c2_rxp_desc);

        /* Request an interrupt line for the driver */
        ret = request_irq(pcidev->irq, c2_interrupt, IRQF_SHARED, DRV_NAME, c2dev);
        if (ret) {
                printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
                        pci_name(pcidev), pcidev->irq);
                iounmap(mmio_regs);
                goto bail3;
        }

        /* Set driver specific data */
        pci_set_drvdata(pcidev, c2dev);

        /* Initialize network device */
        if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
                ret = -ENOMEM;
                iounmap(mmio_regs);
                goto bail4;
        }

        /* Save off the actual size prior to unmapping mmio_regs */
        kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));

        /* Unmap the adapter PCI registers in BAR4 */
        iounmap(mmio_regs);

        /* Register network device */
        ret = register_netdev(netdev);
        if (ret) {
                printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
                        ret);
                goto bail5;
        }

        /* Disable network packets */
        netif_stop_queue(netdev);

        /* Remap the adapter HRXDQ PA space to kernel VA space */
        c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
                                               C2_RXP_HRXDQ_SIZE);
        if (!c2dev->mmio_rxp_ring) {
                printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
                ret = -EIO;
                goto bail6;
        }

        /* Remap the adapter HTXDQ PA space to kernel VA space */
        c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
                                               C2_TXP_HTXDQ_SIZE);
        if (!c2dev->mmio_txp_ring) {
                printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
                ret = -EIO;
                goto bail7;
        }

        /* Save off the current RX index in the last 4 bytes of the TXP Ring */
        C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

        /* Remap the PCI registers in adapter BAR0 to kernel VA space */
        c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
        if (!c2dev->regs) {
                printk(KERN_ERR PFX "Unable to remap BAR0\n");
                ret = -EIO;
                goto bail8;
        }

        /* Remap the PCI registers in adapter BAR4 to kernel VA space */
        c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
        c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
                                     kva_map_size);
        if (!c2dev->kva) {
                printk(KERN_ERR PFX "Unable to remap BAR4\n");
                ret = -EIO;
                goto bail9;
        }

        /* Print out the MAC address */
        c2_print_macaddr(netdev);

        ret = c2_rnic_init(c2dev);
        if (ret) {
                printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
                goto bail10;
        }

        ret = c2_register_device(c2dev);
        if (ret)
                goto bail10;

        return 0;

 bail10:
        iounmap(c2dev->kva);

 bail9:
        iounmap(c2dev->regs);

 bail8:
        iounmap(c2dev->mmio_txp_ring);

 bail7:
        iounmap(c2dev->mmio_rxp_ring);

 bail6:
        unregister_netdev(netdev);

 bail5:
        free_netdev(netdev);

 bail4:
        free_irq(pcidev->irq, c2dev);

 bail3:
        ib_dealloc_device(&c2dev->ibdev);

 bail2:
        pci_release_regions(pcidev);

 bail1:
        pci_disable_device(pcidev);

 bail0:
        return ret;
}
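
/*
 * Note: cur_rx is recovered from the C2_REGS_HRX_CUR register; the
 * 0xffffc000 subtracted above is presumably the adapter-side base
 * address of the HRXDQ, turning the register value into a descriptor
 * index.
 */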

static void c2_remove(struct pci_dev *pcidev)
{
        struct c2_dev *c2dev = pci_get_drvdata(pcidev);
        struct net_device *netdev = c2dev->netdev;

        /* Unregister with OpenIB */
        c2_unregister_device(c2dev);

        /* Clean up the RNIC resources */
        c2_rnic_term(c2dev);

        /* Remove network device from the kernel */
        unregister_netdev(netdev);

        /* Free network device */
        free_netdev(netdev);

        /* Free the interrupt line */
        free_irq(pcidev->irq, c2dev);

        /* missing: Turn LEDs off here */

        /* Unmap adapter PA space */
        iounmap(c2dev->kva);
        iounmap(c2dev->regs);
        iounmap(c2dev->mmio_txp_ring);
        iounmap(c2dev->mmio_rxp_ring);

        /* Free the hardware structure */
        ib_dealloc_device(&c2dev->ibdev);

        /* Release reserved PCI I/O and memory resources */
        pci_release_regions(pcidev);

        /* Disable PCI device */
        pci_disable_device(pcidev);

        /* Clear driver specific data */
        pci_set_drvdata(pcidev, NULL);
}

static struct pci_driver c2_pci_driver = {
        .name = DRV_NAME,
        .id_table = c2_pci_table,
        .probe = c2_probe,
        .remove = c2_remove,
};

module_pci_driver(c2_pci_driver);
