
Linux/drivers/net/ethernet/ibm/ehea/ehea_main.c

  1 /*
  2  *  linux/drivers/net/ethernet/ibm/ehea/ehea_main.c
  3  *
  4  *  eHEA ethernet device driver for IBM eServer System p
  5  *
  6  *  (C) Copyright IBM Corp. 2006
  7  *
  8  *  Authors:
  9  *       Christoph Raisch <raisch@de.ibm.com>
 10  *       Jan-Bernd Themann <themann@de.ibm.com>
 11  *       Thomas Klein <tklein@de.ibm.com>
 12  *
 13  *
 14  * This program is free software; you can redistribute it and/or modify
 15  * it under the terms of the GNU General Public License as published by
 16  * the Free Software Foundation; either version 2, or (at your option)
 17  * any later version.
 18  *
 19  * This program is distributed in the hope that it will be useful,
 20  * but WITHOUT ANY WARRANTY; without even the implied warranty of
 21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 22  * GNU General Public License for more details.
 23  *
 24  * You should have received a copy of the GNU General Public License
 25  * along with this program; if not, write to the Free Software
 26  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 27  */
 28 
 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 30 
 31 #include <linux/in.h>
 32 #include <linux/ip.h>
 33 #include <linux/tcp.h>
 34 #include <linux/udp.h>
 35 #include <linux/if.h>
 36 #include <linux/list.h>
 37 #include <linux/slab.h>
 38 #include <linux/if_ether.h>
 39 #include <linux/notifier.h>
 40 #include <linux/reboot.h>
 41 #include <linux/memory.h>
 42 #include <asm/kexec.h>
 43 #include <linux/mutex.h>
 44 #include <linux/prefetch.h>
 45 
 46 #include <net/ip.h>
 47 
 48 #include "ehea.h"
 49 #include "ehea_qmr.h"
 50 #include "ehea_phyp.h"
 51 
 52 
 53 MODULE_LICENSE("GPL");
 54 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
 55 MODULE_DESCRIPTION("IBM eServer HEA Driver");
 56 MODULE_VERSION(DRV_VERSION);
 57 
 58 
 59 static int msg_level = -1;
 60 static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
 61 static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
 62 static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
 63 static int sq_entries = EHEA_DEF_ENTRIES_SQ;
 64 static int use_mcs = 1;
 65 static int prop_carrier_state;
 66 
 67 module_param(msg_level, int, 0);
 68 module_param(rq1_entries, int, 0);
 69 module_param(rq2_entries, int, 0);
 70 module_param(rq3_entries, int, 0);
 71 module_param(sq_entries, int, 0);
 72 module_param(prop_carrier_state, int, 0);
 73 module_param(use_mcs, int, 0);
 74 
 75 MODULE_PARM_DESC(msg_level, "Message level / debug bitmask. Default = -1");
 76 MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
 77                  "port to stack. 1:yes, 0:no. Default = 0");
 78 MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
 79                  "[2^x - 1], x = [7..14]. Default = "
 80                  __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
 81 MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
 82                  "[2^x - 1], x = [7..14]. Default = "
 83                  __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
 84 MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
 85                  "[2^x - 1], x = [7..14]. Default = "
 86                  __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
 87 MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
 88                  "[2^x - 1], x = [7..14]. Default = "
 89                  __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
 90 MODULE_PARM_DESC(use_mcs, "Multiple receive queues, 1: enable, 0: disable. "
 91                  "Default = 1");
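    /*
     * Example (hypothetical values): load the driver with the largest
     * allowed RQ1 and multiple receive queues enabled:
     *
     *   modprobe ehea rq1_entries=16383 use_mcs=1
     */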
 92 
 93 static int port_name_cnt;
 94 static LIST_HEAD(adapter_list);
 95 static unsigned long ehea_driver_flags;
 96 static DEFINE_MUTEX(dlpar_mem_lock);
 97 static struct ehea_fw_handle_array ehea_fw_handles;
 98 static struct ehea_bcmc_reg_array ehea_bcmc_regs;
 99 
100 
101 static int ehea_probe_adapter(struct platform_device *dev);
102 
103 static int ehea_remove(struct platform_device *dev);
104 
105 static const struct of_device_id ehea_module_device_table[] = {
106         {
107                 .name = "lhea",
108                 .compatible = "IBM,lhea",
109         },
110         {
111                 .type = "network",
112                 .compatible = "IBM,lhea-ethernet",
113         },
114         {},
115 };
116 MODULE_DEVICE_TABLE(of, ehea_module_device_table);
117 
118 static const struct of_device_id ehea_device_table[] = {
119         {
120                 .name = "lhea",
121                 .compatible = "IBM,lhea",
122         },
123         {},
124 };
125 
126 static struct platform_driver ehea_driver = {
127         .driver = {
128                 .name = "ehea",
129                 .owner = THIS_MODULE,
130                 .of_match_table = ehea_device_table,
131         },
132         .probe = ehea_probe_adapter,
133         .remove = ehea_remove,
134 };
135 
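    /*
     * ehea_dump - hex-dump a buffer as rows of two 64-bit words for
     * debugging; used below for CQEs and hypervisor control blocks.
     * Note: len is effectively rounded up to a multiple of 16 bytes.
     */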
136 void ehea_dump(void *adr, int len, char *msg)
137 {
138         int x;
139         unsigned char *deb = adr;
140         for (x = 0; x < len; x += 16) {
141                 pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
142                         msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
143                 deb += 16;
144         }
145 }
146 
147 static void ehea_schedule_port_reset(struct ehea_port *port)
148 {
149         if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
150                 schedule_work(&port->reset_task);
151 }
152 
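    /*
     * ehea_update_firmware_handles - rebuild the global snapshot of all
     * firmware handles (QPs, CQs, EQs and MRs) owned by registered
     * adapters and their active ports. Keeping the snapshot in
     * ehea_fw_handles lets the crash/kexec cleanup path free every
     * firmware resource without walking the driver's data structures.
     */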
153 static void ehea_update_firmware_handles(void)
154 {
155         struct ehea_fw_handle_entry *arr = NULL;
156         struct ehea_adapter *adapter;
157         int num_adapters = 0;
158         int num_ports = 0;
159         int num_portres = 0;
160         int i = 0;
161         int num_fw_handles, k, l;
162 
163         /* Determine number of handles */
164         mutex_lock(&ehea_fw_handles.lock);
165 
166         list_for_each_entry(adapter, &adapter_list, list) {
167                 num_adapters++;
168 
169                 for (k = 0; k < EHEA_MAX_PORTS; k++) {
170                         struct ehea_port *port = adapter->port[k];
171 
172                         if (!port || (port->state != EHEA_PORT_UP))
173                                 continue;
174 
175                         num_ports++;
176                         num_portres += port->num_def_qps;
177                 }
178         }
179 
180         num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
181                          num_ports * EHEA_NUM_PORT_FW_HANDLES +
182                          num_portres * EHEA_NUM_PORTRES_FW_HANDLES;
183 
184         if (num_fw_handles) {
185                 arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
186                 if (!arr)
187                         goto out;  /* Keep the existing array */
188         } else
189                 goto out_update;
190 
191         list_for_each_entry(adapter, &adapter_list, list) {
192                 if (num_adapters == 0)
193                         break;
194 
195                 for (k = 0; k < EHEA_MAX_PORTS; k++) {
196                         struct ehea_port *port = adapter->port[k];
197 
198                         if (!port || (port->state != EHEA_PORT_UP) ||
199                             (num_ports == 0))
200                                 continue;
201 
202                         for (l = 0; l < port->num_def_qps; l++) {
203                                 struct ehea_port_res *pr = &port->port_res[l];
204 
205                                 arr[i].adh = adapter->handle;
206                                 arr[i++].fwh = pr->qp->fw_handle;
207                                 arr[i].adh = adapter->handle;
208                                 arr[i++].fwh = pr->send_cq->fw_handle;
209                                 arr[i].adh = adapter->handle;
210                                 arr[i++].fwh = pr->recv_cq->fw_handle;
211                                 arr[i].adh = adapter->handle;
212                                 arr[i++].fwh = pr->eq->fw_handle;
213                                 arr[i].adh = adapter->handle;
214                                 arr[i++].fwh = pr->send_mr.handle;
215                                 arr[i].adh = adapter->handle;
216                                 arr[i++].fwh = pr->recv_mr.handle;
217                         }
218                         arr[i].adh = adapter->handle;
219                         arr[i++].fwh = port->qp_eq->fw_handle;
220                         num_ports--;
221                 }
222 
223                 arr[i].adh = adapter->handle;
224                 arr[i++].fwh = adapter->neq->fw_handle;
225 
226                 if (adapter->mr.handle) {
227                         arr[i].adh = adapter->handle;
228                         arr[i++].fwh = adapter->mr.handle;
229                 }
230                 num_adapters--;
231         }
232 
233 out_update:
234         kfree(ehea_fw_handles.arr);
235         ehea_fw_handles.arr = arr;
236         ehea_fw_handles.num_entries = i;
237 out:
238         mutex_unlock(&ehea_fw_handles.lock);
239 }
240 
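    /*
     * ehea_update_bcmc_registrations - rebuild the snapshot of
     * broadcast/multicast (BCMC) registrations of all active ports:
     * two broadcast entries per port (untagged and all-VLAN) plus two
     * entries per multicast address. Runs under a spinlock, hence the
     * GFP_ATOMIC allocation.
     */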
241 static void ehea_update_bcmc_registrations(void)
242 {
243         unsigned long flags;
244         struct ehea_bcmc_reg_entry *arr = NULL;
245         struct ehea_adapter *adapter;
246         struct ehea_mc_list *mc_entry;
247         int num_registrations = 0;
248         int i = 0;
249         int k;
250 
251         spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);
252 
253         /* Determine number of registrations */
254         list_for_each_entry(adapter, &adapter_list, list)
255                 for (k = 0; k < EHEA_MAX_PORTS; k++) {
256                         struct ehea_port *port = adapter->port[k];
257 
258                         if (!port || (port->state != EHEA_PORT_UP))
259                                 continue;
260 
261                         num_registrations += 2; /* Broadcast registrations */
262 
 263                         list_for_each_entry(mc_entry, &port->mc_list->list, list)
264                                 num_registrations += 2;
265                 }
266 
267         if (num_registrations) {
268                 arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
269                 if (!arr)
270                         goto out;  /* Keep the existing array */
271         } else
272                 goto out_update;
273 
274         list_for_each_entry(adapter, &adapter_list, list) {
275                 for (k = 0; k < EHEA_MAX_PORTS; k++) {
276                         struct ehea_port *port = adapter->port[k];
277 
278                         if (!port || (port->state != EHEA_PORT_UP))
279                                 continue;
280 
281                         if (num_registrations == 0)
282                                 goto out_update;
283 
284                         arr[i].adh = adapter->handle;
285                         arr[i].port_id = port->logical_port_id;
286                         arr[i].reg_type = EHEA_BCMC_BROADCAST |
287                                           EHEA_BCMC_UNTAGGED;
288                         arr[i++].macaddr = port->mac_addr;
289 
290                         arr[i].adh = adapter->handle;
291                         arr[i].port_id = port->logical_port_id;
292                         arr[i].reg_type = EHEA_BCMC_BROADCAST |
293                                           EHEA_BCMC_VLANID_ALL;
294                         arr[i++].macaddr = port->mac_addr;
295                         num_registrations -= 2;
296 
297                         list_for_each_entry(mc_entry,
298                                             &port->mc_list->list, list) {
299                                 if (num_registrations == 0)
300                                         goto out_update;
301 
302                                 arr[i].adh = adapter->handle;
303                                 arr[i].port_id = port->logical_port_id;
304                                 arr[i].reg_type = EHEA_BCMC_MULTICAST |
305                                                   EHEA_BCMC_UNTAGGED;
306                                 if (mc_entry->macaddr == 0)
307                                         arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
308                                 arr[i++].macaddr = mc_entry->macaddr;
309 
310                                 arr[i].adh = adapter->handle;
311                                 arr[i].port_id = port->logical_port_id;
312                                 arr[i].reg_type = EHEA_BCMC_MULTICAST |
313                                                   EHEA_BCMC_VLANID_ALL;
314                                 if (mc_entry->macaddr == 0)
315                                         arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
316                                 arr[i++].macaddr = mc_entry->macaddr;
317                                 num_registrations -= 2;
318                         }
319                 }
320         }
321 
322 out_update:
323         kfree(ehea_bcmc_regs.arr);
324         ehea_bcmc_regs.arr = arr;
325         ehea_bcmc_regs.num_entries = i;
326 out:
327         spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
328 }
329 
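    /*
     * ehea_get_stats64 - sum the per-queue packet/byte counters into the
     * rtnl stats structure; multicast and rx_errors come from the port
     * stats cached by ehea_update_stats() below.
     */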
330 static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
331                                         struct rtnl_link_stats64 *stats)
332 {
333         struct ehea_port *port = netdev_priv(dev);
334         u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
335         int i;
336 
337         for (i = 0; i < port->num_def_qps; i++) {
338                 rx_packets += port->port_res[i].rx_packets;
339                 rx_bytes   += port->port_res[i].rx_bytes;
340         }
341 
342         for (i = 0; i < port->num_def_qps; i++) {
343                 tx_packets += port->port_res[i].tx_packets;
344                 tx_bytes   += port->port_res[i].tx_bytes;
345         }
346 
347         stats->tx_packets = tx_packets;
348         stats->rx_bytes = rx_bytes;
349         stats->tx_bytes = tx_bytes;
350         stats->rx_packets = rx_packets;
351 
352         stats->multicast = port->stats.multicast;
353         stats->rx_errors = port->stats.rx_errors;
354         return stats;
355 }
356 
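    /*
     * ehea_update_stats - delayed work that queries H_PORT_CB2 for
     * multicast/error counters, caches them in port->stats and
     * reschedules itself once per second.
     */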
357 static void ehea_update_stats(struct work_struct *work)
358 {
359         struct ehea_port *port =
360                 container_of(work, struct ehea_port, stats_work.work);
361         struct net_device *dev = port->netdev;
362         struct rtnl_link_stats64 *stats = &port->stats;
363         struct hcp_ehea_port_cb2 *cb2;
364         u64 hret;
365 
366         cb2 = (void *)get_zeroed_page(GFP_KERNEL);
367         if (!cb2) {
368                 netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
369                 goto resched;
370         }
371 
372         hret = ehea_h_query_ehea_port(port->adapter->handle,
373                                       port->logical_port_id,
374                                       H_PORT_CB2, H_PORT_CB2_ALL, cb2);
375         if (hret != H_SUCCESS) {
376                 netdev_err(dev, "query_ehea_port failed\n");
377                 goto out_herr;
378         }
379 
380         if (netif_msg_hw(port))
381                 ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
382 
383         stats->multicast = cb2->rxmcp;
384         stats->rx_errors = cb2->rxuerr;
385 
386 out_herr:
387         free_page((unsigned long)cb2);
388 resched:
389         schedule_delayed_work(&port->stats_work,
390                               round_jiffies_relative(msecs_to_jiffies(1000)));
391 }
392 
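    /*
     * ehea_refill_rq1 - replenish receive queue 1, walking the ring
     * backwards from 'index'. Allocation failures are remembered in
     * os_skbs (outstanding skbs) and retried on the next refill.
     */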
393 static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
394 {
395         struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
396         struct net_device *dev = pr->port->netdev;
397         int max_index_mask = pr->rq1_skba.len - 1;
398         int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
399         int adder = 0;
400         int i;
401 
402         pr->rq1_skba.os_skbs = 0;
403 
404         if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
405                 if (nr_of_wqes > 0)
406                         pr->rq1_skba.index = index;
407                 pr->rq1_skba.os_skbs = fill_wqes;
408                 return;
409         }
410 
411         for (i = 0; i < fill_wqes; i++) {
412                 if (!skb_arr_rq1[index]) {
413                         skb_arr_rq1[index] = netdev_alloc_skb(dev,
414                                                               EHEA_L_PKT_SIZE);
415                         if (!skb_arr_rq1[index]) {
416                                 pr->rq1_skba.os_skbs = fill_wqes - i;
417                                 break;
418                         }
419                 }
420                 index--;
421                 index &= max_index_mask;
422                 adder++;
423         }
424 
425         if (adder == 0)
426                 return;
427 
428         /* Ring doorbell */
429         ehea_update_rq1a(pr->qp, adder);
430 }
431 
432 static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
433 {
434         struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
435         struct net_device *dev = pr->port->netdev;
436         int i;
437 
438         if (nr_rq1a > pr->rq1_skba.len) {
439                 netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
440                 return;
441         }
442 
443         for (i = 0; i < nr_rq1a; i++) {
444                 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
445                 if (!skb_arr_rq1[i])
446                         break;
447         }
448         /* Ring doorbell */
449         ehea_update_rq1a(pr->qp, i - 1);
450 }
451 
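    /*
     * ehea_refill_rq_def - common refill path for RQ2/RQ3: allocate one
     * skb per missing WQE, map it for the HEA, build the receive WQE
     * and ring the doorbell once for the whole batch.
     */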
452 static int ehea_refill_rq_def(struct ehea_port_res *pr,
453                               struct ehea_q_skb_arr *q_skba, int rq_nr,
454                               int num_wqes, int wqe_type, int packet_size)
455 {
456         struct net_device *dev = pr->port->netdev;
457         struct ehea_qp *qp = pr->qp;
458         struct sk_buff **skb_arr = q_skba->arr;
459         struct ehea_rwqe *rwqe;
460         int i, index, max_index_mask, fill_wqes;
461         int adder = 0;
462         int ret = 0;
463 
464         fill_wqes = q_skba->os_skbs + num_wqes;
465         q_skba->os_skbs = 0;
466 
467         if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
468                 q_skba->os_skbs = fill_wqes;
469                 return ret;
470         }
471 
472         index = q_skba->index;
473         max_index_mask = q_skba->len - 1;
474         for (i = 0; i < fill_wqes; i++) {
475                 u64 tmp_addr;
476                 struct sk_buff *skb;
477 
478                 skb = netdev_alloc_skb_ip_align(dev, packet_size);
479                 if (!skb) {
480                         q_skba->os_skbs = fill_wqes - i;
481                         if (q_skba->os_skbs == q_skba->len - 2) {
482                                 netdev_info(pr->port->netdev,
483                                             "rq%i ran dry - no mem for skb\n",
484                                             rq_nr);
485                                 ret = -ENOMEM;
486                         }
487                         break;
488                 }
489 
490                 skb_arr[index] = skb;
491                 tmp_addr = ehea_map_vaddr(skb->data);
492                 if (tmp_addr == -1) {
493                         dev_kfree_skb(skb);
494                         q_skba->os_skbs = fill_wqes - i;
495                         ret = 0;
496                         break;
497                 }
498 
499                 rwqe = ehea_get_next_rwqe(qp, rq_nr);
500                 rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
501                             | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
502                 rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
503                 rwqe->sg_list[0].vaddr = tmp_addr;
504                 rwqe->sg_list[0].len = packet_size;
505                 rwqe->data_segments = 1;
506 
507                 index++;
508                 index &= max_index_mask;
509                 adder++;
510         }
511 
512         q_skba->index = index;
513         if (adder == 0)
514                 goto out;
515 
516         /* Ring doorbell */
517         iosync();
518         if (rq_nr == 2)
519                 ehea_update_rq2a(pr->qp, adder);
520         else
521                 ehea_update_rq3a(pr->qp, adder);
522 out:
523         return ret;
524 }
525 
526 
527 static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
528 {
529         return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
530                                   nr_of_wqes, EHEA_RWQE2_TYPE,
531                                   EHEA_RQ2_PKT_SIZE);
532 }
533 
534 
535 static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
536 {
537         return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
538                                   nr_of_wqes, EHEA_RWQE3_TYPE,
539                                   EHEA_MAX_PACKET_SIZE);
540 }
541 
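    /*
     * ehea_check_cqe - extract the RQ number from the CQE and return
     * nonzero on receive errors. A TCP checksum error with zero header
     * length is not treated as an error.
     */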
542 static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
543 {
544         *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
545         if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
546                 return 0;
547         if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
548             (cqe->header_length == 0))
549                 return 0;
550         return -EINVAL;
551 }
552 
553 static inline void ehea_fill_skb(struct net_device *dev,
554                                  struct sk_buff *skb, struct ehea_cqe *cqe,
555                                  struct ehea_port_res *pr)
556 {
 557         int length = cqe->num_bytes_transfered - 4;     /* remove CRC */
558 
559         skb_put(skb, length);
560         skb->protocol = eth_type_trans(skb, dev);
561 
 562         /* The packet was not an IPv4 packet, so a complemented checksum
 563            was calculated; the value is found in the Internet Checksum field. */
564         if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
565                 skb->ip_summed = CHECKSUM_COMPLETE;
566                 skb->csum = csum_unfold(~cqe->inet_checksum_value);
567         } else
568                 skb->ip_summed = CHECKSUM_UNNECESSARY;
569 
570         skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
571 }
572 
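    /*
     * get_skb_by_index - fetch the skb a CQE refers to, prefetching the
     * next array slot and its packet data to warm the cache for the
     * following completion.
     */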
573 static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
574                                                int arr_len,
575                                                struct ehea_cqe *cqe)
576 {
577         int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
578         struct sk_buff *skb;
579         void *pref;
580         int x;
581 
582         x = skb_index + 1;
583         x &= (arr_len - 1);
584 
585         pref = skb_array[x];
586         if (pref) {
587                 prefetchw(pref);
588                 prefetchw(pref + EHEA_CACHE_LINE);
589 
590                 pref = (skb_array[x]->data);
591                 prefetch(pref);
592                 prefetch(pref + EHEA_CACHE_LINE);
593                 prefetch(pref + EHEA_CACHE_LINE * 2);
594                 prefetch(pref + EHEA_CACHE_LINE * 3);
595         }
596 
597         skb = skb_array[skb_index];
598         skb_array[skb_index] = NULL;
599         return skb;
600 }
601 
602 static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
603                                                   int arr_len, int wqe_index)
604 {
605         struct sk_buff *skb;
606         void *pref;
607         int x;
608 
609         x = wqe_index + 1;
610         x &= (arr_len - 1);
611 
612         pref = skb_array[x];
613         if (pref) {
614                 prefetchw(pref);
615                 prefetchw(pref + EHEA_CACHE_LINE);
616 
617                 pref = (skb_array[x]->data);
618                 prefetchw(pref);
619                 prefetchw(pref + EHEA_CACHE_LINE);
620         }
621 
622         skb = skb_array[wqe_index];
623         skb_array[wqe_index] = NULL;
624         return skb;
625 }
626 
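    /*
     * ehea_treat_poll_error - account a receive error, drop the skb
     * still held for RQ2/RQ3 and schedule a port reset on fatal errors.
     * Returns nonzero if a reset was scheduled.
     */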
627 static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
628                                  struct ehea_cqe *cqe, int *processed_rq2,
629                                  int *processed_rq3)
630 {
631         struct sk_buff *skb;
632 
633         if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
634                 pr->p_stats.err_tcp_cksum++;
635         if (cqe->status & EHEA_CQE_STAT_ERR_IP)
636                 pr->p_stats.err_ip_cksum++;
637         if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
638                 pr->p_stats.err_frame_crc++;
639 
640         if (rq == 2) {
641                 *processed_rq2 += 1;
642                 skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
643                 dev_kfree_skb(skb);
644         } else if (rq == 3) {
645                 *processed_rq3 += 1;
646                 skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
647                 dev_kfree_skb(skb);
648         }
649 
650         if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
651                 if (netif_msg_rx_err(pr->port)) {
652                         pr_err("Critical receive error for QP %d. Resetting port.\n",
653                                pr->qp->init_attr.qp_nr);
654                         ehea_dump(cqe, sizeof(*cqe), "CQE");
655                 }
656                 ehea_schedule_port_reset(pr->port);
657                 return 1;
658         }
659 
660         return 0;
661 }
662 
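    /*
     * ehea_proc_rwqes - NAPI receive path: drain up to 'budget' receive
     * completions, copy low-latency RQ1 data out of the CQE itself,
     * pass packets to GRO and refill the three RQs with what was
     * consumed.
     */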
663 static int ehea_proc_rwqes(struct net_device *dev,
664                            struct ehea_port_res *pr,
665                            int budget)
666 {
667         struct ehea_port *port = pr->port;
668         struct ehea_qp *qp = pr->qp;
669         struct ehea_cqe *cqe;
670         struct sk_buff *skb;
671         struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
672         struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
673         struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
674         int skb_arr_rq1_len = pr->rq1_skba.len;
675         int skb_arr_rq2_len = pr->rq2_skba.len;
676         int skb_arr_rq3_len = pr->rq3_skba.len;
677         int processed, processed_rq1, processed_rq2, processed_rq3;
678         u64 processed_bytes = 0;
679         int wqe_index, last_wqe_index, rq, port_reset;
680 
681         processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
682         last_wqe_index = 0;
683 
684         cqe = ehea_poll_rq1(qp, &wqe_index);
685         while ((processed < budget) && cqe) {
686                 ehea_inc_rq1(qp);
687                 processed_rq1++;
688                 processed++;
689                 if (netif_msg_rx_status(port))
690                         ehea_dump(cqe, sizeof(*cqe), "CQE");
691 
692                 last_wqe_index = wqe_index;
693                 rmb();
694                 if (!ehea_check_cqe(cqe, &rq)) {
695                         if (rq == 1) {
696                                 /* LL RQ1 */
697                                 skb = get_skb_by_index_ll(skb_arr_rq1,
698                                                           skb_arr_rq1_len,
699                                                           wqe_index);
700                                 if (unlikely(!skb)) {
701                                         netif_info(port, rx_err, dev,
702                                                   "LL rq1: skb=NULL\n");
703 
704                                         skb = netdev_alloc_skb(dev,
705                                                                EHEA_L_PKT_SIZE);
706                                         if (!skb)
707                                                 break;
708                                 }
709                                 skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
710                                                  cqe->num_bytes_transfered - 4);
711                                 ehea_fill_skb(dev, skb, cqe, pr);
712                         } else if (rq == 2) {
713                                 /* RQ2 */
714                                 skb = get_skb_by_index(skb_arr_rq2,
715                                                        skb_arr_rq2_len, cqe);
716                                 if (unlikely(!skb)) {
717                                         netif_err(port, rx_err, dev,
718                                                   "rq2: skb=NULL\n");
719                                         break;
720                                 }
721                                 ehea_fill_skb(dev, skb, cqe, pr);
722                                 processed_rq2++;
723                         } else {
724                                 /* RQ3 */
725                                 skb = get_skb_by_index(skb_arr_rq3,
726                                                        skb_arr_rq3_len, cqe);
727                                 if (unlikely(!skb)) {
728                                         netif_err(port, rx_err, dev,
729                                                   "rq3: skb=NULL\n");
730                                         break;
731                                 }
732                                 ehea_fill_skb(dev, skb, cqe, pr);
733                                 processed_rq3++;
734                         }
735 
736                         processed_bytes += skb->len;
737 
738                         if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
739                                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
740                                                        cqe->vlan_tag);
741 
742                         napi_gro_receive(&pr->napi, skb);
743                 } else {
744                         pr->p_stats.poll_receive_errors++;
745                         port_reset = ehea_treat_poll_error(pr, rq, cqe,
746                                                            &processed_rq2,
747                                                            &processed_rq3);
748                         if (port_reset)
749                                 break;
750                 }
751                 cqe = ehea_poll_rq1(qp, &wqe_index);
752         }
753 
754         pr->rx_packets += processed;
755         pr->rx_bytes += processed_bytes;
756 
757         ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
758         ehea_refill_rq2(pr, processed_rq2);
759         ehea_refill_rq3(pr, processed_rq3);
760 
761         return processed;
762 }
763 
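    /*
     * Send-queue liveness check: check_sqs() posts a marker WQE tagged
     * with SWQE_RESTART_CHECK on every send queue and waits on
     * restart_wq; ehea_proc_cqes() raises sq_restart_flag when the
     * marker completes, and reset_sq_restart_flag() clears the flags
     * and wakes the waiter. A timeout means HW and SW queues are out of
     * sync and forces a port reset.
     */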
764 #define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull
765 
766 static void reset_sq_restart_flag(struct ehea_port *port)
767 {
768         int i;
769 
770         for (i = 0; i < port->num_def_qps; i++) {
771                 struct ehea_port_res *pr = &port->port_res[i];
772                 pr->sq_restart_flag = 0;
773         }
774         wake_up(&port->restart_wq);
775 }
776 
777 static void check_sqs(struct ehea_port *port)
778 {
779         struct ehea_swqe *swqe;
780         int swqe_index;
781         int i, k;
782 
783         for (i = 0; i < port->num_def_qps; i++) {
784                 struct ehea_port_res *pr = &port->port_res[i];
785                 int ret;
786                 k = 0;
787                 swqe = ehea_get_swqe(pr->qp, &swqe_index);
788                 memset(swqe, 0, SWQE_HEADER_SIZE);
789                 atomic_dec(&pr->swqe_avail);
790 
791                 swqe->tx_control |= EHEA_SWQE_PURGE;
792                 swqe->wr_id = SWQE_RESTART_CHECK;
793                 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
794                 swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
795                 swqe->immediate_data_length = 80;
796 
797                 ehea_post_swqe(pr->qp, swqe);
798 
799                 ret = wait_event_timeout(port->restart_wq,
800                                          pr->sq_restart_flag == 0,
801                                          msecs_to_jiffies(100));
802 
803                 if (!ret) {
804                         pr_err("HW/SW queues out of sync\n");
805                         ehea_schedule_port_reset(pr->port);
806                         return;
807                 }
808         }
809 }
810 
811 
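    /*
     * ehea_proc_cqes - reap up to my_quota send completions: free the
     * transmitted skbs, return WQE credits to swqe_avail and wake the
     * tx queue if it was stopped and enough WQEs are free again.
     */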
812 static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
813 {
814         struct sk_buff *skb;
815         struct ehea_cq *send_cq = pr->send_cq;
816         struct ehea_cqe *cqe;
817         int quota = my_quota;
818         int cqe_counter = 0;
819         int swqe_av = 0;
820         int index;
821         struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
822                                                 pr - &pr->port->port_res[0]);
823 
824         cqe = ehea_poll_cq(send_cq);
825         while (cqe && (quota > 0)) {
826                 ehea_inc_cq(send_cq);
827 
828                 cqe_counter++;
829                 rmb();
830 
831                 if (cqe->wr_id == SWQE_RESTART_CHECK) {
832                         pr->sq_restart_flag = 1;
833                         swqe_av++;
834                         break;
835                 }
836 
837                 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
838                         pr_err("Bad send completion status=0x%04X\n",
839                                cqe->status);
840 
841                         if (netif_msg_tx_err(pr->port))
842                                 ehea_dump(cqe, sizeof(*cqe), "Send CQE");
843 
844                         if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
845                                 pr_err("Resetting port\n");
846                                 ehea_schedule_port_reset(pr->port);
847                                 break;
848                         }
849                 }
850 
851                 if (netif_msg_tx_done(pr->port))
852                         ehea_dump(cqe, sizeof(*cqe), "CQE");
853 
854                 if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
855                            == EHEA_SWQE2_TYPE)) {
856 
857                         index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
858                         skb = pr->sq_skba.arr[index];
859                         dev_kfree_skb(skb);
860                         pr->sq_skba.arr[index] = NULL;
861                 }
862 
863                 swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
864                 quota--;
865 
866                 cqe = ehea_poll_cq(send_cq);
867         }
868 
869         ehea_update_feca(send_cq, cqe_counter);
870         atomic_add(swqe_av, &pr->swqe_avail);
871 
872         if (unlikely(netif_tx_queue_stopped(txq) &&
873                      (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
874                 __netif_tx_lock(txq, smp_processor_id());
875                 if (netif_tx_queue_stopped(txq) &&
876                     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
877                         netif_tx_wake_queue(txq);
878                 __netif_tx_unlock(txq);
879         }
880 
881         wake_up(&pr->port->swqe_avail_wq);
882 
883         return cqe;
884 }
885 
886 #define EHEA_POLL_MAX_CQES 65535
887 
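    /*
     * ehea_poll - NAPI handler: process send completions and received
     * packets, then complete NAPI, re-arm CQ event generation and
     * re-check for completions that raced with the re-arm before
     * returning.
     */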
888 static int ehea_poll(struct napi_struct *napi, int budget)
889 {
890         struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
891                                                 napi);
892         struct net_device *dev = pr->port->netdev;
893         struct ehea_cqe *cqe;
894         struct ehea_cqe *cqe_skb = NULL;
895         int wqe_index;
896         int rx = 0;
897 
898         cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
899         rx += ehea_proc_rwqes(dev, pr, budget - rx);
900 
901         while (rx != budget) {
902                 napi_complete(napi);
903                 ehea_reset_cq_ep(pr->recv_cq);
904                 ehea_reset_cq_ep(pr->send_cq);
905                 ehea_reset_cq_n1(pr->recv_cq);
906                 ehea_reset_cq_n1(pr->send_cq);
907                 rmb();
908                 cqe = ehea_poll_rq1(pr->qp, &wqe_index);
909                 cqe_skb = ehea_poll_cq(pr->send_cq);
910 
911                 if (!cqe && !cqe_skb)
912                         return rx;
913 
914                 if (!napi_reschedule(napi))
915                         return rx;
916 
917                 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
918                 rx += ehea_proc_rwqes(dev, pr, budget - rx);
919         }
920 
921         return rx;
922 }
923 
924 #ifdef CONFIG_NET_POLL_CONTROLLER
925 static void ehea_netpoll(struct net_device *dev)
926 {
927         struct ehea_port *port = netdev_priv(dev);
928         int i;
929 
930         for (i = 0; i < port->num_def_qps; i++)
931                 napi_schedule(&port->port_res[i].napi);
932 }
933 #endif
934 
935 static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
936 {
937         struct ehea_port_res *pr = param;
938 
939         napi_schedule(&pr->napi);
940 
941         return IRQ_HANDLED;
942 }
943 
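    /*
     * ehea_qp_aff_irq_handler - QP affiliated error interrupt: drain
     * the error EQ, log the failing QP and schedule a port reset if the
     * error data demands one (or if a CQ/EQ is affected).
     */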
944 static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
945 {
946         struct ehea_port *port = param;
947         struct ehea_eqe *eqe;
948         struct ehea_qp *qp;
949         u32 qp_token;
950         u64 resource_type, aer, aerr;
951         int reset_port = 0;
952 
953         eqe = ehea_poll_eq(port->qp_eq);
954 
955         while (eqe) {
956                 qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
957                 pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
958                        eqe->entry, qp_token);
959 
960                 qp = port->port_res[qp_token].qp;
961 
962                 resource_type = ehea_error_data(port->adapter, qp->fw_handle,
963                                                 &aer, &aerr);
964 
965                 if (resource_type == EHEA_AER_RESTYPE_QP) {
966                         if ((aer & EHEA_AER_RESET_MASK) ||
967                             (aerr & EHEA_AERR_RESET_MASK))
 968                                 reset_port = 1;
969                 } else
970                         reset_port = 1;   /* Reset in case of CQ or EQ error */
971 
972                 eqe = ehea_poll_eq(port->qp_eq);
973         }
974 
975         if (reset_port) {
976                 pr_err("Resetting port\n");
977                 ehea_schedule_port_reset(port);
978         }
979 
980         return IRQ_HANDLED;
981 }
982 
983 static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
984                                        int logical_port)
985 {
986         int i;
987 
988         for (i = 0; i < EHEA_MAX_PORTS; i++)
989                 if (adapter->port[i])
990                         if (adapter->port[i]->logical_port_id == logical_port)
991                                 return adapter->port[i];
992         return NULL;
993 }
994 
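    /*
     * ehea_sense_port_attr - query H_PORT_CB0 for the port's MAC
     * address, link speed/duplex and number of default QPs, and cache
     * the results in the port structure.
     */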
995 int ehea_sense_port_attr(struct ehea_port *port)
996 {
997         int ret;
998         u64 hret;
999         struct hcp_ehea_port_cb0 *cb0;
1000 
1001         /* may be called via ehea_neq_tasklet() */
1002         cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
1003         if (!cb0) {
1004                 pr_err("no mem for cb0\n");
1005                 ret = -ENOMEM;
1006                 goto out;
1007         }
1008 
1009         hret = ehea_h_query_ehea_port(port->adapter->handle,
1010                                       port->logical_port_id, H_PORT_CB0,
1011                                       EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
1012                                       cb0);
1013         if (hret != H_SUCCESS) {
1014                 ret = -EIO;
1015                 goto out_free;
1016         }
1017 
1018         /* MAC address */
1019         port->mac_addr = cb0->port_mac_addr << 16;
1020 
1021         if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
1022                 ret = -EADDRNOTAVAIL;
1023                 goto out_free;
1024         }
1025 
1026         /* Port speed */
1027         switch (cb0->port_speed) {
1028         case H_SPEED_10M_H:
1029                 port->port_speed = EHEA_SPEED_10M;
1030                 port->full_duplex = 0;
1031                 break;
1032         case H_SPEED_10M_F:
1033                 port->port_speed = EHEA_SPEED_10M;
1034                 port->full_duplex = 1;
1035                 break;
1036         case H_SPEED_100M_H:
1037                 port->port_speed = EHEA_SPEED_100M;
1038                 port->full_duplex = 0;
1039                 break;
1040         case H_SPEED_100M_F:
1041                 port->port_speed = EHEA_SPEED_100M;
1042                 port->full_duplex = 1;
1043                 break;
1044         case H_SPEED_1G_F:
1045                 port->port_speed = EHEA_SPEED_1G;
1046                 port->full_duplex = 1;
1047                 break;
1048         case H_SPEED_10G_F:
1049                 port->port_speed = EHEA_SPEED_10G;
1050                 port->full_duplex = 1;
1051                 break;
1052         default:
1053                 port->port_speed = 0;
1054                 port->full_duplex = 0;
1055                 break;
1056         }
1057 
1058         port->autoneg = 1;
1059         port->num_mcs = cb0->num_default_qps;
1060 
1061         /* Number of default QPs */
1062         if (use_mcs)
1063                 port->num_def_qps = cb0->num_default_qps;
1064         else
1065                 port->num_def_qps = 1;
1066 
1067         if (!port->num_def_qps) {
1068                 ret = -EINVAL;
1069                 goto out_free;
1070         }
1071 
1072         ret = 0;
1073 out_free:
1074         if (ret || netif_msg_probe(port))
1075                 ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
1076         free_page((unsigned long)cb0);
1077 out:
1078         return ret;
1079 }
1080 
1081 int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
1082 {
1083         struct hcp_ehea_port_cb4 *cb4;
1084         u64 hret;
1085         int ret = 0;
1086 
1087         cb4 = (void *)get_zeroed_page(GFP_KERNEL);
1088         if (!cb4) {
1089                 pr_err("no mem for cb4\n");
1090                 ret = -ENOMEM;
1091                 goto out;
1092         }
1093 
1094         cb4->port_speed = port_speed;
1095 
1096         netif_carrier_off(port->netdev);
1097 
1098         hret = ehea_h_modify_ehea_port(port->adapter->handle,
1099                                        port->logical_port_id,
1100                                        H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
1101         if (hret == H_SUCCESS) {
1102                 port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;
1103 
1104                 hret = ehea_h_query_ehea_port(port->adapter->handle,
1105                                               port->logical_port_id,
1106                                               H_PORT_CB4, H_PORT_CB4_SPEED,
1107                                               cb4);
1108                 if (hret == H_SUCCESS) {
1109                         switch (cb4->port_speed) {
1110                         case H_SPEED_10M_H:
1111                                 port->port_speed = EHEA_SPEED_10M;
1112                                 port->full_duplex = 0;
1113                                 break;
1114                         case H_SPEED_10M_F:
1115                                 port->port_speed = EHEA_SPEED_10M;
1116                                 port->full_duplex = 1;
1117                                 break;
1118                         case H_SPEED_100M_H:
1119                                 port->port_speed = EHEA_SPEED_100M;
1120                                 port->full_duplex = 0;
1121                                 break;
1122                         case H_SPEED_100M_F:
1123                                 port->port_speed = EHEA_SPEED_100M;
1124                                 port->full_duplex = 1;
1125                                 break;
1126                         case H_SPEED_1G_F:
1127                                 port->port_speed = EHEA_SPEED_1G;
1128                                 port->full_duplex = 1;
1129                                 break;
1130                         case H_SPEED_10G_F:
1131                                 port->port_speed = EHEA_SPEED_10G;
1132                                 port->full_duplex = 1;
1133                                 break;
1134                         default:
1135                                 port->port_speed = 0;
1136                                 port->full_duplex = 0;
1137                                 break;
1138                         }
1139                 } else {
1140                         pr_err("Failed sensing port speed\n");
1141                         ret = -EIO;
1142                 }
1143         } else {
1144                 if (hret == H_AUTHORITY) {
1145                         pr_info("Hypervisor denied setting port speed\n");
1146                         ret = -EPERM;
1147                 } else {
1148                         ret = -EIO;
1149                         pr_err("Failed setting port speed\n");
1150                 }
1151         }
1152         if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
1153                 netif_carrier_on(port->netdev);
1154 
1155         free_page((unsigned long)cb4);
1156 out:
1157         return ret;
1158 }
1159 
1160 static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
1161 {
1162         int ret;
1163         u8 ec;
1164         u8 portnum;
1165         struct ehea_port *port;
1166         struct net_device *dev;
1167 
1168         ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
1169         portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
1170         port = ehea_get_port(adapter, portnum);
1171         if (!port) {
1172                 pr_err("unknown portnum %x\n", portnum);
1173                 return;
1174         }
1175         dev = port->netdev;
1176 
1177         switch (ec) {
1178         case EHEA_EC_PORTSTATE_CHG:     /* port state change */
1179 
1180 
1181                 if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
1182                         if (!netif_carrier_ok(dev)) {
1183                                 ret = ehea_sense_port_attr(port);
1184                                 if (ret) {
1185                                         netdev_err(dev, "failed resensing port attributes\n");
1186                                         break;
1187                                 }
1188 
1189                                 netif_info(port, link, dev,
1190                                            "Logical port up: %dMbps %s Duplex\n",
1191                                            port->port_speed,
1192                                            port->full_duplex == 1 ?
1193                                            "Full" : "Half");
1194 
1195                                 netif_carrier_on(dev);
1196                                 netif_wake_queue(dev);
1197                         }
1198                 } else
1199                         if (netif_carrier_ok(dev)) {
1200                                 netif_info(port, link, dev,
1201                                            "Logical port down\n");
1202                                 netif_carrier_off(dev);
1203                                 netif_tx_disable(dev);
1204                         }
1205 
1206                 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
1207                         port->phy_link = EHEA_PHY_LINK_UP;
1208                         netif_info(port, link, dev,
1209                                    "Physical port up\n");
1210                         if (prop_carrier_state)
1211                                 netif_carrier_on(dev);
1212                 } else {
1213                         port->phy_link = EHEA_PHY_LINK_DOWN;
1214                         netif_info(port, link, dev,
1215                                    "Physical port down\n");
1216                         if (prop_carrier_state)
1217                                 netif_carrier_off(dev);
1218                 }
1219 
1220                 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
1221                         netdev_info(dev,
1222                                     "External switch port is primary port\n");
1223                 else
1224                         netdev_info(dev,
1225                                     "External switch port is backup port\n");
1226 
1227                 break;
1228         case EHEA_EC_ADAPTER_MALFUNC:
1229                 netdev_err(dev, "Adapter malfunction\n");
1230                 break;
1231         case EHEA_EC_PORT_MALFUNC:
1232                 netdev_info(dev, "Port malfunction\n");
1233                 netif_carrier_off(dev);
1234                 netif_tx_disable(dev);
1235                 break;
1236         default:
1237                 netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
1238                 break;
1239         }
1240 }
1241 
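     /*
      * ehea_neq_tasklet - bottom half of the notification EQ interrupt:
      * drain all pending event entries, dispatch them via
      * ehea_parse_eqe() and re-arm the event mask with the hypervisor.
      */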
1242 static void ehea_neq_tasklet(unsigned long data)
1243 {
1244         struct ehea_adapter *adapter = (struct ehea_adapter *)data;
1245         struct ehea_eqe *eqe;
1246         u64 event_mask;
1247 
1248         eqe = ehea_poll_eq(adapter->neq);
1249         pr_debug("eqe=%p\n", eqe);
1250 
1251         while (eqe) {
1252                 pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
1253                 ehea_parse_eqe(adapter, eqe->entry);
1254                 eqe = ehea_poll_eq(adapter->neq);
1255                 pr_debug("next eqe=%p\n", eqe);
1256         }
1257 
1258         event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
1259                    | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
1260                    | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);
1261 
1262         ehea_h_reset_events(adapter->handle,
1263                             adapter->neq->fw_handle, event_mask);
1264 }
1265 
1266 static irqreturn_t ehea_interrupt_neq(int irq, void *param)
1267 {
1268         struct ehea_adapter *adapter = param;
1269         tasklet_hi_schedule(&adapter->neq_tasklet);
1270         return IRQ_HANDLED;
1271 }
1272 
1273 
1274 static int ehea_fill_port_res(struct ehea_port_res *pr)
1275 {
1276         int ret;
1277         struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
1278 
1279         ehea_init_fill_rq1(pr, pr->rq1_skba.len);
1280 
1281         ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
1282 
1283         ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);
1284 
1285         return ret;
1286 }
1287 
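     /*
      * ehea_reg_interrupts - request the affiliated-error IRQ for the
      * port and one receive IRQ per default QP, unwinding the IRQs
      * registered so far on failure.
      */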
1288 static int ehea_reg_interrupts(struct net_device *dev)
1289 {
1290         struct ehea_port *port = netdev_priv(dev);
1291         struct ehea_port_res *pr;
1292         int i, ret;
1293 
1294 
1295         snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
1296                  dev->name);
1297 
1298         ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
1299                                   ehea_qp_aff_irq_handler,
1300                                   0, port->int_aff_name, port);
1301         if (ret) {
1302                 netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
1303                            port->qp_eq->attr.ist1);
1304                 goto out_free_qpeq;
1305         }
1306 
1307         netif_info(port, ifup, dev,
1308                    "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
1309                    port->qp_eq->attr.ist1);
1310 
1311 
1312         for (i = 0; i < port->num_def_qps; i++) {
1313                 pr = &port->port_res[i];
1314                 snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
1315                          "%s-queue%d", dev->name, i);
1316                 ret = ibmebus_request_irq(pr->eq->attr.ist1,
1317                                           ehea_recv_irq_handler,
1318                                           0, pr->int_send_name, pr);
1319                 if (ret) {
1320                         netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
1321                                    i, pr->eq->attr.ist1);
1322                         goto out_free_req;
1323                 }
1324                 netif_info(port, ifup, dev,
1325                            "irq_handle 0x%X for function ehea_queue_int %d registered\n",
1326                            pr->eq->attr.ist1, i);
1327         }
1328 out:
1329         return ret;
1330 
1331 
1332 out_free_req:
1333         while (--i >= 0) {
1334                 u32 ist = port->port_res[i].eq->attr.ist1;
1335                 ibmebus_free_irq(ist, &port->port_res[i]);
1336         }
1337 
1338 out_free_qpeq:
1339         ibmebus_free_irq(port->qp_eq->attr.ist1, port);
1340         i = port->num_def_qps;
1341 
1342         goto out;
1343 
1344 }
1345 
1346 static void ehea_free_interrupts(struct net_device *dev)
1347 {
1348         struct ehea_port *port = netdev_priv(dev);
1349         struct ehea_port_res *pr;
1350         int i;
1351 
1352         /* send */
1353 
1354         for (i = 0; i < port->num_def_qps; i++) {
1355                 pr = &port->port_res[i];
1356                 ibmebus_free_irq(pr->eq->attr.ist1, pr);
1357                 netif_info(port, intr, dev,
1358                            "free send irq for res %d with handle 0x%X\n",
1359                            i, pr->eq->attr.ist1);
1360         }
1361 
1362         /* associated events */
1363         ibmebus_free_irq(port->qp_eq->attr.ist1, port);
1364         netif_info(port, intr, dev,
1365                    "associated event interrupt for handle 0x%X freed\n",
1366                    port->qp_eq->attr.ist1);
1367 }
1368 
1369 static int ehea_configure_port(struct ehea_port *port)
1370 {
1371         int ret, i;
1372         u64 hret, mask;
1373         struct hcp_ehea_port_cb0 *cb0;
1374 
1375         ret = -ENOMEM;
1376         cb0 = (void *)get_zeroed_page(GFP_KERNEL);
1377         if (!cb0)
1378                 goto out;
1379 
1380         cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
1381                      | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
1382                      | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
1383                      | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
1384                      | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
1385                                       PXLY_RC_VLAN_FILTER)
1386                      | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);
1387 
1388         for (i = 0; i < port->num_mcs; i++)
1389                 if (use_mcs)
1390                         cb0->default_qpn_arr[i] =
1391                                 port->port_res[i].qp->init_attr.qp_nr;
1392                 else
1393                         cb0->default_qpn_arr[i] =
1394                                 port->port_res[0].qp->init_attr.qp_nr;
1395 
1396         if (netif_msg_ifup(port))
1397                 ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");
1398 
1399         mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
1400              | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);
1401 
1402         hret = ehea_h_modify_ehea_port(port->adapter->handle,
1403                                        port->logical_port_id,
1404                                        H_PORT_CB0, mask, cb0);
1405         ret = -EIO;
1406         if (hret != H_SUCCESS)
1407                 goto out_free;
1408 
1409         ret = 0;
1410 
1411 out_free:
1412         free_page((unsigned long)cb0);
1413 out:
1414         return ret;
1415 }
1416 
1417 static int ehea_gen_smrs(struct ehea_port_res *pr)
1418 {
1419         int ret;
1420         struct ehea_adapter *adapter = pr->port->adapter;
1421 
1422         ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
1423         if (ret)
1424                 goto out;
1425 
1426         ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
1427         if (ret)
1428                 goto out_free;
1429 
1430         return 0;
1431 
1432 out_free:
1433         ehea_rem_mr(&pr->send_mr);
1434 out:
1435         pr_err("Generating SMRS failed\n");
1436         return -EIO;
1437 }
1438 
1439 static int ehea_rem_smrs(struct ehea_port_res *pr)
1440 {
1441         if ((ehea_rem_mr(&pr->send_mr)) ||
1442             (ehea_rem_mr(&pr->recv_mr)))
1443                 return -EIO;
1444         else
1445                 return 0;
1446 }
1447 
1448 static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
1449 {
1450         int arr_size = sizeof(void *) * max_q_entries;
1451 
1452         q_skba->arr = vzalloc(arr_size);
1453         if (!q_skba->arr)
1454                 return -ENOMEM;
1455 
1456         q_skba->len = max_q_entries;
1457         q_skba->index = 0;
1458         q_skba->os_skbs = 0;
1459 
1460         return 0;
1461 }
1462 
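     /*
      * Set up one complete port resource: its event queue, send and
      * receive completion queues, the queue pair and the skb tracking
      * arrays for the SQ and RQ1-3.  The traffic counters are carried
      * across the memset so a re-init does not clear the statistics.
      */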
1463 static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1464                               struct port_res_cfg *pr_cfg, int queue_token)
1465 {
1466         struct ehea_adapter *adapter = port->adapter;
1467         enum ehea_eq_type eq_type = EHEA_EQ;
1468         struct ehea_qp_init_attr *init_attr = NULL;
1469         int ret = -EIO;
1470         u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
1471 
1472         tx_bytes = pr->tx_bytes;
1473         tx_packets = pr->tx_packets;
1474         rx_bytes = pr->rx_bytes;
1475         rx_packets = pr->rx_packets;
1476 
1477         memset(pr, 0, sizeof(struct ehea_port_res));
1478 
1479         pr->tx_bytes = tx_bytes;
1480         pr->tx_packets = tx_packets;
1481         pr->rx_bytes = rx_bytes;
1482         pr->rx_packets = rx_packets;
1483 
1484         pr->port = port;
1485 
1486         pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
1487         if (!pr->eq) {
1488                 pr_err("create_eq failed (eq)\n");
1489                 goto out_free;
1490         }
1491 
1492         pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
1493                                      pr->eq->fw_handle,
1494                                      port->logical_port_id);
1495         if (!pr->recv_cq) {
1496                 pr_err("create_cq failed (cq_recv)\n");
1497                 goto out_free;
1498         }
1499 
1500         pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
1501                                      pr->eq->fw_handle,
1502                                      port->logical_port_id);
1503         if (!pr->send_cq) {
1504                 pr_err("create_cq failed (cq_send)\n");
1505                 goto out_free;
1506         }
1507 
1508         if (netif_msg_ifup(port))
1509                 pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
1510                         pr->send_cq->attr.act_nr_of_cqes,
1511                         pr->recv_cq->attr.act_nr_of_cqes);
1512 
1513         init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
1514         if (!init_attr) {
1515                 ret = -ENOMEM;
1516                 pr_err("no mem for ehea_qp_init_attr\n");
1517                 goto out_free;
1518         }
1519 
1520         init_attr->low_lat_rq1 = 1;
1521         init_attr->signalingtype = 1;   /* generate CQE if specified in WQE */
1522         init_attr->rq_count = 3;
1523         init_attr->qp_token = queue_token;
1524         init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
1525         init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
1526         init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
1527         init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
1528         init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
1529         init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
1530         init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
1531         init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
1532         init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
1533         init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
1534         init_attr->port_nr = port->logical_port_id;
1535         init_attr->send_cq_handle = pr->send_cq->fw_handle;
1536         init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
1537         init_attr->aff_eq_handle = port->qp_eq->fw_handle;
1538 
1539         pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
1540         if (!pr->qp) {
1541                 pr_err("create_qp failed\n");
1542                 ret = -EIO;
1543                 goto out_free;
1544         }
1545 
1546         if (netif_msg_ifup(port))
1547                 pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
1548                         init_attr->qp_nr,
1549                         init_attr->act_nr_send_wqes,
1550                         init_attr->act_nr_rwqes_rq1,
1551                         init_attr->act_nr_rwqes_rq2,
1552                         init_attr->act_nr_rwqes_rq3);
1553 
1554         pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
1555 
1556         ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
1557         ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
1558         ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
1559         ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
1560         if (ret)
1561                 goto out_free;
1562 
1563         pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
1564         if (ehea_gen_smrs(pr) != 0) {
1565                 ret = -EIO;
1566                 goto out_free;
1567         }
1568 
1569         atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);
1570 
1571         kfree(init_attr);
1572 
1573         netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
1574 
1575         ret = 0;
1576         goto out;
1577 
1578 out_free:
1579         kfree(init_attr);
1580         vfree(pr->sq_skba.arr);
1581         vfree(pr->rq1_skba.arr);
1582         vfree(pr->rq2_skba.arr);
1583         vfree(pr->rq3_skba.arr);
1584         ehea_destroy_qp(pr->qp);
1585         ehea_destroy_cq(pr->send_cq);
1586         ehea_destroy_cq(pr->recv_cq);
1587         ehea_destroy_eq(pr->eq);
1588 out:
1589         return ret;
1590 }
1591 
1592 static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
1593 {
1594         int ret, i;
1595 
1596         if (pr->qp)
1597                 netif_napi_del(&pr->napi);
1598 
1599         ret = ehea_destroy_qp(pr->qp);
1600 
1601         if (!ret) {
1602                 ehea_destroy_cq(pr->send_cq);
1603                 ehea_destroy_cq(pr->recv_cq);
1604                 ehea_destroy_eq(pr->eq);
1605 
1606                 for (i = 0; i < pr->rq1_skba.len; i++)
1607                         if (pr->rq1_skba.arr[i])
1608                                 dev_kfree_skb(pr->rq1_skba.arr[i]);
1609 
1610                 for (i = 0; i < pr->rq2_skba.len; i++)
1611                         if (pr->rq2_skba.arr[i])
1612                                 dev_kfree_skb(pr->rq2_skba.arr[i]);
1613 
1614                 for (i = 0; i < pr->rq3_skba.len; i++)
1615                         if (pr->rq3_skba.arr[i])
1616                                 dev_kfree_skb(pr->rq3_skba.arr[i]);
1617 
1618                 for (i = 0; i < pr->sq_skba.len; i++)
1619                         if (pr->sq_skba.arr[i])
1620                                 dev_kfree_skb(pr->sq_skba.arr[i]);
1621 
1622                 vfree(pr->rq1_skba.arr);
1623                 vfree(pr->rq2_skba.arr);
1624                 vfree(pr->rq3_skba.arr);
1625                 vfree(pr->sq_skba.arr);
1626                 ret = ehea_rem_smrs(pr);
1627         }
1628         return ret;
1629 }
1630 
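     /*
      * Fill the immediate-data area of a type-2 SWQE.  For TSO frames
      * only the Ethernet/IP/TCP headers are copied inline; any further
      * linear data is referenced via the first scatter-gather entry.
      */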
1631 static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
1632                                   u32 lkey)
1633 {
1634         int skb_data_size = skb_headlen(skb);
1635         u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
1636         struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
1637         unsigned int immediate_len = SWQE2_MAX_IMM;
1638 
1639         swqe->descriptors = 0;
1640 
1641         if (skb_is_gso(skb)) {
1642                 swqe->tx_control |= EHEA_SWQE_TSO;
1643                 swqe->mss = skb_shinfo(skb)->gso_size;
1644                 /*
1645                  * For TSO packets we only copy the headers into the
1646                  * immediate area.
1647                  */
1648                 immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
1649         }
1650 
1651         if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
1652                 skb_copy_from_linear_data(skb, imm_data, immediate_len);
1653                 swqe->immediate_data_length = immediate_len;
1654 
1655                 if (skb_data_size > immediate_len) {
1656                         sg1entry->l_key = lkey;
1657                         sg1entry->len = skb_data_size - immediate_len;
1658                         sg1entry->vaddr =
1659                                 ehea_map_vaddr(skb->data + immediate_len);
1660                         swqe->descriptors++;
1661                 }
1662         } else {
1663                 skb_copy_from_linear_data(skb, imm_data, skb_data_size);
1664                 swqe->immediate_data_length = skb_data_size;
1665         }
1666 }
1667 
1668 static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
1669                                     struct ehea_swqe *swqe, u32 lkey)
1670 {
1671         struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
1672         skb_frag_t *frag;
1673         int nfrags, sg1entry_contains_frag_data, i;
1674 
1675         nfrags = skb_shinfo(skb)->nr_frags;
1676         sg1entry = &swqe->u.immdata_desc.sg_entry;
1677         sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
1678         sg1entry_contains_frag_data = 0;
1679 
1680         write_swqe2_immediate(skb, swqe, lkey);
1681 
1682         /* write descriptors */
1683         if (nfrags > 0) {
1684                 if (swqe->descriptors == 0) {
1685                         /* sg1entry not yet used */
1686                         frag = &skb_shinfo(skb)->frags[0];
1687 
1688                         /* copy sg1entry data */
1689                         sg1entry->l_key = lkey;
1690                         sg1entry->len = skb_frag_size(frag);
1691                         sg1entry->vaddr =
1692                                 ehea_map_vaddr(skb_frag_address(frag));
1693                         swqe->descriptors++;
1694                         sg1entry_contains_frag_data = 1;
1695                 }
1696 
1697                 for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
1698 
1699                         frag = &skb_shinfo(skb)->frags[i];
1700                         sgentry = &sg_list[i - sg1entry_contains_frag_data];
1701 
1702                         sgentry->l_key = lkey;
1703                         sgentry->len = skb_frag_size(frag);
1704                         sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
1705                         swqe->descriptors++;
1706                 }
1707         }
1708 }
1709 
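     /*
      * (De)register the port's broadcast address with the hypervisor,
      * once for untagged traffic and once for all VLAN IDs.  hcallid
      * selects between H_REG_BCMC and H_DEREG_BCMC.
      */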
1710 static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1711 {
1712         int ret = 0;
1713         u64 hret;
1714         u8 reg_type;
1715 
1716         /* De/Register untagged packets */
1717         reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
1718         hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1719                                      port->logical_port_id,
1720                                      reg_type, port->mac_addr, 0, hcallid);
1721         if (hret != H_SUCCESS) {
1722                 pr_err("%sregistering bc address failed (untagged)\n",
1723                        hcallid == H_REG_BCMC ? "" : "de");
1724                 ret = -EIO;
1725                 goto out_herr;
1726         }
1727 
1728         /* De/Register VLAN packets */
1729         reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
1730         hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1731                                      port->logical_port_id,
1732                                      reg_type, port->mac_addr, 0, hcallid);
1733         if (hret != H_SUCCESS) {
1734                 pr_err("%sregistering bc address failed (vlan)\n",
1735                        hcallid == H_REG_BCMC ? "" : "de");
1736                 ret = -EIO;
1737         }
1738 out_herr:
1739         return ret;
1740 }
1741 
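     /*
      * ndo_set_mac_address handler: program the new MAC through an
      * H_PORT_CB0 modify call and, while the port is up, move the
      * broadcast registration from the old address to the new one.
      */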
1742 static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1743 {
1744         struct ehea_port *port = netdev_priv(dev);
1745         struct sockaddr *mac_addr = sa;
1746         struct hcp_ehea_port_cb0 *cb0;
1747         int ret;
1748         u64 hret;
1749 
1750         if (!is_valid_ether_addr(mac_addr->sa_data)) {
1751                 ret = -EADDRNOTAVAIL;
1752                 goto out;
1753         }
1754 
1755         cb0 = (void *)get_zeroed_page(GFP_KERNEL);
1756         if (!cb0) {
1757                 pr_err("no mem for cb0\n");
1758                 ret = -ENOMEM;
1759                 goto out;
1760         }
1761 
1762         memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);
1763 
1764         cb0->port_mac_addr = cb0->port_mac_addr >> 16;
1765 
1766         hret = ehea_h_modify_ehea_port(port->adapter->handle,
1767                                        port->logical_port_id, H_PORT_CB0,
1768                                        EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
1769         if (hret != H_SUCCESS) {
1770                 ret = -EIO;
1771                 goto out_free;
1772         }
1773 
1774         memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
1775 
1776         /* Deregister old MAC in pHYP */
1777         if (port->state == EHEA_PORT_UP) {
1778                 ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
1779                 if (ret)
1780                         goto out_upregs;
1781         }
1782 
1783         port->mac_addr = cb0->port_mac_addr << 16;
1784 
1785         /* Register new MAC in pHYP */
1786         if (port->state == EHEA_PORT_UP) {
1787                 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
1788                 if (ret)
1789                         goto out_upregs;
1790         }
1791 
1792         ret = 0;
1793 
1794 out_upregs:
1795         ehea_update_bcmc_registrations();
1796 out_free:
1797         free_page((unsigned long)cb0);
1798 out:
1799         return ret;
1800 }
1801 
1802 static void ehea_promiscuous_error(u64 hret, int enable)
1803 {
1804         if (hret == H_AUTHORITY)
1805                 pr_info("Hypervisor denied %sabling promiscuous mode\n",
1806                         enable == 1 ? "en" : "dis");
1807         else
1808                 pr_err("failed %sabling promiscuous mode\n",
1809                        enable == 1 ? "en" : "dis");
1810 }
1811 
1812 static void ehea_promiscuous(struct net_device *dev, int enable)
1813 {
1814         struct ehea_port *port = netdev_priv(dev);
1815         struct hcp_ehea_port_cb7 *cb7;
1816         u64 hret;
1817 
1818         if (enable == port->promisc)
1819                 return;
1820 
1821         cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
1822         if (!cb7) {
1823                 pr_err("no mem for cb7\n");
1824                 goto out;
1825         }
1826 
1827         /* Modify Pxs_DUCQPN in CB7 */
1828         cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;
1829 
1830         hret = ehea_h_modify_ehea_port(port->adapter->handle,
1831                                        port->logical_port_id,
1832                                        H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
1833         if (hret) {
1834                 ehea_promiscuous_error(hret, enable);
1835                 goto out;
1836         }
1837 
1838         port->promisc = enable;
1839 out:
1840         free_page((unsigned long)cb7);
1841 }
1842 
1843 static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
1844                                      u32 hcallid)
1845 {
1846         u64 hret;
1847         u8 reg_type;
1848 
1849         reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
1850         if (mc_mac_addr == 0)
1851                 reg_type |= EHEA_BCMC_SCOPE_ALL;
1852 
1853         hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1854                                      port->logical_port_id,
1855                                      reg_type, mc_mac_addr, 0, hcallid);
1856         if (hret)
1857                 goto out;
1858 
1859         reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
1860         if (mc_mac_addr == 0)
1861                 reg_type |= EHEA_BCMC_SCOPE_ALL;
1862 
1863         hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1864                                      port->logical_port_id,
1865                                      reg_type, mc_mac_addr, 0, hcallid);
1866 out:
1867         return hret;
1868 }
1869 
1870 static int ehea_drop_multicast_list(struct net_device *dev)
1871 {
1872         struct ehea_port *port = netdev_priv(dev);
1873         struct ehea_mc_list *mc_entry = port->mc_list;
1874         struct list_head *pos;
1875         struct list_head *temp;
1876         int ret = 0;
1877         u64 hret;
1878 
1879         list_for_each_safe(pos, temp, &(port->mc_list->list)) {
1880                 mc_entry = list_entry(pos, struct ehea_mc_list, list);
1881 
1882                 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
1883                                                  H_DEREG_BCMC);
1884                 if (hret) {
1885                         pr_err("failed deregistering mcast MAC\n");
1886                         ret = -EIO;
1887                 }
1888 
1889                 list_del(pos);
1890                 kfree(mc_entry);
1891         }
1892         return ret;
1893 }
1894 
1895 static void ehea_allmulti(struct net_device *dev, int enable)
1896 {
1897         struct ehea_port *port = netdev_priv(dev);
1898         u64 hret;
1899 
1900         if (!port->allmulti) {
1901                 if (enable) {
1902                         /* Enable ALLMULTI */
1903                         ehea_drop_multicast_list(dev);
1904                         hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
1905                         if (!hret)
1906                                 port->allmulti = 1;
1907                         else
1908                                 netdev_err(dev,
1909                                            "failed enabling IFF_ALLMULTI\n");
1910                 }
1911         } else {
1912                 if (!enable) {
1913                         /* Disable ALLMULTI */
1914                         hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
1915                         if (!hret)
1916                                 port->allmulti = 0;
1917                         else
1918                                 netdev_err(dev,
1919                                            "failed disabling IFF_ALLMULTI\n");
1920                 }
1921         }
1922 }
1923 
1924 static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
1925 {
1926         struct ehea_mc_list *ehea_mcl_entry;
1927         u64 hret;
1928 
1929         ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
1930         if (!ehea_mcl_entry)
1931                 return;
1932 
1933         INIT_LIST_HEAD(&ehea_mcl_entry->list);
1934 
1935         memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);
1936 
1937         hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
1938                                          H_REG_BCMC);
1939         if (!hret)
1940                 list_add(&ehea_mcl_entry->list, &port->mc_list->list);
1941         else {
1942                 pr_err("failed registering mcast MAC\n");
1943                 kfree(ehea_mcl_entry);
1944         }
1945 }
1946 
1947 static void ehea_set_multicast_list(struct net_device *dev)
1948 {
1949         struct ehea_port *port = netdev_priv(dev);
1950         struct netdev_hw_addr *ha;
1951         int ret;
1952 
1953         ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));
1954 
1955         if (dev->flags & IFF_ALLMULTI) {
1956                 ehea_allmulti(dev, 1);
1957                 goto out;
1958         }
1959         ehea_allmulti(dev, 0);
1960 
1961         if (!netdev_mc_empty(dev)) {
1962                 ret = ehea_drop_multicast_list(dev);
1963                 if (ret) {
1964                         /* Dropping the current multicast list failed.
1965                          * Enabling ALL_MULTI is the best we can do.
1966                          */
1967                         ehea_allmulti(dev, 1);
1968                 }
1969 
1970                 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
1971                         pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
1972                                 port->adapter->max_mc_mac);
1973                         goto out;
1974                 }
1975 
1976                 netdev_for_each_mc_addr(ha, dev)
1977                         ehea_add_multicast_entry(port, ha->addr);
1978 
1979         }
1980 out:
1981         ehea_update_bcmc_registrations();
1982 }
1983 
1984 static int ehea_change_mtu(struct net_device *dev, int new_mtu)
1985 {
1986         if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
1987                 return -EINVAL;
1988         dev->mtu = new_mtu;
1989         return 0;
1990 }
1991 
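     /*
      * Set the transmit control flags and header offsets common to both
      * SWQE formats.  Checksum offload is only requested for IPv4, and
      * tcp_offset points at the checksum field of the TCP/UDP header.
      */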
1992 static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
1993 {
1994         swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
1995 
1996         if (skb->protocol != htons(ETH_P_IP))
1997                 return;
1998 
1999         if (skb->ip_summed == CHECKSUM_PARTIAL)
2000                 swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;
2001 
2002         swqe->ip_start = skb_network_offset(skb);
2003         swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;
2004 
2005         switch (ip_hdr(skb)->protocol) {
2006         case IPPROTO_UDP:
2007                 if (skb->ip_summed == CHECKSUM_PARTIAL)
2008                         swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
2009 
2010                 swqe->tcp_offset = swqe->ip_end + 1 +
2011                                    offsetof(struct udphdr, check);
2012                 break;
2013 
2014         case IPPROTO_TCP:
2015                 if (skb->ip_summed == CHECKSUM_PARTIAL)
2016                         swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
2017 
2018                 swqe->tcp_offset = swqe->ip_end + 1 +
2019                                    offsetof(struct tcphdr, check);
2020                 break;
2021         }
2022 }
2023 
2024 static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
2025                        struct ehea_swqe *swqe, u32 lkey)
2026 {
2027         swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;
2028 
2029         xmit_common(skb, swqe);
2030 
2031         write_swqe2_data(skb, dev, swqe, lkey);
2032 }
2033 
2034 static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
2035                        struct ehea_swqe *swqe)
2036 {
2037         u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
2038 
2039         xmit_common(skb, swqe);
2040 
2041         if (!skb->data_len)
2042                 skb_copy_from_linear_data(skb, imm_data, skb->len);
2043         else
2044                 skb_copy_bits(skb, 0, imm_data, skb->len);
2045 
2046         swqe->immediate_data_length = skb->len;
2047         dev_kfree_skb(skb);
2048 }
2049 
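     /*
      * Main transmit path.  Frames up to SWQE3_MAX_IMM bytes are copied
      * entirely into a type-3 SWQE and the skb is freed immediately;
      * larger frames use a type-2 SWQE with descriptors and the skb is
      * kept in sq_skba until the signalled completion comes back.
      */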
2050 static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2051 {
2052         struct ehea_port *port = netdev_priv(dev);
2053         struct ehea_swqe *swqe;
2054         u32 lkey;
2055         int swqe_index;
2056         struct ehea_port_res *pr;
2057         struct netdev_queue *txq;
2058 
2059         pr = &port->port_res[skb_get_queue_mapping(skb)];
2060         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2061 
2062         swqe = ehea_get_swqe(pr->qp, &swqe_index);
2063         memset(swqe, 0, SWQE_HEADER_SIZE);
2064         atomic_dec(&pr->swqe_avail);
2065 
2066         if (vlan_tx_tag_present(skb)) {
2067                 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2068                 swqe->vlan_tag = vlan_tx_tag_get(skb);
2069         }
2070 
2071         pr->tx_packets++;
2072         pr->tx_bytes += skb->len;
2073 
2074         if (skb->len <= SWQE3_MAX_IMM) {
2075                 u32 sig_iv = port->sig_comp_iv;
2076                 u32 swqe_num = pr->swqe_id_counter;
2077                 ehea_xmit3(skb, dev, swqe);
2078                 swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
2079                         | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
2080                 if (pr->swqe_ll_count >= (sig_iv - 1)) {
2081                         swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
2082                                                       sig_iv);
2083                         swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2084                         pr->swqe_ll_count = 0;
2085                 } else
2086                         pr->swqe_ll_count += 1;
2087         } else {
2088                 swqe->wr_id =
2089                         EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
2090                       | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
2091                       | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
2092                       | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
2093                 pr->sq_skba.arr[pr->sq_skba.index] = skb;
2094 
2095                 pr->sq_skba.index++;
2096                 pr->sq_skba.index &= (pr->sq_skba.len - 1);
2097 
2098                 lkey = pr->send_mr.lkey;
2099                 ehea_xmit2(skb, dev, swqe, lkey);
2100                 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2101         }
2102         pr->swqe_id_counter += 1;
2103 
2104         netif_info(port, tx_queued, dev,
2105                    "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
2106         if (netif_msg_tx_queued(port))
2107                 ehea_dump(swqe, 512, "swqe");
2108 
2109         if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
2110                 netif_tx_stop_queue(txq);
2111                 swqe->tx_control |= EHEA_SWQE_PURGE;
2112         }
2113 
2114         ehea_post_swqe(pr->qp, swqe);
2115 
2116         if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2117                 pr->p_stats.queue_stopped++;
2118                 netif_tx_stop_queue(txq);
2119         }
2120 
2121         return NETDEV_TX_OK;
2122 }
2123 
2124 static int ehea_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
2125 {
2126         struct ehea_port *port = netdev_priv(dev);
2127         struct ehea_adapter *adapter = port->adapter;
2128         struct hcp_ehea_port_cb1 *cb1;
2129         int index;
2130         u64 hret;
2131         int err = 0;
2132 
2133         cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2134         if (!cb1) {
2135                 pr_err("no mem for cb1\n");
2136                 err = -ENOMEM;
2137                 goto out;
2138         }
2139 
2140         hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2141                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2142         if (hret != H_SUCCESS) {
2143                 pr_err("query_ehea_port failed\n");
2144                 err = -EINVAL;
2145                 goto out;
2146         }
2147 
2148         index = (vid / 64);
2149         cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));
2150 
2151         hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2152                                        H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2153         if (hret != H_SUCCESS) {
2154                 pr_err("modify_ehea_port failed\n");
2155                 err = -EINVAL;
2156         }
2157 out:
2158         free_page((unsigned long)cb1);
2159         return err;
2160 }
2161 
2162 static int ehea_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
2163 {
2164         struct ehea_port *port = netdev_priv(dev);
2165         struct ehea_adapter *adapter = port->adapter;
2166         struct hcp_ehea_port_cb1 *cb1;
2167         int index;
2168         u64 hret;
2169         int err = 0;
2170 
2171         cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2172         if (!cb1) {
2173                 pr_err("no mem for cb1\n");
2174                 err = -ENOMEM;
2175                 goto out;
2176         }
2177 
2178         hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2179                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2180         if (hret != H_SUCCESS) {
2181                 pr_err("query_ehea_port failed\n");
2182                 err = -EINVAL;
2183                 goto out;
2184         }
2185 
2186         index = (vid / 64);
2187         cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));
2188 
2189         hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2190                                        H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2191         if (hret != H_SUCCESS) {
2192                 pr_err("modify_ehea_port failed\n");
2193                 err = -EINVAL;
2194         }
2195 out:
2196         free_page((unsigned long)cb1);
2197         return err;
2198 }
2199 
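     /*
      * Step the QP through its activation sequence (INITIALIZED,
      * ENABLED, ready-to-send) with alternating query/modify hcalls,
      * re-reading the state before each transition.
      */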
2200 static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2201 {
2202         int ret = -EIO;
2203         u64 hret;
2204         u16 dummy16 = 0;
2205         u64 dummy64 = 0;
2206         struct hcp_modify_qp_cb0 *cb0;
2207 
2208         cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2209         if (!cb0) {
2210                 ret = -ENOMEM;
2211                 goto out;
2212         }
2213 
2214         hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2215                                     EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2216         if (hret != H_SUCCESS) {
2217                 pr_err("query_ehea_qp failed (1)\n");
2218                 goto out;
2219         }
2220 
2221         cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
2222         hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2223                                      EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2224                                      &dummy64, &dummy64, &dummy16, &dummy16);
2225         if (hret != H_SUCCESS) {
2226                 pr_err("modify_ehea_qp failed (1)\n");
2227                 goto out;
2228         }
2229 
2230         hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2231                                     EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2232         if (hret != H_SUCCESS) {
2233                 pr_err("query_ehea_qp failed (2)\n");
2234                 goto out;
2235         }
2236 
2237         cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
2238         hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2239                                      EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2240                                      &dummy64, &dummy64, &dummy16, &dummy16);
2241         if (hret != H_SUCCESS) {
2242                 pr_err("modify_ehea_qp failed (2)\n");
2243                 goto out;
2244         }
2245 
2246         hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2247                                     EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2248         if (hret != H_SUCCESS) {
2249                 pr_err("query_ehea_qp failed (3)\n");
2250                 goto out;
2251         }
2252 
2253         cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
2254         hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2255                                      EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2256                                      &dummy64, &dummy64, &dummy16, &dummy16);
2257         if (hret != H_SUCCESS) {
2258                 pr_err("modify_ehea_qp failed (3)\n");
2259                 goto out;
2260         }
2261 
2262         hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2263                                     EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2264         if (hret != H_SUCCESS) {
2265                 pr_err("query_ehea_qp failed (4)\n");
2266                 goto out;
2267         }
2268 
2269         ret = 0;
2270 out:
2271         free_page((unsigned long)cb0);
2272         return ret;
2273 }
2274 
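     /*
      * Create the per-port event queue and one fully sized port
      * resource per default QP.  On error, everything that was already
      * built is torn down again in reverse order.
      */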
2275 static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
2276 {
2277         int ret, i;
2278         struct port_res_cfg pr_cfg, pr_cfg_small_rx;
2279         enum ehea_eq_type eq_type = EHEA_EQ;
2280 
2281         port->qp_eq = ehea_create_eq(port->adapter, eq_type,
2282                                    EHEA_MAX_ENTRIES_EQ, 1);
2283         if (!port->qp_eq) {
2284                 ret = -EINVAL;
2285                 pr_err("ehea_create_eq failed (qp_eq)\n");
2286                 goto out_kill_eq;
2287         }
2288 
2289         pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
2290         pr_cfg.max_entries_scq = sq_entries * 2;
2291         pr_cfg.max_entries_sq = sq_entries;
2292         pr_cfg.max_entries_rq1 = rq1_entries;
2293         pr_cfg.max_entries_rq2 = rq2_entries;
2294         pr_cfg.max_entries_rq3 = rq3_entries;
2295 
2296         pr_cfg_small_rx.max_entries_rcq = 1;
2297         pr_cfg_small_rx.max_entries_scq = sq_entries;
2298         pr_cfg_small_rx.max_entries_sq = sq_entries;
2299         pr_cfg_small_rx.max_entries_rq1 = 1;
2300         pr_cfg_small_rx.max_entries_rq2 = 1;
2301         pr_cfg_small_rx.max_entries_rq3 = 1;
2302 
2303         for (i = 0; i < def_qps; i++) {
2304                 ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
2305                 if (ret)
2306                         goto out_clean_pr;
2307         }
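             /*
              * Note: the following loop never executes (it starts and
              * ends at def_qps); it appears to be left over from a
              * variant that created extra TX queues with the small-RX
              * configuration.
              */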
2308         for (i = def_qps; i < def_qps; i++) {
2309                 ret = ehea_init_port_res(port, &port->port_res[i],
2310                                          &pr_cfg_small_rx, i);
2311                 if (ret)
2312                         goto out_clean_pr;
2313         }
2314 
2315         return 0;
2316 
2317 out_clean_pr:
2318         while (--i >= 0)
2319                 ehea_clean_portres(port, &port->port_res[i]);
2320 
2321 out_kill_eq:
2322         ehea_destroy_eq(port->qp_eq);
2323         return ret;
2324 }
2325 
2326 static int ehea_clean_all_portres(struct ehea_port *port)
2327 {
2328         int ret = 0;
2329         int i;
2330 
2331         for (i = 0; i < port->num_def_qps; i++)
2332                 ret |= ehea_clean_portres(port, &port->port_res[i]);
2333 
2334         ret |= ehea_destroy_eq(port->qp_eq);
2335 
2336         return ret;
2337 }
2338 
2339 static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
2340 {
2341         if (adapter->active_ports)
2342                 return;
2343 
2344         ehea_rem_mr(&adapter->mr);
2345 }
2346 
2347 static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
2348 {
2349         if (adapter->active_ports)
2350                 return 0;
2351 
2352         return ehea_reg_kernel_mr(adapter, &adapter->mr);
2353 }
2354 
2355 static int ehea_up(struct net_device *dev)
2356 {
2357         int ret, i;
2358         struct ehea_port *port = netdev_priv(dev);
2359 
2360         if (port->state == EHEA_PORT_UP)
2361                 return 0;
2362 
2363         ret = ehea_port_res_setup(port, port->num_def_qps);
2364         if (ret) {
2365                 netdev_err(dev, "ehea_port_res_setup failed. ret:%d\n", ret);
2366                 goto out;
2367         }
2368 
2369         /* Set default QP for this port */
2370         ret = ehea_configure_port(port);
2371         if (ret) {
2372                 netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
2373                 goto out_clean_pr;
2374         }
2375 
2376         ret = ehea_reg_interrupts(dev);
2377         if (ret) {
2378                 netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
2379                 goto out_clean_pr;
2380         }
2381 
2382         for (i = 0; i < port->num_def_qps; i++) {
2383                 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2384                 if (ret) {
2385                         netdev_err(dev, "activate_qp failed\n");
2386                         goto out_free_irqs;
2387                 }
2388         }
2389 
2390         for (i = 0; i < port->num_def_qps; i++) {
2391                 ret = ehea_fill_port_res(&port->port_res[i]);
2392                 if (ret) {
2393                         netdev_err(dev, "ehea_fill_port_res failed\n");
2394                         goto out_free_irqs;
2395                 }
2396         }
2397 
2398         ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2399         if (ret) {
2400                 ret = -EIO;
2401                 goto out_free_irqs;
2402         }
2403 
2404         port->state = EHEA_PORT_UP;
2405 
2406         ret = 0;
2407         goto out;
2408 
2409 out_free_irqs:
2410         ehea_free_interrupts(dev);
2411 
2412 out_clean_pr:
2413         ehea_clean_all_portres(port);
2414 out:
2415         if (ret)
2416                 netdev_info(dev, "Failed starting. ret=%i\n", ret);
2417 
2418         ehea_update_bcmc_registrations();
2419         ehea_update_firmware_handles();
2420 
2421         return ret;
2422 }
2423 
2424 static void port_napi_disable(struct ehea_port *port)
2425 {
2426         int i;
2427 
2428         for (i = 0; i < port->num_def_qps; i++)
2429                 napi_disable(&port->port_res[i].napi);
2430 }
2431 
2432 static void port_napi_enable(struct ehea_port *port)
2433 {
2434         int i;
2435 
2436         for (i = 0; i < port->num_def_qps; i++)
2437                 napi_enable(&port->port_res[i].napi);
2438 }
2439 
2440 static int ehea_open(struct net_device *dev)
2441 {
2442         int ret;
2443         struct ehea_port *port = netdev_priv(dev);
2444 
2445         mutex_lock(&port->port_lock);
2446 
2447         netif_info(port, ifup, dev, "enabling port\n");
2448 
2449         ret = ehea_up(dev);
2450         if (!ret) {
2451                 port_napi_enable(port);
2452                 netif_tx_start_all_queues(dev);
2453         }
2454 
2455         mutex_unlock(&port->port_lock);
2456         schedule_delayed_work(&port->stats_work,
2457                               round_jiffies_relative(msecs_to_jiffies(1000)));
2458 
2459         return ret;
2460 }
2461 
2462 static int ehea_down(struct net_device *dev)
2463 {
2464         int ret;
2465         struct ehea_port *port = netdev_priv(dev);
2466 
2467         if (port->state == EHEA_PORT_DOWN)
2468                 return 0;
2469 
2470         ehea_drop_multicast_list(dev);
2471         ehea_allmulti(dev, 0);
2472         ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2473 
2474         ehea_free_interrupts(dev);
2475 
2476         port->state = EHEA_PORT_DOWN;
2477 
2478         ehea_update_bcmc_registrations();
2479 
2480         ret = ehea_clean_all_portres(port);
2481         if (ret)
2482                 netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);
2483 
2484         ehea_update_firmware_handles();
2485 
2486         return ret;
2487 }
2488 
2489 static int ehea_stop(struct net_device *dev)
2490 {
2491         int ret;
2492         struct ehea_port *port = netdev_priv(dev);
2493 
2494         netif_info(port, ifdown, dev, "disabling port\n");
2495 
2496         set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2497         cancel_work_sync(&port->reset_task);
2498         cancel_delayed_work_sync(&port->stats_work);
2499         mutex_lock(&port->port_lock);
2500         netif_tx_stop_all_queues(dev);
2501         port_napi_disable(port);
2502         ret = ehea_down(dev);
2503         mutex_unlock(&port->port_lock);
2504         clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2505         return ret;
2506 }
2507 
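     /*
      * Flag every send WQE of the queue with EHEA_SWQE_PURGE so that
      * outstanding work requests are discarded rather than transmitted.
      */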
2508 static void ehea_purge_sq(struct ehea_qp *orig_qp)
2509 {
2510         struct ehea_qp qp = *orig_qp;
2511         struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2512         struct ehea_swqe *swqe;
2513         int wqe_index;
2514         int i;
2515 
2516         for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
2517                 swqe = ehea_get_swqe(&qp, &wqe_index);
2518                 swqe->tx_control |= EHEA_SWQE_PURGE;
2519         }
2520 }
2521 
2522 static void ehea_flush_sq(struct ehea_port *port)
2523 {
2524         int i;
2525 
2526         for (i = 0; i < port->num_def_qps; i++) {
2527                 struct ehea_port_res *pr = &port->port_res[i];
2528                 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
2529                 int ret;
2530 
2531                 ret = wait_event_timeout(port->swqe_avail_wq,
2532                          atomic_read(&pr->swqe_avail) >= swqe_max,
2533                          msecs_to_jiffies(100));
2534 
2535                 if (!ret) {
2536                         pr_err("WARNING: sq not flushed completely\n");
2537                         break;
2538                 }
2539         }
2540 }
2541 
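     /*
      * Quiesce all queue pairs ahead of a memory re-registration: purge
      * their send queues, clear the QP enable bit via a modify hcall
      * and deregister the shared memory regions.
      */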
2542 static int ehea_stop_qps(struct net_device *dev)
2543 {
2544         struct ehea_port *port = netdev_priv(dev);
2545         struct ehea_adapter *adapter = port->adapter;
2546         struct hcp_modify_qp_cb0 *cb0;
2547         int ret = -EIO;
2548         int dret;
2549         int i;
2550         u64 hret;
2551         u64 dummy64 = 0;
2552         u16 dummy16 = 0;
2553 
2554         cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2555         if (!cb0) {
2556                 ret = -ENOMEM;
2557                 goto out;
2558         }
2559 
2560         for (i = 0; i < (port->num_def_qps); i++) {
2561                 struct ehea_port_res *pr =  &port->port_res[i];
2562                 struct ehea_qp *qp = pr->qp;
2563 
2564                 /* Purge send queue */
2565                 ehea_purge_sq(qp);
2566 
2567                 /* Disable queue pair */
2568                 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2569                                             EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2570                                             cb0);
2571                 if (hret != H_SUCCESS) {
2572                         pr_err("query_ehea_qp failed (1)\n");
2573                         goto out;
2574                 }
2575 
2576                 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2577                 cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
2578 
2579                 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2580                                              EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2581                                                             1), cb0, &dummy64,
2582                                              &dummy64, &dummy16, &dummy16);
2583                 if (hret != H_SUCCESS) {
2584                         pr_err("modify_ehea_qp failed (1)\n");
2585                         goto out;
2586                 }
2587 
2588                 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2589                                             EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2590                                             cb0);
2591                 if (hret != H_SUCCESS) {
2592                         pr_err("query_ehea_qp failed (2)\n");
2593                         goto out;
2594                 }
2595 
2596                 /* deregister shared memory regions */
2597                 dret = ehea_rem_smrs(pr);
2598                 if (dret) {
2599                         pr_err("unreg shared memory region failed\n");
2600                         goto out;
2601                 }
2602         }
2603 
2604         ret = 0;
2605 out:
2606         free_page((unsigned long)cb0);
2607 
2608         return ret;
2609 }
2610 
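     /*
      * After the memory region has been re-registered, patch the new
      * lkey and the remapped buffer addresses back into every RQ2/RQ3
      * WQE that still has an skb attached.
      */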
2611 static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
2612 {
2613         struct ehea_qp qp = *orig_qp;
2614         struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2615         struct ehea_rwqe *rwqe;
2616         struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
2617         struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
2618         struct sk_buff *skb;
2619         u32 lkey = pr->recv_mr.lkey;
2620 
2621 
2622         int i;
2623         int index;
2624 
2625         for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
2626                 rwqe = ehea_get_next_rwqe(&qp, 2);
2627                 rwqe->sg_list[0].l_key = lkey;
2628                 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2629                 skb = skba_rq2[index];
2630                 if (skb)
2631                         rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2632         }
2633 
2634         for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
2635                 rwqe = ehea_get_next_rwqe(&qp, 3);
2636                 rwqe->sg_list[0].l_key = lkey;
2637                 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2638                 skb = skba_rq3[index];
2639                 if (skb)
2640                         rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2641         }
2642 }
2643 
2644 static int ehea_restart_qps(struct net_device *dev)
2645 {
2646         struct ehea_port *port = netdev_priv(dev);
2647         struct ehea_adapter *adapter = port->adapter;
2648         int ret = 0;
2649         int i;
2650 
2651         struct hcp_modify_qp_cb0 *cb0;
2652         u64 hret;
2653         u64 dummy64 = 0;
2654         u16 dummy16 = 0;
2655 
2656         cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2657         if (!cb0) {
2658                 ret = -ENOMEM;
2659                 goto out;
2660         }
2661 
2662         for (i = 0; i < (port->num_def_qps); i++) {
2663                 struct ehea_port_res *pr =  &port->port_res[i];
2664                 struct ehea_qp *qp = pr->qp;
2665 
2666                 ret = ehea_gen_smrs(pr);
2667                 if (ret) {
2668                         netdev_err(dev, "creation of shared memory regions failed\n");
2669                         goto out;
2670                 }
2671 
2672                 ehea_update_rqs(qp, pr);
2673 
2674                 /* Enable queue pair */
2675                 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2676                                             EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2677                                             cb0);
2678                 if (hret != H_SUCCESS) {
2679                         netdev_err(dev, "query_ehea_qp failed (1)\n");
2680                         goto out;
2681                 }
2682 
2683                 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2684                 cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
2685 
2686                 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2687                                              EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2688                                                             1), cb0, &dummy64,
2689                                              &dummy64, &dummy16, &dummy16);
2690                 if (hret != H_SUCCESS) {
2691                         netdev_err(dev, "modify_ehea_qp failed (1)\n");
2692                         goto out;
2693                 }
2694 
2695                 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2696                                             EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2697                                             cb0);
2698                 if (hret != H_SUCCESS) {
2699                         netdev_err(dev, "query_ehea_qp failed (2)\n");
2700                         goto out;
2701                 }
2702 
2703                 /* refill entire queue */
2704                 ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
2705                 ehea_refill_rq2(pr, 0);
2706                 ehea_refill_rq3(pr, 0);
2707         }
2708 out:
2709         free_page((unsigned long)cb0);
2710 
2711         return ret;
2712 }
2713 
2714 static void ehea_reset_port(struct work_struct *work)
2715 {
2716         int ret;
2717         struct ehea_port *port =
2718                 container_of(work, struct ehea_port, reset_task);
2719         struct net_device *dev = port->netdev;
2720 
2721         mutex_lock(&dlpar_mem_lock);
2722         port->resets++;
2723         mutex_lock(&port->port_lock);
2724         netif_tx_disable(dev);
2725 
2726         port_napi_disable(port);
2727 
2728         ehea_down(dev);
2729 
2730         ret = ehea_up(dev);
2731         if (ret)
2732                 goto out;
2733 
2734         ehea_set_multicast_list(dev);
2735 
2736         netif_info(port, timer, dev, "reset successful\n");
2737 
2738         port_napi_enable(port);
2739 
2740         netif_tx_wake_all_queues(dev);
2741 out:
2742         mutex_unlock(&port->port_lock);
2743         mutex_unlock(&dlpar_mem_lock);
2744 }
2745 
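     /*
      * React to an LPAR memory add/remove: stop traffic on all active
      * ports, swap the adapter-wide memory region for one covering the
      * new layout, then restart the QPs and re-enable the ports.
      */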
2746 static void ehea_rereg_mrs(void)
2747 {
2748         int ret, i;
2749         struct ehea_adapter *adapter;
2750 
2751         pr_info("LPAR memory changed - re-initializing driver\n");
2752 
2753         list_for_each_entry(adapter, &adapter_list, list)
2754                 if (adapter->active_ports) {
2755                         /* Shutdown all ports */
2756                         for (i = 0; i < EHEA_MAX_PORTS; i++) {
2757                                 struct ehea_port *port = adapter->port[i];
2758                                 struct net_device *dev;
2759 
2760                                 if (!port)
2761                                         continue;
2762 
2763                                 dev = port->netdev;
2764 
2765                                 if (dev->flags & IFF_UP) {
2766                                         mutex_lock(&port->port_lock);
2767                                         netif_tx_disable(dev);
2768                                         ehea_flush_sq(port);
2769                                         ret = ehea_stop_qps(dev);
2770                                         if (ret) {
2771                                                 mutex_unlock(&port->port_lock);
2772                                                 goto out;
2773                                         }
2774                                         port_napi_disable(port);
2775                                         mutex_unlock(&port->port_lock);
2776                                 }
2777                                 reset_sq_restart_flag(port);
2778                         }
2779 
2780                         /* Unregister old memory region */
2781                         ret = ehea_rem_mr(&adapter->mr);
2782                         if (ret) {
2783                                 pr_err("unregister MR failed - driver inoperable!\n");
2784                                 goto out;
2785                         }
2786                 }
2787 
2788         clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2789 
2790         list_for_each_entry(adapter, &adapter_list, list)
2791                 if (adapter->active_ports) {
2792                         /* Register new memory region */
2793                         ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
2794                         if (ret) {
2795                                 pr_err("register MR failed - driver inoperable!\n");
2796                                 goto out;
2797                         }
2798 
2799                         /* Restart all ports */
2800                         for (i = 0; i < EHEA_MAX_PORTS; i++) {
2801                                 struct ehea_port *port = adapter->port[i];
2802 
2803                                 if (port) {
2804                                         struct net_device *dev = port->netdev;
2805 
2806                                         if (dev->flags & IFF_UP) {
2807                                                 mutex_lock(&port->port_lock);
2808                                                 ret = ehea_restart_qps(dev);
2809                                                 if (!ret) {
2810                                                         check_sqs(port);
2811                                                         port_napi_enable(port);
2812                                                         netif_tx_wake_all_queues(dev);
2813                                                 } else {
2814                                                         netdev_err(dev, "Unable to restart QPS\n");
2815                                                 }
2816                                                 mutex_unlock(&port->port_lock);
2817                                         }
2818                                 }
2819                         }
2820                 }
2821         pr_info("re-initializing driver complete\n");
2822 out:
2823         return;
2824 }
2825 
2826 static void ehea_tx_watchdog(struct net_device *dev)
2827 {
2828         struct ehea_port *port = netdev_priv(dev);
2829 
2830         if (netif_carrier_ok(dev) &&
2831             !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
2832                 ehea_schedule_port_reset(port);
2833 }
2834 
2835 static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2836 {
2837         struct hcp_query_ehea *cb;
2838         u64 hret;
2839         int ret;
2840 
2841         cb = (void *)get_zeroed_page(GFP_KERNEL);
2842         if (!cb) {
2843                 ret = -ENOMEM;
2844                 goto out;
2845         }
2846 
2847         hret = ehea_h_query_ehea(adapter->handle, cb);
2848 
2849         if (hret != H_SUCCESS) {
2850                 ret = -EIO;
2851                 goto out_herr;
2852         }
2853 
2854         adapter->max_mc_mac = cb->max_mc_mac - 1;
2855         ret = 0;
2856 
2857 out_herr:
2858         free_page((unsigned long)cb);
2859 out:
2860         return ret;
2861 }
2862 
2863 static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
2864 {
2865         struct hcp_ehea_port_cb4 *cb4;
2866         u64 hret;
2867         int ret = 0;
2868 
2869         *jumbo = 0;
2870 
2871         /* (Try to) enable *jumbo frames */
2872         cb4 = (void *)get_zeroed_page(GFP_KERNEL);
2873         if (!cb4) {
2874                 pr_err("no mem for cb4\n");
2875                 ret = -ENOMEM;
2876                 goto out;
2877         } else {
2878                 hret = ehea_h_query_ehea_port(port->adapter->handle,
2879                                               port->logical_port_id,
2880                                               H_PORT_CB4,
2881                                               H_PORT_CB4_JUMBO, cb4);
2882                 if (hret == H_SUCCESS) {
2883                         if (cb4->jumbo_frame)
2884                                 *jumbo = 1;
2885                         else {
2886                                 cb4->jumbo_frame = 1;
2887                                 hret = ehea_h_modify_ehea_port(
2888                                                port->adapter->handle,
2889                                                port->logical_port_id,
2890                                                H_PORT_CB4,
2891                                                H_PORT_CB4_JUMBO,
2892                                                cb4);
2893 
2894                                 if (hret == H_SUCCESS)
2895                                         *jumbo = 1;
2896                         }
2897                 } else
2898                         ret = -EINVAL;
2899 
2900                 free_page((unsigned long)cb4);
2901         }
2902 out:
2903         return ret;
2904 }
2905 
2906 static ssize_t ehea_show_port_id(struct device *dev,
2907                                  struct device_attribute *attr, char *buf)
2908 {
2909         struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2910         return sprintf(buf, "%d", port->logical_port_id);
2911 }
2912 
2913 static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
2914                    NULL);
2915 
2916 static void logical_port_release(struct device *dev)
2917 {
2918         struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2919         of_node_put(port->ofdev.dev.of_node);
2920 }
2921 
2922 static struct device *ehea_register_port(struct ehea_port *port,
2923                                          struct device_node *dn)
2924 {
2925         int ret;
2926 
2927         port->ofdev.dev.of_node = of_node_get(dn);
2928         port->ofdev.dev.parent = &port->adapter->ofdev->dev;
2929         port->ofdev.dev.bus = &ibmebus_bus_type;
2930 
2931         dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
2932         port->ofdev.dev.release = logical_port_release;
2933 
2934         ret = of_device_register(&port->ofdev);
2935         if (ret) {
2936                 pr_err("failed to register device. ret=%d\n", ret);
2937                 goto out;
2938         }
2939 
2940         ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
2941         if (ret) {
2942                 pr_err("failed to register attributes, ret=%d\n", ret);
2943                 goto out_unreg_of_dev;
2944         }
2945 
2946         return &port->ofdev.dev;
2947 
2948 out_unreg_of_dev:
2949         of_device_unregister(&port->ofdev);
2950 out:
2951         return NULL;
2952 }
2953 
2954 static void ehea_unregister_port(struct ehea_port *port)
2955 {
2956         device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
2957         of_device_unregister(&port->ofdev);
2958 }
2959 
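/* Callbacks wiring the driver into the core network stack */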
2960 static const struct net_device_ops ehea_netdev_ops = {
2961         .ndo_open               = ehea_open,
2962         .ndo_stop               = ehea_stop,
2963         .ndo_start_xmit         = ehea_start_xmit,
2964 #ifdef CONFIG_NET_POLL_CONTROLLER
2965         .ndo_poll_controller    = ehea_netpoll,
2966 #endif
2967         .ndo_get_stats64        = ehea_get_stats64,
2968         .ndo_set_mac_address    = ehea_set_mac_addr,
2969         .ndo_validate_addr      = eth_validate_addr,
2970         .ndo_set_rx_mode        = ehea_set_multicast_list,
2971         .ndo_change_mtu         = ehea_change_mtu,
2972         .ndo_vlan_rx_add_vid    = ehea_vlan_rx_add_vid,
2973         .ndo_vlan_rx_kill_vid   = ehea_vlan_rx_kill_vid,
2974         .ndo_tx_timeout         = ehea_tx_watchdog,
2975 };
2976 
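/*
 * Bring up one logical port: allocate a multiqueue net_device, sense the
 * port attributes from firmware, register the port device, set up the
 * offload feature flags and register the netdev.  On failure all steps
 * completed so far are unwound and NULL is returned.
 */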
2977 static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
2978                                          u32 logical_port_id,
2979                                          struct device_node *dn)
2980 {
2981         int ret;
2982         struct net_device *dev;
2983         struct ehea_port *port;
2984         struct device *port_dev;
2985         int jumbo;
2986 
2987         /* allocate memory for the port structures */
2988         dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
2989 
2990         if (!dev) {
2991                 ret = -ENOMEM;
2992                 goto out_err;
2993         }
2994 
2995         port = netdev_priv(dev);
2996 
2997         mutex_init(&port->port_lock);
2998         port->state = EHEA_PORT_DOWN;
2999         port->sig_comp_iv = sq_entries / 10;
3000 
3001         port->adapter = adapter;
3002         port->netdev = dev;
3003         port->logical_port_id = logical_port_id;
3004 
3005         port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
3006 
3007         port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
3008         if (!port->mc_list) {
3009                 ret = -ENOMEM;
3010                 goto out_free_ethdev;
3011         }
3012 
3013         INIT_LIST_HEAD(&port->mc_list->list);
3014 
3015         ret = ehea_sense_port_attr(port);
3016         if (ret)
3017                 goto out_free_mc_list;
3018 
3019         netif_set_real_num_rx_queues(dev, port->num_def_qps);
3020         netif_set_real_num_tx_queues(dev, port->num_def_qps);
3021 
3022         port_dev = ehea_register_port(port, dn);
3023         if (!port_dev)
3024                 goto out_free_mc_list;
3025 
3026         SET_NETDEV_DEV(dev, port_dev);
3027 
3028         /* initialize net_device structure */
3029         memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
3030 
3031         dev->netdev_ops = &ehea_netdev_ops;
3032         ehea_set_ethtool_ops(dev);
3033 
3034         dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
3035                       NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
3036         dev->features = NETIF_F_SG | NETIF_F_TSO |
3037                       NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
3038                       NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
3039                       NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
3040         dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
3041                         NETIF_F_IP_CSUM;
3042         dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3043 
3044         INIT_WORK(&port->reset_task, ehea_reset_port);
3045         INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);
3046 
3047         init_waitqueue_head(&port->swqe_avail_wq);
3048         init_waitqueue_head(&port->restart_wq);
3049 
3050         memset(&port->stats, 0, sizeof(struct net_device_stats));
3051         ret = register_netdev(dev);
3052         if (ret) {
3053                 pr_err("register_netdev failed. ret=%d\n", ret);
3054                 goto out_unreg_port;
3055         }
3056 
3057         ret = ehea_get_jumboframe_status(port, &jumbo);
3058         if (ret)
3059                 netdev_err(dev, "failed determining jumbo frame status\n");
3060 
3061         netdev_info(dev, "Jumbo frames are %sabled\n",
3062                     jumbo == 1 ? "en" : "dis");
3063 
3064         adapter->active_ports++;
3065 
3066         return port;
3067 
3068 out_unreg_port:
3069         ehea_unregister_port(port);
3070 
3071 out_free_mc_list:
3072         kfree(port->mc_list);
3073 
3074 out_free_ethdev:
3075         free_netdev(dev);
3076 
3077 out_err:
3078         pr_err("setting up logical port with id=%d failed, ret=%d\n",
3079                logical_port_id, ret);
3080         return NULL;
3081 }
3082 
3083 static void ehea_shutdown_single_port(struct ehea_port *port)
3084 {
3085         struct ehea_adapter *adapter = port->adapter;
3086 
3087         cancel_work_sync(&port->reset_task);
3088         cancel_delayed_work_sync(&port->stats_work);
3089         unregister_netdev(port->netdev);
3090         ehea_unregister_port(port);
3091         kfree(port->mc_list);
3092         free_netdev(port->netdev);
3093         adapter->active_ports--;
3094 }
3095 
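/*
 * Walk the children of the lhea device-tree node and set up one port for
 * each child that carries an "ibm,hea-port-no" property.
 */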
3096 static int ehea_setup_ports(struct ehea_adapter *adapter)
3097 {
3098         struct device_node *lhea_dn;
3099         struct device_node *eth_dn = NULL;
3100 
3101         const u32 *dn_log_port_id;
3102         int i = 0;
3103 
3104         lhea_dn = adapter->ofdev->dev.of_node;
3105         while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3106 
3107                 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3108                                                  NULL);
3109                 if (!dn_log_port_id) {
3110                         pr_err("bad device node: eth_dn name=%s\n",
3111                                eth_dn->full_name);
3112                         continue;
3113                 }
3114 
3115                 if (ehea_add_adapter_mr(adapter)) {
3116                         pr_err("creating MR failed\n");
3117                         of_node_put(eth_dn);
3118                         return -EIO;
3119                 }
3120 
3121                 adapter->port[i] = ehea_setup_single_port(adapter,
3122                                                           *dn_log_port_id,
3123                                                           eth_dn);
3124                 if (adapter->port[i])
3125                         netdev_info(adapter->port[i]->netdev,
3126                                     "logical port id #%d\n", *dn_log_port_id);
3127                 else
3128                         ehea_remove_adapter_mr(adapter);
3129 
3130                 i++;
3131         }
3132         return 0;
3133 }
3134 
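/* Find the child node whose "ibm,hea-port-no" matches logical_port_id */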
3135 static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
3136                                            u32 logical_port_id)
3137 {
3138         struct device_node *lhea_dn;
3139         struct device_node *eth_dn = NULL;
3140         const u32 *dn_log_port_id;
3141 
3142         lhea_dn = adapter->ofdev->dev.of_node;
3143         while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3144 
3145                 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3146                                                  NULL);
3147                 if (dn_log_port_id)
3148                         if (*dn_log_port_id == logical_port_id)
3149                                 return eth_dn;
3150         }
3151 
3152         return NULL;
3153 }
3154 
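/*
 * sysfs "probe_port" store hook: add a not-yet-configured logical port at
 * runtime.  Usage sketch (device path illustrative):
 *   echo <logical_port_id> > /sys/bus/ibmebus/devices/<adapter>/probe_port
 */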
3155 static ssize_t ehea_probe_port(struct device *dev,
3156                                struct device_attribute *attr,
3157                                const char *buf, size_t count)
3158 {
3159         struct ehea_adapter *adapter = dev_get_drvdata(dev);
3160         struct ehea_port *port;
3161         struct device_node *eth_dn = NULL;
3162         int i;
3163 
3164         u32 logical_port_id;
3165 
3166         sscanf(buf, "%u", &logical_port_id);
3167 
3168         port = ehea_get_port(adapter, logical_port_id);
3169 
3170         if (port) {
3171                 netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
3172                             logical_port_id);
3173                 return -EINVAL;
3174         }
3175 
3176         eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
3177 
3178         if (!eth_dn) {
3179                 pr_info("no logical port with id %d found\n", logical_port_id);
3180                 return -EINVAL;
3181         }
3182 
3183         if (ehea_add_adapter_mr(adapter)) {
3184                 pr_err("creating MR failed\n");
3185                 return -EIO;
3186         }
3187 
3188         port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
3189 
3190         of_node_put(eth_dn);
3191 
3192         if (port) {
3193                 for (i = 0; i < EHEA_MAX_PORTS; i++)
3194                         if (!adapter->port[i]) {
3195                                 adapter->port[i] = port;
3196                                 break;
3197                         }
3198 
3199                 netdev_info(port->netdev, "added: (logical port id=%d)\n",
3200                             logical_port_id);
3201         } else {
3202                 ehea_remove_adapter_mr(adapter);
3203                 return -EIO;
3204         }
3205 
3206         return (ssize_t) count;
3207 }
3208 
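/*
 * sysfs "remove_port" store hook, the counterpart to probe_port (path
 * illustrative, as above):
 *   echo <logical_port_id> > /sys/bus/ibmebus/devices/<adapter>/remove_port
 */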
3209 static ssize_t ehea_remove_port(struct device *dev,
3210                                 struct device_attribute *attr,
3211                                 const char *buf, size_t count)
3212 {
3213         struct ehea_adapter *adapter = dev_get_drvdata(dev);
3214         struct ehea_port *port;
3215         int i;
3216         u32 logical_port_id;
3217 
3218         sscanf(buf, "%u", &logical_port_id);
3219 
3220         port = ehea_get_port(adapter, logical_port_id);
3221 
3222         if (port) {
3223                 netdev_info(port->netdev, "removed: (logical port id=%d)\n",
3224                             logical_port_id);
3225 
3226                 ehea_shutdown_single_port(port);
3227 
3228                 for (i = 0; i < EHEA_MAX_PORTS; i++)
3229                         if (adapter->port[i] == port) {
3230                                 adapter->port[i] = NULL;
3231                                 break;
3232                         }
3233         } else {
3234                 pr_err("removing port with logical port id=%d failed. port not configured.\n",
3235                        logical_port_id);
3236                 return -EINVAL;
3237         }
3238 
3239         ehea_remove_adapter_mr(adapter);
3240 
3241         return (ssize_t) count;
3242 }
3243 
3244 static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
3245 static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
3246 
3247 static int ehea_create_device_sysfs(struct platform_device *dev)
3248 {
3249         int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
3250         if (ret)
3251                 goto out;
3252 
3253         ret = device_create_file(&dev->dev, &dev_attr_remove_port);
3254 out:
3255         return ret;
3256 }
3257 
3258 static void ehea_remove_device_sysfs(struct platform_device *dev)
3259 {
3260         device_remove_file(&dev->dev, &dev_attr_probe_port);
3261         device_remove_file(&dev->dev, &dev_attr_remove_port);
3262 }
3263 
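/*
 * Adapter probe: fetch the firmware handle from the "ibm,hea-handle"
 * property, sense the adapter attributes, create the notification event
 * queue (NEQ) and its tasklet, add the probe/remove sysfs hooks, set up
 * all ports and finally request the NEQ interrupt.  Every error path
 * unwinds exactly the steps that already succeeded.
 */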
3264 static int ehea_probe_adapter(struct platform_device *dev)
3265 {
3266         struct ehea_adapter *adapter;
3267         const u64 *adapter_handle;
3268         int ret;
3269         int i;
3270 
3271         if (!dev || !dev->dev.of_node) {
3272                 pr_err("Invalid ibmebus device probed\n");
3273                 return -EINVAL;
3274         }
3275 
3276         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3277         if (!adapter) {
3278                 ret = -ENOMEM;
3279                 dev_err(&dev->dev, "no mem for ehea_adapter\n");
3280                 goto out;
3281         }
3282 
3283         list_add(&adapter->list, &adapter_list);
3284 
3285         adapter->ofdev = dev;
3286 
3287         adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
3288                                          NULL);
3289         if (adapter_handle)
3290                 adapter->handle = *adapter_handle;
3291 
3292         if (!adapter->handle) {
3293                 dev_err(&dev->dev, "failed getting handle for adapter"
3294                         " '%s'\n", dev->dev.of_node->full_name);
3295                 ret = -ENODEV;
3296                 goto out_free_ad;
3297         }
3298 
3299         adapter->pd = EHEA_PD_ID;
3300 
3301         platform_set_drvdata(dev, adapter);
3302 
3303 
3304         /* initialize adapter and ports */
3305         /* get adapter properties */
3306         ret = ehea_sense_adapter_attr(adapter);
3307         if (ret) {
3308                 dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
3309                 goto out_free_ad;
3310         }
3311 
3312         adapter->neq = ehea_create_eq(adapter,
3313                                       EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
3314         if (!adapter->neq) {
3315                 ret = -EIO;
3316                 dev_err(&dev->dev, "NEQ creation failed\n");
3317                 goto out_free_ad;
3318         }
3319 
3320         tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
3321                      (unsigned long)adapter);
3322 
3323         ret = ehea_create_device_sysfs(dev);
3324         if (ret)
3325                 goto out_kill_eq;
3326 
3327         ret = ehea_setup_ports(adapter);
3328         if (ret) {
3329                 dev_err(&dev->dev, "setup_ports failed\n");
3330                 goto out_rem_dev_sysfs;
3331         }
3332 
3333         ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3334                                   ehea_interrupt_neq, 0,
3335                                   "ehea_neq", adapter);
3336         if (ret) {
3337                 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
3338                 goto out_shutdown_ports;
3339         }
3340 
3341         /* Handle any events that might be pending. */
3342         tasklet_hi_schedule(&adapter->neq_tasklet);
3343 
3344         ret = 0;
3345         goto out;
3346 
3347 out_shutdown_ports:
3348         for (i = 0; i < EHEA_MAX_PORTS; i++)
3349                 if (adapter->port[i]) {
3350                         ehea_shutdown_single_port(adapter->port[i]);
3351                         adapter->port[i] = NULL;
3352                 }
3353 
3354 out_rem_dev_sysfs:
3355         ehea_remove_device_sysfs(dev);
3356 
3357 out_kill_eq:
3358         ehea_destroy_eq(adapter->neq);
3359 
3360 out_free_ad:
3361         list_del(&adapter->list);
3362         kfree(adapter);
3363 
3364 out:
3365         ehea_update_firmware_handles();
3366 
3367         return ret;
3368 }
3369 
3370 static int ehea_remove(struct platform_device *dev)
3371 {
3372         struct ehea_adapter *adapter = platform_get_drvdata(dev);
3373         int i;
3374 
3375         for (i = 0; i < EHEA_MAX_PORTS; i++)
3376                 if (adapter->port[i]) {
3377                         ehea_shutdown_single_port(adapter->port[i]);
3378                         adapter->port[i] = NULL;
3379                 }
3380 
3381         ehea_remove_device_sysfs(dev);
3382 
3383         ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3384         tasklet_kill(&adapter->neq_tasklet);
3385 
3386         ehea_destroy_eq(adapter->neq);
3387         ehea_remove_adapter_mr(adapter);
3388         list_del(&adapter->list);
3389         kfree(adapter);
3390 
3391         ehea_update_firmware_handles();
3392 
3393         return 0;
3394 }
3395 
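/*
 * kexec/crash shutdown hook (registered via crash_shutdown_register()
 * below): force-free all tracked firmware handles and drop all
 * broadcast/multicast registrations so a kdump kernel starts clean.
 */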
3396 static void ehea_crash_handler(void)
3397 {
3398         int i;
3399 
3400         if (ehea_fw_handles.arr)
3401                 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3402                         ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3403                                              ehea_fw_handles.arr[i].fwh,
3404                                              FORCE_FREE);
3405 
3406         if (ehea_bcmc_regs.arr)
3407                 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3408                         ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3409                                               ehea_bcmc_regs.arr[i].port_id,
3410                                               ehea_bcmc_regs.arr[i].reg_type,
3411                                               ehea_bcmc_regs.arr[i].macaddr,
3412                                               0, H_DEREG_BCMC);
3413 }
3414 
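/*
 * Memory hotplug notifier: when sections go online or offline, stop
 * transfers, update the section bitmap and re-register the memory
 * regions with firmware so the DMA mappings stay valid.
 */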
3415 static int ehea_mem_notifier(struct notifier_block *nb,
3416                              unsigned long action, void *data)
3417 {
3418         int ret = NOTIFY_BAD;
3419         struct memory_notify *arg = data;
3420 
3421         mutex_lock(&dlpar_mem_lock);
3422 
3423         switch (action) {
3424         case MEM_CANCEL_OFFLINE:
3425                 pr_info("memory offlining canceled\n");
3426                 /* Re-add the canceled memory block: fall through */
3427         case MEM_ONLINE:
3428                 pr_info("memory is going online\n");
3429                 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3430                 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3431                         goto out_unlock;
3432                 ehea_rereg_mrs();
3433                 break;
3434         case MEM_GOING_OFFLINE:
3435                 pr_info("memory is going offline\n");
3436                 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3437                 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3438                         goto out_unlock;
3439                 ehea_rereg_mrs();
3440                 break;
3441         default:
3442                 break;
3443         }
3444 
3445         ehea_update_firmware_handles();
3446         ret = NOTIFY_OK;
3447 
3448 out_unlock:
3449         mutex_unlock(&dlpar_mem_lock);
3450         return ret;
3451 }
3452 
3453 static struct notifier_block ehea_mem_nb = {
3454         .notifier_call = ehea_mem_notifier,
3455 };
3456 
3457 static int ehea_reboot_notifier(struct notifier_block *nb,
3458                                 unsigned long action, void *unused)
3459 {
3460         if (action == SYS_RESTART) {
3461                 pr_info("Reboot: freeing all eHEA resources\n");
3462                 ibmebus_unregister_driver(&ehea_driver);
3463         }
3464         return NOTIFY_DONE;
3465 }
3466 
3467 static struct notifier_block ehea_reboot_nb = {
3468         .notifier_call = ehea_reboot_notifier,
3469 };
3470 
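/* Validate the queue-size module parameters against the hardware limits */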
3471 static int check_module_parm(void)
3472 {
3473         int ret = 0;
3474 
3475         if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3476             (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3477                 pr_info("Bad parameter: rq1_entries\n");
3478                 ret = -EINVAL;
3479         }
3480         if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3481             (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3482                 pr_info("Bad parameter: rq2_entries\n");
3483                 ret = -EINVAL;
3484         }
3485         if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3486             (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3487                 pr_info("Bad parameter: rq3_entries\n");
3488                 ret = -EINVAL;
3489         }
3490         if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3491             (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3492                 pr_info("Bad parameter: sq_entries\n");
3493                 ret = -EINVAL;
3494         }
3495 
3496         return ret;
3497 }
3498 
3499 static ssize_t ehea_show_capabilities(struct device_driver *drv,
3500                                       char *buf)
3501 {
3502         return sprintf(buf, "%d\n", EHEA_CAPABILITIES);
3503 }
3504 
3505 static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
3506                    ehea_show_capabilities, NULL);
3507 
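/*
 * Module init: validate parameters, build the busmap, register the
 * reboot, memory and crash notifiers (failures there are only logged),
 * then register the ibmebus driver and its capabilities attribute.
 */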
3508 static int __init ehea_module_init(void)
3509 {
3510         int ret;
3511 
3512         pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);
3513 
3514         memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3515         memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3516 
3517         mutex_init(&ehea_fw_handles.lock);
3518         spin_lock_init(&ehea_bcmc_regs.lock);
3519 
3520         ret = check_module_parm();
3521         if (ret)
3522                 goto out;
3523 
3524         ret = ehea_create_busmap();
3525         if (ret)
3526                 goto out;
3527 
3528         ret = register_reboot_notifier(&ehea_reboot_nb);
3529         if (ret)
3530                 pr_info("failed registering reboot notifier\n");
3531 
3532         ret = register_memory_notifier(&ehea_mem_nb);
3533         if (ret)
3534                 pr_info("failed registering memory remove notifier\n");
3535 
3536         ret = crash_shutdown_register(ehea_crash_handler);
3537         if (ret)
3538                 pr_info("failed registering crash handler\n");
3539 
3540         ret = ibmebus_register_driver(&ehea_driver);
3541         if (ret) {
3542                 pr_err("failed registering eHEA device driver on ebus\n");
3543                 goto out2;
3544         }
3545 
3546         ret = driver_create_file(&ehea_driver.driver,
3547                                  &driver_attr_capabilities);
3548         if (ret) {
3549                 pr_err("failed to register capabilities attribute, ret=%d\n",
3550                        ret);
3551                 goto out3;
3552         }
3553 
3554         return ret;
3555 
3556 out3:
3557         ibmebus_unregister_driver(&ehea_driver);
3558 out2:
3559         unregister_memory_notifier(&ehea_mem_nb);
3560         unregister_reboot_notifier(&ehea_reboot_nb);
3561         crash_shutdown_unregister(ehea_crash_handler);
3562 out:
3563         return ret;
3564 }
3565 
3566 static void __exit ehea_module_exit(void)
3567 {
3568         int ret;
3569 
3570         driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3571         ibmebus_unregister_driver(&ehea_driver);
3572         unregister_reboot_notifier(&ehea_reboot_nb);
3573         ret = crash_shutdown_unregister(ehea_crash_handler);
3574         if (ret)
3575                 pr_info("failed unregistering crash handler\n");
3576         unregister_memory_notifier(&ehea_mem_nb);
3577         kfree(ehea_fw_handles.arr);
3578         kfree(ehea_bcmc_regs.arr);
3579         ehea_destroy_busmap();
3580 }
3581 
3582 module_init(ehea_module_init);
3583 module_exit(ehea_module_exit);
3584 
