
Linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c

/* bnx2x_main.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
#include <net/geneve.h>
#endif
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2         "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] =
        "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("QLogic "
                   "BCM57710/57711/57711E/"
                   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
                   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
MODULE_PARM_DESC(num_queues,
                 " Set number of queues (default is one per CPU)");

static int disable_tpa;
module_param(disable_tpa, int, S_IRUGO);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, S_IRUGO);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                                "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, S_IRUGO);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, S_IRUGO);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, " Default debug msglevel");
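
/* Example (illustrative, values are just one possible choice): load
 * the module with legacy INTx interrupts and TPA disabled:
 *
 *      modprobe bnx2x int_mode=1 disable_tpa=1
 */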

static struct workqueue_struct *bnx2x_wq;
struct workqueue_struct *bnx2x_iov_wq;

struct bnx2x_mac_vals {
        u32 xmac_addr;
        u32 xmac_val;
        u32 emac_addr;
        u32 emac_val;
        u32 umac_addr[2];
        u32 umac_val[2];
        u32 bmac_addr;
        u32 bmac_val[2];
};

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711,
        BCM57711E,
        BCM57712,
        BCM57712_MF,
        BCM57712_VF,
        BCM57800,
        BCM57800_MF,
        BCM57800_VF,
        BCM57810,
        BCM57810_MF,
        BCM57810_VF,
        BCM57840_4_10,
        BCM57840_2_20,
        BCM57840_MF,
        BCM57840_VF,
        BCM57811,
        BCM57811_MF,
        BCM57840_O,
        BCM57840_MFO,
        BCM57811_VF
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] = {
        [BCM57710]      = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
        [BCM57711]      = { "QLogic BCM57711 10 Gigabit PCIe" },
        [BCM57711E]     = { "QLogic BCM57711E 10 Gigabit PCIe" },
        [BCM57712]      = { "QLogic BCM57712 10 Gigabit Ethernet" },
        [BCM57712_MF]   = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
        [BCM57712_VF]   = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
        [BCM57800]      = { "QLogic BCM57800 10 Gigabit Ethernet" },
        [BCM57800_MF]   = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
        [BCM57800_VF]   = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
        [BCM57810]      = { "QLogic BCM57810 10 Gigabit Ethernet" },
        [BCM57810_MF]   = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
        [BCM57810_VF]   = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
        [BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" },
        [BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" },
        [BCM57840_MF]   = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
        [BCM57840_VF]   = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
        [BCM57811]      = { "QLogic BCM57811 10 Gigabit Ethernet" },
        [BCM57811_MF]   = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
        [BCM57840_O]    = { "QLogic BCM57840 10/20 Gigabit Ethernet" },
        [BCM57840_MFO]  = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
        [BCM57811_VF]   = { "QLogic BCM57811 10 Gigabit Ethernet Virtual Function" }
};

#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710         CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711         CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E        CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712         CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF      CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_VF
#define PCI_DEVICE_ID_NX2_57712_VF      CHIP_NUM_57712_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800         CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF      CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_VF
#define PCI_DEVICE_ID_NX2_57800_VF      CHIP_NUM_57800_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810         CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF      CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O       CHIP_NUM_57840_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_VF
#define PCI_DEVICE_ID_NX2_57810_VF      CHIP_NUM_57810_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10    CHIP_NUM_57840_4_10
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_2_20
#define PCI_DEVICE_ID_NX2_57840_2_20    CHIP_NUM_57840_2_20
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MFO
#define PCI_DEVICE_ID_NX2_57840_MFO     CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF      CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_VF
#define PCI_DEVICE_ID_NX2_57840_VF      CHIP_NUM_57840_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811         CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF      CHIP_NUM_57811_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_VF
#define PCI_DEVICE_ID_NX2_57811_VF      CHIP_NUM_57811_VF
#endif

static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
        { PCI_VDEVICE(QLOGIC,   PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
        { PCI_VDEVICE(QLOGIC,   PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
        { PCI_VDEVICE(QLOGIC,   PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);

/* Forward declaration */
static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);

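/* Write a 64-bit DMA address into storm internal memory as two 32-bit
 * words: the low dword at addr, the high dword at addr + 4.
 */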
static void __storm_memset_dma_mapping(struct bnx2x *bp,
                                       u32 addr, dma_addr_t mapping)
{
        REG_WR(bp, addr, U64_LO(mapping));
        REG_WR(bp, addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
                                  dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = XSEM_REG_FAST_MEMORY +
                        XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

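/* The per-function fields below are mirrored into all four storm
 * memories (X/C/T/U) so that every storm processor sees the same
 * function state.
 */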
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
                                  u16 pf_id)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
                                 u8 enable)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
                                 struct event_ring_data *eq_data,
                                 u16 pfid)
{
        size_t size = sizeof(struct event_ring_data);
        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

        __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
                                 u16 pfid)
{
        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);

        REG_WR16(bp, addr, eq_prod);
}

/* Used only at init time; locking is done by the MCP. Note the
 * trailing write: it restores the GRC window to the (benign)
 * vendor-ID offset once the access is done.
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

#define DMAE_DP_SRC_GRC         "grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI         "pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC         "grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI         "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE        "dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp,
                          struct dmae_command *dmae, int msglvl)
{
        u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
        int i;

        switch (dmae->opcode & DMAE_COMMAND_DST) {
        case DMAE_CMD_DST_PCI:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%08x], len [%d*4], dst [%x:%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        case DMAE_CMD_DST_GRC:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->dst_addr_lo >> 2,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%08x], len [%d*4], dst [%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->dst_addr_lo >> 2,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        default:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
                           "comp_addr [%x:%08x]  comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
                           "comp_addr [%x:%08x]  comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        }

        for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
                DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
                   i, *(((u32 *)dmae) + i));
}

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
        return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
                         DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
        return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
                      bool with_comp, u8 comp_type)
{
        u32 opcode = 0;

        opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
                   (dst_type << DMAE_COMMAND_DST_SHIFT));

        opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

        opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
        opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
                   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
        opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
        opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
        opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
        if (with_comp)
                opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
        return opcode;
}
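
/* Sketch (this is exactly how bnx2x_prep_dmae_with_comp() below uses
 * it): build an opcode for a PCI -> GRC copy that reports completion
 * by writing DMAE_COMP_VAL to a host (PCI) address:
 *
 *      opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 *                                 true, DMAE_COMP_PCI);
 */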

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
                               struct dmae_command *dmae,
                               u8 src_type, u8 dst_type)
{
        memset(dmae, 0, sizeof(struct dmae_command));

        /* set the opcode */
        dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
                                         true, DMAE_COMP_PCI);

        /* fill in the completion parameters */
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
                               u32 *comp)
{
        int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
        int rc = 0;

        bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

        /* Lock the dmae channel. Disable BHs to prevent a deadlock,
         * since this code is called both from syscall context and
         * from the ndo_set_rx_mode() flow, which may run in BH
         * context.
         */
        spin_lock_bh(&bp->dmae_lock);

        /* reset completion */
        *comp = 0;

        /* post the command on the channel used for initializations */
        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        /* wait for completion */
        udelay(5);
        while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

                if (!cnt ||
                    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
                     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
                        BNX2X_ERR("DMAE timeout!\n");
                        rc = DMAE_TIMEOUT;
                        goto unlock;
                }
                cnt--;
                udelay(50);
        }
        if (*comp & DMAE_PCI_ERR_FLAG) {
                BNX2X_ERR("DMAE PCI error!\n");
                rc = DMAE_PCI_ERROR;
        }

unlock:
        spin_unlock_bh(&bp->dmae_lock);

        return rc;
}
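
/* Typical caller flow (see bnx2x_write_dmae() below for the real
 * thing; the field values here are placeholders):
 *
 *      bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
 *      dmae.src_addr_lo = ...;  dmae.dst_addr_lo = ...;  dmae.len = ...;
 *      rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
 */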

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        int rc;
        struct dmae_command dmae;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                if (CHIP_IS_E1(bp))
                        bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                else
                        bnx2x_init_str_wr(bp, dst_addr, data, len32);
                return;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

        /* fill in addresses and len */
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;

        /* issue the command and wait for completion */
        rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
        if (rc) {
                BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
                bnx2x_panic();
#endif
        }
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        int rc;
        struct dmae_command dmae;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                if (CHIP_IS_E1(bp))
                        for (i = 0; i < len32; i++)
                                data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                else
                        for (i = 0; i < len32; i++)
                                data[i] = REG_RD(bp, src_addr + i*4);

                return;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

        /* fill in addresses and len */
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;

        /* issue the command and wait for completion */
        rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
        if (rc) {
                BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
                bnx2x_panic();
#endif
        }
}

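/* DMAE lengths are counted in 32-bit words, so each full chunk
 * advances the byte offset by dmae_wr_max * 4 while consuming
 * dmae_wr_max words of len.
 */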
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                                      u32 addr, u32 len)
{
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;

        while (len > dmae_wr_max) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, dmae_wr_max);
                offset += dmae_wr_max * 4;
                len -= dmae_wr_max;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

enum storms {
        XSTORM,
        TSTORM,
        CSTORM,
        USTORM,
        MAX_STORMS
};

#define STORMS_NUM 4
#define REGS_IN_ENTRY 4

static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
                                              enum storms storm,
                                              int entry)
{
        switch (storm) {
        case XSTORM:
                return XSTORM_ASSERT_LIST_OFFSET(entry);
        case TSTORM:
                return TSTORM_ASSERT_LIST_OFFSET(entry);
        case CSTORM:
                return CSTORM_ASSERT_LIST_OFFSET(entry);
        case USTORM:
                return USTORM_ASSERT_LIST_OFFSET(entry);
        case MAX_STORMS:
        default:
                BNX2X_ERR("unknown storm\n");
        }
        return -EINVAL;
}

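/* Scan the assert list of every storm and print each valid entry;
 * returns the number of asserts found.
 */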
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, j, rc = 0;
        enum storms storm;
        u32 regs[REGS_IN_ENTRY];
        u32 bar_storm_intmem[STORMS_NUM] = {
                BAR_XSTRORM_INTMEM,
                BAR_TSTRORM_INTMEM,
                BAR_CSTRORM_INTMEM,
                BAR_USTRORM_INTMEM
        };
        u32 storm_assert_list_index[STORMS_NUM] = {
                XSTORM_ASSERT_LIST_INDEX_OFFSET,
                TSTORM_ASSERT_LIST_INDEX_OFFSET,
                CSTORM_ASSERT_LIST_INDEX_OFFSET,
                USTORM_ASSERT_LIST_INDEX_OFFSET
        };
        char *storms_string[STORMS_NUM] = {
                "XSTORM",
                "TSTORM",
                "CSTORM",
                "USTORM"
        };

        for (storm = XSTORM; storm < MAX_STORMS; storm++) {
                last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
                                   storm_assert_list_index[storm]);
                if (last_idx)
                        BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
                                  storms_string[storm], last_idx);

                /* print the asserts */
                for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
                        /* read a single assert entry */
                        for (j = 0; j < REGS_IN_ENTRY; j++)
                                regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
                                          bnx2x_get_assert_list_entry(bp,
                                                                      storm,
                                                                      i) +
                                          sizeof(u32) * j);

                        /* log entry if it contains a valid assert */
                        if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                                BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                          storms_string[storm], i, regs[3],
                                          regs[2], regs[1], regs[0]);
                                rc++;
                        } else {
                                break;
                        }
                }
        }

        BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
                  CHIP_IS_E1(bp) ? "everest1" :
                  CHIP_IS_E1H(bp) ? "everest1h" :
                  CHIP_IS_E2(bp) ? "everest2" : "everest3",
                  BCM_5710_FW_MAJOR_VERSION,
                  BCM_5710_FW_MINOR_VERSION,
                  BCM_5710_FW_REVISION_VERSION);

        return rc;
}

#define MCPR_TRACE_BUFFER_SIZE  (0x800)
#define SCRATCH_BUFFER_SIZE(bp) \
        (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
        u32 addr, val;
        u32 mark, offset;
        __be32 data[9];
        int word;
        u32 trace_shmem_base;

        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - cannot dump\n");
                return;
        }
        netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
                (bp->common.bc_ver & 0xff0000) >> 16,
                (bp->common.bc_ver & 0xff00) >> 8,
                (bp->common.bc_ver & 0xff));

        val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
        if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
                BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

        if (BP_PATH(bp) == 0)
                trace_shmem_base = bp->common.shmem_base;
        else
                trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);

        /* sanity */
        if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
            trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
                                SCRATCH_BUFFER_SIZE(bp)) {
                BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
                          trace_shmem_base);
                return;
        }

        addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;

        /* validate TRCB signature */
        mark = REG_RD(bp, addr);
        if (mark != MFW_TRACE_SIGNATURE) {
                BNX2X_ERR("Trace buffer signature is missing.\n");
                return;
        }

        /* read cyclic buffer pointer */
        addr += 4;
        mark = REG_RD(bp, addr);
        mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
        if (mark >= trace_shmem_base || mark < addr + 4) {
                BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
                return;
        }
        printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

        printk("%s", lvl);

        /* dump buffer after the mark */
        for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }

        /* dump buffer before the mark */
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        printk("%s" "end of fw dump\n", lvl);
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        /* On E1 the MSI/MSI-X capability may only be disabled via the
         * PCI configuration space; it is forbidden to clear
         * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
         */
        if (CHIP_IS_E1(bp)) {
                /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
                 * use the mask register to prevent the HC from sending
                 * interrupts after we exit this function.
                 */
                REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_IFDOWN,
           "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! Proper val not read from HC!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
        u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

        val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
                 IGU_PF_CONF_INT_LINE_EN |
                 IGU_PF_CONF_ATTN_BIT_EN);

        DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
        if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
                BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_int_disable(bp);
        else
                bnx2x_igu_int_disable(bp);
}

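/* Dump driver, status-block and ring state for post-mortem analysis.
 * When disable_int is set (PF only), interrupts are masked first so
 * the dump is not interleaved with further device activity.
 */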
void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
        int i;
        u16 j;
        struct hc_sp_status_block_data sp_sb_data;
        int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
        u16 start = 0, end = 0;
        u8 cos;
#endif
        if (IS_PF(bp) && disable_int)
                bnx2x_int_disable(bp);

        bp->stats_state = STATS_STATE_DISABLED;
        bp->eth_stats.unrecoverable_error++;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        if (IS_PF(bp)) {
                struct host_sp_status_block *def_sb = bp->def_status_blk;
                int data_size, cstorm_offset;

                BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
                          bp->def_idx, bp->def_att_idx, bp->attn_state,
                          bp->spq_prod_idx, bp->stats_counter);
                BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
                          def_sb->atten_status_block.attn_bits,
                          def_sb->atten_status_block.attn_bits_ack,
                          def_sb->atten_status_block.status_block_id,
                          def_sb->atten_status_block.attn_bits_index);
                BNX2X_ERR("     def (");
                for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
                        pr_cont("0x%x%s",
                                def_sb->sp_sb.index_values[i],
                                (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

                data_size = sizeof(struct hc_sp_status_block_data) /
                            sizeof(u32);
                cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
                for (i = 0; i < data_size; i++)
                        *((u32 *)&sp_sb_data + i) =
                                REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
                                           i * sizeof(u32));

                pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
                        sp_sb_data.igu_sb_id,
                        sp_sb_data.igu_seg_id,
                        sp_sb_data.p_func.pf_id,
                        sp_sb_data.p_func.vnic_id,
                        sp_sb_data.p_func.vf_id,
                        sp_sb_data.p_func.vf_valid,
                        sp_sb_data.state);
        }

        for_each_eth_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                int loop;
                struct hc_status_block_data_e2 sb_data_e2;
                struct hc_status_block_data_e1x sb_data_e1x;
                struct hc_status_block_sm *hc_sm_p =
                        CHIP_IS_E1x(bp) ?
                        sb_data_e1x.common.state_machine :
                        sb_data_e2.common.state_machine;
                struct hc_index_data *hc_index_p =
                        CHIP_IS_E1x(bp) ?
                        sb_data_e1x.index_data :
                        sb_data_e2.index_data;
                u8 data_size, cos;
                u32 *sb_data_p;
                struct bnx2x_fp_txdata txdata;

                if (!bp->fp)
                        break;

                if (!fp->rx_cons_sb)
                        continue;

                /* Rx */
                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)  rx_comp_prod(0x%x)  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)  fp_hc_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_hc_idx));

                /* Tx */
                for_each_cos_in_tx_queue(fp, cos) {
                        if (!fp->txdata_ptr[cos])
                                break;

                        txdata = *fp->txdata_ptr[cos];

                        if (!txdata.tx_cons_sb)
                                continue;

                        BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)  *tx_cons_sb(0x%x)\n",
                                  i, txdata.tx_pkt_prod,
                                  txdata.tx_pkt_cons, txdata.tx_bd_prod,
                                  txdata.tx_bd_cons,
                                  le16_to_cpu(*txdata.tx_cons_sb));
                }

                loop = CHIP_IS_E1x(bp) ?
                        HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

                /* host sb data */

                if (IS_FCOE_FP(fp))
                        continue;

                BNX2X_ERR("     run indexes (");
                for (j = 0; j < HC_SB_MAX_SM; j++)
                        pr_cont("0x%x%s",
                               fp->sb_running_index[j],
                               (j == HC_SB_MAX_SM - 1) ? ")" : " ");

                BNX2X_ERR("     indexes (");
                for (j = 0; j < loop; j++)
                        pr_cont("0x%x%s",
                               fp->sb_index_values[j],
                               (j == loop - 1) ? ")" : " ");
                /* VF cannot access the FW reflection of the status block */
                if (IS_VF(bp))
                        continue;

                /* fw sb data */
                data_size = CHIP_IS_E1x(bp) ?
                        sizeof(struct hc_status_block_data_e1x) :
                        sizeof(struct hc_status_block_data_e2);
                data_size /= sizeof(u32);
                sb_data_p = CHIP_IS_E1x(bp) ?
                        (u32 *)&sb_data_e1x :
                        (u32 *)&sb_data_e2;
                /* copy sb data in here */
                for (j = 0; j < data_size; j++)
                        *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
                                CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
                                j * sizeof(u32));

                if (!CHIP_IS_E1x(bp)) {
                        pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
                                sb_data_e2.common.p_func.pf_id,
                                sb_data_e2.common.p_func.vf_id,
                                sb_data_e2.common.p_func.vf_valid,
                                sb_data_e2.common.p_func.vnic_id,
                                sb_data_e2.common.same_igu_sb_1b,
                                sb_data_e2.common.state);
                } else {
                        pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
                                sb_data_e1x.common.p_func.pf_id,
                                sb_data_e1x.common.p_func.vf_id,
                                sb_data_e1x.common.p_func.vf_valid,
                                sb_data_e1x.common.p_func.vnic_id,
                                sb_data_e1x.common.same_igu_sb_1b,
                                sb_data_e1x.common.state);
                }

                /* SB_SMs data */
                for (j = 0; j < HC_SB_MAX_SM; j++) {
                        pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x)  igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
                                j, hc_sm_p[j].__flags,
                                hc_sm_p[j].igu_sb_id,
                                hc_sm_p[j].igu_seg_id,
                                hc_sm_p[j].time_to_expire,
                                hc_sm_p[j].timer_value);
                }

                /* Indices data */
                for (j = 0; j < loop; j++) {
                        pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
                               hc_index_p[j].flags,
                               hc_index_p[j].timeout);
                }
        }

#ifdef BNX2X_STOP_ON_ERROR
        if (IS_PF(bp)) {
                /* event queue */
                BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
                for (i = 0; i < NUM_EQ_DESC; i++) {
                        u32 *data = (u32 *)&bp->eq_ring[i].message.data;

                        BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
                                  i, bp->eq_ring[i].message.opcode,
                                  bp->eq_ring[i].message.error);
                        BNX2X_ERR("data: %x %x %x\n",
                                  data[0], data[1], data[2]);
                }
        }

        /* Rings */
        /* Rx */
        for_each_valid_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                if (!bp->fp)
                        break;

                if (!fp->rx_cons_sb)
                        continue;

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_valid_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                if (!bp->fp)
                        break;

                for_each_cos_in_tx_queue(fp, cos) {
                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

                        if (!fp->txdata_ptr[cos])
                                break;

                        if (!txdata->tx_cons_sb)
                                continue;

                        start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
                        end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
                        for (j = start; j != end; j = TX_BD(j + 1)) {
                                struct sw_tx_bd *sw_bd =
                                        &txdata->tx_buf_ring[j];

                                BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
                                          i, cos, j, sw_bd->skb,
                                          sw_bd->first_bd);
                        }

                        start = TX_BD(txdata->tx_bd_cons - 10);
                        end = TX_BD(txdata->tx_bd_cons + 254);
                        for (j = start; j != end; j = TX_BD(j + 1)) {
                                u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

                                BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
                                          i, cos, j, tx_bd[0], tx_bd[1],
                                          tx_bd[2], tx_bd[3]);
                        }
                }
        }
#endif
        if (IS_PF(bp)) {
                bnx2x_fw_dump(bp);
                bnx2x_mc_assert(bp);
        }
        BNX2X_ERR("end crash dump -----------------\n");
}

/*
 * FLR Support for E2
 *
 * bnx2x_pf_flr_clnup() is called during nic_load in the per-function HW
 * initialization.
 */
#define FLR_WAIT_USEC           10000   /* 10 milliseconds */
#define FLR_WAIT_INTERVAL       50      /* usec */
#define FLR_POLL_CNT            (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
        int pN;
        u32 init_crd;
        u32 crd;
        u32 crd_freed;
};

struct pbf_pN_cmd_regs {
        int pN;
        u32 lines_occup;
        u32 lines_freed;
};

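/* The two flush helpers below wait until the resources freed since
 * entry cover what was outstanding on entry (SUB_S32() keeps the
 * comparison valid across counter wrap-around), or until the poll
 * count expires.
 */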
static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
                                     struct pbf_pN_buf_regs *regs,
                                     u32 poll_count)
{
        u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
        u32 cur_cnt = poll_count;

        crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
        crd = crd_start = REG_RD(bp, regs->crd);
        init_crd = REG_RD(bp, regs->init_crd);

        DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
        DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
        DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

        while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
               (init_crd - crd_start))) {
                if (cur_cnt--) {
                        udelay(FLR_WAIT_INTERVAL);
                        crd = REG_RD(bp, regs->crd);
                        crd_freed = REG_RD(bp, regs->crd_freed);
                } else {
                        DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
                           regs->pN);
                        DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
                           regs->pN, crd);
                        DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
                           regs->pN, crd_freed);
                        break;
                }
        }
        DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
           poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
                                     struct pbf_pN_cmd_regs *regs,
                                     u32 poll_count)
{
        u32 occup, to_free, freed, freed_start;
        u32 cur_cnt = poll_count;

        occup = to_free = REG_RD(bp, regs->lines_occup);
        freed = freed_start = REG_RD(bp, regs->lines_freed);

        DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
        DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

        while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
                if (cur_cnt--) {
                        udelay(FLR_WAIT_INTERVAL);
                        occup = REG_RD(bp, regs->lines_occup);
                        freed = REG_RD(bp, regs->lines_freed);
                } else {
                        DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
                           regs->pN);
                        DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
                           regs->pN, occup);
                        DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
                           regs->pN, freed);
                        break;
                }
        }
        DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
           poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
                                    u32 expected, u32 poll_count)
{
        u32 cur_cnt = poll_count;
        u32 val;

        while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
                udelay(FLR_WAIT_INTERVAL);

        return val;
}

int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
                                    char *msg, u32 poll_cnt)
{
        u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);

        if (val != 0) {
                BNX2X_ERR("%s usage count=%d\n", msg, val);
                return 1;
        }
        return 0;
}

/* Common routines with VF FLR cleanup */
u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
        /* adjust polling timeout */
        if (CHIP_REV_IS_EMUL(bp))
                return FLR_POLL_CNT * 2000;

        if (CHIP_REV_IS_FPGA(bp))
                return FLR_POLL_CNT * 120;

        return FLR_POLL_CNT;
}

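/* Verify that the PBF command queues and transmission buffers of
 * ports P0, P1 and the LB queue (P4) have drained.
 */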
1313 void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1314 {
1315         struct pbf_pN_cmd_regs cmd_regs[] = {
1316                 {0, (CHIP_IS_E3B0(bp)) ?
1317                         PBF_REG_TQ_OCCUPANCY_Q0 :
1318                         PBF_REG_P0_TQ_OCCUPANCY,
1319                     (CHIP_IS_E3B0(bp)) ?
1320                         PBF_REG_TQ_LINES_FREED_CNT_Q0 :
1321                         PBF_REG_P0_TQ_LINES_FREED_CNT},
1322                 {1, (CHIP_IS_E3B0(bp)) ?
1323                         PBF_REG_TQ_OCCUPANCY_Q1 :
1324                         PBF_REG_P1_TQ_OCCUPANCY,
1325                     (CHIP_IS_E3B0(bp)) ?
1326                         PBF_REG_TQ_LINES_FREED_CNT_Q1 :
1327                         PBF_REG_P1_TQ_LINES_FREED_CNT},
1328                 {4, (CHIP_IS_E3B0(bp)) ?
1329                         PBF_REG_TQ_OCCUPANCY_LB_Q :
1330                         PBF_REG_P4_TQ_OCCUPANCY,
1331                     (CHIP_IS_E3B0(bp)) ?
1332                         PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
1333                         PBF_REG_P4_TQ_LINES_FREED_CNT}
1334         };
1335 
1336         struct pbf_pN_buf_regs buf_regs[] = {
1337                 {0, (CHIP_IS_E3B0(bp)) ?
1338                         PBF_REG_INIT_CRD_Q0 :
1339                         PBF_REG_P0_INIT_CRD ,
1340                     (CHIP_IS_E3B0(bp)) ?
1341                         PBF_REG_CREDIT_Q0 :
1342                         PBF_REG_P0_CREDIT,
1343                     (CHIP_IS_E3B0(bp)) ?
1344                         PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
1345                         PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
1346                 {1, (CHIP_IS_E3B0(bp)) ?
1347                         PBF_REG_INIT_CRD_Q1 :
1348                         PBF_REG_P1_INIT_CRD,
1349                     (CHIP_IS_E3B0(bp)) ?
1350                         PBF_REG_CREDIT_Q1 :
1351                         PBF_REG_P1_CREDIT,
1352                     (CHIP_IS_E3B0(bp)) ?
1353                         PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
1354                         PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
1355                 {4, (CHIP_IS_E3B0(bp)) ?
1356                         PBF_REG_INIT_CRD_LB_Q :
1357                         PBF_REG_P4_INIT_CRD,
1358                     (CHIP_IS_E3B0(bp)) ?
1359                         PBF_REG_CREDIT_LB_Q :
1360                         PBF_REG_P4_CREDIT,
1361                     (CHIP_IS_E3B0(bp)) ?
1362                         PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
1363                         PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
1364         };
1365 
1366         int i;
1367 
1368         /* Verify the command queues are flushed P0, P1, P4 */
1369         for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
1370                 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1371 
1372         /* Verify the transmission buffers are flushed P0, P1, P4 */
1373         for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
1374                 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
1375 }
1376 
1377 #define OP_GEN_PARAM(param) \
1378         (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
1379 
1380 #define OP_GEN_TYPE(type) \
1381         (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
1382 
1383 #define OP_GEN_AGG_VECT(index) \
1384         (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
1385 
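/* Each OP_GEN_* macro above shifts its value into the corresponding
 * field of the SDM "operation generator" command and masks it to the
 * field width.  A final-cleanup command for function 5, for example,
 * is assembled as
 *
 *   OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX) |
 *   OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE) |
 *   OP_GEN_AGG_VECT(5) | (1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT)
 *
 * which is exactly what bnx2x_send_final_clnup() below does.
 */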
1386 int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
1387 {
1388         u32 op_gen_command = 0;
1389         u32 comp_addr = BAR_CSTRORM_INTMEM +
1390                         CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
1391         int ret = 0;
1392 
1393         if (REG_RD(bp, comp_addr)) {
1394                 BNX2X_ERR("Cleanup complete was not 0 before sending\n");
1395                 return 1;
1396         }
1397 
1398         op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
1399         op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
1400         op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
1401         op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
1402 
1403         DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
1404         REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
1405 
1406         if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1407                 BNX2X_ERR("FW final cleanup did not succeed\n");
1408                 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
1409                    (REG_RD(bp, comp_addr)));
1410                 bnx2x_panic();
1411                 return 1;
1412         }
1413         /* Zero completion for next FLR */
1414         REG_WR(bp, comp_addr, 0);
1415 
1416         return ret;
1417 }
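
/* The final-cleanup handshake above: the driver writes the aggregated
 * command to XSDM_REG_OPERATION_GEN, the FW acknowledges by writing 1
 * to the per-function CSTORM completion offset, and the driver polls
 * for that 1 and then zeroes the location so the next FLR starts from
 * a clean state.
 */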
1418 
1419 u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1420 {
1421         u16 status;
1422 
1423         pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
1424         return status & PCI_EXP_DEVSTA_TRPND;
1425 }
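
/* PCI_EXP_DEVSTA_TRPND is the "Transactions Pending" bit of the PCIe
 * Device Status register; a non-zero return here means the device has
 * issued requests that have not yet completed.
 */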
1426 
1427 /* PF FLR specific routines */
1428 
1429 static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1430 {
1431         /* wait for CFC PF usage-counter to zero (includes all the VFs) */
1432         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1433                         CFC_REG_NUM_LCIDS_INSIDE_PF,
1434                         "CFC PF usage counter timed out",
1435                         poll_cnt))
1436                 return 1;
1437 
1438         /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
1439         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1440                         DORQ_REG_PF_USAGE_CNT,
1441                         "DQ PF usage counter timed out",
1442                         poll_cnt))
1443                 return 1;
1444 
1445         /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
1446         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1447                         QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
1448                         "QM PF usage counter timed out",
1449                         poll_cnt))
1450                 return 1;
1451 
1452         /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
1453         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1454                         TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
1455                         "Timers VNIC usage counter timed out",
1456                         poll_cnt))
1457                 return 1;
1458         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1459                         TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
1460                         "Timers NUM_SCANS usage counter timed out",
1461                         poll_cnt))
1462                 return 1;
1463 
1464         /* Wait DMAE PF usage counter to zero */
1465         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1466                         dmae_reg_go_c[INIT_DMAE_C(bp)],
1467                         "DMAE command register timed out",
1468                         poll_cnt))
1469                 return 1;
1470 
1471         return 0;
1472 }
1473 
1474 static void bnx2x_hw_enable_status(struct bnx2x *bp)
1475 {
1476         u32 val;
1477 
1478         val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
1479         DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
1480 
1481         val = REG_RD(bp, PBF_REG_DISABLE_PF);
1482         DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
1483 
1484         val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
1485         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
1486 
1487         val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
1488         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
1489 
1490         val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
1491         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
1492 
1493         val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
1494         DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
1495 
1496         val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
1497         DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
1498 
1499         val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1500         DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
1501            val);
1502 }
1503 
1504 static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1505 {
1506         u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1507 
1508         DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
1509 
1510         /* Re-enable PF target read access */
1511         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1512 
1513         /* Poll HW usage counters */
1514         DP(BNX2X_MSG_SP, "Polling usage counters\n");
1515         if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1516                 return -EBUSY;
1517 
1518         /* Zero the igu 'trailing edge' and 'leading edge' */
1519 
1520         /* Send the FW cleanup command */
1521         if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
1522                 return -EBUSY;
1523 
1524         /* ATC cleanup */
1525 
1526         /* Verify TX hw is flushed */
1527         bnx2x_tx_hw_flushed(bp, poll_cnt);
1528 
1529         /* Wait 100ms (not adjusted according to platform) */
1530         msleep(100);
1531 
1532         /* Verify no pending pci transactions */
1533         if (bnx2x_is_pcie_pending(bp->pdev))
1534                 BNX2X_ERR("PCIE Transactions still pending\n");
1535 
1536         /* Debug */
1537         bnx2x_hw_enable_status(bp);
1538 
1539         /*
1540          * Re-enable master - needed due to the WB DMAE writes performed
1541          * before this register is re-initialized in regular function init
1542          */
1543         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
1544 
1545         return 0;
1546 }
1547 
1548 static void bnx2x_hc_int_enable(struct bnx2x *bp)
1549 {
1550         int port = BP_PORT(bp);
1551         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1552         u32 val = REG_RD(bp, addr);
1553         bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1554         bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1555         bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1556 
1557         if (msix) {
1558                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1559                          HC_CONFIG_0_REG_INT_LINE_EN_0);
1560                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1561                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1562                 if (single_msix)
1563                         val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
1564         } else if (msi) {
1565                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1566                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1567                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1568                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1569         } else {
1570                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1571                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1572                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
1573                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1574 
1575                 if (!CHIP_IS_E1(bp)) {
1576                         DP(NETIF_MSG_IFUP,
1577                            "write %x to HC %d (addr 0x%x)\n", val, port, addr);
1578 
1579                         REG_WR(bp, addr, val);
1580 
1581                         val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1582                 }
1583         }
1584 
1585         if (CHIP_IS_E1(bp))
1586                 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1587 
1588         DP(NETIF_MSG_IFUP,
1589            "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
1590            (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1591 
1592         REG_WR(bp, addr, val);
1593         /*
1594          * Ensure that HC_CONFIG is written before leading/trailing edge config
1595          */
1596         mmiowb();
1597         barrier();
1598 
1599         if (!CHIP_IS_E1(bp)) {
1600                 /* init leading/trailing edge */
1601                 if (IS_MF(bp)) {
1602                         val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1603                         if (bp->port.pmf)
1604                                 /* enable nig and gpio3 attention */
1605                                 val |= 0x1100;
1606                 } else
1607                         val = 0xffff;
1608 
1609                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1610                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1611         }
1612 
1613         /* Make sure that interrupts are indeed enabled from here on */
1614         mmiowb();
1615 }
1616 
1617 static void bnx2x_igu_int_enable(struct bnx2x *bp)
1618 {
1619         u32 val;
1620         bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1621         bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1622         bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1623 
1624         val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1625 
1626         if (msix) {
1627                 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1628                          IGU_PF_CONF_SINGLE_ISR_EN);
1629                 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1630                         IGU_PF_CONF_ATTN_BIT_EN);
1631 
1632                 if (single_msix)
1633                         val |= IGU_PF_CONF_SINGLE_ISR_EN;
1634         } else if (msi) {
1635                 val &= ~IGU_PF_CONF_INT_LINE_EN;
1636                 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1637                         IGU_PF_CONF_ATTN_BIT_EN |
1638                         IGU_PF_CONF_SINGLE_ISR_EN);
1639         } else {
1640                 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1641                 val |= (IGU_PF_CONF_INT_LINE_EN |
1642                         IGU_PF_CONF_ATTN_BIT_EN |
1643                         IGU_PF_CONF_SINGLE_ISR_EN);
1644         }
1645 
1646         /* Clean previous status - need to configure igu prior to ack */
1647         if ((!msix) || single_msix) {
1648                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1649                 bnx2x_ack_int(bp);
1650         }
1651 
1652         val |= IGU_PF_CONF_FUNC_EN;
1653 
1654         DP(NETIF_MSG_IFUP, "write 0x%x to IGU  mode %s\n",
1655            val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1656 
1657         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1658 
1659         if (val & IGU_PF_CONF_INT_LINE_EN)
1660                 pci_intx(bp->pdev, true);
1661 
1662         barrier();
1663 
1664         /* init leading/trailing edge */
1665         if (IS_MF(bp)) {
1666                 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1667                 if (bp->port.pmf)
1668                         /* enable nig and gpio3 attention */
1669                         val |= 0x1100;
1670         } else
1671                 val = 0xffff;
1672 
1673         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1674         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1675 
1676         /* Make sure that interrupts are indeed enabled from here on */
1677         mmiowb();
1678 }
1679 
1680 void bnx2x_int_enable(struct bnx2x *bp)
1681 {
1682         if (bp->common.int_block == INT_BLOCK_HC)
1683                 bnx2x_hc_int_enable(bp);
1684         else
1685                 bnx2x_igu_int_enable(bp);
1686 }
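
/* Note: int_block is INT_BLOCK_HC on E1x chips, which use the HC (host
 * coalescing) block, while E2/E3 chips use the IGU; the two enable
 * paths above program the equivalent MSI-X/MSI/INTx and
 * leading/trailing-edge settings in the respective block.
 */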
1687 
1688 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1689 {
1690         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1691         int i, offset;
1692 
1693         if (disable_hw)
1694                 /* prevent the HW from sending interrupts */
1695                 bnx2x_int_disable(bp);
1696 
1697         /* make sure all ISRs are done */
1698         if (msix) {
1699                 synchronize_irq(bp->msix_table[0].vector);
1700                 offset = 1;
1701                 if (CNIC_SUPPORT(bp))
1702                         offset++;
1703                 for_each_eth_queue(bp, i)
1704                         synchronize_irq(bp->msix_table[offset++].vector);
1705         } else
1706                 synchronize_irq(bp->pdev->irq);
1707 
1708         /* make sure sp_task is not running */
1709         cancel_delayed_work(&bp->sp_task);
1710         cancel_delayed_work(&bp->period_task);
1711         flush_workqueue(bnx2x_wq);
1712 }
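
/* The MSI-X vector layout assumed above: entry 0 is the slowpath
 * vector, entry 1 belongs to CNIC when it is supported, and each ETH
 * queue takes one entry after that - hence the offset bookkeeping in
 * the synchronize_irq() loop.
 */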
1713 
1714 /* fast path */
1715 
1716 /*
1717  * General service functions
1718  */
1719 
1720 /* Return true if succeeded to acquire the lock */
1721 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1722 {
1723         u32 lock_status;
1724         u32 resource_bit = (1 << resource);
1725         int func = BP_FUNC(bp);
1726         u32 hw_lock_control_reg;
1727 
1728         DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1729            "Trying to take a lock on resource %d\n", resource);
1730 
1731         /* Validating that the resource is within range */
1732         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1733                 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1734                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1735                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1736                 return false;
1737         }
1738 
1739         if (func <= 5)
1740                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1741         else
1742                 hw_lock_control_reg =
1743                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1744 
1745         /* Try to acquire the lock */
1746         REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1747         lock_status = REG_RD(bp, hw_lock_control_reg);
1748         if (lock_status & resource_bit)
1749                 return true;
1750 
1751         DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1752            "Failed to get a lock on resource %d\n", resource);
1753         return false;
1754 }
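
/* HW lock mechanics, as used above and in bnx2x_acquire/release_hw_lock
 * below: writing the resource bit to hw_lock_control_reg + 4 requests
 * the lock, reading hw_lock_control_reg back shows the bits this
 * function currently owns, and writing the bit to hw_lock_control_reg
 * itself releases it.  Functions 0-5 use MISC_REG_DRIVER_CONTROL_1 with
 * an 8-byte stride; functions 6-7 continue at MISC_REG_DRIVER_CONTROL_7.
 */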
1755 
1756 /**
1757  * bnx2x_get_leader_lock_resource - get the recovery leader resource id
1758  *
1759  * @bp: driver handle
1760  *
1761  * Returns the recovery leader resource id according to the engine this function
1762  * belongs to. Currently only 2 engines are supported.
1763  */
1764 static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1765 {
1766         if (BP_PATH(bp))
1767                 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
1768         else
1769                 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
1770 }
1771 
1772 /**
1773  * bnx2x_trylock_leader_lock - try to acquire a leader lock.
1774  *
1775  * @bp: driver handle
1776  *
1777  * Tries to acquire a leader lock for current engine.
1778  */
1779 static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1780 {
1781         return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1782 }
1783 
1784 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1785 
1786 /* schedule the sp task and mark that interrupt occurred (runs from ISR) */
1787 static int bnx2x_schedule_sp_task(struct bnx2x *bp)
1788 {
1789         /* Set the interrupt occurred bit for the sp-task to recognize it
1790          * must ack the interrupt and transition according to the IGU
1791          * state machine.
1792          */
1793         atomic_set(&bp->interrupt_occurred, 1);
1794 
1795         /* The sp_task must execute only after this bit
1796          * is set, otherwise we will get out of sync and miss all
1797          * further interrupts. Hence, the barrier.
1798          */
1799         smp_wmb();
1800 
1801         /* schedule sp_task to workqueue */
1802         return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1803 }
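
/* The smp_wmb() in bnx2x_schedule_sp_task() orders the
 * interrupt_occurred store before the work is queued; the sp_task side
 * is expected to read the flag with matching ordering before acting on
 * it, otherwise an interrupt could be missed.
 */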
1804 
1805 void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1806 {
1807         struct bnx2x *bp = fp->bp;
1808         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1809         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1810         enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
1811         struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
1812 
1813         DP(BNX2X_MSG_SP,
1814            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
1815            fp->index, cid, command, bp->state,
1816            rr_cqe->ramrod_cqe.ramrod_type);
1817 
1818         /* If cid is within VF range, replace the slowpath object with the
1819          * one corresponding to this VF
1820          */
1821         if (cid >= BNX2X_FIRST_VF_CID  &&
1822             cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
1823                 bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
1824 
1825         switch (command) {
1826         case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
1827                 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
1828                 drv_cmd = BNX2X_Q_CMD_UPDATE;
1829                 break;
1830 
1831         case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
1832                 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
1833                 drv_cmd = BNX2X_Q_CMD_SETUP;
1834                 break;
1835 
1836         case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
1837                 DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
1838                 drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
1839                 break;
1840 
1841         case (RAMROD_CMD_ID_ETH_HALT):
1842                 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
1843                 drv_cmd = BNX2X_Q_CMD_HALT;
1844                 break;
1845 
1846         case (RAMROD_CMD_ID_ETH_TERMINATE):
1847                 DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
1848                 drv_cmd = BNX2X_Q_CMD_TERMINATE;
1849                 break;
1850 
1851         case (RAMROD_CMD_ID_ETH_EMPTY):
1852                 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
1853                 drv_cmd = BNX2X_Q_CMD_EMPTY;
1854                 break;
1855 
1856         case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
1857                 DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
1858                 drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
1859                 break;
1860 
1861         default:
1862                 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1863                           command, fp->index);
1864                 return;
1865         }
1866 
1867         if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
1868             q_obj->complete_cmd(bp, q_obj, drv_cmd))
1869                 /* q_obj->complete_cmd() failure means that this was
1870                  * an unexpected completion.
1871                  *
1872                  * In this case we don't want to increase the bp->spq_left
1873                  * because apparently we haven't sent this command in the
1874                  * first place.
1875                  */
1876 #ifdef BNX2X_STOP_ON_ERROR
1877                 bnx2x_panic();
1878 #else
1879                 return;
1880 #endif
1881 
1882         smp_mb__before_atomic();
1883         atomic_inc(&bp->cq_spq_left);
1884         /* push the change in bp->cq_spq_left towards the memory */
1885         smp_mb__after_atomic();
1886 
1887         DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1888 
1889         if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1890             (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1891                 /* if the Q update ramrod completed for the last Q in the
1892                  * AFEX vif-set flow, then ACK the MCP at the end.
1893                  *
1894                  * Mark the pending-ACK-to-MCP bit before clearing the
1895                  * update-pending bit, to prevent a window in which both
1896                  * bits are cleared. At the end of load/unload the driver
1897                  * checks that sp_state is cleared, and this order prevents
1898                  * races.
1899                  */
1900                 smp_mb__before_atomic();
1901                 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1902                 wmb();
1903                 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1904                 smp_mb__after_atomic();
1905 
1906                 /* schedule the sp task as mcp ack is required */
1907                 bnx2x_schedule_sp_task(bp);
1908         }
1909 
1910         return;
1911 }
1912 
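/* Status-bit layout used by the INTx/MSI handler below: bit 0 is the
 * default status block (attentions/slowpath, handed to the sp_task),
 * and with CNIC support bit 1 belongs to CNIC, so ETH queue i maps to
 * mask 0x2 << (i + CNIC_SUPPORT(bp)) - e.g. fp[0] is 0x4 with CNIC and
 * 0x2 without it.
 */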
1913 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1914 {
1915         struct bnx2x *bp = netdev_priv(dev_instance);
1916         u16 status = bnx2x_ack_int(bp);
1917         u16 mask;
1918         int i;
1919         u8 cos;
1920 
1921         /* Return here if interrupt is shared and it's not for us */
1922         if (unlikely(status == 0)) {
1923                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1924                 return IRQ_NONE;
1925         }
1926         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1927 
1928 #ifdef BNX2X_STOP_ON_ERROR
1929         if (unlikely(bp->panic))
1930                 return IRQ_HANDLED;
1931 #endif
1932 
1933         for_each_eth_queue(bp, i) {
1934                 struct bnx2x_fastpath *fp = &bp->fp[i];
1935 
1936                 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1937                 if (status & mask) {
1938                         /* Handle Rx or Tx according to SB id */
1939                         for_each_cos_in_tx_queue(fp, cos)
1940                                 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1941                         prefetch(&fp->sb_running_index[SM_RX_ID]);
1942                         napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1943                         status &= ~mask;
1944                 }
1945         }
1946 
1947         if (CNIC_SUPPORT(bp)) {
1948                 mask = 0x2;
1949                 if (status & (mask | 0x1)) {
1950                         struct cnic_ops *c_ops = NULL;
1951 
1952                         rcu_read_lock();
1953                         c_ops = rcu_dereference(bp->cnic_ops);
1954                         if (c_ops && (bp->cnic_eth_dev.drv_state &
1955                                       CNIC_DRV_STATE_HANDLES_IRQ))
1956                                 c_ops->cnic_handler(bp->cnic_data, NULL);
1957                         rcu_read_unlock();
1958 
1959                         status &= ~mask;
1960                 }
1961         }
1962 
1963         if (unlikely(status & 0x1)) {
1964 
1965                 /* schedule sp task to perform default status block work, ack
1966                  * attentions and enable interrupts.
1967                  */
1968                 bnx2x_schedule_sp_task(bp);
1969 
1970                 status &= ~0x1;
1971                 if (!status)
1972                         return IRQ_HANDLED;
1973         }
1974 
1975         if (unlikely(status))
1976                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1977                    status);
1978 
1979         return IRQ_HANDLED;
1980 }
1981 
1982 /* Link */
1983 
1984 /*
1985  * General service functions
1986  */
1987 
1988 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1989 {
1990         u32 lock_status;
1991         u32 resource_bit = (1 << resource);
1992         int func = BP_FUNC(bp);
1993         u32 hw_lock_control_reg;
1994         int cnt;
1995 
1996         /* Validating that the resource is within range */
1997         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1998                 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1999                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
2000                 return -EINVAL;
2001         }
2002 
2003         if (func <= 5) {
2004                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2005         } else {
2006                 hw_lock_control_reg =
2007                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2008         }
2009 
2010         /* Validating that the resource is not already taken */
2011         lock_status = REG_RD(bp, hw_lock_control_reg);
2012         if (lock_status & resource_bit) {
2013                 BNX2X_ERR("lock_status 0x%x  resource_bit 0x%x\n",
2014                    lock_status, resource_bit);
2015                 return -EEXIST;
2016         }
2017 
2018         /* Try for 5 seconds, every 5 ms */
2019         for (cnt = 0; cnt < 1000; cnt++) {
2020                 /* Try to acquire the lock */
2021                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
2022                 lock_status = REG_RD(bp, hw_lock_control_reg);
2023                 if (lock_status & resource_bit)
2024                         return 0;
2025 
2026                 usleep_range(5000, 10000);
2027         }
2028         BNX2X_ERR("Timeout\n");
2029         return -EAGAIN;
2030 }
2031 
2032 int bnx2x_release_leader_lock(struct bnx2x *bp)
2033 {
2034         return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
2035 }
2036 
2037 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
2038 {
2039         u32 lock_status;
2040         u32 resource_bit = (1 << resource);
2041         int func = BP_FUNC(bp);
2042         u32 hw_lock_control_reg;
2043 
2044         /* Validating that the resource is within range */
2045         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
2046                 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
2047                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
2048                 return -EINVAL;
2049         }
2050 
2051         if (func <= 5) {
2052                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2053         } else {
2054                 hw_lock_control_reg =
2055                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2056         }
2057 
2058         /* Validating that the resource is currently taken */
2059         lock_status = REG_RD(bp, hw_lock_control_reg);
2060         if (!(lock_status & resource_bit)) {
2061                 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
2062                           lock_status, resource_bit);
2063                 return -EFAULT;
2064         }
2065 
2066         REG_WR(bp, hw_lock_control_reg, resource_bit);
2067         return 0;
2068 }
2069 
2070 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2071 {
2072         /* The GPIO should be swapped if swap register is set and active */
2073         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2074                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2075         int gpio_shift = gpio_num +
2076                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2077         u32 gpio_mask = (1 << gpio_shift);
2078         u32 gpio_reg;
2079         int value;
2080 
2081         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2082                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2083                 return -EINVAL;
2084         }
2085 
2086         /* read GPIO value */
2087         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2088 
2089         /* get the requested pin value */
2090         if ((gpio_reg & gpio_mask) == gpio_mask)
2091                 value = 1;
2092         else
2093                 value = 0;
2094 
2095         return value;
2096 }
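
/* Port swap example for the GPIO helpers: gpio_port =
 * (swap && override) ^ port, so with both NIG swap registers set a
 * request for port 1 addresses port 0's GPIO bank (1 ^ 1 = 0), and the
 * pin's bit position is then gpio_num, plus
 * MISC_REGISTERS_GPIO_PORT_SHIFT when the (possibly swapped) port is 1.
 */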
2097 
2098 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2099 {
2100         /* The GPIO should be swapped if swap register is set and active */
2101         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2102                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2103         int gpio_shift = gpio_num +
2104                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2105         u32 gpio_mask = (1 << gpio_shift);
2106         u32 gpio_reg;
2107 
2108         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2109                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2110                 return -EINVAL;
2111         }
2112 
2113         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2114         /* read GPIO and mask except the float bits */
2115         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2116 
2117         switch (mode) {
2118         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2119                 DP(NETIF_MSG_LINK,
2120                    "Set GPIO %d (shift %d) -> output low\n",
2121                    gpio_num, gpio_shift);
2122                 /* clear FLOAT and set CLR */
2123                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2124                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2125                 break;
2126 
2127         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2128                 DP(NETIF_MSG_LINK,
2129                    "Set GPIO %d (shift %d) -> output high\n",
2130                    gpio_num, gpio_shift);
2131                 /* clear FLOAT and set SET */
2132                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2133                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2134                 break;
2135 
2136         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2137                 DP(NETIF_MSG_LINK,
2138                    "Set GPIO %d (shift %d) -> input\n",
2139                    gpio_num, gpio_shift);
2140                 /* set FLOAT */
2141                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2142                 break;
2143 
2144         default:
2145                 break;
2146         }
2147 
2148         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2149         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2150 
2151         return 0;
2152 }
2153 
2154 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2155 {
2156         u32 gpio_reg = 0;
2157         int rc = 0;
2158 
2159         /* Any port swapping should be handled by caller. */
2160 
2161         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2162         /* read GPIO and clear the FLOAT/CLR/SET bits for the given pins */
2163         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2164         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2165         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2166         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2167 
2168         switch (mode) {
2169         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2170                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2171                 /* set CLR */
2172                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2173                 break;
2174 
2175         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2176                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2177                 /* set SET */
2178                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2179                 break;
2180 
2181         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2182                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2183                 /* set FLOAT */
2184                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2185                 break;
2186 
2187         default:
2188                 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2189                 rc = -EINVAL;
2190                 break;
2191         }
2192 
2193         if (rc == 0)
2194                 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2195 
2196         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2197 
2198         return rc;
2199 }
2200 
2201 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2202 {
2203         /* The GPIO should be swapped if swap register is set and active */
2204         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2205                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2206         int gpio_shift = gpio_num +
2207                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2208         u32 gpio_mask = (1 << gpio_shift);
2209         u32 gpio_reg;
2210 
2211         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2212                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2213                 return -EINVAL;
2214         }
2215 
2216         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2217         /* read GPIO int */
2218         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2219 
2220         switch (mode) {
2221         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2222                 DP(NETIF_MSG_LINK,
2223                    "Clear GPIO INT %d (shift %d) -> output low\n",
2224                    gpio_num, gpio_shift);
2225                 /* clear SET and set CLR */
2226                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2227                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2228                 break;
2229 
2230         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2231                 DP(NETIF_MSG_LINK,
2232                    "Set GPIO INT %d (shift %d) -> output high\n",
2233                    gpio_num, gpio_shift);
2234                 /* clear CLR and set SET */
2235                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2236                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2237                 break;
2238 
2239         default:
2240                 break;
2241         }
2242 
2243         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2244         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2245 
2246         return 0;
2247 }
2248 
2249 static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2250 {
2251         u32 spio_reg;
2252 
2253         /* Only 2 SPIOs are configurable */
2254         if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2255                 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2256                 return -EINVAL;
2257         }
2258 
2259         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2260         /* read SPIO and mask except the float bits */
2261         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2262 
2263         switch (mode) {
2264         case MISC_SPIO_OUTPUT_LOW:
2265                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
2266                 /* clear FLOAT and set CLR */
2267                 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2268                 spio_reg |=  (spio << MISC_SPIO_CLR_POS);
2269                 break;
2270 
2271         case MISC_SPIO_OUTPUT_HIGH:
2272                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
2273                 /* clear FLOAT and set SET */
2274                 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2275                 spio_reg |=  (spio << MISC_SPIO_SET_POS);
2276                 break;
2277 
2278         case MISC_SPIO_INPUT_HI_Z:
2279                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
2280                 /* set FLOAT */
2281                 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2282                 break;
2283 
2284         default:
2285                 break;
2286         }
2287 
2288         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2289         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2290 
2291         return 0;
2292 }
2293 
2294 void bnx2x_calc_fc_adv(struct bnx2x *bp)
2295 {
2296         u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2297 
2298         bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2299                                            ADVERTISED_Pause);
2300         switch (bp->link_vars.ieee_fc &
2301                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2302         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2303                 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2304                                                   ADVERTISED_Pause);
2305                 break;
2306 
2307         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2308                 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2309                 break;
2310 
2311         default:
2312                 break;
2313         }
2314 }
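
/* The mapping above mirrors the IEEE 802.3 pause autoneg bits:
 * PAUSE_BOTH advertises Pause and Asym_Pause (symmetric and asymmetric
 * capable), while ASYMMETRIC advertises Asym_Pause alone, i.e. TX-only
 * pause.
 */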
2315 
2316 static void bnx2x_set_requested_fc(struct bnx2x *bp)
2317 {
2318         /* Initialize link parameters structure variables
2319          * It is recommended to turn off RX FC for jumbo frames
2320          * for better performance
2321          */
2322         if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2323                 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2324         else
2325                 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2326 }
2327 
2328 static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2329 {
2330         u32 pause_enabled = 0;
2331 
2332         if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2333                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2334                         pause_enabled = 1;
2335 
2336                 REG_WR(bp, BAR_USTRORM_INTMEM +
2337                            USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2338                        pause_enabled);
2339         }
2340 
2341         DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2342            pause_enabled ? "enabled" : "disabled");
2343 }
2344 
2345 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2346 {
2347         int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2348         u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2349 
2350         if (!BP_NOMCP(bp)) {
2351                 bnx2x_set_requested_fc(bp);
2352                 bnx2x_acquire_phy_lock(bp);
2353 
2354                 if (load_mode == LOAD_DIAG) {
2355                         struct link_params *lp = &bp->link_params;
2356                         lp->loopback_mode = LOOPBACK_XGXS;
2357                         /* Prefer doing PHY loopback at highest speed */
2358                         if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
2359                                 if (lp->speed_cap_mask[cfx_idx] &
2360                                     PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
2361                                         lp->req_line_speed[cfx_idx] =
2362                                         SPEED_20000;
2363                                 else if (lp->speed_cap_mask[cfx_idx] &
2364                                             PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2365                                                 lp->req_line_speed[cfx_idx] =
2366                                                 SPEED_10000;
2367                                 else
2368                                         lp->req_line_speed[cfx_idx] =
2369                                         SPEED_1000;
2370                         }
2371                 }
2372 
2373                 if (load_mode == LOAD_LOOPBACK_EXT) {
2374                         struct link_params *lp = &bp->link_params;
2375                         lp->loopback_mode = LOOPBACK_EXT;
2376                 }
2377 
2378                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2379 
2380                 bnx2x_release_phy_lock(bp);
2381 
2382                 bnx2x_init_dropless_fc(bp);
2383 
2384                 bnx2x_calc_fc_adv(bp);
2385 
2386                 if (bp->link_vars.link_up) {
2387                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2388                         bnx2x_link_report(bp);
2389                 }
2390                 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2391                 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2392                 return rc;
2393         }
2394         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2395         return -EINVAL;
2396 }
2397 
2398 void bnx2x_link_set(struct bnx2x *bp)
2399 {
2400         if (!BP_NOMCP(bp)) {
2401                 bnx2x_acquire_phy_lock(bp);
2402                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2403                 bnx2x_release_phy_lock(bp);
2404 
2405                 bnx2x_init_dropless_fc(bp);
2406 
2407                 bnx2x_calc_fc_adv(bp);
2408         } else
2409                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2410 }
2411 
2412 static void bnx2x__link_reset(struct bnx2x *bp)
2413 {
2414         if (!BP_NOMCP(bp)) {
2415                 bnx2x_acquire_phy_lock(bp);
2416                 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2417                 bnx2x_release_phy_lock(bp);
2418         } else
2419                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2420 }
2421 
2422 void bnx2x_force_link_reset(struct bnx2x *bp)
2423 {
2424         bnx2x_acquire_phy_lock(bp);
2425         bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2426         bnx2x_release_phy_lock(bp);
2427 }
2428 
2429 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2430 {
2431         u8 rc = 0;
2432 
2433         if (!BP_NOMCP(bp)) {
2434                 bnx2x_acquire_phy_lock(bp);
2435                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2436                                      is_serdes);
2437                 bnx2x_release_phy_lock(bp);
2438         } else
2439                 BNX2X_ERR("Bootcode is missing - can not test link\n");
2440 
2441         return rc;
2442 }
2443 
2444 /* Calculates the per-vn min rates and stores them in
2445    input->vnic_min_rate[]; they are needed for further
2446    normalizing of the min_rates.
2447    If all the min_rates are 0 (or ETS is enabled), the fairness
2448    algorithm is deactivated by clearing
2449    CMNG_FLAGS_PER_PORT_FAIRNESS_VN in input->flags.
2450    If not all min_rates are zero, those that are zero will be
2451    set to DEF_MIN_RATE.
2452  */
2453 static void bnx2x_calc_vn_min(struct bnx2x *bp,
2454                                       struct cmng_init_input *input)
2455 {
2456         int all_zero = 1;
2457         int vn;
2458 
2459         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2460                 u32 vn_cfg = bp->mf_config[vn];
2461                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2462                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2463 
2464                 /* Skip hidden vns */
2465                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2466                         vn_min_rate = 0;
2467                 /* If min rate is zero - set it to 1 */
2468                 else if (!vn_min_rate)
2469                         vn_min_rate = DEF_MIN_RATE;
2470                 else
2471                         all_zero = 0;
2472 
2473                 input->vnic_min_rate[vn] = vn_min_rate;
2474         }
2475 
2476         /* if ETS or all min rates are zeros - disable fairness */
2477         if (BNX2X_IS_ETS_ENABLED(bp)) {
2478                 input->flags.cmng_enables &=
2479                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2480                 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2481         } else if (all_zero) {
2482                 input->flags.cmng_enables &=
2483                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2484                 DP(NETIF_MSG_IFUP,
2485                    "All MIN values are zeroes fairness will be disabled\n");
2486         } else
2487                 input->flags.cmng_enables |=
2488                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2489 }
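
/* Worked example for bnx2x_calc_vn_min(): a configured MIN_BW field of
 * 25 yields vnic_min_rate = 25 * 100 = 2500, a hidden vn gets 0, and a
 * configured 0 becomes DEF_MIN_RATE, so the fairness algorithm never
 * sees a zero weight unless every vn is zero - in which case fairness
 * is disabled altogether.
 */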
2490 
2491 static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2492                                     struct cmng_init_input *input)
2493 {
2494         u16 vn_max_rate;
2495         u32 vn_cfg = bp->mf_config[vn];
2496 
2497         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2498                 vn_max_rate = 0;
2499         else {
2500                 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2501 
2502                 if (IS_MF_PERCENT_BW(bp)) {
2503                         /* maxCfg in percents of linkspeed */
2504                         vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2505                 } else /* SD modes */
2506                         /* maxCfg is absolute in 100Mb units */
2507                         vn_max_rate = maxCfg * 100;
2508         }
2509 
2510         DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2511 
2512         input->vnic_max_rate[vn] = vn_max_rate;
2513 }
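
/* Worked example for bnx2x_calc_vn_max(): with maxCfg = 40, percent-BW
 * mode on a 10000 Mbps link gives (10000 * 40) / 100 = 4000, while SD
 * mode gives 40 * 100 = 4000 as well, since there maxCfg is already
 * expressed in 100 Mb units.
 */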
2514 
2515 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2516 {
2517         if (CHIP_REV_IS_SLOW(bp))
2518                 return CMNG_FNS_NONE;
2519         if (IS_MF(bp))
2520                 return CMNG_FNS_MINMAX;
2521 
2522         return CMNG_FNS_NONE;
2523 }
2524 
2525 void bnx2x_read_mf_cfg(struct bnx2x *bp)
2526 {
2527         int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2528 
2529         if (BP_NOMCP(bp))
2530                 return; /* unclear what the default value should be in this case */
2531 
2532         /* For 2 port configuration the absolute function number formula
2533          * is:
2534          *      abs_func = 2 * vn + BP_PORT + BP_PATH
2535          *
2536          *      and there are 4 functions per port
2537          *
2538          * For 4 port configuration it is
2539          *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2540          *
2541          *      and there are 2 functions per port
2542          */
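	/* E.g. in 2 port mode, vn 2 on port 1 of path 0 is function
	 * 1 * (2 * 2 + 1) + 0 = 5, while in 4 port mode vn 1 on port 1
	 * of path 0 is function 2 * (2 * 1 + 1) + 0 = 6.
	 */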
2543         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2544                 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2545 
2546                 if (func >= E1H_FUNC_MAX)
2547                         break;
2548 
2549                 bp->mf_config[vn] =
2550                         MF_CFG_RD(bp, func_mf_config[func].config);
2551         }
2552         if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2553                 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2554                 bp->flags |= MF_FUNC_DIS;
2555         } else {
2556                 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2557                 bp->flags &= ~MF_FUNC_DIS;
2558         }
2559 }
2560 
2561 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2562 {
2563         struct cmng_init_input input;
2564         memset(&input, 0, sizeof(struct cmng_init_input));
2565 
2566         input.port_rate = bp->link_vars.line_speed;
2567 
2568         if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2569                 int vn;
2570 
2571                 /* read mf conf from shmem */
2572                 if (read_cfg)
2573                         bnx2x_read_mf_cfg(bp);
2574 
2575                 /* calculate vn_weight_sum and enable fairness if not 0 */
2576                 bnx2x_calc_vn_min(bp, &input);
2577 
2578                 /* calculate and set min-max rate for each vn */
2579                 if (bp->port.pmf)
2580                         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2581                                 bnx2x_calc_vn_max(bp, vn, &input);
2582 
2583                 /* always enable rate shaping and fairness */
2584                 input.flags.cmng_enables |=
2585                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2586 
2587                 bnx2x_init_cmng(&input, &bp->cmng);
2588                 return;
2589         }
2590 
2591         /* rate shaping and fairness are disabled */
2592         DP(NETIF_MSG_IFUP,
2593            "rate shaping and fairness are disabled\n");
2594 }
2595 
2596 static void storm_memset_cmng(struct bnx2x *bp,
2597                               struct cmng_init *cmng,
2598                               u8 port)
2599 {
2600         int vn;
2601         size_t size = sizeof(struct cmng_struct_per_port);
2602 
2603         u32 addr = BAR_XSTRORM_INTMEM +
2604                         XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2605 
2606         __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2607 
2608         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2609                 int func = func_by_vn(bp, vn);
2610 
2611                 addr = BAR_XSTRORM_INTMEM +
2612                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2613                 size = sizeof(struct rate_shaping_vars_per_vn);
2614                 __storm_memset_struct(bp, addr, size,
2615                                       (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2616 
2617                 addr = BAR_XSTRORM_INTMEM +
2618                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2619                 size = sizeof(struct fairness_vars_per_vn);
2620                 __storm_memset_struct(bp, addr, size,
2621                                       (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2622         }
2623 }
2624 
2625 /* init cmng mode in HW according to local configuration */
2626 void bnx2x_set_local_cmng(struct bnx2x *bp)
2627 {
2628         int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2629 
2630         if (cmng_fns != CMNG_FNS_NONE) {
2631                 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2632                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2633         } else {
2634                 /* rate shaping and fairness are disabled */
2635                 DP(NETIF_MSG_IFUP,
2636                    "single function mode without fairness\n");
2637         }
2638 }
2639 
2640 /* This function is called upon link interrupt */
2641 static void bnx2x_link_attn(struct bnx2x *bp)
2642 {
2643         /* Make sure that we are synced with the current statistics */
2644         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2645 
2646         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2647 
2648         bnx2x_init_dropless_fc(bp);
2649 
2650         if (bp->link_vars.link_up) {
2651 
2652                 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2653                         struct host_port_stats *pstats;
2654 
2655                         pstats = bnx2x_sp(bp, port_stats);
2656                         /* reset old mac stats */
2657                         memset(&(pstats->mac_stx[0]), 0,
2658                                sizeof(struct mac_stx));
2659                 }
2660                 if (bp->state == BNX2X_STATE_OPEN)
2661                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2662         }
2663 
2664         if (bp->link_vars.link_up && bp->link_vars.line_speed)
2665                 bnx2x_set_local_cmng(bp);
2666 
2667         __bnx2x_link_report(bp);
2668 
2669         if (IS_MF(bp))
2670                 bnx2x_link_sync_notify(bp);
2671 }
2672 
2673 void bnx2x__link_status_update(struct bnx2x *bp)
2674 {
2675         if (bp->state != BNX2X_STATE_OPEN)
2676                 return;
2677 
2678         /* read updated dcb configuration */
2679         if (IS_PF(bp)) {
2680                 bnx2x_dcbx_pmf_update(bp);
2681                 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2682                 if (bp->link_vars.link_up)
2683                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2684                 else
2685                         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2686                 /* indicate link status */
2687                 bnx2x_link_report(bp);
2688 
2689         } else { /* VF */
2690                 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2691                                           SUPPORTED_10baseT_Full |
2692                                           SUPPORTED_100baseT_Half |
2693                                           SUPPORTED_100baseT_Full |
2694                                           SUPPORTED_1000baseT_Full |
2695                                           SUPPORTED_2500baseX_Full |
2696                                           SUPPORTED_10000baseT_Full |
2697                                           SUPPORTED_TP |
2698                                           SUPPORTED_FIBRE |
2699                                           SUPPORTED_Autoneg |
2700                                           SUPPORTED_Pause |
2701                                           SUPPORTED_Asym_Pause);
2702                 bp->port.advertising[0] = bp->port.supported[0];
2703 
2704                 bp->link_params.bp = bp;
2705                 bp->link_params.port = BP_PORT(bp);
2706                 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2707                 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2708                 bp->link_params.req_line_speed[0] = SPEED_10000;
2709                 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2710                 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2711                 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2712                 bp->link_vars.line_speed = SPEED_10000;
2713                 bp->link_vars.link_status =
2714                         (LINK_STATUS_LINK_UP |
2715                          LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2716                 bp->link_vars.link_up = 1;
2717                 bp->link_vars.duplex = DUPLEX_FULL;
2718                 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2719                 __bnx2x_link_report(bp);
2720 
2721                 bnx2x_sample_bulletin(bp);
2722 
2723                 /* if the bulletin board did not have a link status update,
2724                  * __bnx2x_link_report will report the current status, but
2725                  * it will NOT duplicate a report that was already made
2726                  * while sampling the bulletin board.
2727                  */
2728                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2729         }
2730 }
2731 
2732 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2733                                   u16 vlan_val, u8 allowed_prio)
2734 {
2735         struct bnx2x_func_state_params func_params = {NULL};
2736         struct bnx2x_func_afex_update_params *f_update_params =
2737                 &func_params.params.afex_update;
2738 
2739         func_params.f_obj = &bp->func_obj;
2740         func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2741 
2742         /* no need to wait for RAMROD completion, so don't
2743          * set RAMROD_COMP_WAIT flag
2744          */
2745 
2746         f_update_params->vif_id = vifid;
2747         f_update_params->afex_default_vlan = vlan_val;
2748         f_update_params->allowed_priorities = allowed_prio;
2749 
2750         /* if the ramrod cannot be sent, respond to MCP immediately */
2751         if (bnx2x_func_state_change(bp, &func_params) < 0)
2752                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2753 
2754         return 0;
2755 }
2756 
2757 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2758                                           u16 vif_index, u8 func_bit_map)
2759 {
2760         struct bnx2x_func_state_params func_params = {NULL};
2761         struct bnx2x_func_afex_viflists_params *update_params =
2762                 &func_params.params.afex_viflists;
2763         int rc;
2764         u32 drv_msg_code;
2765 
2766         /* validate only LIST_SET and LIST_GET are received from switch */
2767         if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2768                 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2769                           cmd_type);
2770 
2771         func_params.f_obj = &bp->func_obj;
2772         func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2773 
2774         /* set parameters according to cmd_type */
2775         update_params->afex_vif_list_command = cmd_type;
2776         update_params->vif_list_index = vif_index;
2777         update_params->func_bit_map =
2778                 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2779         update_params->func_to_clear = 0;
2780         drv_msg_code =
2781                 (cmd_type == VIF_LIST_RULE_GET) ?
2782                 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2783                 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2784 
2785         /* if the ramrod cannot be sent, respond to the MCP immediately for
2786          * SET and GET requests (others are not triggered from the MCP)
2787          */
2788         rc = bnx2x_func_state_change(bp, &func_params);
2789         if (rc < 0)
2790                 bnx2x_fw_command(bp, drv_msg_code, 0);
2791 
2792         return 0;
2793 }
2794 
2795 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2796 {
2797         struct afex_stats afex_stats;
2798         u32 func = BP_ABS_FUNC(bp);
2799         u32 mf_config;
2800         u16 vlan_val;
2801         u32 vlan_prio;
2802         u16 vif_id;
2803         u8 allowed_prio;
2804         u8 vlan_mode;
2805         u32 addr_to_write, vifid, addrs, stats_type, i;
2806 
2807         if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2808                 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2809                 DP(BNX2X_MSG_MCP,
2810                    "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2811                 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2812         }
2813 
2814         if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2815                 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2816                 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2817                 DP(BNX2X_MSG_MCP,
2818                    "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2819                    vifid, addrs);
2820                 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2821                                                addrs);
2822         }
2823 
2824         if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2825                 addr_to_write = SHMEM2_RD(bp,
2826                         afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2827                 stats_type = SHMEM2_RD(bp,
2828                         afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2829 
2830                 DP(BNX2X_MSG_MCP,
2831                    "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2832                    addr_to_write);
2833 
2834                 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2835 
2836                 /* write response to scratchpad, for MCP */
2837                 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2838                         REG_WR(bp, addr_to_write + i*sizeof(u32),
2839                                *(((u32 *)(&afex_stats))+i));
2840 
2841                 /* send ack message to MCP */
2842                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2843         }
2844 
2845         if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2846                 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2847                 bp->mf_config[BP_VN(bp)] = mf_config;
2848                 DP(BNX2X_MSG_MCP,
2849                    "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2850                    mf_config);
2851 
2852                 /* if VIF_SET is "enabled" */
2853                 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2854                         /* set rate limit directly to internal RAM */
2855                         struct cmng_init_input cmng_input;
2856                         struct rate_shaping_vars_per_vn m_rs_vn;
2857                         size_t size = sizeof(struct rate_shaping_vars_per_vn);
2858                         u32 addr = BAR_XSTRORM_INTMEM +
2859                             XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2860 
2861                         bp->mf_config[BP_VN(bp)] = mf_config;
2862 
2863                         bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2864                         m_rs_vn.vn_counter.rate =
2865                                 cmng_input.vnic_max_rate[BP_VN(bp)];
2866                         m_rs_vn.vn_counter.quota =
2867                                 (m_rs_vn.vn_counter.rate *
2868                                  RS_PERIODIC_TIMEOUT_USEC) / 8;
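                        /* Unit check (a sketch, assuming the rate is in
                         * Mbit/s as elsewhere in the cmng code): 1 Mbit/s
                         * equals 1 bit/us, so rate * RS_PERIODIC_TIMEOUT_USEC
                         * gives the bits allowed per timeout period, and
                         * dividing by 8 converts that to a byte quota.
                         */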
2869 
2870                         __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2871 
2872                         /* read relevant values from mf_cfg struct in shmem */
2873                         vif_id =
2874                                 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2875                                  FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2876                                 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2877                         vlan_val =
2878                                 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2879                                  FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2880                                 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2881                         vlan_prio = (mf_config &
2882                                      FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2883                                     FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2884                         vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
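                        /* Worked example (hypothetical values): with
                         * VLAN_PRIO_SHIFT == 13, a VID of 100 (0x064) and a
                         * priority of 5 combine into a TCI of
                         * (5 << 13) | 0x064 == 0xa064.
                         */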
2885                         vlan_mode =
2886                                 (MF_CFG_RD(bp,
2887                                            func_mf_config[func].afex_config) &
2888                                  FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2889                                 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2890                         allowed_prio =
2891                                 (MF_CFG_RD(bp,
2892                                            func_mf_config[func].afex_config) &
2893                                  FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2894                                 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2895 
2896                         /* send ramrod to FW, return in case of failure */
2897                         if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2898                                                    allowed_prio))
2899                                 return;
2900 
2901                         bp->afex_def_vlan_tag = vlan_val;
2902                         bp->afex_vlan_mode = vlan_mode;
2903                 } else {
2904                         /* notify link down because the function is disabled in the MF config */
2905                         bnx2x_link_report(bp);
2906 
2907                         /* send INVALID VIF ramrod to FW */
2908                         bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2909 
2910                         /* Reset the default afex VLAN */
2911                         bp->afex_def_vlan_tag = -1;
2912                 }
2913         }
2914 }
2915 
2916 static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
2917 {
2918         struct bnx2x_func_switch_update_params *switch_update_params;
2919         struct bnx2x_func_state_params func_params;
2920 
2921         memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
2922         switch_update_params = &func_params.params.switch_update;
2923         func_params.f_obj = &bp->func_obj;
2924         func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
2925 
2926         if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
2927                 int func = BP_ABS_FUNC(bp);
2928                 u32 val;
2929 
2930                 /* Re-learn the S-tag from shmem */
2931                 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2932                                 FUNC_MF_CFG_E1HOV_TAG_MASK;
2933                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
2934                         bp->mf_ov = val;
2935                 } else {
2936                         BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
2937                         goto fail;
2938                 }
2939 
2940                 /* Configure new S-tag in LLH */
2941                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
2942                        bp->mf_ov);
2943 
2944                 /* Send ramrod to notify the FW of the change */
2945                 __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
2946                           &switch_update_params->changes);
2947                 switch_update_params->vlan = bp->mf_ov;
2948 
2949                 if (bnx2x_func_state_change(bp, &func_params) < 0) {
2950                         BNX2X_ERR("Failed to notify FW of S-tag change to %02x\n",
2951                                   bp->mf_ov);
2952                         goto fail;
2953                 } else {
2954                         DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
2955                            bp->mf_ov);
2956                 }
2957         } else {
2958                 goto fail;
2959         }
2960 
2961         bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
2962         return;
2963 fail:
2964         bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
2965 }
2966 
2967 static void bnx2x_pmf_update(struct bnx2x *bp)
2968 {
2969         int port = BP_PORT(bp);
2970         u32 val;
2971 
2972         bp->port.pmf = 1;
2973         DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2974 
2975         /*
2976          * We need the mb() to ensure the ordering between the writing to
2977          * bp->port.pmf here and reading it from the bnx2x_periodic_task().
2978          */
2979         smp_mb();
2980 
2981         /* queue a periodic task */
2982         queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2983 
2984         bnx2x_dcbx_pmf_update(bp);
2985 
2986         /* enable nig attention */
2987         val = (0xff0f | (1 << (BP_VN(bp) + 4)));
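        /* e.g. for VN 2 this sets bit 6: 0xff0f | (1 << 6) == 0xff4f */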
2988         if (bp->common.int_block == INT_BLOCK_HC) {
2989                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2990                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2991         } else if (!CHIP_IS_E1x(bp)) {
2992                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2993                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2994         }
2995 
2996         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2997 }
2998 
2999 /* end of Link */
3000 
3001 /* slow path */
3002 
3003 /*
3004  * General service functions
3005  */
3006 
3007 /* send the MCP a request, block until there is a reply */
3008 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
3009 {
3010         int mb_idx = BP_FW_MB_IDX(bp);
3011         u32 seq;
3012         u32 rc = 0;
3013         u32 cnt = 1;
3014         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
3015 
3016         mutex_lock(&bp->fw_mb_mutex);
3017         seq = ++bp->fw_seq;
3018         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
3019         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
3020 
3021         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
3022                         (command | seq), param);
3023 
3024         do {
3025                 /* let the FW do its magic ... */
3026                 msleep(delay);
3027 
3028                 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
3029 
3030                 /* Give the FW up to 5 seconds (500 iterations * 10 ms) */
3031         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
3032 
3033         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
3034            cnt*delay, rc, seq);
3035 
3036         /* is this a reply to our command? */
3037         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
3038                 rc &= FW_MSG_CODE_MASK;
3039         else {
3040                 /* FW BUG! */
3041                 BNX2X_ERR("FW failed to respond!\n");
3042                 bnx2x_fw_dump(bp);
3043                 rc = 0;
3044         }
3045         mutex_unlock(&bp->fw_mb_mutex);
3046 
3047         return rc;
3048 }
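/* The return value is masked down to the FW_MSG_CODE_* space, and 0 means
 * the MCP never replied.  A minimal caller sketch (the opcode shown is one
 * used elsewhere in this file):
 *
 *	if (!bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0))
 *		BNX2X_ERR("MCP did not respond\n");
 */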
3049 
3050 static void storm_memset_func_cfg(struct bnx2x *bp,
3051                                  struct tstorm_eth_function_common_config *tcfg,
3052                                  u16 abs_fid)
3053 {
3054         size_t size = sizeof(struct tstorm_eth_function_common_config);
3055 
3056         u32 addr = BAR_TSTRORM_INTMEM +
3057                         TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
3058 
3059         __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
3060 }
3061 
3062 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
3063 {
3064         if (CHIP_IS_E1x(bp)) {
3065                 struct tstorm_eth_function_common_config tcfg = {0};
3066 
3067                 storm_memset_func_cfg(bp, &tcfg, p->func_id);
3068         }
3069 
3070         /* Enable the function in the FW */
3071         storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
3072         storm_memset_func_en(bp, p->func_id, 1);
3073 
3074         /* spq */
3075         if (p->spq_active) {
3076                 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
3077                 REG_WR(bp, XSEM_REG_FAST_MEMORY +
3078                        XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
3079         }
3080 }
3081 
3082 /**
3083  * bnx2x_get_common_flags - Return common flags
3084  *
3085  * @bp:         device handle
3086  * @fp:         queue handle
3087  * @zero_stats: TRUE if statistics zeroing is needed
3088  *
3089  * Return the flags that are common to both Tx-only and normal connections.
3090  */
3091 static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
3092                                             struct bnx2x_fastpath *fp,
3093                                             bool zero_stats)
3094 {
3095         unsigned long flags = 0;
3096 
3097         /* PF driver will always initialize the Queue to an ACTIVE state */
3098         __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
3099 
3100         /* tx only connections collect statistics (on the same index as the
3101          * parent connection). The statistics are zeroed when the parent
3102          * connection is initialized.
3103          */
3104 
3105         __set_bit(BNX2X_Q_FLG_STATS, &flags);
3106         if (zero_stats)
3107                 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
3108 
3109         if (bp->flags & TX_SWITCHING)
3110                 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
3111 
3112         __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
3113         __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
3114 
3115 #ifdef BNX2X_STOP_ON_ERROR
3116         __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
3117 #endif
3118 
3119         return flags;
3120 }
3121 
3122 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3123                                        struct bnx2x_fastpath *fp,
3124                                        bool leading)
3125 {
3126         unsigned long flags = 0;
3127 
3128         /* calculate other queue flags */
3129         if (IS_MF_SD(bp))
3130                 __set_bit(BNX2X_Q_FLG_OV, &flags);
3131 
3132         if (IS_FCOE_FP(fp)) {
3133                 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
3134                 /* For FCoE - force usage of default priority (for afex) */
3135                 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3136         }
3137 
3138         if (fp->mode != TPA_MODE_DISABLED) {
3139                 __set_bit(BNX2X_Q_FLG_TPA, &flags);
3140                 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3141                 if (fp->mode == TPA_MODE_GRO)
3142                         __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
3143         }
3144 
3145         if (leading) {
3146                 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3147                 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3148         }
3149 
3150         /* Always set HW VLAN stripping */
3151         __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3152 
3153         /* configure silent vlan removal */
3154         if (IS_MF_AFEX(bp))
3155                 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3156 
3157         return flags | bnx2x_get_common_flags(bp, fp, true);
3158 }
3159 
3160 static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3161         struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3162         u8 cos)
3163 {
3164         gen_init->stat_id = bnx2x_stats_id(fp);
3165         gen_init->spcl_id = fp->cl_id;
3166 
3167         /* Always use mini-jumbo MTU for FCoE L2 ring */
3168         if (IS_FCOE_FP(fp))
3169                 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3170         else
3171                 gen_init->mtu = bp->dev->mtu;
3172 
3173         gen_init->cos = cos;
3174 
3175         gen_init->fp_hsi = ETH_FP_HSI_VERSION;
3176 }
3177 
3178 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3179         struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3180         struct bnx2x_rxq_setup_params *rxq_init)
3181 {
3182         u8 max_sge = 0;
3183         u16 sge_sz = 0;
3184         u16 tpa_agg_size = 0;
3185 
3186         if (fp->mode != TPA_MODE_DISABLED) {
3187                 pause->sge_th_lo = SGE_TH_LO(bp);
3188                 pause->sge_th_hi = SGE_TH_HI(bp);
3189 
3190                 /* validate SGE ring has enough to cross high threshold */
3191                 WARN_ON(bp->dropless_fc &&
3192                                 pause->sge_th_hi + FW_PREFETCH_CNT >
3193                                 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3194 
3195                 tpa_agg_size = TPA_AGG_SIZE;
3196                 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3197                         SGE_PAGE_SHIFT;
3198                 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3199                           (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3200                 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
3201         }
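        /* Illustrative sizing (a sketch, assuming 4 KiB SGE pages and
         * PAGES_PER_SGE == 1): a 9000-byte MTU aligns up to 12 KiB,
         * giving max_sge = 3 SGEs per aggregated packet.
         */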
3202 
3203         /* pause - not for e1 */
3204         if (!CHIP_IS_E1(bp)) {
3205                 pause->bd_th_lo = BD_TH_LO(bp);
3206                 pause->bd_th_hi = BD_TH_HI(bp);
3207 
3208                 pause->rcq_th_lo = RCQ_TH_LO(bp);
3209                 pause->rcq_th_hi = RCQ_TH_HI(bp);
3210                 /*
3211                  * validate that rings have enough entries to cross
3212                  * high thresholds
3213                  */
3214                 WARN_ON(bp->dropless_fc &&
3215                                 pause->bd_th_hi + FW_PREFETCH_CNT >
3216                                 bp->rx_ring_size);
3217                 WARN_ON(bp->dropless_fc &&
3218                                 pause->rcq_th_hi + FW_PREFETCH_CNT >
3219                                 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3220 
3221                 pause->pri_map = 1;
3222         }
3223 
3224         /* rxq setup */
3225         rxq_init->dscr_map = fp->rx_desc_mapping;
3226         rxq_init->sge_map = fp->rx_sge_mapping;
3227         rxq_init->rcq_map = fp->rx_comp_mapping;
3228         rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3229 
3230         /* This is the maximum number of data bytes that may be
3231          * placed in the BD (not including padding).
3232          */
3233         rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3234                            BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3235 
3236         rxq_init->cl_qzone_id = fp->cl_qzone_id;
3237         rxq_init->tpa_agg_sz = tpa_agg_size;
3238         rxq_init->sge_buf_sz = sge_sz;
3239         rxq_init->max_sges_pkt = max_sge;
3240         rxq_init->rss_engine_id = BP_FUNC(bp);
3241         rxq_init->mcast_engine_id = BP_FUNC(bp);
3242 
3243         /* Maximum number of simultaneous TPA aggregations for this Queue.
3244          *
3245          * For PF Clients it should be the maximum available number.
3246          * VF driver(s) may want to define it to a smaller value.
3247          */
3248         rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3249 
3250         rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3251         rxq_init->fw_sb_id = fp->fw_sb_id;
3252 
3253         if (IS_FCOE_FP(fp))
3254                 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3255         else
3256                 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3257         /* configure silent vlan removal
3258          * if multi function mode is afex, then mask default vlan
3259          */
3260         if (IS_MF_AFEX(bp)) {
3261                 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3262                 rxq_init->silent_removal_mask = VLAN_VID_MASK;
3263         }
3264 }
3265 
3266 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3267         struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3268         u8 cos)
3269 {
3270         txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3271         txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3272         txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3273         txq_init->fw_sb_id = fp->fw_sb_id;
3274 
3275         /*
3276          * set the tss leading client id for TX classification ==
3277          * leading RSS client id
3278          */
3279         txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3280 
3281         if (IS_FCOE_FP(fp)) {
3282                 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3283                 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3284         }
3285 }
3286 
3287 static void bnx2x_pf_init(struct bnx2x *bp)
3288 {
3289         struct bnx2x_func_init_params func_init = {0};
3290         struct event_ring_data eq_data = { {0} };
3291 
3292         if (!CHIP_IS_E1x(bp)) {
3293                 /* reset IGU PF statistics: MSIX + ATTN */
3294                 /* PF */
3295                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3296                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3297                            (CHIP_MODE_IS_4_PORT(bp) ?
3298                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3299                 /* ATTN */
3300                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3301                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3302                            BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3303                            (CHIP_MODE_IS_4_PORT(bp) ?
3304                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3305         }
3306 
3307         func_init.spq_active = true;
3308         func_init.pf_id = BP_FUNC(bp);
3309         func_init.func_id = BP_FUNC(bp);
3310         func_init.spq_map = bp->spq_mapping;
3311         func_init.spq_prod = bp->spq_prod_idx;
3312 
3313         bnx2x_func_init(bp, &func_init);
3314 
3315         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3316 
3317         /*
3318          * Congestion management values depend on the link rate.
3319          * There is no active link yet, so the initial link rate is set to
3320          * 10 Gbps. When the link comes up, the congestion management
3321          * values are re-calculated according to the actual link rate.
3322          */
3323         bp->link_vars.line_speed = SPEED_10000;
3324         bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3325 
3326         /* Only the PMF sets the HW */
3327         if (bp->port.pmf)
3328                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3329 
3330         /* init Event Queue - the PCI bus guarantees correct endianness */
3331         eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3332         eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3333         eq_data.producer = bp->eq_prod;
3334         eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3335         eq_data.sb_id = DEF_SB_ID;
3336         storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3337 }
3338 
3339 static void bnx2x_e1h_disable(struct bnx2x *bp)
3340 {
3341         int port = BP_PORT(bp);
3342 
3343         bnx2x_tx_disable(bp);
3344 
3345         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3346 }
3347 
3348 static void bnx2x_e1h_enable(struct bnx2x *bp)
3349 {
3350         int port = BP_PORT(bp);
3351 
3352         if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
3353                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
3354 
3355         /* Tx queues should only be re-enabled */
3356         netif_tx_wake_all_queues(bp->dev);
3357 
3358         /*
3359          * Do not call netif_carrier_on here, since it will be called
3360          * while checking the link state if the link is up
3361          */
3362 }
3363 
3364 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3365 
3366 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3367 {
3368         struct eth_stats_info *ether_stat =
3369                 &bp->slowpath->drv_info_to_mcp.ether_stat;
3370         struct bnx2x_vlan_mac_obj *mac_obj =
3371                 &bp->sp_objs->mac_obj;
3372         int i;
3373 
3374         strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3375                 ETH_STAT_INFO_VERSION_LEN);
3376 
3377         /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
3378          * mac_local field of the ether_stat struct. The base address is offset
3379          * by 2 bytes because each field is 8 bytes wide while a mac address is
3380          * only 6 bytes long. Likewise, the stride for the get_n_elements
3381          * function is padded by 2 bytes to bridge the gap between a 6-byte mac
3382          * and the 8 bytes allocated for it in the ether_stat struct, so the
3383          * macs land in their proper positions.
3384          */
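        /* Resulting layout of each 8-byte slot (assuming MAC_PAD == 2):
         *
         *   [pad][pad][b0][b1][b2][b3][b4][b5]
         */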
3385         for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3386                 memset(ether_stat->mac_local + i, 0,
3387                        sizeof(ether_stat->mac_local[0]));
3388         mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3389                                 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3390                                 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3391                                 ETH_ALEN);
3392         ether_stat->mtu_size = bp->dev->mtu;
3393         if (bp->dev->features & NETIF_F_RXCSUM)
3394                 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3395         if (bp->dev->features & NETIF_F_TSO)
3396                 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3397         ether_stat->feature_flags |= bp->common.boot_mode;
3398 
3399         ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3400 
3401         ether_stat->txq_size = bp->tx_ring_size;
3402         ether_stat->rxq_size = bp->rx_ring_size;
3403 
3404 #ifdef CONFIG_BNX2X_SRIOV
3405         ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
3406 #endif
3407 }
3408 
3409 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3410 {
3411         struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3412         struct fcoe_stats_info *fcoe_stat =
3413                 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3414 
3415         if (!CNIC_LOADED(bp))
3416                 return;
3417 
3418         memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3419 
3420         fcoe_stat->qos_priority =
3421                 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3422 
3423         /* insert FCoE stats from ramrod response */
3424         if (!NO_FCOE(bp)) {
3425                 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3426                         &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3427                         tstorm_queue_statistics;
3428 
3429                 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3430                         &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3431                         xstorm_queue_statistics;
3432 
3433                 struct fcoe_statistics_params *fw_fcoe_stat =
3434                         &bp->fw_stats_data->fcoe;
3435 
3436                 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3437                           fcoe_stat->rx_bytes_lo,
3438                           fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3439 
3440                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3441                           fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3442                           fcoe_stat->rx_bytes_lo,
3443                           fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3444 
3445                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3446                           fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3447                           fcoe_stat->rx_bytes_lo,
3448                           fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3449 
3450                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3451                           fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3452                           fcoe_stat->rx_bytes_lo,
3453                           fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3454 
3455                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3456                           fcoe_stat->rx_frames_lo,
3457                           fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3458 
3459                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3460                           fcoe_stat->rx_frames_lo,
3461                           fcoe_q_tstorm_stats->rcv_ucast_pkts);
3462 
3463                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3464                           fcoe_stat->rx_frames_lo,
3465                           fcoe_q_tstorm_stats->rcv_bcast_pkts);
3466 
3467                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3468                           fcoe_stat->rx_frames_lo,
3469                           fcoe_q_tstorm_stats->rcv_mcast_pkts);
3470 
3471                 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3472                           fcoe_stat->tx_bytes_lo,
3473                           fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3474 
3475                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3476                           fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3477                           fcoe_stat->tx_bytes_lo,
3478                           fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3479 
3480                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3481                           fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3482                           fcoe_stat->tx_bytes_lo,
3483                           fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3484 
3485                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3486                           fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3487                           fcoe_stat->tx_bytes_lo,
3488                           fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3489 
3490                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3491                           fcoe_stat->tx_frames_lo,
3492                           fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3493 
3494                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3495                           fcoe_stat->tx_frames_lo,
3496                           fcoe_q_xstorm_stats->ucast_pkts_sent);
3497 
3498                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3499                           fcoe_stat->tx_frames_lo,
3500                           fcoe_q_xstorm_stats->bcast_pkts_sent);
3501 
3502                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3503                           fcoe_stat->tx_frames_lo,
3504                           fcoe_q_xstorm_stats->mcast_pkts_sent);
3505         }
3506 
3507         /* ask L5 driver to add data to the struct */
3508         bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3509 }
3510 
3511 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3512 {
3513         struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3514         struct iscsi_stats_info *iscsi_stat =
3515                 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3516 
3517         if (!CNIC_LOADED(bp))
3518                 return;
3519 
3520         memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3521                ETH_ALEN);
3522 
3523         iscsi_stat->qos_priority =
3524                 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3525 
3526         /* ask L5 driver to add data to the struct */
3527         bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3528 }
3529 
3530 /* called due to MCP event (on pmf):
3531  *      reread the new bandwidth configuration
3532  *      configure the FW
3533  *      notify the other functions about the change
3534  */
3535 static void bnx2x_config_mf_bw(struct bnx2x *bp)
3536 {
3537         if (bp->link_vars.link_up) {
3538                 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3539                 bnx2x_link_sync_notify(bp);
3540         }
3541         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3542 }
3543 
3544 static void bnx2x_set_mf_bw(struct bnx2x *bp)
3545 {
3546         bnx2x_config_mf_bw(bp);
3547         bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3548 }
3549 
3550 static void bnx2x_handle_eee_event(struct bnx2x *bp)
3551 {
3552         DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3553         bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3554 }
3555 
3556 #define BNX2X_UPDATE_DRV_INFO_IND_LENGTH        (20)
3557 #define BNX2X_UPDATE_DRV_INFO_IND_COUNT         (25)
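/* i.e. poll the management indication up to 25 times at 20 ms intervals,
 * for a total wait of at most 25 * 20 ms == 500 ms
 */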
3558 
3559 static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3560 {
3561         enum drv_info_opcode op_code;
3562         u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3563         bool release = false;
3564         int wait;
3565 
3566         /* if drv_info version supported by MFW doesn't match - send NACK */
3567         if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3568                 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3569                 return;
3570         }
3571 
3572         op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3573                   DRV_INFO_CONTROL_OP_CODE_SHIFT;
3574 
3575         /* Must prevent other flows from accessing drv_info_to_mcp */
3576         mutex_lock(&bp->drv_info_mutex);
3577 
3578         memset(&bp->slowpath->drv_info_to_mcp, 0,
3579                sizeof(union drv_info_to_mcp));
3580 
3581         switch (op_code) {
3582         case ETH_STATS_OPCODE:
3583                 bnx2x_drv_info_ether_stat(bp);
3584                 break;
3585         case FCOE_STATS_OPCODE:
3586                 bnx2x_drv_info_fcoe_stat(bp);
3587                 break;
3588         case ISCSI_STATS_OPCODE:
3589                 bnx2x_drv_info_iscsi_stat(bp);
3590                 break;
3591         default:
3592                 /* if op code isn't supported - send NACK */
3593                 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3594                 goto out;
3595         }
3596 
3597         /* if we got drv_info attn from MFW then these fields are defined in
3598          * shmem2 for sure
3599          */
3600         SHMEM2_WR(bp, drv_info_host_addr_lo,
3601                 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3602         SHMEM2_WR(bp, drv_info_host_addr_hi,
3603                 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3604 
3605         bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3606 
3607         /* Since management may want both this and get_driver_version,
3608          * we need to wait until management notifies us that it has
3609          * finished utilizing the buffer.
3610          */
3611         if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
3612                 DP(BNX2X_MSG_MCP, "Management does not support indication\n");
3613         } else if (!bp->drv_info_mng_owner) {
3614                 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
3615 
3616                 for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
3617                         u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
3618 
3619                         /* Management is done; need to clear indication */
3620                         if (indication & bit) {
3621                                 SHMEM2_WR(bp, mfw_drv_indication,
3622                                           indication & ~bit);
3623                                 release = true;
3624                                 break;
3625                         }
3626 
3627                         msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
3628                 }
3629         }
3630         if (!release) {
3631                 DP(BNX2X_MSG_MCP, "Management did not release indication\n");
3632                 bp->drv_info_mng_owner = true;
3633         }
3634 
3635 out:
3636         mutex_unlock(&bp->drv_info_mutex);
3637 }
3638 
3639 static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
3640 {
3641         u8 vals[4];
3642         int i = 0;
3643 
3644         if (bnx2x_format) {
3645                 i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
3646                            &vals[0], &vals[1], &vals[2], &vals[3]);
3647                 if (i > 0)
3648                         vals[0] -= '0';
3649         } else {
3650                 i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
3651                            &vals[0], &vals[1], &vals[2], &vals[3]);
3652         }
3653 
3654         while (i < 4)
3655                 vals[i++] = 0;
3656 
3657         return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
3658 }
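/* Worked examples (hypothetical version strings):
 *
 *   bnx2x format: "1.712.30"  -> vals = {7, 12, 30, 0} -> 0x070c1e00
 *   plain format: "2.11.3.0"  -> vals = {2, 11, 3, 0}  -> 0x020b0300
 */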
3659 
3660 void bnx2x_update_mng_version(struct bnx2x *bp)
3661 {
3662         u32 iscsiver = DRV_VER_NOT_LOADED;
3663         u32 fcoever = DRV_VER_NOT_LOADED;
3664         u32 ethver = DRV_VER_NOT_LOADED;
3665         int idx = BP_FW_MB_IDX(bp);
3666         u8 *version;
3667 
3668         if (!SHMEM2_HAS(bp, func_os_drv_ver))
3669                 return;
3670 
3671         mutex_lock(&bp->drv_info_mutex);
3672         /* Must not proceed while `bnx2x_handle_drv_info_req' may still be using the buffer */
3673         if (bp->drv_info_mng_owner)
3674                 goto out;
3675 
3676         if (bp->state != BNX2X_STATE_OPEN)
3677                 goto out;
3678 
3679         /* Parse ethernet driver version */
3680         ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3681         if (!CNIC_LOADED(bp))
3682                 goto out;
3683 
3684         /* Try getting storage driver version via cnic */
3685         memset(&bp->slowpath->drv_info_to_mcp, 0,
3686                sizeof(union drv_info_to_mcp));
3687         bnx2x_drv_info_iscsi_stat(bp);
3688         version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
3689         iscsiver = bnx2x_update_mng_version_utility(version, false);
3690 
3691         memset(&bp->slowpath->drv_info_to_mcp, 0,
3692                sizeof(union drv_info_to_mcp));
3693         bnx2x_drv_info_fcoe_stat(bp);
3694         version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
3695         fcoever = bnx2x_update_mng_version_utility(version, false);
3696 
3697 out:
3698         SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
3699         SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
3700         SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
3701 
3702         mutex_unlock(&bp->drv_info_mutex);
3703 
3704         DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
3705            ethver, iscsiver, fcoever);
3706 }
3707 
3708 void bnx2x_update_mfw_dump(struct bnx2x *bp)
3709 {
3710         u32 drv_ver;
3711         u32 valid_dump;
3712 
3713         if (!SHMEM2_HAS(bp, drv_info))
3714                 return;
3715 
3716         /* Update Driver load time, possibly broken in y2038 */
3717         SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
3718 
3719         drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3720         SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
3721 
3722         SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
3723 
3724         /* Check & notify On-Chip dump. */
3725         valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
3726 
3727         if (valid_dump & FIRST_DUMP_VALID)
3728                 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
3729 
3730         if (valid_dump & SECOND_DUMP_VALID)
3731                 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
3732 }
3733 
3734 static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
3735 {
3736         u32 cmd_ok, cmd_fail;
3737 
3738         /* sanity */
3739         if (event & DRV_STATUS_DCC_EVENT_MASK &&
3740             event & DRV_STATUS_OEM_EVENT_MASK) {
3741                 BNX2X_ERR("Received simultaneous events %08x\n", event);
3742                 return;
3743         }
3744 
3745         if (event & DRV_STATUS_DCC_EVENT_MASK) {
3746                 cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
3747                 cmd_ok = DRV_MSG_CODE_DCC_OK;
3748         } else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ {
3749                 cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
3750                 cmd_ok = DRV_MSG_CODE_OEM_OK;
3751         }
3752 
3753         DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
3754 
3755         if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3756                      DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
3757                 /* This is the only place besides the function initialization
3758                  * where the bp->flags can change so it is done without any
3759                  * locks
3760                  */
3761                 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3762                         DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3763                         bp->flags |= MF_FUNC_DIS;
3764 
3765                         bnx2x_e1h_disable(bp);
3766                 } else {
3767                         DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3768                         bp->flags &= ~MF_FUNC_DIS;
3769 
3770                         bnx2x_e1h_enable(bp);
3771                 }
3772                 event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3773                            DRV_STATUS_OEM_DISABLE_ENABLE_PF);
3774         }
3775 
3776         if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3777                      DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
3778                 bnx2x_config_mf_bw(bp);
3779                 event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3780                            DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
3781         }
3782 
3783         /* Report results to MCP */
3784         if (event)
3785                 bnx2x_fw_command(bp, cmd_fail, 0);
3786         else
3787                 bnx2x_fw_command(bp, cmd_ok, 0);
3788 }
3789 
3790 /* must be called under the spq lock */
3791 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3792 {
3793         struct eth_spe *next_spe = bp->spq_prod_bd;
3794 
3795         if (bp->spq_prod_bd == bp->spq_last_bd) {
3796                 bp->spq_prod_bd = bp->spq;
3797                 bp->spq_prod_idx = 0;
3798                 DP(BNX2X_MSG_SP, "end of spq\n");
3799         } else {
3800                 bp->spq_prod_bd++;
3801                 bp->spq_prod_idx++;
3802         }
3803         return next_spe;
3804 }
3805 
3806 /* must be called under the spq lock */
3807 static void bnx2x_sp_prod_update(struct bnx2x *bp)
3808 {
3809         int func = BP_FUNC(bp);
3810 
3811         /*
3812          * Make sure that BD data is updated before writing the producer:
3813          * BD data is written to the memory, the producer is read from the
3814          * memory, thus we need a full memory barrier to ensure the ordering.
3815          */
3816         mb();
3817 
3818         REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3819                  bp->spq_prod_idx);
3820         mmiowb();
3821 }
3822 
3823 /**
3824  * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3825  *
3826  * @cmd:        command to check
3827  * @cmd_type:   command type
3828  */
3829 static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3830 {
3831         if ((cmd_type == NONE_CONNECTION_TYPE) ||
3832             (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3833             (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3834             (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3835             (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3836             (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3837             (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3838                 return true;
3839         else
3840                 return false;
3841 }
3842 
3843 /**
3844  * bnx2x_sp_post - place a single command on an SP ring
3845  *
3846  * @bp:         driver handle
3847  * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
3848  * @cid:        SW CID the command is related to
3849  * @data_hi:    command private data address (high 32 bits)
3850  * @data_lo:    command private data address (low 32 bits)
3851  * @cmd_type:   command type (e.g. NONE, ETH)
3852  *
3853  * SP data is handled as if it's always an address pair, thus data fields are
3854  * not swapped to little endian in upper functions. Instead this function swaps
3855  * data as if it's two u32 fields.
3856  */
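/* For example, callers typically split a 64-bit DMA address with the
 * U64_HI()/U64_LO() macros (a sketch; cmd, cid and type stand in for a
 * caller's actual values):
 *
 *	bnx2x_sp_post(bp, cmd, cid, U64_HI(mapping), U64_LO(mapping), type);
 */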
3857 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3858                   u32 data_hi, u32 data_lo, int cmd_type)
3859 {
3860         struct eth_spe *spe;
3861         u16 type;
3862         bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3863 
3864 #ifdef BNX2X_STOP_ON_ERROR
3865         if (unlikely(bp->panic)) {
3866                 BNX2X_ERR("Can't post SP when there is panic\n");
3867                 return -EIO;
3868         }
3869 #endif
3870 
3871         spin_lock_bh(&bp->spq_lock);
3872 
3873         if (common) {
3874                 if (!atomic_read(&bp->eq_spq_left)) {
3875                         BNX2X_ERR("BUG! EQ ring full!\n");
3876                         spin_unlock_bh(&bp->spq_lock);
3877                         bnx2x_panic();
3878                         return -EBUSY;
3879                 }
3880         } else if (!atomic_read(&bp->cq_spq_left)) {
3881                 BNX2X_ERR("BUG! SPQ ring full!\n");
3882                 spin_unlock_bh(&bp->spq_lock);
3883                 bnx2x_panic();
3884                 return -EBUSY;
3885         }
3886 
3887         spe = bnx2x_sp_get_next(bp);
3888 
3889         /* CID needs the port number to be encoded in it */
3890         spe->hdr.conn_and_cmd_data =
3891                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3892                                     HW_CID(bp, cid));
3893 
3894         /* In some cases (mainly SRIOV-related ones), type may already
3895          * contain the func-id, so we add it here only if it's not
3896          * already set.
3897          */
3898         if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
3899                 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
3900                         SPE_HDR_CONN_TYPE;
3901                 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3902                          SPE_HDR_FUNCTION_ID);
3903         } else {
3904                 type = cmd_type;
3905         }
3906 
3907         spe->hdr.type = cpu_to_le16(type);
3908 
3909         spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3910         spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3911 
3912         /*
3913          * It's ok if the actual decrement is issued towards the memory
3914          * somewhere between the spin_lock and spin_unlock. Thus no
3915          * more explicit memory barrier is needed.
3916          */
3917         if (common)
3918                 atomic_dec(&bp->eq_spq_left);
3919         else
3920                 atomic_dec(&bp->cq_spq_left);
3921 
3922         DP(BNX2X_MSG_SP,
3923            "SPQE[%x] (%x:%x)  (cmd, common?) (%d,%d)  hw_cid %x  data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3924            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3925            (u32)(U64_LO(bp->spq_mapping) +
3926            (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3927            HW_CID(bp, cid), data_hi, data_lo, type,
3928            atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3929 
3930         bnx2x_sp_prod_update(bp);
3931         spin_unlock_bh(&bp->spq_lock);
3932         return 0;
3933 }
3934 
3935 /* acquire split MCP access lock register */
3936 static int bnx2x_acquire_alr(struct bnx2x *bp)
3937 {
3938         u32 j, val;
3939         int rc = 0;
3940 
3941         might_sleep();
3942         for (j = 0; j < 1000; j++) {
3943                 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3944                 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3945                 if (val & MCPR_ACCESS_LOCK_LOCK)
3946                         break;
3947 
3948                 usleep_range(5000, 10000);
3949         }
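        /* with usleep_range(5000, 10000) and up to 1000 iterations, the
         * loop above waits roughly 5-10 seconds in total before giving up
         */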
3950         if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3951                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3952                 rc = -EBUSY;
3953         }
3954 
3955         return rc;
3956 }
3957 
3958 /* release split MCP access lock register */
3959 static void bnx2x_release_alr(struct bnx2x *bp)
3960 {
3961         REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3962 }
3963 
3964 #define BNX2X_DEF_SB_ATT_IDX    0x0001
3965 #define BNX2X_DEF_SB_IDX        0x0002
3966 
3967 static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3968 {
3969         struct host_sp_status_block *def_sb = bp->def_status_blk;
3970         u16 rc = 0;
3971 
3972         barrier(); /* status block is written to by the chip */
3973         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3974                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3975                 rc |= BNX2X_DEF_SB_ATT_IDX;
3976         }
3977 
3978         if (bp->def_idx != def_sb->sp_sb.running_index) {
3979                 bp->def_idx = def_sb->sp_sb.running_index;
3980                 rc |= BNX2X_DEF_SB_IDX;
3981         }
3982 
3983         /* Do not reorder: indices reading should complete before handling */
3984         barrier();
3985         return rc;
3986 }
3987 
3988 /*
3989  * slow path service functions
3990  */
3991 
3992 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3993 {
3994         int port = BP_PORT(bp);
3995         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3996                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
3997         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3998                                        NIG_REG_MASK_INTERRUPT_PORT0;
3999         u32 aeu_mask;
4000         u32 nig_mask = 0;
4001         u32 reg_addr;
4002 
4003         if (bp->attn_state & asserted)
4004                 BNX2X_ERR("IGU ERROR\n");
4005 
4006         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4007         aeu_mask = REG_RD(bp, aeu_addr);
4008 
4009         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
4010            aeu_mask, asserted);
4011         aeu_mask &= ~(asserted & 0x3ff);
4012         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4013 
4014         REG_WR(bp, aeu_addr, aeu_mask);
4015         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4016 
4017         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4018         bp->attn_state |= asserted;
4019         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4020 
4021         if (asserted & ATTN_HARD_WIRED_MASK) {
4022                 if (asserted & ATTN_NIG_FOR_FUNC) {
4023 
4024                         bnx2x_acquire_phy_lock(bp);
4025 
4026                         /* save nig interrupt mask */
4027                         nig_mask = REG_RD(bp, nig_int_mask_addr);
4028 
4029                         /* If nig_mask is not set, no need to call the update
4030                          * function.
4031                          */
4032                         if (nig_mask) {
4033                                 REG_WR(bp, nig_int_mask_addr, 0);
4034 
4035                                 bnx2x_link_attn(bp);
4036                         }
4037 
4038                         /* handle unicore attn? */
4039                 }
4040                 if (asserted & ATTN_SW_TIMER_4_FUNC)
4041                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4042 
4043                 if (asserted & GPIO_2_FUNC)
4044                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4045 
4046                 if (asserted & GPIO_3_FUNC)
4047                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4048 
4049                 if (asserted & GPIO_4_FUNC)
4050                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4051 
4052                 if (port == 0) {
4053                         if (asserted & ATTN_GENERAL_ATTN_1) {
4054                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4055                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4056                         }
4057                         if (asserted & ATTN_GENERAL_ATTN_2) {
4058                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4059                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4060                         }
4061                         if (asserted & ATTN_GENERAL_ATTN_3) {
4062                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4063                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4064                         }
4065                 } else {
4066                         if (asserted & ATTN_GENERAL_ATTN_4) {
4067                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4068                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4069                         }
4070                         if (asserted & ATTN_GENERAL_ATTN_5) {
4071                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4072                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4073                         }
4074                         if (asserted & ATTN_GENERAL_ATTN_6) {
4075                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4076                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4077                         }
4078                 }
4079 
4080         } /* if hardwired */
4081 
4082         if (bp->common.int_block == INT_BLOCK_HC)
4083                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4084                             COMMAND_REG_ATTN_BITS_SET);
4085         else
4086                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
4087 
4088         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
4089            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4090         REG_WR(bp, reg_addr, asserted);
4091 
4092         /* now set back the mask */
4093         if (asserted & ATTN_NIG_FOR_FUNC) {
4094                 /* Verify that IGU ack through BAR was written before restoring
4095                  * NIG mask. This loop should exit after 2-3 iterations max.
4096                  */
4097                 if (bp->common.int_block != INT_BLOCK_HC) {
4098                         u32 cnt = 0, igu_acked;
4099                         do {
4100                                 igu_acked = REG_RD(bp,
4101                                                    IGU_REG_ATTENTION_ACK_BITS);
4102                         } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
4103                                  (++cnt < MAX_IGU_ATTN_ACK_TO));
4104                         if (!igu_acked)
4105                                 DP(NETIF_MSG_HW,
4106                                    "Failed to verify IGU ack on time\n");
4107                         barrier();
4108                 }
4109                 REG_WR(bp, nig_int_mask_addr, nig_mask);
4110                 bnx2x_release_phy_lock(bp);
4111         }
4112 }
4113 
4114 static void bnx2x_fan_failure(struct bnx2x *bp)
4115 {
4116         int port = BP_PORT(bp);
4117         u32 ext_phy_config;
4118         /* mark the failure */
4119         ext_phy_config =
4120                 SHMEM_RD(bp,
4121                          dev_info.port_hw_config[port].external_phy_config);
4122 
4123         ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4124         ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4125         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
4126                  ext_phy_config);
4127 
4128         /* log the failure */
4129         netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shut down the card to prevent permanent damage.\n"
4130                             "Please contact OEM Support for assistance.\n");
4131 
4132         /* Schedule device reset (unload).
4133          * Some boards consume enough power while the driver is up to
4134          * overheat if the fan fails.
4135          */
4136         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
4137 }
4138 
4139 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4140 {
4141         int port = BP_PORT(bp);
4142         int reg_offset;
4143         u32 val;
4144 
4145         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4146                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4147 
4148         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4149 
4150                 val = REG_RD(bp, reg_offset);
4151                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4152                 REG_WR(bp, reg_offset, val);
4153 
4154                 BNX2X_ERR("SPIO5 hw attention\n");
4155 
4156                 /* Fan failure attention */
4157                 bnx2x_hw_reset_phy(&bp->link_params);
4158                 bnx2x_fan_failure(bp);
4159         }
4160 
4161         if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
4162                 bnx2x_acquire_phy_lock(bp);
4163                 bnx2x_handle_module_detect_int(&bp->link_params);
4164                 bnx2x_release_phy_lock(bp);
4165         }
4166 
4167         if (attn & HW_INTERRUT_ASSERT_SET_0) {
4168 
4169                 val = REG_RD(bp, reg_offset);
4170                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
4171                 REG_WR(bp, reg_offset, val);
4172 
4173                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
4174                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
4175                 bnx2x_panic();
4176         }
4177 }
4178 
4179 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4180 {
4181         u32 val;
4182 
4183         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
4184 
4185                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4186                 BNX2X_ERR("DB hw attention 0x%x\n", val);
4187                 /* DORQ discard attention */
4188                 if (val & 0x2)
4189                         BNX2X_ERR("FATAL error from DORQ\n");
4190         }
4191 
4192         if (attn & HW_INTERRUT_ASSERT_SET_1) {
4193 
4194                 int port = BP_PORT(bp);
4195                 int reg_offset;
4196 
4197                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
4198                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
4199 
4200                 val = REG_RD(bp, reg_offset);
4201                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
4202                 REG_WR(bp, reg_offset, val);
4203 
4204                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
4205                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
4206                 bnx2x_panic();
4207         }
4208 }
4209 
4210 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4211 {
4212         u32 val;
4213 
4214         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4215 
4216                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4217                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4218                 /* CFC error attention */
4219                 if (val & 0x2)
4220                         BNX2X_ERR("FATAL error from CFC\n");
4221         }
4222 
4223         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4224                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4225                 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
4226                 /* RQ_USDMDP_FIFO_OVERFLOW */
4227                 if (val & 0x18000)
4228                         BNX2X_ERR("FATAL error from PXP\n");
4229 
4230                 if (!CHIP_IS_E1x(bp)) {
4231                         val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
4232                         BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
4233                 }
4234         }
4235 
4236         if (attn & HW_INTERRUT_ASSERT_SET_2) {
4237 
4238                 int port = BP_PORT(bp);
4239                 int reg_offset;
4240 
4241                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
4242                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
4243 
4244                 val = REG_RD(bp, reg_offset);
4245                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
4246                 REG_WR(bp, reg_offset, val);
4247 
4248                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
4249                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
4250                 bnx2x_panic();
4251         }
4252 }
4253 
4254 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4255 {
4256         u32 val;
4257 
4258         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4259 
4260                 if (attn & BNX2X_PMF_LINK_ASSERT) {
4261                         int func = BP_FUNC(bp);
4262 
4263                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4264                         bnx2x_read_mf_cfg(bp);
4265                         bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
4266                                         func_mf_config[BP_ABS_FUNC(bp)].config);
4267                         val = SHMEM_RD(bp,
4268                                        func_mb[BP_FW_MB_IDX(bp)].drv_status);
4269 
4270                         if (val & (DRV_STATUS_DCC_EVENT_MASK |
4271                                    DRV_STATUS_OEM_EVENT_MASK))
4272                                 bnx2x_oem_event(bp,
4273                                         (val & (DRV_STATUS_DCC_EVENT_MASK |
4274                                                 DRV_STATUS_OEM_EVENT_MASK)));
4275 
4276                         if (val & DRV_STATUS_SET_MF_BW)
4277                                 bnx2x_set_mf_bw(bp);
4278 
4279                         if (val & DRV_STATUS_DRV_INFO_REQ)
4280                                 bnx2x_handle_drv_info_req(bp);
4281 
4282                         if (val & DRV_STATUS_VF_DISABLED)
4283                                 bnx2x_schedule_iov_task(bp,
4284                                                         BNX2X_IOV_HANDLE_FLR);
4285 
4286                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4287                                 bnx2x_pmf_update(bp);
4288 
4289                         if (bp->port.pmf &&
4290                             (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
4291                                 bp->dcbx_enabled > 0)
4292                                 /* start dcbx state machine */
4293                                 bnx2x_dcbx_set_params(bp,
4294                                         BNX2X_DCBX_STATE_NEG_RECEIVED);
4295                         if (val & DRV_STATUS_AFEX_EVENT_MASK)
4296                                 bnx2x_handle_afex_cmd(bp,
4297                                         val & DRV_STATUS_AFEX_EVENT_MASK);
4298                         if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
4299                                 bnx2x_handle_eee_event(bp);
4300 
4301                         if (val & DRV_STATUS_OEM_UPDATE_SVID)
4302                                 bnx2x_handle_update_svid_cmd(bp);
4303 
4304                         if (bp->link_vars.periodic_flags &
4305                             PERIODIC_FLAGS_LINK_EVENT) {
4306                                 /* sync with link */
4307                                 bnx2x_acquire_phy_lock(bp);
4308                                 bp->link_vars.periodic_flags &=
4309                                         ~PERIODIC_FLAGS_LINK_EVENT;
4310                                 bnx2x_release_phy_lock(bp);
4311                                 if (IS_MF(bp))
4312                                         bnx2x_link_sync_notify(bp);
4313                                 bnx2x_link_report(bp);
4314                         }
4315                         /* Always call it here: bnx2x_link_report()
4316                          * prevents duplicate link indications.
4317                          */
4318                         bnx2x__link_status_update(bp);
4319                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
4320 
4321                         BNX2X_ERR("MC assert!\n");
4322                         bnx2x_mc_assert(bp);
4323                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4324                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4325                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4326                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4327                         bnx2x_panic();
4328 
4329                 } else if (attn & BNX2X_MCP_ASSERT) {
4330 
4331                         BNX2X_ERR("MCP assert!\n");
4332                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4333                         bnx2x_fw_dump(bp);
4334 
4335                 } else
4336                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4337         }
4338 
4339         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4340                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4341                 if (attn & BNX2X_GRC_TIMEOUT) {
4342                         val = CHIP_IS_E1(bp) ? 0 :
4343                                         REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4344                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
4345                 }
4346                 if (attn & BNX2X_GRC_RSV) {
4347                         val = CHIP_IS_E1(bp) ? 0 :
4348                                         REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4349                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
4350                 }
4351                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4352         }
4353 }
4354 
4355 /*
4356  * Bits map:
4357  * 0-7   - Engine0 load counter.
4358  * 8-15  - Engine1 load counter.
4359  * 16    - Engine0 RESET_IN_PROGRESS bit.
4360  * 17    - Engine1 RESET_IN_PROGRESS bit.
4361  * 18    - Engine0 ONE_IS_LOADED. Set when there is at least one active function
4362  *         on the engine
4363  * 19    - Engine1 ONE_IS_LOADED.
4364  * 20    - Chip reset flow bit. When set, a non-leader function must wait for
4365  *         both engines' leaders to complete (check both RESET_IN_PROGRESS
4366  *         bits, not just the one belonging to its own engine).
4367  *
4368  */
4369 #define BNX2X_RECOVERY_GLOB_REG         MISC_REG_GENERIC_POR_1
4370 
4371 #define BNX2X_PATH0_LOAD_CNT_MASK       0x000000ff
4372 #define BNX2X_PATH0_LOAD_CNT_SHIFT      0
4373 #define BNX2X_PATH1_LOAD_CNT_MASK       0x0000ff00
4374 #define BNX2X_PATH1_LOAD_CNT_SHIFT      8
4375 #define BNX2X_PATH0_RST_IN_PROG_BIT     0x00010000
4376 #define BNX2X_PATH1_RST_IN_PROG_BIT     0x00020000
4377 #define BNX2X_GLOBAL_RESET_BIT          0x00040000
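
/* Editor's sketch (not driver code): decoding the fields above from a
 * hypothetical BNX2X_RECOVERY_GLOB_REG snapshot of 0x00050a03:
 * engine0 load counter = 0x03 (PF0 and PF1 loaded), engine1 load counter =
 * 0x0a (PF1 and PF3 loaded), PATH0 RESET_IN_PROGRESS (bit 16) and
 * GLOBAL_RESET (bit 18) set.
 *
 *	u32 val   = 0x00050a03;				(assumed snapshot)
 *	u32 eng1  = (val & BNX2X_PATH1_LOAD_CNT_MASK) >>
 *		    BNX2X_PATH1_LOAD_CNT_SHIFT;		== 0x0a
 *	bool glob = !!(val & BNX2X_GLOBAL_RESET_BIT);	== true
 */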
4378 
4379 /*
4380  * Set the GLOBAL_RESET bit.
4381  *
4382  * Should be run under rtnl lock
4383  */
4384 void bnx2x_set_reset_global(struct bnx2x *bp)
4385 {
4386         u32 val;
4387         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4388         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4389         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4390         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4391 }
4392 
4393 /*
4394  * Clear the GLOBAL_RESET bit.
4395  *
4396  * Should be run under rtnl lock
4397  */
4398 static void bnx2x_clear_reset_global(struct bnx2x *bp)
4399 {
4400         u32 val;
4401         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4402         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4403         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4404         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4405 }
4406 
4407 /*
4408  * Checks the GLOBAL_RESET bit.
4409  *
4410  * should be run under rtnl lock
4411  */
4412 static bool bnx2x_reset_is_global(struct bnx2x *bp)
4413 {
4414         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4415 
4416         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4417         return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4418 }
4419 
4420 /*
4421  * Clear RESET_IN_PROGRESS bit for the current engine.
4422  *
4423  * Should be run under rtnl lock
4424  */
4425 static void bnx2x_set_reset_done(struct bnx2x *bp)
4426 {
4427         u32 val;
4428         u32 bit = BP_PATH(bp) ?
4429                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4430         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4431         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4432 
4433         /* Clear the bit */
4434         val &= ~bit;
4435         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4436 
4437         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4438 }
4439 
4440 /*
4441  * Set RESET_IN_PROGRESS for the current engine.
4442  *
4443  * should be run under rtnl lock
4444  */
4445 void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4446 {
4447         u32 val;
4448         u32 bit = BP_PATH(bp) ?
4449                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4450         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4451         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4452 
4453         /* Set the bit */
4454         val |= bit;
4455         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4456         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4457 }
4458 
4459 /*
4460  * Checks the RESET_IN_PROGRESS bit for the given engine.
4461  * should be run under rtnl lock
4462  */
4463 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4464 {
4465         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4466         u32 bit = engine ?
4467                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4468 
4469         /* return false if bit is set */
4470         return (val & bit) ? false : true;
4471 }
4472 
4473 /*
4474  * set pf load for the current pf.
4475  *
4476  * should be run under rtnl lock
4477  */
4478 void bnx2x_set_pf_load(struct bnx2x *bp)
4479 {
4480         u32 val1, val;
4481         u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4482                              BNX2X_PATH0_LOAD_CNT_MASK;
4483         u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4484                              BNX2X_PATH0_LOAD_CNT_SHIFT;
4485 
4486         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4487         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4488 
4489         DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
4490 
4491         /* get the current counter value */
4492         val1 = (val & mask) >> shift;
4493 
4494         /* set bit of that PF */
4495         val1 |= (1 << bp->pf_num);
4496 
4497         /* clear the old value */
4498         val &= ~mask;
4499 
4500         /* set the new one */
4501         val |= ((val1 << shift) & mask);
4502 
4503         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4504         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4505 }
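
/* Editor's sketch of the read-modify-write above, with assumed values
 * (pf_num = 2 on path 1; old register value 0x00000100, i.e. engine1
 * counter 0x01 with only PF0 loaded):
 *
 *	val1 = (0x00000100 & BNX2X_PATH1_LOAD_CNT_MASK) >> 8;	== 0x01
 *	val1 |= (1 << 2);					== 0x05
 *	val  = (0x00000100 & ~BNX2X_PATH1_LOAD_CNT_MASK) |
 *	       ((0x05 << 8) & BNX2X_PATH1_LOAD_CNT_MASK);	== 0x00000500
 *
 * The HW lock keeps this read-modify-write atomic across PFs and the MCP.
 */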
4506 
4507 /**
4508  * bnx2x_clear_pf_load - clear pf load mark
4509  *
4510  * @bp:         driver handle
4511  *
4512  * Should be run under rtnl lock.
4513  * Decrements the load counter for the current engine. Returns
4514  * whether other functions are still loaded
4515  */
4516 bool bnx2x_clear_pf_load(struct bnx2x *bp)
4517 {
4518         u32 val1, val;
4519         u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4520                              BNX2X_PATH0_LOAD_CNT_MASK;
4521         u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4522                              BNX2X_PATH0_LOAD_CNT_SHIFT;
4523 
4524         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4525         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4526         DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
4527 
4528         /* get the current counter value */
4529         val1 = (val & mask) >> shift;
4530 
4531         /* clear bit of that PF */
4532         val1 &= ~(1 << bp->pf_num);
4533 
4534         /* clear the old value */
4535         val &= ~mask;
4536 
4537         /* set the new one */
4538         val |= ((val1 << shift) & mask);
4539 
4540         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4541         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4542         return val1 != 0;
4543 }
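
/* Editor's note, continuing the assumed example above: if PF2 now unloads,
 * the engine1 counter goes 0x05 -> 0x01 and bnx2x_clear_pf_load() returns
 * true because PF0 is still loaded on this path; it returns false only
 * once the counter reaches zero (no PFs left on the engine).
 */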
4544 
4545 /*
4546  * Read the load status for the current engine.
4547  *
4548  * should be run under rtnl lock
4549  */
4550 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4551 {
4552         u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4553                              BNX2X_PATH0_LOAD_CNT_MASK);
4554         u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4555                              BNX2X_PATH0_LOAD_CNT_SHIFT);
4556         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4557 
4558         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4559 
4560         val = (val & mask) >> shift;
4561 
4562         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4563            engine, val);
4564 
4565         return val != 0;
4566 }
4567 
4568 static void _print_parity(struct bnx2x *bp, u32 reg)
4569 {
4570         pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4571 }
4572 
4573 static void _print_next_block(int idx, const char *blk)
4574 {
4575         pr_cont("%s%s", idx ? ", " : "", blk);
4576 }
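
/* Editor's note: together these helpers build one comma-separated
 * pr_cont() line under the "Parity errors detected in blocks: " prefix
 * printed by bnx2x_parity_attn(), e.g. (register values hypothetical):
 *
 *	Parity errors detected in blocks: BRB [0x00000001], TSDM [0x00000010]
 *
 * _print_next_block() emits the ", " separator for every block after the
 * first; _print_parity() appends that block's parity status register.
 */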
4577 
4578 static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4579                                             int *par_num, bool print)
4580 {
4581         u32 cur_bit;
4582         bool res;
4583         int i;
4584 
4585         res = false;
4586 
4587         for (i = 0; sig; i++) {
4588                 cur_bit = (0x1UL << i);
4589                 if (sig & cur_bit) {
4590                         res = true; /* Each set bit is a real error */
4591 
4592                         if (print) {
4593                                 switch (cur_bit) {
4594                                 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4595                                         _print_next_block((*par_num)++, "BRB");
4596                                         _print_parity(bp,
4597                                                       BRB1_REG_BRB1_PRTY_STS);
4598                                         break;
4599                                 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4600                                         _print_next_block((*par_num)++,
4601                                                           "PARSER");
4602                                         _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4603                                         break;
4604                                 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4605                                         _print_next_block((*par_num)++, "TSDM");
4606                                         _print_parity(bp,
4607                                                       TSDM_REG_TSDM_PRTY_STS);
4608                                         break;
4609                                 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4610                                         _print_next_block((*par_num)++,
4611                                                           "SEARCHER");
4612                                         _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4613                                         break;
4614                                 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4615                                         _print_next_block((*par_num)++, "TCM");
4616                                         _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4617                                         break;
4618                                 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4619                                         _print_next_block((*par_num)++,
4620                                                           "TSEMI");
4621                                         _print_parity(bp,
4622                                                       TSEM_REG_TSEM_PRTY_STS_0);
4623                                         _print_parity(bp,
4624                                                       TSEM_REG_TSEM_PRTY_STS_1);
4625                                         break;
4626                                 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4627                                         _print_next_block((*par_num)++, "XPB");
4628                                         _print_parity(bp, GRCBASE_XPB +
4629                                                           PB_REG_PB_PRTY_STS);
4630                                         break;
4631                                 }
4632                         }
4633 
4634                         /* Clear the bit */
4635                         sig &= ~cur_bit;
4636                 }
4637         }
4638 
4639         return res;
4640 }
4641 
4642 static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4643                                             int *par_num, bool *global,
4644                                             bool print)
4645 {
4646         u32 cur_bit;
4647         bool res;
4648         int i;
4649 
4650         res = false;
4651 
4652         for (i = 0; sig; i++) {
4653                 cur_bit = (0x1UL << i);
4654                 if (sig & cur_bit) {
4655                         res = true; /* Each set bit is a real error */
4656                         switch (cur_bit) {
4657                         case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4658                                 if (print) {
4659                                         _print_next_block((*par_num)++, "PBF");
4660                                         _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4661                                 }
4662                                 break;
4663                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4664                                 if (print) {
4665                                         _print_next_block((*par_num)++, "QM");
4666                                         _print_parity(bp, QM_REG_QM_PRTY_STS);
4667                                 }
4668                                 break;
4669                         case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4670                                 if (print) {
4671                                         _print_next_block((*par_num)++, "TM");
4672                                         _print_parity(bp, TM_REG_TM_PRTY_STS);
4673                                 }
4674                                 break;
4675                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4676                                 if (print) {
4677                                         _print_next_block((*par_num)++, "XSDM");
4678                                         _print_parity(bp,
4679                                                       XSDM_REG_XSDM_PRTY_STS);
4680                                 }
4681                                 break;
4682                         case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4683                                 if (print) {
4684                                         _print_next_block((*par_num)++, "XCM");
4685                                         _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4686                                 }
4687                                 break;
4688                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4689                                 if (print) {
4690                                         _print_next_block((*par_num)++,
4691                                                           "XSEMI");
4692                                         _print_parity(bp,
4693                                                       XSEM_REG_XSEM_PRTY_STS_0);
4694                                         _print_parity(bp,
4695                                                       XSEM_REG_XSEM_PRTY_STS_1);
4696                                 }
4697                                 break;
4698                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4699                                 if (print) {
4700                                         _print_next_block((*par_num)++,
4701                                                           "DOORBELLQ");
4702                                         _print_parity(bp,
4703                                                       DORQ_REG_DORQ_PRTY_STS);
4704                                 }
4705                                 break;
4706                         case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4707                                 if (print) {
4708                                         _print_next_block((*par_num)++, "NIG");
4709                                         if (CHIP_IS_E1x(bp)) {
4710                                                 _print_parity(bp,
4711                                                         NIG_REG_NIG_PRTY_STS);
4712                                         } else {
4713                                                 _print_parity(bp,
4714                                                         NIG_REG_NIG_PRTY_STS_0);
4715                                                 _print_parity(bp,
4716                                                         NIG_REG_NIG_PRTY_STS_1);
4717                                         }
4718                                 }
4719                                 break;
4720                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4721                                 if (print)
4722                                         _print_next_block((*par_num)++,
4723                                                           "VAUX PCI CORE");
4724                                 *global = true;
4725                                 break;
4726                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4727                                 if (print) {
4728                                         _print_next_block((*par_num)++,
4729                                                           "DEBUG");
4730                                         _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4731                                 }
4732                                 break;
4733                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4734                                 if (print) {
4735                                         _print_next_block((*par_num)++, "USDM");
4736                                         _print_parity(bp,
4737                                                       USDM_REG_USDM_PRTY_STS);
4738                                 }
4739                                 break;
4740                         case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4741                                 if (print) {
4742                                         _print_next_block((*par_num)++, "UCM");
4743                                         _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4744                                 }
4745                                 break;
4746                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4747                                 if (print) {
4748                                         _print_next_block((*par_num)++,
4749                                                           "USEMI");
4750                                         _print_parity(bp,
4751                                                       USEM_REG_USEM_PRTY_STS_0);
4752                                         _print_parity(bp,
4753                                                       USEM_REG_USEM_PRTY_STS_1);
4754                                 }
4755                                 break;
4756                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4757                                 if (print) {
4758                                         _print_next_block((*par_num)++, "UPB");
4759                                         _print_parity(bp, GRCBASE_UPB +
4760                                                           PB_REG_PB_PRTY_STS);
4761                                 }
4762                                 break;
4763                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4764                                 if (print) {
4765                                         _print_next_block((*par_num)++, "CSDM");
4766                                         _print_parity(bp,
4767                                                       CSDM_REG_CSDM_PRTY_STS);
4768                                 }
4769                                 break;
4770                         case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4771                                 if (print) {
4772                                         _print_next_block((*par_num)++, "CCM");
4773                                         _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4774                                 }
4775                                 break;
4776                         }
4777 
4778                         /* Clear the bit */
4779                         sig &= ~cur_bit;
4780                 }
4781         }
4782 
4783         return res;
4784 }
4785 
4786 static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4787                                             int *par_num, bool print)
4788 {
4789         u32 cur_bit;
4790         bool res;
4791         int i;
4792 
4793         res = false;
4794 
4795         for (i = 0; sig; i++) {
4796                 cur_bit = (0x1UL << i);
4797                 if (sig & cur_bit) {
4798                         res = true; /* Each set bit is a real error */
4799                         if (print) {
4800                                 switch (cur_bit) {
4801                                 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4802                                         _print_next_block((*par_num)++,
4803                                                           "CSEMI");
4804                                         _print_parity(bp,
4805                                                       CSEM_REG_CSEM_PRTY_STS_0);
4806                                         _print_parity(bp,
4807                                                       CSEM_REG_CSEM_PRTY_STS_1);
4808                                         break;
4809                                 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4810                                         _print_next_block((*par_num)++, "PXP");
4811                                         _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4812                                         _print_parity(bp,
4813                                                       PXP2_REG_PXP2_PRTY_STS_0);
4814                                         _print_parity(bp,
4815                                                       PXP2_REG_PXP2_PRTY_STS_1);
4816                                         break;
4817                                 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4818                                         _print_next_block((*par_num)++,
4819                                                           "PXPPCICLOCKCLIENT");
4820                                         break;
4821                                 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4822                                         _print_next_block((*par_num)++, "CFC");
4823                                         _print_parity(bp,
4824                                                       CFC_REG_CFC_PRTY_STS);
4825                                         break;
4826                                 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4827                                         _print_next_block((*par_num)++, "CDU");
4828                                         _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4829                                         break;
4830                                 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4831                                         _print_next_block((*par_num)++, "DMAE");
4832                                         _print_parity(bp,
4833                                                       DMAE_REG_DMAE_PRTY_STS);
4834                                         break;
4835                                 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4836                                         _print_next_block((*par_num)++, "IGU");
4837                                         if (CHIP_IS_E1x(bp))
4838                                                 _print_parity(bp,
4839                                                         HC_REG_HC_PRTY_STS);
4840                                         else
4841                                                 _print_parity(bp,
4842                                                         IGU_REG_IGU_PRTY_STS);
4843                                         break;
4844                                 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4845                                         _print_next_block((*par_num)++, "MISC");
4846                                         _print_parity(bp,
4847                                                       MISC_REG_MISC_PRTY_STS);
4848                                         break;
4849                                 }
4850                         }
4851 
4852                         /* Clear the bit */
4853                         sig &= ~cur_bit;
4854                 }
4855         }
4856 
4857         return res;
4858 }
4859 
4860 static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4861                                             int *par_num, bool *global,
4862                                             bool print)
4863 {
4864         bool res = false;
4865         u32 cur_bit;
4866         int i;
4867 
4868         for (i = 0; sig; i++) {
4869                 cur_bit = (0x1UL << i);
4870                 if (sig & cur_bit) {
4871                         switch (cur_bit) {
4872                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4873                                 if (print)
4874                                         _print_next_block((*par_num)++,
4875                                                           "MCP ROM");
4876                                 *global = true;
4877                                 res = true;
4878                                 break;
4879                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4880                                 if (print)
4881                                         _print_next_block((*par_num)++,
4882                                                           "MCP UMP RX");
4883                                 *global = true;
4884                                 res = true;
4885                                 break;
4886                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4887                                 if (print)
4888                                         _print_next_block((*par_num)++,
4889                                                           "MCP UMP TX");
4890                                 *global = true;
4891                                 res = true;
4892                                 break;
4893                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4894                                 (*par_num)++;
4895                                 /* clear latched SCPAD PARITY from MCP */
4896                                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4897                                        1UL << 10);
4898                                 break;
4899                         }
4900 
4901                         /* Clear the bit */
4902                         sig &= ~cur_bit;
4903                 }
4904         }
4905 
4906         return res;
4907 }
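
/* Editor's note: the 1UL << 10 write above is assumed (from context) to
 * target the latched SCPAD parity signal in MISC_REG_AEU_CLR_LATCH_SIGNAL,
 * a subset of the 0x7ff write in bnx2x_attn_int_deasserted3() that clears
 * all latched attention signals.
 */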
4908 
4909 static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4910                                             int *par_num, bool print)
4911 {
4912         u32 cur_bit;
4913         bool res;
4914         int i;
4915 
4916         res = false;
4917 
4918         for (i = 0; sig; i++) {
4919                 cur_bit = (0x1UL << i);
4920                 if (sig & cur_bit) {
4921                         res = true; /* Each set bit is a real error */
4922                         if (print) {
4923                                 switch (cur_bit) {
4924                                 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4925                                         _print_next_block((*par_num)++,
4926                                                           "PGLUE_B");
4927                                         _print_parity(bp,
4928                                                       PGLUE_B_REG_PGLUE_B_PRTY_STS);
4929                                         break;
4930                                 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4931                                         _print_next_block((*par_num)++, "ATC");
4932                                         _print_parity(bp,
4933                                                       ATC_REG_ATC_PRTY_STS);
4934                                         break;
4935                                 }
4936                         }
4937                         /* Clear the bit */
4938                         sig &= ~cur_bit;
4939                 }
4940         }
4941 
4942         return res;
4943 }
4944 
4945 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4946                               u32 *sig)
4947 {
4948         bool res = false;
4949 
4950         if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4951             (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4952             (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4953             (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4954             (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4955                 int par_num = 0;
4956 
4957                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention:\n"
4958                                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4959                           sig[0] & HW_PRTY_ASSERT_SET_0,
4960                           sig[1] & HW_PRTY_ASSERT_SET_1,
4961                           sig[2] & HW_PRTY_ASSERT_SET_2,
4962                           sig[3] & HW_PRTY_ASSERT_SET_3,
4963                           sig[4] & HW_PRTY_ASSERT_SET_4);
4964                 if (print) {
4965                         if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4966                              (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4967                              (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4968                              (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
4969                              (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
4970                                 netdev_err(bp->dev,
4971                                            "Parity errors detected in blocks: ");
4972                         } else {
4973                                 print = false;
4974                         }
4975                 }
4976                 res |= bnx2x_check_blocks_with_parity0(bp,
4977                         sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4978                 res |= bnx2x_check_blocks_with_parity1(bp,
4979                         sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
4980                 res |= bnx2x_check_blocks_with_parity2(bp,
4981                         sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
4982                 res |= bnx2x_check_blocks_with_parity3(bp,
4983                         sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
4984                 res |= bnx2x_check_blocks_with_parity4(bp,
4985                         sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
4986 
4987                 if (print)
4988                         pr_cont("\n");
4989         }
4990 
4991         return res;
4992 }
4993 
4994 /**
4995  * bnx2x_chk_parity_attn - checks for parity attentions.
4996  *
4997  * @bp:         driver handle
4998  * @global:     true if there was a global attention
4999  * @print:      show parity attention in syslog
5000  */
5001 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
5002 {
5003         struct attn_route attn = { {0} };
5004         int port = BP_PORT(bp);
5005 
5006         attn.sig[0] = REG_RD(bp,
5007                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
5008                              port*4);
5009         attn.sig[1] = REG_RD(bp,
5010                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
5011                              port*4);
5012         attn.sig[2] = REG_RD(bp,
5013                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
5014                              port*4);
5015         attn.sig[3] = REG_RD(bp,
5016                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
5017                              port*4);
5018         /* Since MCP attentions can't be disabled inside the block, we need to
5019          * read AEU registers to see whether they're currently disabled
5020          */
5021         attn.sig[3] &= ((REG_RD(bp,
5022                                 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
5023                                       : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
5024                          MISC_AEU_ENABLE_MCP_PRTY_BITS) |
5025                         ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
5026 
5027         if (!CHIP_IS_E1x(bp))
5028                 attn.sig[4] = REG_RD(bp,
5029                         MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
5030                                      port*4);
5031 
5032         return bnx2x_parity_attn(bp, global, print, attn.sig);
5033 }
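
/* Editor's sketch of the MCP masking above, with made-up values. For the
 * MCP parity bits M, the expression keeps a bit only if it is enabled in
 * the AEU; all non-MCP bits pass through untouched:
 *
 *	M    = 0x0000000f		(hypothetical MCP_PRTY_BITS)
 *	en   = 0x00000005		(AEU enable register)
 *	sig3 = 0x0000001b
 *	sig3 &= (en & M) | ~M;		== 0x00000011
 *
 * Bits 1 and 3 (MCP parity bits disabled in the AEU) are dropped; bits 0
 * and 4 survive.
 */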
5034 
5035 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
5036 {
5037         u32 val;
5038         if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
5039 
5040                 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
5041                 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
5042                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
5043                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
5044                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
5045                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
5046                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
5047                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
5048                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
5049                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
5050                 if (val &
5051                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
5052                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
5053                 if (val &
5054                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
5055                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
5056                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
5057                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
5058                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
5059                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
5060                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
5061                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
5062         }
5063         if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
5064                 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
5065                 BNX2X_ERR("ATC hw attention 0x%x\n", val);
5066                 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
5067                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
5068                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
5069                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
5070                 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
5071                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
5072                 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
5073                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
5074                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
5075                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
5076                 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
5077                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
5078         }
5079 
5080         if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5081                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
5082                 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
5083                 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5084                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
5085         }
5086 }
5087 
5088 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
5089 {
5090         struct attn_route attn, *group_mask;
5091         int port = BP_PORT(bp);
5092         int index;
5093         u32 reg_addr;
5094         u32 val;
5095         u32 aeu_mask;
5096         bool global = false;
5097 
5098         /* Need to take the HW lock because the MCP or the other port
5099          * might also try to handle this event. */
5100         bnx2x_acquire_alr(bp);
5101 
5102         if (bnx2x_chk_parity_attn(bp, &global, true)) {
5103 #ifndef BNX2X_STOP_ON_ERROR
5104                 bp->recovery_state = BNX2X_RECOVERY_INIT;
5105                 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5106                 /* Disable HW interrupts */
5107                 bnx2x_int_disable(bp);
5108                 /* In case of parity errors don't handle attentions so that
5109                  * other function would "see" parity errors.
5110                  */
5111 #else
5112                 bnx2x_panic();
5113 #endif
5114                 bnx2x_release_alr(bp);
5115                 return;
5116         }
5117 
5118         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
5119         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
5120         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
5121         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
5122         if (!CHIP_IS_E1x(bp))
5123                 attn.sig[4] =
5124                       REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
5125         else
5126                 attn.sig[4] = 0;
5127 
5128         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
5129            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
5130 
5131         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5132                 if (deasserted & (1 << index)) {
5133                         group_mask = &bp->attn_group[index];
5134 
5135                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
5136                            index,
5137                            group_mask->sig[0], group_mask->sig[1],
5138                            group_mask->sig[2], group_mask->sig[3],
5139                            group_mask->sig[4]);
5140 
5141                         bnx2x_attn_int_deasserted4(bp,
5142                                         attn.sig[4] & group_mask->sig[4]);
5143                         bnx2x_attn_int_deasserted3(bp,
5144                                         attn.sig[3] & group_mask->sig[3]);
5145                         bnx2x_attn_int_deasserted1(bp,
5146                                         attn.sig[1] & group_mask->sig[1]);
5147                         bnx2x_attn_int_deasserted2(bp,
5148                                         attn.sig[2] & group_mask->sig[2]);
5149                         bnx2x_attn_int_deasserted0(bp,
5150                                         attn.sig[0] & group_mask->sig[0]);
5151                 }
5152         }
5153 
5154         bnx2x_release_alr(bp);
5155 
5156         if (bp->common.int_block == INT_BLOCK_HC)
5157                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
5158                             COMMAND_REG_ATTN_BITS_CLR);
5159         else
5160                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
5161 
5162         val = ~deasserted;
5163         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
5164            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5165         REG_WR(bp, reg_addr, val);
5166 
5167         if (~bp->attn_state & deasserted)
5168                 BNX2X_ERR("IGU ERROR\n");
5169 
5170         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5171                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
5172 
5173         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5174         aeu_mask = REG_RD(bp, reg_addr);
5175 
5176         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
5177            aeu_mask, deasserted);
5178         aeu_mask |= (deasserted & 0x3ff);
5179         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
5180 
5181         REG_WR(bp, reg_addr, aeu_mask);
5182         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5183 
5184         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
5185         bp->attn_state &= ~deasserted;
5186         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
5187 }
5188 
5189 static void bnx2x_attn_int(struct bnx2x *bp)
5190 {
5191         /* read local copy of bits */
5192         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
5193                                                                 attn_bits);
5194         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
5195                                                                 attn_bits_ack);
5196         u32 attn_state = bp->attn_state;
5197 
5198         /* look for changed bits */
5199         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
5200         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
5201 
5202         DP(NETIF_MSG_HW,
5203            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
5204            attn_bits, attn_ack, asserted, deasserted);
5205 
5206         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
5207                 BNX2X_ERR("BAD attention state\n");
5208 
5209         /* handle bits that were raised */
5210         if (asserted)
5211                 bnx2x_attn_int_asserted(bp, asserted);
5212 
5213         if (deasserted)
5214                 bnx2x_attn_int_deasserted(bp, deasserted);
5215 }
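
/* Editor's sketch of the asserted/deasserted derivation with made-up
 * 4-bit values: attn_bits = 0110, attn_ack = 0011, attn_state = 0011.
 *
 *	asserted   =  0110 & ~0011 & ~0011 = 0100	(newly raised)
 *	deasserted = ~0110 &  0011 &  0011 = 0001	(newly cleared)
 *
 * Bit 1, already acked and recorded in attn_state, appears in neither
 * set, and the sanity check
 * ~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state) evaluates to 0.
 */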
5216 
5217 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
5218                       u16 index, u8 op, u8 update)
5219 {
5220         u32 igu_addr = bp->igu_base_addr;
5221         igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
5222         bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
5223                              igu_addr);
5224 }
5225 
5226 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
5227 {
5228         /* No memory barriers */
5229         storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
5230         mmiowb(); /* keep prod updates ordered */
5231 }
5232 
5233 static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
5234                                       union event_ring_elem *elem)
5235 {
5236         u8 err = elem->message.error;
5237 
5238         if (!bp->cnic_eth_dev.starting_cid  ||
5239             (cid < bp->cnic_eth_dev.starting_cid &&
5240             cid != bp->cnic_eth_dev.iscsi_l2_cid))
5241                 return 1;
5242 
5243         DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
5244 
5245         if (unlikely(err)) {
5246 
5247                 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
5248                           cid);
5249                 bnx2x_panic_dump(bp, false);
5250         }
5251         bnx2x_cnic_cfc_comp(bp, cid, err);
5252         return 0;
5253 }
5254 
5255 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
5256 {
5257         struct bnx2x_mcast_ramrod_params rparam;
5258         int rc;
5259 
5260         memset(&rparam, 0, sizeof(rparam));
5261 
5262         rparam.mcast_obj = &bp->mcast_obj;
5263 
5264         netif_addr_lock_bh(bp->dev);
5265 
5266         /* Clear pending state for the last command */
5267         bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
5268 
5269         /* If there are pending mcast commands - send them */
5270         if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
5271                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
5272                 if (rc < 0)
5273                         BNX2X_ERR("Failed to send pending mcast commands: %d\n",
5274                                   rc);
5275         }
5276 
5277         netif_addr_unlock_bh(bp->dev);
5278 }
5279 
5280 static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5281                                             union event_ring_elem *elem)
5282 {
5283         unsigned long ramrod_flags = 0;
5284         int rc = 0;
5285         u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
5286         u32 cid = echo & BNX2X_SWCID_MASK;
5287         struct bnx2x_vlan_mac_obj *vlan_mac_obj;
5288 
5289         /* Always push next commands out, don't wait here */
5290         __set_bit(RAMROD_CONT, &ramrod_flags);
5291 
5292         switch (echo >> BNX2X_SWCID_SHIFT) {
5293         case BNX2X_FILTER_MAC_PENDING:
5294                 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
5295                 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
5296                         vlan_mac_obj = &bp->iscsi_l2_mac_obj;
5297                 else
5298                         vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
5299 
5300                 break;
5301         case BNX2X_FILTER_VLAN_PENDING:
5302                 DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
5303                 vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
5304                 break;
5305         case BNX2X_FILTER_MCAST_PENDING:
5306                 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
5307                 /* This is only relevant for 57710 where multicast MACs are
5308                  * configured as unicast MACs using the same ramrod.
5309                  */
5310                 bnx2x_handle_mcast_eqe(bp);
5311                 return;
5312         default:
5313                 BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
5314                 return;
5315         }
5316 
5317         rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
5318 
5319         if (rc < 0)
5320                 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
5321         else if (rc > 0)
5322                 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
5323 }
5324 
5325 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
5326 
5327 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
5328 {
5329         netif_addr_lock_bh(bp->dev);
5330 
5331         clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5332 
5333         /* Send rx_mode command again if it was requested */
5334         if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
5335                 bnx2x_set_storm_rx_mode(bp);
5336         else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
5337                                     &bp->sp_state))
5338                 bnx2x_set_iscsi_eth_rx_mode(bp, true);
5339         else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
5340                                     &bp->sp_state))
5341                 bnx2x_set_iscsi_eth_rx_mode(bp, false);
5342 
5343         netif_addr_unlock_bh(bp->dev);
5344 }
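
/* The rx_mode flow assumed above uses two bits: PENDING marks a command
 * in flight, SCHED records a request that arrived meanwhile and is
 * replayed by this completion handler. A sketch of the issuing side
 * (cf. bnx2x_set_rx_mode()):
 *
 *      if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
 *              set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
 *      else
 *              bnx2x_set_storm_rx_mode(bp);
 */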
5345 
5346 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
5347                                               union event_ring_elem *elem)
5348 {
5349         if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
5350                 DP(BNX2X_MSG_SP,
5351                    "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
5352                    elem->message.data.vif_list_event.func_bit_map);
5353                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
5354                         elem->message.data.vif_list_event.func_bit_map);
5355         } else if (elem->message.data.vif_list_event.echo ==
5356                    VIF_LIST_RULE_SET) {
5357                 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
5358                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
5359         }
5360 }
5361 
5362 /* called with rtnl_lock */
5363 static void bnx2x_after_function_update(struct bnx2x *bp)
5364 {
5365         int q, rc;
5366         struct bnx2x_fastpath *fp;
5367         struct bnx2x_queue_state_params queue_params = {NULL};
5368         struct bnx2x_queue_update_params *q_update_params =
5369                 &queue_params.params.update;
5370 
5371         /* Send Q update command with afex vlan removal values for all Qs */
5372         queue_params.cmd = BNX2X_Q_CMD_UPDATE;
5373 
5374         /* set silent vlan removal values according to vlan mode */
5375         __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5376                   &q_update_params->update_flags);
5377         __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
5378                   &q_update_params->update_flags);
5379         __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5380 
5381         /* in access mode, mask and value are set to 0 to strip all vlans */
5382         if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
5383                 q_update_params->silent_removal_value = 0;
5384                 q_update_params->silent_removal_mask = 0;
5385         } else {
5386                 q_update_params->silent_removal_value =
5387                         (bp->afex_def_vlan_tag & VLAN_VID_MASK);
5388                 q_update_params->silent_removal_mask = VLAN_VID_MASK;
5389         }
5390 
5391         for_each_eth_queue(bp, q) {
5392                 /* Set the appropriate Queue object */
5393                 fp = &bp->fp[q];
5394                 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5395 
5396                 /* send the ramrod */
5397                 rc = bnx2x_queue_state_change(bp, &queue_params);
5398                 if (rc < 0)
5399                         BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5400                                   q);
5401         }
5402 
5403         if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
5404                 fp = &bp->fp[FCOE_IDX(bp)];
5405                 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5406 
5407                 /* clear pending completion bit */
5408                 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5409 
5410                 /* mark that an FCoE Q update completion is pending */
5411                 smp_mb__before_atomic();
5412                 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
5413                 smp_mb__after_atomic();
5414 
5415                 /* send Q update ramrod for FCoE Q */
5416                 rc = bnx2x_queue_state_change(bp, &queue_params);
5417                 if (rc < 0)
5418                         BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5419                                   FCOE_IDX(bp));
5420         } else {
5421                 /* If no FCoE ring - ACK MCP now */
5422                 bnx2x_link_report(bp);
5423                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5424         }
5425 }
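
/* Note the asymmetry above: eth queue updates wait for completion
 * (RAMROD_COMP_WAIT), while the FCoE queue update completes
 * asynchronously - BNX2X_AFEX_FCOE_Q_UPDATE_PENDING defers the VIFSET
 * ACK until that completion (cf. the BNX2X_AFEX_PENDING_VIFSET_MCP_ACK
 * poll in bnx2x_sp_task()).
 */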
5426 
5427 static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
5428         struct bnx2x *bp, u32 cid)
5429 {
5430         DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
5431 
5432         if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
5433                 return &bnx2x_fcoe_sp_obj(bp, q_obj);
5434         else
5435                 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
5436 }
5437 
5438 static void bnx2x_eq_int(struct bnx2x *bp)
5439 {
5440         u16 hw_cons, sw_cons, sw_prod;
5441         union event_ring_elem *elem;
5442         u8 echo;
5443         u32 cid;
5444         u8 opcode;
5445         int rc, spqe_cnt = 0;
5446         struct bnx2x_queue_sp_obj *q_obj;
5447         struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5448         struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
5449 
5450         hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5451 
5452         /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
5453          * When we get the next-page we need to adjust so the loop
5454          * condition below will be met. The next element is the size of a
5455          * regular element and hence we increment by 1.
5456          */
5457         if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
5458                 hw_cons++;
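        /* (the last slot of each page holds the next-page pointer:
         * NEXT_EQ_IDX() steps sw_cons over it, and the increment above
         * steps hw_cons over it, keeping the loop condition comparable)
         */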
5459 
5460         /* This function is never run in parallel with itself for a
5461          * specific bp, thus there is no need for a "paired" read memory
5462          * barrier here.
5463          */
5464         sw_cons = bp->eq_cons;
5465         sw_prod = bp->eq_prod;
5466 
5467         DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->eq_spq_left %x\n",
5468                         hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
5469 
5470         for (; sw_cons != hw_cons;
5471               sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
5472 
5473                 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
5474 
5475                 rc = bnx2x_iov_eq_sp_event(bp, elem);
5476                 if (!rc) {
5477                         DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
5478                            rc);
5479                         goto next_spqe;
5480                 }
5481 
5482                 opcode = elem->message.opcode;
5483 
5484                 /* handle eq element */
5485                 switch (opcode) {
5486                 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5487                         bnx2x_vf_mbx_schedule(bp,
5488                                               &elem->message.data.vf_pf_event);
5489                         continue;
5490 
5491                 case EVENT_RING_OPCODE_STAT_QUERY:
5492                         DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
5493                                "got statistics comp event %d\n",
5494                                bp->stats_comp++);
5495                         /* nothing to do with stats comp */
5496                         goto next_spqe;
5497 
5498                 case EVENT_RING_OPCODE_CFC_DEL:
5499                         /* handle according to cid range */
5500                         /*
5501                          * we may want to verify here that the bp state is
5502                          * HALTING
5503                          */
5504 
5505                         /* elem CID originates from FW, so it is little-endian */
5506                         cid = SW_CID(elem->message.data.cfc_del_event.cid);
5507 
5508                         DP(BNX2X_MSG_SP,
5509                            "got delete ramrod for MULTI[%d]\n", cid);
5510 
5511                         if (CNIC_LOADED(bp) &&
5512                             !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
5513                                 goto next_spqe;
5514 
5515                         q_obj = bnx2x_cid_to_q_obj(bp, cid);
5516 
5517                         if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5518                                 break;
5519 
5520                         goto next_spqe;
5521 
5522                 case EVENT_RING_OPCODE_STOP_TRAFFIC:
5523                         DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
5524                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5525                         if (f_obj->complete_cmd(bp, f_obj,
5526                                                 BNX2X_F_CMD_TX_STOP))
5527                                 break;
5528                         goto next_spqe;
5529 
5530                 case EVENT_RING_OPCODE_START_TRAFFIC:
5531                         DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
5532                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5533                         if (f_obj->complete_cmd(bp, f_obj,
5534                                                 BNX2X_F_CMD_TX_START))
5535                                 break;
5536                         goto next_spqe;
5537 
5538                 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
5539                         echo = elem->message.data.function_update_event.echo;
5540                         if (echo == SWITCH_UPDATE) {
5541                                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5542                                    "got FUNC_SWITCH_UPDATE ramrod\n");
5543                                 if (f_obj->complete_cmd(
5544                                         bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5545                                         break;
5546 
5547                         } else {
5548                                 int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
5549 
5550                                 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5551                                    "AFEX: ramrod completed FUNCTION_UPDATE\n");
5552                                 f_obj->complete_cmd(bp, f_obj,
5553                                                     BNX2X_F_CMD_AFEX_UPDATE);
5554 
5555                                 /* We will perform the Queues update from
5556                                  * sp_rtnl task as all Queue SP operations
5557                                  * should run under rtnl_lock.
5558                                  */
5559                                 bnx2x_schedule_sp_rtnl(bp, cmd, 0);
5560                         }
5561 
5562                         goto next_spqe;
5563 
5564                 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
5565                         f_obj->complete_cmd(bp, f_obj,
5566                                             BNX2X_F_CMD_AFEX_VIFLISTS);
5567                         bnx2x_after_afex_vif_lists(bp, elem);
5568                         goto next_spqe;
5569                 case EVENT_RING_OPCODE_FUNCTION_START:
5570                         DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5571                            "got FUNC_START ramrod\n");
5572                         if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5573                                 break;
5574 
5575                         goto next_spqe;
5576 
5577                 case EVENT_RING_OPCODE_FUNCTION_STOP:
5578                         DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5579                            "got FUNC_STOP ramrod\n");
5580                         if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5581                                 break;
5582 
5583                         goto next_spqe;
5584 
5585                 case EVENT_RING_OPCODE_SET_TIMESYNC:
5586                         DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
5587                            "got set_timesync ramrod completion\n");
5588                         if (f_obj->complete_cmd(bp, f_obj,
5589                                                 BNX2X_F_CMD_SET_TIMESYNC))
5590                                 break;
5591                         goto next_spqe;
5592                 }
5593 
5594                 switch (opcode | bp->state) {
5595                 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5596                       BNX2X_STATE_OPEN):
5597                 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5598                       BNX2X_STATE_OPENING_WAIT4_PORT):
5599                 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5600                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5601                         DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
5602                            SW_CID(elem->message.data.eth_event.echo));
5603                         rss_raw->clear_pending(rss_raw);
5604                         break;
5605 
5606                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
5607                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
5608                 case (EVENT_RING_OPCODE_SET_MAC |
5609                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5610                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5611                       BNX2X_STATE_OPEN):
5612                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5613                       BNX2X_STATE_DIAG):
5614                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5615                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5616                         DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
5617                         bnx2x_handle_classification_eqe(bp, elem);
5618                         break;
5619 
5620                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5621                       BNX2X_STATE_OPEN):
5622                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5623                       BNX2X_STATE_DIAG):
5624                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5625                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5626                         DP(BNX2X_MSG_SP, "got mcast ramrod\n");
5627                         bnx2x_handle_mcast_eqe(bp);
5628                         break;
5629 
5630                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5631                       BNX2X_STATE_OPEN):
5632                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5633                       BNX2X_STATE_DIAG):
5634                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5635                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5636                         DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
5637                         bnx2x_handle_rx_mode_eqe(bp);
5638                         break;
5639                 default:
5640                         /* unknown event - log an error and continue */
5641                         BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5642                                   elem->message.opcode, bp->state);
5643                 }
5644 next_spqe:
5645                 spqe_cnt++;
5646         } /* for */
5647 
5648         smp_mb__before_atomic();
5649         atomic_add(spqe_cnt, &bp->eq_spq_left);
5650 
5651         bp->eq_cons = sw_cons;
5652         bp->eq_prod = sw_prod;
5653         /* Make sure the above memory writes are issued before the producer update */
5654         smp_wmb();
5655 
5656         /* update producer */
5657         bnx2x_update_eq_prod(bp, bp->eq_prod);
5658 }
5659 
5660 static void bnx2x_sp_task(struct work_struct *work)
5661 {
5662         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5663 
5664         DP(BNX2X_MSG_SP, "sp task invoked\n");
5665 
5666         /* make sure the atomic interrupt_occurred has been written */
5667         smp_rmb();
5668         if (atomic_read(&bp->interrupt_occurred)) {
5670                 /* what work needs to be performed? */
5671                 u16 status = bnx2x_update_dsb_idx(bp);
5672 
5673                 DP(BNX2X_MSG_SP, "status %x\n", status);
5674                 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5675                 atomic_set(&bp->interrupt_occurred, 0);
5676 
5677                 /* HW attentions */
5678                 if (status & BNX2X_DEF_SB_ATT_IDX) {
5679                         bnx2x_attn_int(bp);
5680                         status &= ~BNX2X_DEF_SB_ATT_IDX;
5681                 }
5682 
5683                 /* SP events: STAT_QUERY and others */
5684                 if (status & BNX2X_DEF_SB_IDX) {
5685                         struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5686 
5687                         if (FCOE_INIT(bp) &&
5688                             (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5689                                 /* Prevent local bottom-halves from running as
5690                                  * we are going to change the local NAPI list.
5691                                  */
5692                                 local_bh_disable();
5693                                 napi_schedule(&bnx2x_fcoe(bp, napi));
5694                                 local_bh_enable();
5695                         }
5696 
5697                         /* Handle EQ completions */
5698                         bnx2x_eq_int(bp);
5699                         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5700                                      le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5701 
5702                         status &= ~BNX2X_DEF_SB_IDX;
5703                 }
5704 
5705                 /* if status is non-zero then perhaps something went wrong */
5706                 if (unlikely(status))
5707                         DP(BNX2X_MSG_SP,
5708                            "got an unknown interrupt! (status 0x%x)\n", status);
5709 
5710                 /* ack status block only if something was actually handled */
5711                 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5712                              le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5713         }
5714 
5715         /* afex - poll to check if VIFSET_ACK should be sent to MFW */
5716         if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5717                                &bp->sp_state)) {
5718                 bnx2x_link_report(bp);
5719                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5720         }
5721 }
5722 
5723 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5724 {
5725         struct net_device *dev = dev_instance;
5726         struct bnx2x *bp = netdev_priv(dev);
5727 
5728         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5729                      IGU_INT_DISABLE, 0);
5730 
5731 #ifdef BNX2X_STOP_ON_ERROR
5732         if (unlikely(bp->panic))
5733                 return IRQ_HANDLED;
5734 #endif
5735 
5736         if (CNIC_LOADED(bp)) {
5737                 struct cnic_ops *c_ops;
5738 
5739                 rcu_read_lock();
5740                 c_ops = rcu_dereference(bp->cnic_ops);
5741                 if (c_ops)
5742                         c_ops->cnic_handler(bp->cnic_data, NULL);
5743                 rcu_read_unlock();
5744         }
5745 
5746         /* schedule sp task to perform default status block work, ack
5747          * attentions and enable interrupts.
5748          */
5749         bnx2x_schedule_sp_task(bp);
5750 
5751         return IRQ_HANDLED;
5752 }
5753 
5754 /* end of slow path */
5755 
5756 void bnx2x_drv_pulse(struct bnx2x *bp)
5757 {
5758         SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5759                  bp->fw_drv_pulse_wr_seq);
5760 }
5761 
5762 static void bnx2x_timer(unsigned long data)
5763 {
5764         struct bnx2x *bp = (struct bnx2x *) data;
5765 
5766         if (!netif_running(bp->dev))
5767                 return;
5768 
5769         if (IS_PF(bp) && !BP_NOMCP(bp)) {
5771                 int mb_idx = BP_FW_MB_IDX(bp);
5772                 u16 drv_pulse;
5773                 u16 mcp_pulse;
5774 
5775                 ++bp->fw_drv_pulse_wr_seq;
5776                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5777                 drv_pulse = bp->fw_drv_pulse_wr_seq;
5778                 bnx2x_drv_pulse(bp);
5779 
5780                 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5781                              MCP_PULSE_SEQ_MASK);
5782                 /* The delta between driver pulse and mcp response
5783                  * should not get too big. If the MFW is more than 5 pulses
5784                  * behind, we should worry about it enough to generate an error
5785                  * log.
5786                  */
5787                 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5788                         BNX2X_ERR("MFW seems hung: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5789                                   drv_pulse, mcp_pulse);
5790         }
5791 
5792         if (bp->state == BNX2X_STATE_OPEN)
5793                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5794 
5795         /* sample pf vf bulletin board for new posts from pf */
5796         if (IS_VF(bp))
5797                 bnx2x_timer_sriov(bp);
5798 
5799         mod_timer(&bp->timer, jiffies + bp->current_interval);
5800 }
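
/* The masked subtraction above is wraparound-safe within the pulse
 * sequence space. A minimal sketch (a hypothetical helper, using
 * MCP_PULSE_SEQ_MASK as in the shared HSI headers):
 */
static inline u16 example_pulse_delta(u16 drv_pulse, u16 mcp_pulse)
{
        /* e.g. drv_pulse 0x0002 vs mcp_pulse (MCP_PULSE_SEQ_MASK - 1)
         * yields 4: the mask folds the negative difference back into
         * the sequence space instead of producing a huge bogus delta
         */
        return (drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK;
}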
5801 
5802 /* end of Statistics */
5803 
5804 /* nic init */
5805 
5806 /*
5807  * nic init service functions
5808  */
5809 
5810 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5811 {
5812         u32 i;
5813         if (!(len % 4) && !(addr % 4))
5814                 for (i = 0; i < len; i += 4)
5815                         REG_WR(bp, addr + i, fill);
5816         else
5817                 for (i = 0; i < len; i++)
5818                         REG_WR8(bp, addr + i, fill);
5819 }
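
/* e.g. bnx2x_fill(bp, addr, 0, 8) with a dword-aligned addr issues two
 * REG_WR()s, while len 6 (or an unaligned addr) falls back to per-byte
 * REG_WR8()s. The callers here pass fill == 0; the two paths would
 * differ for other values (REG_WR() stores fill as one 32-bit word per
 * dword, REG_WR8() stores its low byte to every byte).
 */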
5820 
5821 /* helper: writes FP SP data to FW - data_size in dwords */
5822 static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5823                                 int fw_sb_id,
5824                                 u32 *sb_data_p,
5825                                 u32 data_size)
5826 {
5827         int index;
5828         for (index = 0; index < data_size; index++)
5829                 REG_WR(bp, BAR_CSTRORM_INTMEM +
5830                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5831                         sizeof(u32)*index,
5832                         *(sb_data_p + index));
5833 }
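
/* The status-block init/zero routines below reuse this helper by
 * casting whichever chip-specific struct applies to a raw u32 array. A
 * minimal sketch of that idiom with a hypothetical three-dword layout:
 */
struct example_sb_data {
        u32 state;
        u32 addr_lo;
        u32 addr_hi;
};

static void example_wr_sb(struct bnx2x *bp, int fw_sb_id)
{
        struct example_sb_data sb = { .state = SB_DISABLED };

        /* data_size is in dwords, as bnx2x_wr_fp_sb_data() expects */
        bnx2x_wr_fp_sb_data(bp, fw_sb_id, (u32 *)&sb,
                            sizeof(sb) / sizeof(u32));
}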
5834 
5835 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5836 {
5837         u32 *sb_data_p;
5838         u32 data_size = 0;
5839         struct hc_status_block_data_e2 sb_data_e2;
5840         struct hc_status_block_data_e1x sb_data_e1x;
5841 
5842         /* disable the function first */
5843         if (!CHIP_IS_E1x(bp)) {
5844                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5845                 sb_data_e2.common.state = SB_DISABLED;
5846                 sb_data_e2.common.p_func.vf_valid = false;
5847                 sb_data_p = (u32 *)&sb_data_e2;
5848                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5849         } else {
5850                 memset(&sb_data_e1x, 0,
5851                        sizeof(struct hc_status_block_data_e1x));
5852                 sb_data_e1x.common.state = SB_DISABLED;
5853                 sb_data_e1x.common.p_func.vf_valid = false;
5854                 sb_data_p = (u32 *)&sb_data_e1x;
5855                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5856         }
5857         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5858 
5859         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5860                         CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5861                         CSTORM_STATUS_BLOCK_SIZE);
5862         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5863                         CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5864                         CSTORM_SYNC_BLOCK_SIZE);
5865 }
5866 
5867 /* helper:  writes SP SB data to FW */
5868 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5869                 struct hc_sp_status_block_data *sp_sb_data)
5870 {
5871         int func = BP_FUNC(bp);
5872         int i;
5873         for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5874                 REG_WR(bp, BAR_CSTRORM_INTMEM +
5875                         CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5876                         i*sizeof(u32),
5877                         *((u32 *)sp_sb_data + i));
5878 }
5879 
5880 static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5881 {
5882         int func = BP_FUNC(bp);
5883         struct hc_sp_status_block_data sp_sb_data;
5884         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5885 
5886         sp_sb_data.state = SB_DISABLED;
5887         sp_sb_data.p_func.vf_valid = false;
5888 
5889         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5890 
5891         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5892                         CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5893                         CSTORM_SP_STATUS_BLOCK_SIZE);
5894         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5895                         CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5896                         CSTORM_SP_SYNC_BLOCK_SIZE);
5897 }
5898 
5899 static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5900                                            int igu_sb_id, int igu_seg_id)
5901 {
5902         hc_sm->igu_sb_id = igu_sb_id;
5903         hc_sm->igu_seg_id = igu_seg_id;
5904         hc_sm->timer_value = 0xFF;
5905         hc_sm->time_to_expire = 0xFFFFFFFF;
5906 }
5907 
5908 /* maps each status block index to its RX/TX state machine id */
5909 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5910 {
5911         /* zero out state machine indices */
5912         /* rx indices */
5913         index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5914 
5915         /* tx indices */
5916         index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5917         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5918         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5919         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5920 
5921         /* map indices */
5922         /* rx indices */
5923         index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5924                 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5925 
5926         /* tx indices */
5927         index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5928                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5929         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5930                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5931         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5932                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5933         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5934                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5935 }
5936 
5937 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5938                           u8 vf_valid, int fw_sb_id, int igu_sb_id)
5939 {
5940         int igu_seg_id;
5941 
5942         struct hc_status_block_data_e2 sb_data_e2;
5943         struct hc_status_block_data_e1x sb_data_e1x;
5944         struct hc_status_block_sm  *hc_sm_p;
5945         int data_size;
5946         u32 *sb_data_p;
5947 
5948         if (CHIP_INT_MODE_IS_BC(bp))
5949                 igu_seg_id = HC_SEG_ACCESS_NORM;
5950         else
5951                 igu_seg_id = IGU_SEG_ACCESS_NORM;
5952 
5953         bnx2x_zero_fp_sb(bp, fw_sb_id);
5954 
5955         if (!CHIP_IS_E1x(bp)) {
5956                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5957                 sb_data_e2.common.state = SB_ENABLED;
5958                 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5959                 sb_data_e2.common.p_func.vf_id = vfid;
5960                 sb_data_e2.common.p_func.vf_valid = vf_valid;
5961                 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5962                 sb_data_e2.common.same_igu_sb_1b = true;
5963                 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5964                 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5965                 hc_sm_p = sb_data_e2.common.state_machine;
5966                 sb_data_p = (u32 *)&sb_data_e2;
5967                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5968                 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5969         } else {
5970                 memset(&sb_data_e1x, 0,
5971                        sizeof(struct hc_status_block_data_e1x));
5972                 sb_data_e1x.common.state = SB_ENABLED;
5973                 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5974                 sb_data_e1x.common.p_func.vf_id = 0xff;
5975                 sb_data_e1x.common.p_func.vf_valid = false;
5976                 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5977                 sb_data_e1x.common.same_igu_sb_1b = true;
5978                 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5979                 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5980                 hc_sm_p = sb_data_e1x.common.state_machine;
5981                 sb_data_p = (u32 *)&sb_data_e1x;
5982                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5983                 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5984         }
5985 
5986         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5987                                        igu_sb_id, igu_seg_id);
5988         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
5989                                        igu_sb_id, igu_seg_id);
5990 
5991         DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
5992 
5993         /* write indices to HW - PCI guarantees endianness of regpairs */
5994         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5995 }
5996 
5997 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
5998                                      u16 tx_usec, u16 rx_usec)
5999 {
6000         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
6001                                     false, rx_usec);
6002         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6003                                        HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
6004                                        tx_usec);
6005         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6006                                        HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
6007                                        tx_usec);
6008         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6009                                        HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
6010                                        tx_usec);
6011 }
6012 
6013 static void bnx2x_init_def_sb(struct bnx2x *bp)
6014 {
6015         struct host_sp_status_block *def_sb = bp->def_status_blk;
6016         dma_addr_t mapping = bp->def_status_blk_mapping;
6017         int igu_sp_sb_index;
6018         int igu_seg_id;
6019         int port = BP_PORT(bp);
6020         int func = BP_FUNC(bp);
6021         int reg_offset, reg_offset_en5;
6022         u64 section;
6023         int index;
6024         struct hc_sp_status_block_data sp_sb_data;
6025         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
6026 
6027         if (CHIP_INT_MODE_IS_BC(bp)) {
6028                 igu_sp_sb_index = DEF_SB_IGU_ID;
6029                 igu_seg_id = HC_SEG_ACCESS_DEF;
6030         } else {
6031                 igu_sp_sb_index = bp->igu_dsb_id;
6032                 igu_seg_id = IGU_SEG_ACCESS_DEF;
6033         }
6034 
6035         /* ATTN */
6036         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6037                                             atten_status_block);
6038         def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
6039 
6040         bp->attn_state = 0;
6041 
6042         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6043                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6044         reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
6045                                  MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
6046         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
6047                 int sindex;
6048                 /* take care of sig[0]..sig[4] */
6049                 for (sindex = 0; sindex < 4; sindex++)
6050                         bp->attn_group[index].sig[sindex] =
6051                            REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
6052 
6053                 if (!CHIP_IS_E1x(bp))
6054                         /*
6055                          * enable5 is separate from the rest of the registers,
6056                          * and therefore the address skip is 4
6057                          * and not 16 between the different groups
6058                          */
6059                         bp->attn_group[index].sig[4] = REG_RD(bp,
6060                                         reg_offset_en5 + 0x4*index);
6061                 else
6062                         bp->attn_group[index].sig[4] = 0;
6063         }
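        /* i.e. sig[0..3] of group n live at reg_offset + 0x10 * n +
         * 0x4 * sindex, while sig[4] lives in the separate enable5
         * block at reg_offset_en5 + 0x4 * n
         */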
6064 
6065         if (bp->common.int_block == INT_BLOCK_HC) {
6066                 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
6067                                      HC_REG_ATTN_MSG0_ADDR_L);
6068 
6069                 REG_WR(bp, reg_offset, U64_LO(section));
6070                 REG_WR(bp, reg_offset + 4, U64_HI(section));
6071         } else if (!CHIP_IS_E1x(bp)) {
6072                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
6073                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
6074         }
6075 
6076         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6077                                             sp_sb);
6078 
6079         bnx2x_zero_sp_sb(bp);
6080 
6081         /* PCI guarantees endianness of regpairs */
6082         sp_sb_data.state                = SB_ENABLED;
6083         sp_sb_data.host_sb_addr.lo      = U64_LO(section);
6084         sp_sb_data.host_sb_addr.hi      = U64_HI(section);
6085         sp_sb_data.igu_sb_id            = igu_sp_sb_index;
6086         sp_sb_data.igu_seg_id           = igu_seg_id;
6087         sp_sb_data.p_func.pf_id         = func;
6088         sp_sb_data.p_func.vnic_id       = BP_VN(bp);
6089         sp_sb_data.p_func.vf_id         = 0xff;
6090 
6091         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
6092 
6093         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
6094 }
6095 
6096 void bnx2x_update_coalesce(struct bnx2x *bp)
6097 {
6098         int i;
6099 
6100         for_each_eth_queue(bp, i)
6101                 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
6102                                          bp->tx_ticks, bp->rx_ticks);
6103 }
6104 
6105 static void bnx2x_init_sp_ring(struct bnx2x *bp)
6106 {
6107         spin_lock_init(&bp->spq_lock);
6108         atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
6109 
6110         bp->spq_prod_idx = 0;
6111         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
6112         bp->spq_prod_bd = bp->spq;
6113         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
6114 }
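
/* bnx2x_sp_post() advances spq_prod_bd from here; when it reaches
 * spq_last_bd it wraps back to bp->spq, and cq_spq_left throttles the
 * number of outstanding slow-path commands (MAX_SPQ_PENDING credits,
 * as set above).
 */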
6115 
6116 static void bnx2x_init_eq_ring(struct bnx2x *bp)
6117 {
6118         int i;
6119         for (i = 1; i <= NUM_EQ_PAGES; i++) {
6120                 union event_ring_elem *elem =
6121                         &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
6122 
6123                 elem->next_page.addr.hi =
6124                         cpu_to_le32(U64_HI(bp->eq_mapping +
6125                                    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
6126                 elem->next_page.addr.lo =
6127                         cpu_to_le32(U64_LO(bp->eq_mapping +
6128                                    BCM_PAGE_SIZE*(i %