Linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c

  1 /* bnx2x_main.c: Broadcom Everest network driver.
  2  *
  3  * Copyright (c) 2007-2013 Broadcom Corporation
  4  *
  5  * This program is free software; you can redistribute it and/or modify
  6  * it under the terms of the GNU General Public License as published by
  7  * the Free Software Foundation.
  8  *
  9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 10  * Written by: Eliezer Tamir
 11  * Based on code from Michael Chan's bnx2 driver
 12  * UDP CSUM errata workaround by Arik Gendelman
 13  * Slowpath and fastpath rework by Vladislav Zolotarov
 14  * Statistics and Link management by Yitchak Gertner
 15  *
 16  */
 17 
 18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 19 
 20 #include <linux/module.h>
 21 #include <linux/moduleparam.h>
 22 #include <linux/kernel.h>
 23 #include <linux/device.h>  /* for dev_info() */
 24 #include <linux/timer.h>
 25 #include <linux/errno.h>
 26 #include <linux/ioport.h>
 27 #include <linux/slab.h>
 28 #include <linux/interrupt.h>
 29 #include <linux/pci.h>
 30 #include <linux/aer.h>
 31 #include <linux/init.h>
 32 #include <linux/netdevice.h>
 33 #include <linux/etherdevice.h>
 34 #include <linux/skbuff.h>
 35 #include <linux/dma-mapping.h>
 36 #include <linux/bitops.h>
 37 #include <linux/irq.h>
 38 #include <linux/delay.h>
 39 #include <asm/byteorder.h>
 40 #include <linux/time.h>
 41 #include <linux/ethtool.h>
 42 #include <linux/mii.h>
 43 #include <linux/if_vlan.h>
 44 #include <net/ip.h>
 45 #include <net/ipv6.h>
 46 #include <net/tcp.h>
 47 #include <net/checksum.h>
 48 #include <net/ip6_checksum.h>
 49 #include <linux/workqueue.h>
 50 #include <linux/crc32.h>
 51 #include <linux/crc32c.h>
 52 #include <linux/prefetch.h>
 53 #include <linux/zlib.h>
 54 #include <linux/io.h>
 55 #include <linux/semaphore.h>
 56 #include <linux/stringify.h>
 57 #include <linux/vmalloc.h>
 58 
 59 #include "bnx2x.h"
 60 #include "bnx2x_init.h"
 61 #include "bnx2x_init_ops.h"
 62 #include "bnx2x_cmn.h"
 63 #include "bnx2x_vfpf.h"
 64 #include "bnx2x_dcb.h"
 65 #include "bnx2x_sp.h"
 66 
 67 #include <linux/firmware.h>
 68 #include "bnx2x_fw_file_hdr.h"
 69 /* FW files */
 70 #define FW_FILE_VERSION                                 \
 71         __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
 72         __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
 73         __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
 74         __stringify(BCM_5710_FW_ENGINEERING_VERSION)
 75 #define FW_FILE_NAME_E1         "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
 76 #define FW_FILE_NAME_E1H        "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
 77 #define FW_FILE_NAME_E2         "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
 78 
 79 /* Time in jiffies before concluding the transmitter is hung */
 80 #define TX_TIMEOUT              (5*HZ)
 81 
 82 static char version[] =
 83         "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
 84         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 85 
 86 MODULE_AUTHOR("Eliezer Tamir");
 87 MODULE_DESCRIPTION("Broadcom NetXtreme II "
 88                    "BCM57710/57711/57711E/"
 89                    "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
 90                    "57840/57840_MF Driver");
 91 MODULE_LICENSE("GPL");
 92 MODULE_VERSION(DRV_MODULE_VERSION);
 93 MODULE_FIRMWARE(FW_FILE_NAME_E1);
 94 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
 95 MODULE_FIRMWARE(FW_FILE_NAME_E2);
 96 
 97 int bnx2x_num_queues;
 98 module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
 99 MODULE_PARM_DESC(num_queues,
100                  " Set number of queues (default is the number of CPUs)");
101 
102 static int disable_tpa;
103 module_param(disable_tpa, int, S_IRUGO);
104 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
105 
106 static int int_mode;
107 module_param(int_mode, int, S_IRUGO);
108 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
109                                 "(1 INT#x; 2 MSI)");
110 
111 static int dropless_fc;
112 module_param(dropless_fc, int, S_IRUGO);
113 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
114 
115 static int mrrs = -1;
116 module_param(mrrs, int, S_IRUGO);
117 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
118 
119 static int debug;
120 module_param(debug, int, S_IRUGO);
121 MODULE_PARM_DESC(debug, " Default debug msglevel");
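
/* Illustrative note (not part of the driver): all of the above parameters
 * are read-only once loaded (S_IRUGO), so they are set at load time, e.g.
 *	modprobe bnx2x num_queues=8 int_mode=1 disable_tpa=1
 */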
122 
123 struct workqueue_struct *bnx2x_wq;
124 
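/* Per-MAC-block register addresses and the values saved from them; used when
 * taking over from a previously loaded driver so MAC state can be restored
 * (see the bnx2x_prev_* unload flow).
 */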
125 struct bnx2x_mac_vals {
126         u32 xmac_addr;
127         u32 xmac_val;
128         u32 emac_addr;
129         u32 emac_val;
130         u32 umac_addr;
131         u32 umac_val;
132         u32 bmac_addr;
133         u32 bmac_val[2];
134 };
135 
136 enum bnx2x_board_type {
137         BCM57710 = 0,
138         BCM57711,
139         BCM57711E,
140         BCM57712,
141         BCM57712_MF,
142         BCM57712_VF,
143         BCM57800,
144         BCM57800_MF,
145         BCM57800_VF,
146         BCM57810,
147         BCM57810_MF,
148         BCM57810_VF,
149         BCM57840_4_10,
150         BCM57840_2_20,
151         BCM57840_MF,
152         BCM57840_VF,
153         BCM57811,
154         BCM57811_MF,
155         BCM57840_O,
156         BCM57840_MFO,
157         BCM57811_VF
158 };
159 
160 /* indexed by board_type, above */
161 static struct {
162         char *name;
163 } board_info[] = {
164         [BCM57710]      = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
165         [BCM57711]      = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
166         [BCM57711E]     = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
167         [BCM57712]      = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
168         [BCM57712_MF]   = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
169         [BCM57712_VF]   = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
170         [BCM57800]      = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
171         [BCM57800_MF]   = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
172         [BCM57800_VF]   = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
173         [BCM57810]      = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
174         [BCM57810_MF]   = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
175         [BCM57810_VF]   = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
176         [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
177         [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
178         [BCM57840_MF]   = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
179         [BCM57840_VF]   = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
180         [BCM57811]      = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
181         [BCM57811_MF]   = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
182         [BCM57840_O]    = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
183         [BCM57840_MFO]  = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
184         [BCM57811_VF]   = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Virtual Function" }
185 };
186 
187 #ifndef PCI_DEVICE_ID_NX2_57710
188 #define PCI_DEVICE_ID_NX2_57710         CHIP_NUM_57710
189 #endif
190 #ifndef PCI_DEVICE_ID_NX2_57711
191 #define PCI_DEVICE_ID_NX2_57711         CHIP_NUM_57711
192 #endif
193 #ifndef PCI_DEVICE_ID_NX2_57711E
194 #define PCI_DEVICE_ID_NX2_57711E        CHIP_NUM_57711E
195 #endif
196 #ifndef PCI_DEVICE_ID_NX2_57712
197 #define PCI_DEVICE_ID_NX2_57712         CHIP_NUM_57712
198 #endif
199 #ifndef PCI_DEVICE_ID_NX2_57712_MF
200 #define PCI_DEVICE_ID_NX2_57712_MF      CHIP_NUM_57712_MF
201 #endif
202 #ifndef PCI_DEVICE_ID_NX2_57712_VF
203 #define PCI_DEVICE_ID_NX2_57712_VF      CHIP_NUM_57712_VF
204 #endif
205 #ifndef PCI_DEVICE_ID_NX2_57800
206 #define PCI_DEVICE_ID_NX2_57800         CHIP_NUM_57800
207 #endif
208 #ifndef PCI_DEVICE_ID_NX2_57800_MF
209 #define PCI_DEVICE_ID_NX2_57800_MF      CHIP_NUM_57800_MF
210 #endif
211 #ifndef PCI_DEVICE_ID_NX2_57800_VF
212 #define PCI_DEVICE_ID_NX2_57800_VF      CHIP_NUM_57800_VF
213 #endif
214 #ifndef PCI_DEVICE_ID_NX2_57810
215 #define PCI_DEVICE_ID_NX2_57810         CHIP_NUM_57810
216 #endif
217 #ifndef PCI_DEVICE_ID_NX2_57810_MF
218 #define PCI_DEVICE_ID_NX2_57810_MF      CHIP_NUM_57810_MF
219 #endif
220 #ifndef PCI_DEVICE_ID_NX2_57840_O
221 #define PCI_DEVICE_ID_NX2_57840_O       CHIP_NUM_57840_OBSOLETE
222 #endif
223 #ifndef PCI_DEVICE_ID_NX2_57810_VF
224 #define PCI_DEVICE_ID_NX2_57810_VF      CHIP_NUM_57810_VF
225 #endif
226 #ifndef PCI_DEVICE_ID_NX2_57840_4_10
227 #define PCI_DEVICE_ID_NX2_57840_4_10    CHIP_NUM_57840_4_10
228 #endif
229 #ifndef PCI_DEVICE_ID_NX2_57840_2_20
230 #define PCI_DEVICE_ID_NX2_57840_2_20    CHIP_NUM_57840_2_20
231 #endif
232 #ifndef PCI_DEVICE_ID_NX2_57840_MFO
233 #define PCI_DEVICE_ID_NX2_57840_MFO     CHIP_NUM_57840_MF_OBSOLETE
234 #endif
235 #ifndef PCI_DEVICE_ID_NX2_57840_MF
236 #define PCI_DEVICE_ID_NX2_57840_MF      CHIP_NUM_57840_MF
237 #endif
238 #ifndef PCI_DEVICE_ID_NX2_57840_VF
239 #define PCI_DEVICE_ID_NX2_57840_VF      CHIP_NUM_57840_VF
240 #endif
241 #ifndef PCI_DEVICE_ID_NX2_57811
242 #define PCI_DEVICE_ID_NX2_57811         CHIP_NUM_57811
243 #endif
244 #ifndef PCI_DEVICE_ID_NX2_57811_MF
245 #define PCI_DEVICE_ID_NX2_57811_MF      CHIP_NUM_57811_MF
246 #endif
247 #ifndef PCI_DEVICE_ID_NX2_57811_VF
248 #define PCI_DEVICE_ID_NX2_57811_VF      CHIP_NUM_57811_VF
249 #endif
250 
251 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
252         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
253         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
254         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
255         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
256         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
257         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
258         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
259         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
260         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
261         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
262         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
263         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
264         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
265         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
266         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
267         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
268         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
269         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
270         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
271         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
272         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
273         { 0 }
274 };
275 
276 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
277 
278 /* Global resources for unloading a previously loaded device */
279 #define BNX2X_PREV_WAIT_NEEDED 1
280 static DEFINE_SEMAPHORE(bnx2x_prev_sem);
281 static LIST_HEAD(bnx2x_prev_list);
282 
283 /* Forward declaration */
284 static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
285 static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
286 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
287 
288 /****************************************************************************
289 * General service functions
290 ****************************************************************************/
291 
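/* Write a 64-bit DMA address into two consecutive 32-bit registers,
 * low dword first.
 */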
292 static void __storm_memset_dma_mapping(struct bnx2x *bp,
293                                        u32 addr, dma_addr_t mapping)
294 {
 295         REG_WR(bp, addr, U64_LO(mapping));
 296         REG_WR(bp, addr + 4, U64_HI(mapping));
297 }
298 
299 static void storm_memset_spq_addr(struct bnx2x *bp,
300                                   dma_addr_t mapping, u16 abs_fid)
301 {
302         u32 addr = XSEM_REG_FAST_MEMORY +
303                         XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
304 
305         __storm_memset_dma_mapping(bp, addr, mapping);
306 }
307 
308 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
309                                   u16 pf_id)
310 {
311         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
312                 pf_id);
313         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
314                 pf_id);
315         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
316                 pf_id);
317         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
318                 pf_id);
319 }
320 
321 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
322                                  u8 enable)
323 {
324         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
325                 enable);
326         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
327                 enable);
328         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
329                 enable);
330         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
331                 enable);
332 }
333 
334 static void storm_memset_eq_data(struct bnx2x *bp,
335                                  struct event_ring_data *eq_data,
336                                 u16 pfid)
337 {
338         size_t size = sizeof(struct event_ring_data);
339 
340         u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
341 
342         __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
343 }
344 
345 static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
346                                  u16 pfid)
347 {
348         u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
349         REG_WR16(bp, addr, eq_prod);
350 }
351 
 352 /* used only at init time;
 353  * locking is handled by the MCP
 354  */
355 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
356 {
357         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
358         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
359         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
360                                PCICFG_VENDOR_ID_OFFSET);
361 }
362 
363 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
364 {
365         u32 val;
366 
367         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
368         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
369         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
370                                PCICFG_VENDOR_ID_OFFSET);
371 
372         return val;
373 }
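
/* Illustrative sketch (not in the driver): a read-modify-write through the
 * indirect window, composed from the two helpers above. The helper name is
 * hypothetical; the block is compiled out on purpose.
 */
#if 0
static void bnx2x_reg_ind_set_bits(struct bnx2x *bp, u32 addr, u32 bits)
{
        u32 val = bnx2x_reg_rd_ind(bp, addr);

        bnx2x_reg_wr_ind(bp, addr, val | bits);
}
#endif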
374 
375 #define DMAE_DP_SRC_GRC         "grc src_addr [%08x]"
376 #define DMAE_DP_SRC_PCI         "pci src_addr [%x:%08x]"
377 #define DMAE_DP_DST_GRC         "grc dst_addr [%08x]"
378 #define DMAE_DP_DST_PCI         "pci dst_addr [%x:%08x]"
379 #define DMAE_DP_DST_NONE        "dst_addr [none]"
380 
381 static void bnx2x_dp_dmae(struct bnx2x *bp,
382                           struct dmae_command *dmae, int msglvl)
383 {
384         u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
385         int i;
386 
387         switch (dmae->opcode & DMAE_COMMAND_DST) {
388         case DMAE_CMD_DST_PCI:
389                 if (src_type == DMAE_CMD_SRC_PCI)
390                         DP(msglvl, "DMAE: opcode 0x%08x\n"
391                            "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
392                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
393                            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
394                            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
395                            dmae->comp_addr_hi, dmae->comp_addr_lo,
396                            dmae->comp_val);
397                 else
398                         DP(msglvl, "DMAE: opcode 0x%08x\n"
399                            "src [%08x], len [%d*4], dst [%x:%08x]\n"
400                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
401                            dmae->opcode, dmae->src_addr_lo >> 2,
402                            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
403                            dmae->comp_addr_hi, dmae->comp_addr_lo,
404                            dmae->comp_val);
405                 break;
406         case DMAE_CMD_DST_GRC:
407                 if (src_type == DMAE_CMD_SRC_PCI)
408                         DP(msglvl, "DMAE: opcode 0x%08x\n"
409                            "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
410                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
411                            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
412                            dmae->len, dmae->dst_addr_lo >> 2,
413                            dmae->comp_addr_hi, dmae->comp_addr_lo,
414                            dmae->comp_val);
415                 else
416                         DP(msglvl, "DMAE: opcode 0x%08x\n"
417                            "src [%08x], len [%d*4], dst [%08x]\n"
418                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
419                            dmae->opcode, dmae->src_addr_lo >> 2,
420                            dmae->len, dmae->dst_addr_lo >> 2,
421                            dmae->comp_addr_hi, dmae->comp_addr_lo,
422                            dmae->comp_val);
423                 break;
424         default:
425                 if (src_type == DMAE_CMD_SRC_PCI)
426                         DP(msglvl, "DMAE: opcode 0x%08x\n"
427                            "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
428                            "comp_addr [%x:%08x]  comp_val 0x%08x\n",
429                            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
430                            dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
431                            dmae->comp_val);
432                 else
433                         DP(msglvl, "DMAE: opcode 0x%08x\n"
434                            "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
435                            "comp_addr [%x:%08x]  comp_val 0x%08x\n",
436                            dmae->opcode, dmae->src_addr_lo >> 2,
437                            dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
438                            dmae->comp_val);
439                 break;
440         }
441 
442         for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
443                 DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
444                    i, *(((u32 *)dmae) + i));
445 }
446 
447 /* copy command into DMAE command memory and set DMAE command go */
448 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
449 {
450         u32 cmd_offset;
451         int i;
452 
453         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
454         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
455                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
456         }
457         REG_WR(bp, dmae_reg_go_c[idx], 1);
458 }
459 
460 u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
461 {
462         return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
463                            DMAE_CMD_C_ENABLE);
464 }
465 
466 u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
467 {
468         return opcode & ~DMAE_CMD_SRC_RESET;
469 }
470 
471 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
472                              bool with_comp, u8 comp_type)
473 {
474         u32 opcode = 0;
475 
476         opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
477                    (dst_type << DMAE_COMMAND_DST_SHIFT));
478 
479         opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
480 
481         opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
482         opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
483                    (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
484         opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
485 
486 #ifdef __BIG_ENDIAN
487         opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
488 #else
489         opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
490 #endif
491         if (with_comp)
492                 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
493         return opcode;
494 }
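
/* Example: bnx2x_prep_dmae_with_comp() below requests a PCI-side completion
 * by calling
 *      bnx2x_dmae_opcode(bp, src_type, dst_type, true, DMAE_COMP_PCI);
 */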
495 
496 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
497                                       struct dmae_command *dmae,
498                                       u8 src_type, u8 dst_type)
499 {
500         memset(dmae, 0, sizeof(struct dmae_command));
501 
502         /* set the opcode */
503         dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
504                                          true, DMAE_COMP_PCI);
505 
506         /* fill in the completion parameters */
507         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
508         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
509         dmae->comp_val = DMAE_COMP_VAL;
510 }
511 
512 /* issue a dmae command over the init-channel and wait for completion */
513 int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
514                                u32 *comp)
515 {
516         int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
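        /* together with the 50 usec poll below this allows ~200 ms on ASIC
         * (and ~20 seconds on slow emulation/FPGA)
         */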
517         int rc = 0;
518 
519         bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);
520 
 521         /* Lock the DMAE channel. Disable BHs to prevent a deadlock,
 522          * since this code is called both from syscall context and
 523          * from the ndo_set_rx_mode() flow, which may run in BH context.
 524          */
525         spin_lock_bh(&bp->dmae_lock);
526 
527         /* reset completion */
528         *comp = 0;
529 
530         /* post the command on the channel used for initializations */
531         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
532 
533         /* wait for completion */
534         udelay(5);
535         while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
536 
537                 if (!cnt ||
538                     (bp->recovery_state != BNX2X_RECOVERY_DONE &&
539                      bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
540                         BNX2X_ERR("DMAE timeout!\n");
541                         rc = DMAE_TIMEOUT;
542                         goto unlock;
543                 }
544                 cnt--;
545                 udelay(50);
546         }
547         if (*comp & DMAE_PCI_ERR_FLAG) {
548                 BNX2X_ERR("DMAE PCI error!\n");
549                 rc = DMAE_PCI_ERROR;
550         }
551 
552 unlock:
553         spin_unlock_bh(&bp->dmae_lock);
554         return rc;
555 }
556 
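/* DMA len32 dwords from host memory at dma_addr to the GRC byte address
 * dst_addr (the DMAE takes dword addresses, hence the >> 2 below). Before
 * DMAE is ready, falls back to plain register writes of the wb_data
 * scratch buffer.
 */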
557 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
558                       u32 len32)
559 {
560         int rc;
561         struct dmae_command dmae;
562 
563         if (!bp->dmae_ready) {
564                 u32 *data = bnx2x_sp(bp, wb_data[0]);
565 
566                 if (CHIP_IS_E1(bp))
567                         bnx2x_init_ind_wr(bp, dst_addr, data, len32);
568                 else
569                         bnx2x_init_str_wr(bp, dst_addr, data, len32);
570                 return;
571         }
572 
573         /* set opcode and fixed command fields */
574         bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
575 
576         /* fill in addresses and len */
577         dmae.src_addr_lo = U64_LO(dma_addr);
578         dmae.src_addr_hi = U64_HI(dma_addr);
579         dmae.dst_addr_lo = dst_addr >> 2;
580         dmae.dst_addr_hi = 0;
581         dmae.len = len32;
582 
583         /* issue the command and wait for completion */
584         rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
585         if (rc) {
586                 BNX2X_ERR("DMAE returned failure %d\n", rc);
587 #ifdef BNX2X_STOP_ON_ERROR
588                 bnx2x_panic();
589 #endif
590         }
591 }
592 
593 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
594 {
595         int rc;
596         struct dmae_command dmae;
597 
598         if (!bp->dmae_ready) {
599                 u32 *data = bnx2x_sp(bp, wb_data[0]);
600                 int i;
601 
602                 if (CHIP_IS_E1(bp))
603                         for (i = 0; i < len32; i++)
604                                 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
605                 else
606                         for (i = 0; i < len32; i++)
607                                 data[i] = REG_RD(bp, src_addr + i*4);
608 
609                 return;
610         }
611 
612         /* set opcode and fixed command fields */
613         bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
614 
615         /* fill in addresses and len */
616         dmae.src_addr_lo = src_addr >> 2;
617         dmae.src_addr_hi = 0;
618         dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
619         dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
620         dmae.len = len32;
621 
622         /* issue the command and wait for completion */
623         rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
624         if (rc) {
625                 BNX2X_ERR("DMAE returned failure %d\n", rc);
626 #ifdef BNX2X_STOP_ON_ERROR
627                 bnx2x_panic();
628 #endif
629         }
630 }
631 
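/* Like bnx2x_write_dmae(), but splits transfers larger than the DMAE
 * per-command maximum; len is in dwords, so the byte offset advances by
 * dmae_wr_max * 4 per chunk.
 */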
632 static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
633                                       u32 addr, u32 len)
634 {
635         int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
636         int offset = 0;
637 
638         while (len > dmae_wr_max) {
639                 bnx2x_write_dmae(bp, phys_addr + offset,
640                                  addr + offset, dmae_wr_max);
641                 offset += dmae_wr_max * 4;
642                 len -= dmae_wr_max;
643         }
644 
645         bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
646 }
647 
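/* Scan the assert lists of the four storm processors (X/T/C/U) and dump any
 * valid entries; returns the number of asserts found.
 */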
648 static int bnx2x_mc_assert(struct bnx2x *bp)
649 {
650         char last_idx;
651         int i, rc = 0;
652         u32 row0, row1, row2, row3;
653 
654         /* XSTORM */
655         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
656                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
657         if (last_idx)
658                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
659 
660         /* print the asserts */
661         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
662 
663                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
664                               XSTORM_ASSERT_LIST_OFFSET(i));
665                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
666                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
667                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
668                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
669                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
670                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
671 
672                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
673                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
674                                   i, row3, row2, row1, row0);
675                         rc++;
676                 } else {
677                         break;
678                 }
679         }
680 
681         /* TSTORM */
682         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
683                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
684         if (last_idx)
685                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
686 
687         /* print the asserts */
688         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
689 
690                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
691                               TSTORM_ASSERT_LIST_OFFSET(i));
692                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
693                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
694                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
695                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
696                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
697                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
698 
699                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
700                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
701                                   i, row3, row2, row1, row0);
702                         rc++;
703                 } else {
704                         break;
705                 }
706         }
707 
708         /* CSTORM */
709         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
710                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
711         if (last_idx)
712                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
713 
714         /* print the asserts */
715         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
716 
717                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
718                               CSTORM_ASSERT_LIST_OFFSET(i));
719                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
720                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
721                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
722                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
723                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
724                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
725 
726                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
727                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
728                                   i, row3, row2, row1, row0);
729                         rc++;
730                 } else {
731                         break;
732                 }
733         }
734 
735         /* USTORM */
736         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
737                            USTORM_ASSERT_LIST_INDEX_OFFSET);
738         if (last_idx)
739                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
740 
741         /* print the asserts */
742         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
743 
744                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
745                               USTORM_ASSERT_LIST_OFFSET(i));
746                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
747                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
748                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
749                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
750                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
751                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
752 
753                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
754                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
755                                   i, row3, row2, row1, row0);
756                         rc++;
757                 } else {
758                         break;
759                 }
760         }
761 
762         return rc;
763 }
764 
765 #define MCPR_TRACE_BUFFER_SIZE  (0x800)
766 #define SCRATCH_BUFFER_SIZE(bp) \
767         (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
768 
769 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
770 {
771         u32 addr, val;
772         u32 mark, offset;
773         __be32 data[9];
774         int word;
775         u32 trace_shmem_base;
776         if (BP_NOMCP(bp)) {
777                 BNX2X_ERR("NO MCP - can not dump\n");
778                 return;
779         }
780         netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
781                 (bp->common.bc_ver & 0xff0000) >> 16,
782                 (bp->common.bc_ver & 0xff00) >> 8,
783                 (bp->common.bc_ver & 0xff));
784 
785         val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
786         if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
787                 BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
788 
789         if (BP_PATH(bp) == 0)
790                 trace_shmem_base = bp->common.shmem_base;
791         else
792                 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
793 
794         /* sanity */
795         if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
796             trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
797                                 SCRATCH_BUFFER_SIZE(bp)) {
798                 BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
799                           trace_shmem_base);
800                 return;
801         }
802 
803         addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
804 
805         /* validate TRCB signature */
806         mark = REG_RD(bp, addr);
807         if (mark != MFW_TRACE_SIGNATURE) {
 808                 BNX2X_ERR("Trace buffer signature is missing\n");
 809                 return;
810         }
811 
812         /* read cyclic buffer pointer */
813         addr += 4;
814         mark = REG_RD(bp, addr);
815         mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
816         if (mark >= trace_shmem_base || mark < addr + 4) {
817                 BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
818                 return;
819         }
820         printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
821 
822         printk("%s", lvl);
823 
824         /* dump buffer after the mark */
825         for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
826                 for (word = 0; word < 8; word++)
827                         data[word] = htonl(REG_RD(bp, offset + 4*word));
828                 data[8] = 0x0;
829                 pr_cont("%s", (char *)data);
830         }
831 
832         /* dump buffer before the mark */
833         for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
834                 for (word = 0; word < 8; word++)
835                         data[word] = htonl(REG_RD(bp, offset + 4*word));
836                 data[8] = 0x0;
837                 pr_cont("%s", (char *)data);
838         }
839         printk("%s" "end of fw dump\n", lvl);
840 }
841 
842 static void bnx2x_fw_dump(struct bnx2x *bp)
843 {
844         bnx2x_fw_dump_lvl(bp, KERN_ERR);
845 }
846 
847 static void bnx2x_hc_int_disable(struct bnx2x *bp)
848 {
849         int port = BP_PORT(bp);
850         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
851         u32 val = REG_RD(bp, addr);
852 
 853         /* On E1 the MSI/MSI-X capability must be disabled through PCI
 854          * configuration space only; it is forbidden to clear
 855          * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
 856          */
 857         if (CHIP_IS_E1(bp)) {
 858                 /* Since IGU_PF_CONF_MSI_MSIX_EN is left always on, use the
 859                  * mask register to prevent the HC from sending interrupts
 860                  * after we exit this function.
 861                  */
862                 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
863 
864                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
865                          HC_CONFIG_0_REG_INT_LINE_EN_0 |
866                          HC_CONFIG_0_REG_ATTN_BIT_EN_0);
867         } else
868                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
869                          HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
870                          HC_CONFIG_0_REG_INT_LINE_EN_0 |
871                          HC_CONFIG_0_REG_ATTN_BIT_EN_0);
872 
873         DP(NETIF_MSG_IFDOWN,
874            "write %x to HC %d (addr 0x%x)\n",
875            val, port, addr);
876 
877         /* flush all outstanding writes */
878         mmiowb();
879 
880         REG_WR(bp, addr, val);
881         if (REG_RD(bp, addr) != val)
882                 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
883 }
884 
885 static void bnx2x_igu_int_disable(struct bnx2x *bp)
886 {
887         u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
888 
889         val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
890                  IGU_PF_CONF_INT_LINE_EN |
891                  IGU_PF_CONF_ATTN_BIT_EN);
892 
893         DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
894 
895         /* flush all outstanding writes */
896         mmiowb();
897 
898         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
899         if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
900                 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
901 }
902 
903 static void bnx2x_int_disable(struct bnx2x *bp)
904 {
905         if (bp->common.int_block == INT_BLOCK_HC)
906                 bnx2x_hc_int_disable(bp);
907         else
908                 bnx2x_igu_int_disable(bp);
909 }
910 
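/* Dump driver and firmware state for post-mortem analysis: status blocks and
 * per-queue indices, plus the rings themselves when BNX2X_STOP_ON_ERROR is
 * set; ends with the MCP trace (bnx2x_fw_dump) and storm asserts.
 */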
911 void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
912 {
913         int i;
914         u16 j;
915         struct hc_sp_status_block_data sp_sb_data;
916         int func = BP_FUNC(bp);
917 #ifdef BNX2X_STOP_ON_ERROR
918         u16 start = 0, end = 0;
919         u8 cos;
920 #endif
921         if (disable_int)
922                 bnx2x_int_disable(bp);
923 
924         bp->stats_state = STATS_STATE_DISABLED;
925         bp->eth_stats.unrecoverable_error++;
926         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
927 
928         BNX2X_ERR("begin crash dump -----------------\n");
929 
930         /* Indices */
931         /* Common */
932         BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
933                   bp->def_idx, bp->def_att_idx, bp->attn_state,
934                   bp->spq_prod_idx, bp->stats_counter);
935         BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
936                   bp->def_status_blk->atten_status_block.attn_bits,
937                   bp->def_status_blk->atten_status_block.attn_bits_ack,
938                   bp->def_status_blk->atten_status_block.status_block_id,
939                   bp->def_status_blk->atten_status_block.attn_bits_index);
940         BNX2X_ERR("     def (");
941         for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
942                 pr_cont("0x%x%s",
943                         bp->def_status_blk->sp_sb.index_values[i],
944                         (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");
945 
946         for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
947                 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
948                         CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
949                         i*sizeof(u32));
950 
951         pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
952                sp_sb_data.igu_sb_id,
953                sp_sb_data.igu_seg_id,
954                sp_sb_data.p_func.pf_id,
955                sp_sb_data.p_func.vnic_id,
956                sp_sb_data.p_func.vf_id,
957                sp_sb_data.p_func.vf_valid,
958                sp_sb_data.state);
959 
960         for_each_eth_queue(bp, i) {
961                 struct bnx2x_fastpath *fp = &bp->fp[i];
962                 int loop;
963                 struct hc_status_block_data_e2 sb_data_e2;
964                 struct hc_status_block_data_e1x sb_data_e1x;
965                 struct hc_status_block_sm  *hc_sm_p =
966                         CHIP_IS_E1x(bp) ?
967                         sb_data_e1x.common.state_machine :
968                         sb_data_e2.common.state_machine;
969                 struct hc_index_data *hc_index_p =
970                         CHIP_IS_E1x(bp) ?
971                         sb_data_e1x.index_data :
972                         sb_data_e2.index_data;
973                 u8 data_size, cos;
974                 u32 *sb_data_p;
975                 struct bnx2x_fp_txdata txdata;
976 
977                 /* Rx */
978                 BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)  rx_comp_prod(0x%x)  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
979                           i, fp->rx_bd_prod, fp->rx_bd_cons,
980                           fp->rx_comp_prod,
981                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
982                 BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)  fp_hc_idx(0x%x)\n",
983                           fp->rx_sge_prod, fp->last_max_sge,
984                           le16_to_cpu(fp->fp_hc_idx));
985 
986                 /* Tx */
 987                 for_each_cos_in_tx_queue(fp, cos) {
989                         txdata = *fp->txdata_ptr[cos];
990                         BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)  *tx_cons_sb(0x%x)\n",
991                                   i, txdata.tx_pkt_prod,
992                                   txdata.tx_pkt_cons, txdata.tx_bd_prod,
993                                   txdata.tx_bd_cons,
994                                   le16_to_cpu(*txdata.tx_cons_sb));
995                 }
996 
997                 loop = CHIP_IS_E1x(bp) ?
998                         HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
999 
1000                 /* host sb data */
1001 
1002                 if (IS_FCOE_FP(fp))
1003                         continue;
1004 
1005                 BNX2X_ERR("     run indexes (");
1006                 for (j = 0; j < HC_SB_MAX_SM; j++)
1007                         pr_cont("0x%x%s",
1008                                fp->sb_running_index[j],
1009                                (j == HC_SB_MAX_SM - 1) ? ")" : " ");
1010 
1011                 BNX2X_ERR("     indexes (");
1012                 for (j = 0; j < loop; j++)
1013                         pr_cont("0x%x%s",
1014                                fp->sb_index_values[j],
1015                                (j == loop - 1) ? ")" : " ");
1016                 /* fw sb data */
1017                 data_size = CHIP_IS_E1x(bp) ?
1018                         sizeof(struct hc_status_block_data_e1x) :
1019                         sizeof(struct hc_status_block_data_e2);
1020                 data_size /= sizeof(u32);
1021                 sb_data_p = CHIP_IS_E1x(bp) ?
1022                         (u32 *)&sb_data_e1x :
1023                         (u32 *)&sb_data_e2;
1024                 /* copy sb data in here */
1025                 for (j = 0; j < data_size; j++)
1026                         *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
1027                                 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
1028                                 j * sizeof(u32));
1029 
1030                 if (!CHIP_IS_E1x(bp)) {
1031                         pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
1032                                 sb_data_e2.common.p_func.pf_id,
1033                                 sb_data_e2.common.p_func.vf_id,
1034                                 sb_data_e2.common.p_func.vf_valid,
1035                                 sb_data_e2.common.p_func.vnic_id,
1036                                 sb_data_e2.common.same_igu_sb_1b,
1037                                 sb_data_e2.common.state);
1038                 } else {
1039                         pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
1040                                 sb_data_e1x.common.p_func.pf_id,
1041                                 sb_data_e1x.common.p_func.vf_id,
1042                                 sb_data_e1x.common.p_func.vf_valid,
1043                                 sb_data_e1x.common.p_func.vnic_id,
1044                                 sb_data_e1x.common.same_igu_sb_1b,
1045                                 sb_data_e1x.common.state);
1046                 }
1047 
1048                 /* SB_SMs data */
1049                 for (j = 0; j < HC_SB_MAX_SM; j++) {
1050                         pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x)  igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
1051                                 j, hc_sm_p[j].__flags,
1052                                 hc_sm_p[j].igu_sb_id,
1053                                 hc_sm_p[j].igu_seg_id,
1054                                 hc_sm_p[j].time_to_expire,
1055                                 hc_sm_p[j].timer_value);
1056                 }
1057 
1058                 /* Indices data */
1059                 for (j = 0; j < loop; j++) {
1060                         pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
1061                                hc_index_p[j].flags,
1062                                hc_index_p[j].timeout);
1063                 }
1064         }
1065 
1066 #ifdef BNX2X_STOP_ON_ERROR
1067 
1068         /* event queue */
1069         BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
1070         for (i = 0; i < NUM_EQ_DESC; i++) {
1071                 u32 *data = (u32 *)&bp->eq_ring[i].message.data;
1072 
1073                 BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
1074                           i, bp->eq_ring[i].message.opcode,
1075                           bp->eq_ring[i].message.error);
1076                 BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
1077         }
1078 
1079         /* Rings */
1080         /* Rx */
1081         for_each_valid_rx_queue(bp, i) {
1082                 struct bnx2x_fastpath *fp = &bp->fp[i];
1083 
1084                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1085                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
1086                 for (j = start; j != end; j = RX_BD(j + 1)) {
1087                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1088                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1089 
1090                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
1091                                   i, j, rx_bd[1], rx_bd[0], sw_bd->data);
1092                 }
1093 
1094                 start = RX_SGE(fp->rx_sge_prod);
1095                 end = RX_SGE(fp->last_max_sge);
1096                 for (j = start; j != end; j = RX_SGE(j + 1)) {
1097                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1098                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1099 
1100                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
1101                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
1102                 }
1103 
1104                 start = RCQ_BD(fp->rx_comp_cons - 10);
1105                 end = RCQ_BD(fp->rx_comp_cons + 503);
1106                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
1107                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1108 
1109                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1110                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
1111                 }
1112         }
1113 
1114         /* Tx */
1115         for_each_valid_tx_queue(bp, i) {
1116                 struct bnx2x_fastpath *fp = &bp->fp[i];
1117                 for_each_cos_in_tx_queue(fp, cos) {
1118                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1119 
1120                         start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
1121                         end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
1122                         for (j = start; j != end; j = TX_BD(j + 1)) {
1123                                 struct sw_tx_bd *sw_bd =
1124                                         &txdata->tx_buf_ring[j];
1125 
1126                                 BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
1127                                           i, cos, j, sw_bd->skb,
1128                                           sw_bd->first_bd);
1129                         }
1130 
1131                         start = TX_BD(txdata->tx_bd_cons - 10);
1132                         end = TX_BD(txdata->tx_bd_cons + 254);
1133                         for (j = start; j != end; j = TX_BD(j + 1)) {
1134                                 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
1135 
1136                                 BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
1137                                           i, cos, j, tx_bd[0], tx_bd[1],
1138                                           tx_bd[2], tx_bd[3]);
1139                         }
1140                 }
1141         }
1142 #endif
1143         bnx2x_fw_dump(bp);
1144         bnx2x_mc_assert(bp);
1145         BNX2X_ERR("end crash dump -----------------\n");
1146 }
1147 
1148 /*
1149  * FLR Support for E2
1150  *
1151  * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
1152  * initialization.
1153  */
1154 #define FLR_WAIT_USEC           10000   /* 10 milliseconds */
1155 #define FLR_WAIT_INTERVAL       50      /* usec */
1156 #define FLR_POLL_CNT            (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
1157 
1158 struct pbf_pN_buf_regs {
1159         int pN;
1160         u32 init_crd;
1161         u32 crd;
1162         u32 crd_freed;
1163 };
1164 
1165 struct pbf_pN_cmd_regs {
1166         int pN;
1167         u32 lines_occup;
1168         u32 lines_freed;
1169 };
1170 
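/* Poll until the PBF buffer for port pN has drained: done when all credits
 * are back (crd == init_crd) or when as many credits have been freed as were
 * outstanding when polling started.
 */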
1171 static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
1172                                      struct pbf_pN_buf_regs *regs,
1173                                      u32 poll_count)
1174 {
1175         u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
1176         u32 cur_cnt = poll_count;
1177 
1178         crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
1179         crd = crd_start = REG_RD(bp, regs->crd);
1180         init_crd = REG_RD(bp, regs->init_crd);
1181 
1182         DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
1183         DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
1184         DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
1185 
1186         while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
1187                (init_crd - crd_start))) {
1188                 if (cur_cnt--) {
1189                         udelay(FLR_WAIT_INTERVAL);
1190                         crd = REG_RD(bp, regs->crd);
1191                         crd_freed = REG_RD(bp, regs->crd_freed);
1192                 } else {
1193                         DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
1194                            regs->pN);
1195                         DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
1196                            regs->pN, crd);
1197                         DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
1198                            regs->pN, crd_freed);
1199                         break;
1200                 }
1201         }
1202         DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
1203            poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1204 }
1205 
1206 static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
1207                                      struct pbf_pN_cmd_regs *regs,
1208                                      u32 poll_count)
1209 {
1210         u32 occup, to_free, freed, freed_start;
1211         u32 cur_cnt = poll_count;
1212 
1213         occup = to_free = REG_RD(bp, regs->lines_occup);
1214         freed = freed_start = REG_RD(bp, regs->lines_freed);
1215 
1216         DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
1217         DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
1218 
1219         while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
1220                 if (cur_cnt--) {
1221                         udelay(FLR_WAIT_INTERVAL);
1222                         occup = REG_RD(bp, regs->lines_occup);
1223                         freed = REG_RD(bp, regs->lines_freed);
1224                 } else {
1225                         DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
1226                            regs->pN);
1227                         DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
1228                            regs->pN, occup);
1229                         DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
1230                            regs->pN, freed);
1231                         break;
1232                 }
1233         }
1234         DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
1235            poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1236 }
1237 
1238 static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1239                                     u32 expected, u32 poll_count)
1240 {
1241         u32 cur_cnt = poll_count;
1242         u32 val;
1243 
1244         while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
1245                 udelay(FLR_WAIT_INTERVAL);
1246 
1247         return val;
1248 }
1249 
1250 int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
1251                                     char *msg, u32 poll_cnt)
1252 {
1253         u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
1254         if (val != 0) {
1255                 BNX2X_ERR("%s usage count=%d\n", msg, val);
1256                 return 1;
1257         }
1258         return 0;
1259 }
1260 
1261 /* Common routines with VF FLR cleanup */
1262 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
1263 {
1264         /* adjust polling timeout */
1265         if (CHIP_REV_IS_EMUL(bp))
1266                 return FLR_POLL_CNT * 2000;
1267 
1268         if (CHIP_REV_IS_FPGA(bp))
1269                 return FLR_POLL_CNT * 120;
1270 
1271         return FLR_POLL_CNT;
1272 }
1273 
1274 void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1275 {
1276         struct pbf_pN_cmd_regs cmd_regs[] = {
1277                 {0, (CHIP_IS_E3B0(bp)) ?
1278                         PBF_REG_TQ_OCCUPANCY_Q0 :
1279                         PBF_REG_P0_TQ_OCCUPANCY,
1280                     (CHIP_IS_E3B0(bp)) ?
1281                         PBF_REG_TQ_LINES_FREED_CNT_Q0 :
1282                         PBF_REG_P0_TQ_LINES_FREED_CNT},
1283                 {1, (CHIP_IS_E3B0(bp)) ?
1284                         PBF_REG_TQ_OCCUPANCY_Q1 :
1285                         PBF_REG_P1_TQ_OCCUPANCY,
1286                     (CHIP_IS_E3B0(bp)) ?
1287                         PBF_REG_TQ_LINES_FREED_CNT_Q1 :
1288                         PBF_REG_P1_TQ_LINES_FREED_CNT},
1289                 {4, (CHIP_IS_E3B0(bp)) ?
1290                         PBF_REG_TQ_OCCUPANCY_LB_Q :
1291                         PBF_REG_P4_TQ_OCCUPANCY,
1292                     (CHIP_IS_E3B0(bp)) ?
1293                         PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
1294                         PBF_REG_P4_TQ_LINES_FREED_CNT}
1295         };
1296 
1297         struct pbf_pN_buf_regs buf_regs[] = {
1298                 {0, (CHIP_IS_E3B0(bp)) ?
1299                         PBF_REG_INIT_CRD_Q0 :
1300                         PBF_REG_P0_INIT_CRD,
1301                     (CHIP_IS_E3B0(bp)) ?
1302                         PBF_REG_CREDIT_Q0 :
1303                         PBF_REG_P0_CREDIT,
1304                     (CHIP_IS_E3B0(bp)) ?
1305                         PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
1306                         PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
1307                 {1, (CHIP_IS_E3B0(bp)) ?
1308                         PBF_REG_INIT_CRD_Q1 :
1309                         PBF_REG_P1_INIT_CRD,
1310                     (CHIP_IS_E3B0(bp)) ?
1311                         PBF_REG_CREDIT_Q1 :
1312                         PBF_REG_P1_CREDIT,
1313                     (CHIP_IS_E3B0(bp)) ?
1314                         PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
1315                         PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
1316                 {4, (CHIP_IS_E3B0(bp)) ?
1317                         PBF_REG_INIT_CRD_LB_Q :
1318                         PBF_REG_P4_INIT_CRD,
1319                     (CHIP_IS_E3B0(bp)) ?
1320                         PBF_REG_CREDIT_LB_Q :
1321                         PBF_REG_P4_CREDIT,
1322                     (CHIP_IS_E3B0(bp)) ?
1323                         PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
1324                         PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
1325         };
1326 
1327         int i;
1328 
1329         /* Verify the command queues are flushed P0, P1, P4 */
1330         for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
1331                 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1332 
1333         /* Verify the transmission buffers are flushed P0, P1, P4 */
1334         for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
1335                 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
1336 }
1337 
1338 #define OP_GEN_PARAM(param) \
1339         (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
1340 
1341 #define OP_GEN_TYPE(type) \
1342         (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
1343 
1344 #define OP_GEN_AGG_VECT(index) \
1345         (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
1346 
1347 int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
1348 {
1349         u32 op_gen_command = 0;
1350         u32 comp_addr = BAR_CSTRORM_INTMEM +
1351                         CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
1352         int ret = 0;
1353 
1354         if (REG_RD(bp, comp_addr)) {
1355                 BNX2X_ERR("Cleanup complete was not 0 before sending\n");
1356                 return 1;
1357         }
1358 
1359         op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
1360         op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
1361         op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
1362         op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
1363 
1364         DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
1365         REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
1366 
1367         if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1368                 BNX2X_ERR("FW final cleanup did not succeed\n");
1369                 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
1370                    (REG_RD(bp, comp_addr)));
1371                 bnx2x_panic();
1372                 return 1;
1373         }
1374         /* Zero completion for next FLR */
1375         REG_WR(bp, comp_addr, 0);
1376 
1377         return ret;
1378 }
1379 
1380 u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1381 {
1382         u16 status;
1383 
1384         pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
1385         return status & PCI_EXP_DEVSTA_TRPND;
1386 }
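
/* Illustrative note: PCI_EXP_DEVSTA_TRPND is the standard "Transactions
 * Pending" bit of the PCIe Device Status register, so a non-zero return
 * means the device still has outstanding non-posted requests;
 * bnx2x_pf_flr_clnup() below only warns in that case instead of failing
 * the cleanup.
 */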
1387 
1388 /* PF FLR specific routines
1389  */
1390 static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1391 {
1392         /* wait for CFC PF usage-counter to zero (includes all the VFs) */
1393         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1394                         CFC_REG_NUM_LCIDS_INSIDE_PF,
1395                         "CFC PF usage counter timed out",
1396                         poll_cnt))
1397                 return 1;
1398 
1399         /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
1400         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1401                         DORQ_REG_PF_USAGE_CNT,
1402                         "DQ PF usage counter timed out",
1403                         poll_cnt))
1404                 return 1;
1405 
1406         /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
1407         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1408                         QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
1409                         "QM PF usage counter timed out",
1410                         poll_cnt))
1411                 return 1;
1412 
1413         /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
1414         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1415                         TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
1416                         "Timers VNIC usage counter timed out",
1417                         poll_cnt))
1418                 return 1;
1419         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1420                         TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
1421                         "Timers NUM_SCANS usage counter timed out",
1422                         poll_cnt))
1423                 return 1;
1424 
1425         /* Wait for DMAE PF usage counter to zero */
1426         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1427                         dmae_reg_go_c[INIT_DMAE_C(bp)],
1428                         "DMAE command register timed out",
1429                         poll_cnt))
1430                 return 1;
1431 
1432         return 0;
1433 }
1434 
1435 static void bnx2x_hw_enable_status(struct bnx2x *bp)
1436 {
1437         u32 val;
1438 
1439         val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
1440         DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
1441 
1442         val = REG_RD(bp, PBF_REG_DISABLE_PF);
1443         DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
1444 
1445         val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
1446         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
1447 
1448         val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
1449         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
1450 
1451         val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
1452         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
1453 
1454         val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
1455         DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
1456 
1457         val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
1458         DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
1459 
1460         val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1461         DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
1462            val);
1463 }
1464 
1465 static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1466 {
1467         u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1468 
1469         DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
1470 
1471         /* Re-enable PF target read access */
1472         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1473 
1474         /* Poll HW usage counters */
1475         DP(BNX2X_MSG_SP, "Polling usage counters\n");
1476         if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1477                 return -EBUSY;
1478 
1479         /* Zero the igu 'trailing edge' and 'leading edge' */
1480 
1481         /* Send the FW cleanup command */
1482         if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
1483                 return -EBUSY;
1484 
1485         /* ATC cleanup */
1486 
1487         /* Verify TX hw is flushed */
1488         bnx2x_tx_hw_flushed(bp, poll_cnt);
1489 
1490         /* Wait 100ms (not adjusted according to platform) */
1491         msleep(100);
1492 
1493         /* Verify no pending pci transactions */
1494         if (bnx2x_is_pcie_pending(bp->pdev))
1495                 BNX2X_ERR("PCIE Transactions still pending\n");
1496 
1497         /* Debug */
1498         bnx2x_hw_enable_status(bp);
1499 
1500         /*
1501          * Master enable - Due to WB DMAE writes performed before this
1502          * register is re-initialized as part of the regular function init
1503          */
1504         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
1505 
1506         return 0;
1507 }
1508 
1509 static void bnx2x_hc_int_enable(struct bnx2x *bp)
1510 {
1511         int port = BP_PORT(bp);
1512         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1513         u32 val = REG_RD(bp, addr);
1514         bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1515         bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1516         bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1517 
1518         if (msix) {
1519                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1520                          HC_CONFIG_0_REG_INT_LINE_EN_0);
1521                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1522                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1523                 if (single_msix)
1524                         val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
1525         } else if (msi) {
1526                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1527                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1528                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1529                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1530         } else {
1531                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1532                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1533                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
1534                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1535 
1536                 if (!CHIP_IS_E1(bp)) {
1537                         DP(NETIF_MSG_IFUP,
1538                            "write %x to HC %d (addr 0x%x)\n", val, port, addr);
1539 
1540                         REG_WR(bp, addr, val);
1541 
1542                         val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1543                 }
1544         }
1545 
1546         if (CHIP_IS_E1(bp))
1547                 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1548 
1549         DP(NETIF_MSG_IFUP,
1550            "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
1551            (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1552 
1553         REG_WR(bp, addr, val);
1554         /*
1555          * Ensure that HC_CONFIG is written before leading/trailing edge config
1556          */
1557         mmiowb();
1558         barrier();
1559 
1560         if (!CHIP_IS_E1(bp)) {
1561                 /* init leading/trailing edge */
1562                 if (IS_MF(bp)) {
1563                         val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1564                         if (bp->port.pmf)
1565                                 /* enable nig and gpio3 attention */
1566                                 val |= 0x1100;
1567                 } else
1568                         val = 0xffff;
1569 
1570                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1571                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1572         }
1573 
1574         /* Make sure that interrupts are indeed enabled from here on */
1575         mmiowb();
1576 }
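
/* Worked example (illustrative): in MF mode the edge value above is
 * 0xee0f plus bit (4 + vn), e.g. vn 1 gives 0xee0f | 0x20 = 0xee2f, and
 * a PMF also sets 0x1100 for the NIG and GPIO3 attentions; non-MF mode
 * simply unmasks all bits (0xffff). The IGU variant below computes the
 * same value.
 */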
1577 
1578 static void bnx2x_igu_int_enable(struct bnx2x *bp)
1579 {
1580         u32 val;
1581         bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1582         bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1583         bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1584 
1585         val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1586 
1587         if (msix) {
1588                 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1589                          IGU_PF_CONF_SINGLE_ISR_EN);
1590                 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1591                         IGU_PF_CONF_ATTN_BIT_EN);
1592 
1593                 if (single_msix)
1594                         val |= IGU_PF_CONF_SINGLE_ISR_EN;
1595         } else if (msi) {
1596                 val &= ~IGU_PF_CONF_INT_LINE_EN;
1597                 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1598                         IGU_PF_CONF_ATTN_BIT_EN |
1599                         IGU_PF_CONF_SINGLE_ISR_EN);
1600         } else {
1601                 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1602                 val |= (IGU_PF_CONF_INT_LINE_EN |
1603                         IGU_PF_CONF_ATTN_BIT_EN |
1604                         IGU_PF_CONF_SINGLE_ISR_EN);
1605         }
1606 
1607         /* Clean previous status - need to configure igu prior to ack */
1608         if ((!msix) || single_msix) {
1609                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1610                 bnx2x_ack_int(bp);
1611         }
1612 
1613         val |= IGU_PF_CONF_FUNC_EN;
1614 
1615         DP(NETIF_MSG_IFUP, "write 0x%x to IGU  mode %s\n",
1616            val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1617 
1618         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1619 
1620         if (val & IGU_PF_CONF_INT_LINE_EN)
1621                 pci_intx(bp->pdev, true);
1622 
1623         barrier();
1624 
1625         /* init leading/trailing edge */
1626         if (IS_MF(bp)) {
1627                 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1628                 if (bp->port.pmf)
1629                         /* enable nig and gpio3 attention */
1630                         val |= 0x1100;
1631         } else
1632                 val = 0xffff;
1633 
1634         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1635         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1636 
1637         /* Make sure that interrupts are indeed enabled from here on */
1638         mmiowb();
1639 }
1640 
1641 void bnx2x_int_enable(struct bnx2x *bp)
1642 {
1643         if (bp->common.int_block == INT_BLOCK_HC)
1644                 bnx2x_hc_int_enable(bp);
1645         else
1646                 bnx2x_igu_int_enable(bp);
1647 }
1648 
1649 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1650 {
1651         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1652         int i, offset;
1653 
1654         if (disable_hw)
1655                 /* prevent the HW from sending interrupts */
1656                 bnx2x_int_disable(bp);
1657 
1658         /* make sure all ISRs are done */
1659         if (msix) {
1660                 synchronize_irq(bp->msix_table[0].vector);
1661                 offset = 1;
1662                 if (CNIC_SUPPORT(bp))
1663                         offset++;
1664                 for_each_eth_queue(bp, i)
1665                         synchronize_irq(bp->msix_table[offset++].vector);
1666         } else
1667                 synchronize_irq(bp->pdev->irq);
1668 
1669         /* make sure sp_task is not running */
1670         cancel_delayed_work(&bp->sp_task);
1671         cancel_delayed_work(&bp->period_task);
1672         flush_workqueue(bnx2x_wq);
1673 }
1674 
1675 /* fast path */
1676 
1677 /*
1678  * General service functions
1679  */
1680 
1681 /* Return true if succeeded to acquire the lock */
1682 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1683 {
1684         u32 lock_status;
1685         u32 resource_bit = (1 << resource);
1686         int func = BP_FUNC(bp);
1687         u32 hw_lock_control_reg;
1688 
1689         DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1690            "Trying to take a lock on resource %d\n", resource);
1691 
1692         /* Validating that the resource is within range */
1693         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1694                 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1695                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1696                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1697                 return false;
1698         }
1699 
1700         if (func <= 5)
1701                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1702         else
1703                 hw_lock_control_reg =
1704                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1705 
1706         /* Try to acquire the lock */
1707         REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1708         lock_status = REG_RD(bp, hw_lock_control_reg);
1709         if (lock_status & resource_bit)
1710                 return true;
1711 
1712         DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1713            "Failed to get a lock on resource %d\n", resource);
1714         return false;
1715 }
1716 
1717 /**
1718  * bnx2x_get_leader_lock_resource - get the recovery leader resource id
1719  *
1720  * @bp: driver handle
1721  *
1722  * Returns the recovery leader resource id according to the engine this function
1723  * belongs to. Currently only 2 engines are supported.
1724  */
1725 static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1726 {
1727         if (BP_PATH(bp))
1728                 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
1729         else
1730                 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
1731 }
1732 
1733 /**
1734  * bnx2x_trylock_leader_lock - try to acquire a leader lock.
1735  *
1736  * @bp: driver handle
1737  *
1738  * Tries to acquire a leader lock for current engine.
1739  */
1740 static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1741 {
1742         return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1743 }
1744 
1745 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1746 
1747 /* schedule the sp task and mark that interrupt occurred (runs from ISR) */
1748 static int bnx2x_schedule_sp_task(struct bnx2x *bp)
1749 {
1750         /* Set the interrupt occurred bit for the sp-task to recognize it
1751          * must ack the interrupt and transition according to the IGU
1752          * state machine.
1753          */
1754         atomic_set(&bp->interrupt_occurred, 1);
1755 
1756         /* The sp_task must execute only after this bit
1757          * is set, otherwise we will get out of sync and miss all
1758          * further interrupts. Hence, the barrier.
1759          */
1760         smp_wmb();
1761 
1762         /* schedule sp_task to workqueue */
1763         return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1764 }
1765 
1766 void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1767 {
1768         struct bnx2x *bp = fp->bp;
1769         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1770         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1771         enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
1772         struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
1773 
1774         DP(BNX2X_MSG_SP,
1775            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
1776            fp->index, cid, command, bp->state,
1777            rr_cqe->ramrod_cqe.ramrod_type);
1778 
1779         /* If cid is within VF range, replace the slowpath object with the
1780          * one corresponding to this VF
1781          */
1782         if (cid >= BNX2X_FIRST_VF_CID  &&
1783             cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
1784                 bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
1785 
1786         switch (command) {
1787         case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
1788                 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
1789                 drv_cmd = BNX2X_Q_CMD_UPDATE;
1790                 break;
1791 
1792         case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
1793                 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
1794                 drv_cmd = BNX2X_Q_CMD_SETUP;
1795                 break;
1796 
1797         case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
1798                 DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
1799                 drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
1800                 break;
1801 
1802         case (RAMROD_CMD_ID_ETH_HALT):
1803                 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
1804                 drv_cmd = BNX2X_Q_CMD_HALT;
1805                 break;
1806 
1807         case (RAMROD_CMD_ID_ETH_TERMINATE):
1808                 DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
1809                 drv_cmd = BNX2X_Q_CMD_TERMINATE;
1810                 break;
1811 
1812         case (RAMROD_CMD_ID_ETH_EMPTY):
1813                 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
1814                 drv_cmd = BNX2X_Q_CMD_EMPTY;
1815                 break;
1816 
1817         default:
1818                 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1819                           command, fp->index);
1820                 return;
1821         }
1822 
1823         if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
1824             q_obj->complete_cmd(bp, q_obj, drv_cmd))
1825                 /* q_obj->complete_cmd() failure means that this was
1826                  * an unexpected completion.
1827                  *
1828                  * In this case we don't want to increase the bp->spq_left
1829                  * because apparently we haven't sent this command in the
1830                  * first place.
1831                  */
1832 #ifdef BNX2X_STOP_ON_ERROR
1833                 bnx2x_panic();
1834 #else
1835                 return;
1836 #endif
1837         /* SRIOV: reschedule any 'in_progress' operations */
1838         bnx2x_iov_sp_event(bp, cid, true);
1839 
1840         smp_mb__before_atomic_inc();
1841         atomic_inc(&bp->cq_spq_left);
1842         /* push the change in bp->cq_spq_left towards memory */
1843         smp_mb__after_atomic_inc();
1844 
1845         DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1846 
1847         if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1848             (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1849                 /* if Q update ramrod is completed for last Q in AFEX vif set
1850                  * flow, then ACK MCP at the end
1851                  *
1852                  * mark pending ACK to MCP bit.
1853                  * prevent the case where both bits are cleared.
1854                  * At the end of load/unload the driver checks that
1855                  * sp_state is cleared, and this order prevents
1856                  * races.
1857                  */
1858                 smp_mb__before_clear_bit();
1859                 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1860                 wmb();
1861                 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1862                 smp_mb__after_clear_bit();
1863 
1864                 /* schedule the sp task as mcp ack is required */
1865                 bnx2x_schedule_sp_task(bp);
1866         }
1867 
1868         return;
1869 }
1870 
1871 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1872 {
1873         struct bnx2x *bp = netdev_priv(dev_instance);
1874         u16 status = bnx2x_ack_int(bp);
1875         u16 mask;
1876         int i;
1877         u8 cos;
1878 
1879         /* Return here if interrupt is shared and it's not for us */
1880         if (unlikely(status == 0)) {
1881                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1882                 return IRQ_NONE;
1883         }
1884         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1885 
1886 #ifdef BNX2X_STOP_ON_ERROR
1887         if (unlikely(bp->panic))
1888                 return IRQ_HANDLED;
1889 #endif
1890 
1891         for_each_eth_queue(bp, i) {
1892                 struct bnx2x_fastpath *fp = &bp->fp[i];
1893 
1894                 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1895                 if (status & mask) {
1896                         /* Handle Rx or Tx according to SB id */
1897                         for_each_cos_in_tx_queue(fp, cos)
1898                                 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1899                         prefetch(&fp->sb_running_index[SM_RX_ID]);
1900                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1901                         status &= ~mask;
1902                 }
1903         }
1904 
1905         if (CNIC_SUPPORT(bp)) {
1906                 mask = 0x2;
1907                 if (status & (mask | 0x1)) {
1908                         struct cnic_ops *c_ops = NULL;
1909 
1910                         rcu_read_lock();
1911                         c_ops = rcu_dereference(bp->cnic_ops);
1912                         if (c_ops && (bp->cnic_eth_dev.drv_state &
1913                                       CNIC_DRV_STATE_HANDLES_IRQ))
1914                                 c_ops->cnic_handler(bp->cnic_data, NULL);
1915                         rcu_read_unlock();
1916 
1917                         status &= ~mask;
1918                 }
1919         }
1920 
1921         if (unlikely(status & 0x1)) {
1922 
1923                 /* schedule sp task to perform default status block work, ack
1924                  * attentions and enable interrupts.
1925                  */
1926                 bnx2x_schedule_sp_task(bp);
1927 
1928                 status &= ~0x1;
1929                 if (!status)
1930                         return IRQ_HANDLED;
1931         }
1932 
1933         if (unlikely(status))
1934                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1935                    status);
1936 
1937         return IRQ_HANDLED;
1938 }
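
/* Illustrative note: the acked status word is consumed bit by bit above.
 * Bit 0 is the default/slowpath status block and fastpath queue i maps
 * to bit (1 + i + CNIC_SUPPORT(bp)) through the
 * "0x2 << (index + CNIC_SUPPORT)" mask, e.g. with CNIC compiled in,
 * queue 0 is served by mask 0x4 while CNIC itself owns mask 0x2.
 */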
1939 
1940 /* Link */
1941 
1942 /*
1943  * General service functions
1944  */
1945 
1946 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1947 {
1948         u32 lock_status;
1949         u32 resource_bit = (1 << resource);
1950         int func = BP_FUNC(bp);
1951         u32 hw_lock_control_reg;
1952         int cnt;
1953 
1954         /* Validating that the resource is within range */
1955         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1956                 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1957                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1958                 return -EINVAL;
1959         }
1960 
1961         if (func <= 5) {
1962                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1963         } else {
1964                 hw_lock_control_reg =
1965                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1966         }
1967 
1968         /* Validating that the resource is not already taken */
1969         lock_status = REG_RD(bp, hw_lock_control_reg);
1970         if (lock_status & resource_bit) {
1971                 BNX2X_ERR("lock_status 0x%x  resource_bit 0x%x\n",
1972                    lock_status, resource_bit);
1973                 return -EEXIST;
1974         }
1975 
1976         /* Try for 5 seconds, every 5ms */
1977         for (cnt = 0; cnt < 1000; cnt++) {
1978                 /* Try to acquire the lock */
1979                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1980                 lock_status = REG_RD(bp, hw_lock_control_reg);
1981                 if (lock_status & resource_bit)
1982                         return 0;
1983 
1984                 usleep_range(5000, 10000);
1985         }
1986         BNX2X_ERR("Timeout\n");
1987         return -EAGAIN;
1988 }
1989 
1990 int bnx2x_release_leader_lock(struct bnx2x *bp)
1991 {
1992         return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1993 }
1994 
1995 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1996 {
1997         u32 lock_status;
1998         u32 resource_bit = (1 << resource);
1999         int func = BP_FUNC(bp);
2000         u32 hw_lock_control_reg;
2001 
2002         /* Validating that the resource is within range */
2003         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
2004                 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
2005                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
2006                 return -EINVAL;
2007         }
2008 
2009         if (func <= 5) {
2010                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2011         } else {
2012                 hw_lock_control_reg =
2013                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2014         }
2015 
2016         /* Validating that the resource is currently taken */
2017         lock_status = REG_RD(bp, hw_lock_control_reg);
2018         if (!(lock_status & resource_bit)) {
2019                 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
2020                           lock_status, resource_bit);
2021                 return -EFAULT;
2022         }
2023 
2024         REG_WR(bp, hw_lock_control_reg, resource_bit);
2025         return 0;
2026 }
2027 
2028 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2029 {
2030         /* The GPIO should be swapped if swap register is set and active */
2031         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2032                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2033         int gpio_shift = gpio_num +
2034                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2035         u32 gpio_mask = (1 << gpio_shift);
2036         u32 gpio_reg;
2037         int value;
2038 
2039         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2040                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2041                 return -EINVAL;
2042         }
2043 
2044         /* read GPIO value */
2045         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2046 
2047         /* get the requested pin value */
2048         if ((gpio_reg & gpio_mask) == gpio_mask)
2049                 value = 1;
2050         else
2051                 value = 0;
2052 
2053         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
2054 
2055         return value;
2056 }
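
/* Illustrative note on the swap logic above: the "&&" of the two NIG
 * registers evaluates to 1 only when both the port-swap strap and its
 * override are set, and XOR-ing that 1 with the caller's port flips it,
 * e.g. port 0 then reads port 1's GPIO bits. The same expression is
 * repeated in bnx2x_set_gpio() and bnx2x_set_gpio_int() below.
 */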
2057 
2058 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2059 {
2060         /* The GPIO should be swapped if swap register is set and active */
2061         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2062                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2063         int gpio_shift = gpio_num +
2064                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2065         u32 gpio_mask = (1 << gpio_shift);
2066         u32 gpio_reg;
2067 
2068         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2069                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2070                 return -EINVAL;
2071         }
2072 
2073         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2074         /* read GPIO and mask except the float bits */
2075         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2076 
2077         switch (mode) {
2078         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2079                 DP(NETIF_MSG_LINK,
2080                    "Set GPIO %d (shift %d) -> output low\n",
2081                    gpio_num, gpio_shift);
2082                 /* clear FLOAT and set CLR */
2083                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2084                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2085                 break;
2086 
2087         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2088                 DP(NETIF_MSG_LINK,
2089                    "Set GPIO %d (shift %d) -> output high\n",
2090                    gpio_num, gpio_shift);
2091                 /* clear FLOAT and set SET */
2092                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2093                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2094                 break;
2095 
2096         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2097                 DP(NETIF_MSG_LINK,
2098                    "Set GPIO %d (shift %d) -> input\n",
2099                    gpio_num, gpio_shift);
2100                 /* set FLOAT */
2101                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2102                 break;
2103 
2104         default:
2105                 break;
2106         }
2107 
2108         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2109         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2110 
2111         return 0;
2112 }
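
/* Usage sketch (illustrative, hypothetical call site): a caller would
 * drive a pin with something like
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
 *
 * the HW_LOCK_RESOURCE_GPIO acquire/release inside makes the
 * read-modify-write of MISC_REG_GPIO safe against the other functions
 * sharing the chip.
 */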
2113 
2114 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2115 {
2116         u32 gpio_reg = 0;
2117         int rc = 0;
2118 
2119         /* Any port swapping should be handled by caller. */
2120 
2121         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2122         /* read GPIO and mask except the float bits */
2123         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2124         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2125         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2126         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2127 
2128         switch (mode) {
2129         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2130                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2131                 /* set CLR */
2132                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2133                 break;
2134 
2135         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2136                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2137                 /* set SET */
2138                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2139                 break;
2140 
2141         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2142                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2143                 /* set FLOAT */
2144                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2145                 break;
2146 
2147         default:
2148                 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2149                 rc = -EINVAL;
2150                 break;
2151         }
2152 
2153         if (rc == 0)
2154                 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2155 
2156         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2157 
2158         return rc;
2159 }
2160 
2161 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2162 {
2163         /* The GPIO should be swapped if swap register is set and active */
2164         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2165                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2166         int gpio_shift = gpio_num +
2167                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2168         u32 gpio_mask = (1 << gpio_shift);
2169         u32 gpio_reg;
2170 
2171         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2172                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2173                 return -EINVAL;
2174         }
2175 
2176         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2177         /* read GPIO int */
2178         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2179 
2180         switch (mode) {
2181         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2182                 DP(NETIF_MSG_LINK,
2183                    "Clear GPIO INT %d (shift %d) -> output low\n",
2184                    gpio_num, gpio_shift);
2185                 /* clear SET and set CLR */
2186                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2187                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2188                 break;
2189 
2190         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2191                 DP(NETIF_MSG_LINK,
2192                    "Set GPIO INT %d (shift %d) -> output high\n",
2193                    gpio_num, gpio_shift);
2194                 /* clear CLR and set SET */
2195                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2196                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2197                 break;
2198 
2199         default:
2200                 break;
2201         }
2202 
2203         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2204         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2205 
2206         return 0;
2207 }
2208 
2209 static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2210 {
2211         u32 spio_reg;
2212 
2213         /* Only 2 SPIOs are configurable */
2214         if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2215                 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2216                 return -EINVAL;
2217         }
2218 
2219         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2220         /* read SPIO and mask except the float bits */
2221         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2222 
2223         switch (mode) {
2224         case MISC_SPIO_OUTPUT_LOW:
2225                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
2226                 /* clear FLOAT and set CLR */
2227                 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2228                 spio_reg |=  (spio << MISC_SPIO_CLR_POS);
2229                 break;
2230 
2231         case MISC_SPIO_OUTPUT_HIGH:
2232                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
2233                 /* clear FLOAT and set SET */
2234                 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2235                 spio_reg |=  (spio << MISC_SPIO_SET_POS);
2236                 break;
2237 
2238         case MISC_SPIO_INPUT_HI_Z:
2239                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
2240                 /* set FLOAT */
2241                 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2242                 break;
2243 
2244         default:
2245                 break;
2246         }
2247 
2248         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2249         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2250 
2251         return 0;
2252 }
2253 
2254 void bnx2x_calc_fc_adv(struct bnx2x *bp)
2255 {
2256         u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2257         switch (bp->link_vars.ieee_fc &
2258                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2259         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2260                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2261                                                    ADVERTISED_Pause);
2262                 break;
2263 
2264         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2265                 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2266                                                   ADVERTISED_Pause);
2267                 break;
2268 
2269         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2270                 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2271                 break;
2272 
2273         default:
2274                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2275                                                    ADVERTISED_Pause);
2276                 break;
2277         }
2278 }
2279 
2280 static void bnx2x_set_requested_fc(struct bnx2x *bp)
2281 {
2282         /* Initialize link parameters structure variables
2283          * It is recommended to turn off RX FC for jumbo frames
2284          * for better performance
2285          */
2286         if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2287                 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2288         else
2289                 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2290 }
2291 
2292 static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2293 {
2294         u32 pause_enabled = 0;
2295 
2296         if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2297                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2298                         pause_enabled = 1;
2299 
2300                 REG_WR(bp, BAR_USTRORM_INTMEM +
2301                            USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2302                        pause_enabled);
2303         }
2304 
2305         DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2306            pause_enabled ? "enabled" : "disabled");
2307 }
2308 
2309 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2310 {
2311         int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2312         u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2313 
2314         if (!BP_NOMCP(bp)) {
2315                 bnx2x_set_requested_fc(bp);
2316                 bnx2x_acquire_phy_lock(bp);
2317 
2318                 if (load_mode == LOAD_DIAG) {
2319                         struct link_params *lp = &bp->link_params;
2320                         lp->loopback_mode = LOOPBACK_XGXS;
2321                         /* do PHY loopback at 10G speed, if possible */
2322                         if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
2323                                 if (lp->speed_cap_mask[cfx_idx] &
2324                                     PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2325                                         lp->req_line_speed[cfx_idx] =
2326                                         SPEED_10000;
2327                                 else
2328                                         lp->req_line_speed[cfx_idx] =
2329                                         SPEED_1000;
2330                         }
2331                 }
2332 
2333                 if (load_mode == LOAD_LOOPBACK_EXT) {
2334                         struct link_params *lp = &bp->link_params;
2335                         lp->loopback_mode = LOOPBACK_EXT;
2336                 }
2337 
2338                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2339 
2340                 bnx2x_release_phy_lock(bp);
2341 
2342                 bnx2x_init_dropless_fc(bp);
2343 
2344                 bnx2x_calc_fc_adv(bp);
2345 
2346                 if (bp->link_vars.link_up) {
2347                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2348                         bnx2x_link_report(bp);
2349                 }
2350                 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2351                 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2352                 return rc;
2353         }
2354         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2355         return -EINVAL;
2356 }
2357 
2358 void bnx2x_link_set(struct bnx2x *bp)
2359 {
2360         if (!BP_NOMCP(bp)) {
2361                 bnx2x_acquire_phy_lock(bp);
2362                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2363                 bnx2x_release_phy_lock(bp);
2364 
2365                 bnx2x_init_dropless_fc(bp);
2366 
2367                 bnx2x_calc_fc_adv(bp);
2368         } else
2369                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2370 }
2371 
2372 static void bnx2x__link_reset(struct bnx2x *bp)
2373 {
2374         if (!BP_NOMCP(bp)) {
2375                 bnx2x_acquire_phy_lock(bp);
2376                 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2377                 bnx2x_release_phy_lock(bp);
2378         } else
2379                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2380 }
2381 
2382 void bnx2x_force_link_reset(struct bnx2x *bp)
2383 {
2384         bnx2x_acquire_phy_lock(bp);
2385         bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2386         bnx2x_release_phy_lock(bp);
2387 }
2388 
2389 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2390 {
2391         u8 rc = 0;
2392 
2393         if (!BP_NOMCP(bp)) {
2394                 bnx2x_acquire_phy_lock(bp);
2395                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2396                                      is_serdes);
2397                 bnx2x_release_phy_lock(bp);
2398         } else
2399                 BNX2X_ERR("Bootcode is missing - can not test link\n");
2400 
2401         return rc;
2402 }
2403 
2404 /* Calculates the per-vnic min rates and stores them in
2405    input->vnic_min_rate[]; they are needed for further normalizing
2406    of the min_rates.
2407    Hidden vnics get a min rate of 0.
2408    If all the configured min_rates are 0 (or ETS is enabled), the
2409    fairness algorithm is deactivated by clearing the fairness
2410    flag in input->flags.cmng_enables.
2411    If not all min_rates are zero, those that are zero are bumped to DEF_MIN_RATE.
2412  */
2413 static void bnx2x_calc_vn_min(struct bnx2x *bp,
2414                                       struct cmng_init_input *input)
2415 {
2416         int all_zero = 1;
2417         int vn;
2418 
2419         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2420                 u32 vn_cfg = bp->mf_config[vn];
2421                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2422                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2423 
2424                 /* Skip hidden vns */
2425                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2426                         vn_min_rate = 0;
2427                 /* If min rate is zero - set it to 1 */
2428                 else if (!vn_min_rate)
2429                         vn_min_rate = DEF_MIN_RATE;
2430                 else
2431                         all_zero = 0;
2432 
2433                 input->vnic_min_rate[vn] = vn_min_rate;
2434         }
2435 
2436         /* if ETS or all min rates are zeros - disable fairness */
2437         if (BNX2X_IS_ETS_ENABLED(bp)) {
2438                 input->flags.cmng_enables &=
2439                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2440                 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2441         } else if (all_zero) {
2442                 input->flags.cmng_enables &=
2443                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2444                 DP(NETIF_MSG_IFUP,
2445                    "All MIN values are zeroes - fairness will be disabled\n");
2446         } else
2447                 input->flags.cmng_enables |=
2448                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2449 }
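
/* Worked example (illustrative): min-BW configs of {0, 20, 30} percent
 * for three vnics yield vnic_min_rate[] = {DEF_MIN_RATE, 2000, 3000}
 * (the rates are percent * 100); fairness stays enabled because not all
 * rates are zero. Only when every vnic is configured to 0, or ETS is
 * active, is CMNG_FLAGS_PER_PORT_FAIRNESS_VN cleared.
 */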
2450 
2451 static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2452                                     struct cmng_init_input *input)
2453 {
2454         u16 vn_max_rate;
2455         u32 vn_cfg = bp->mf_config[vn];
2456 
2457         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2458                 vn_max_rate = 0;
2459         else {
2460                 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2461 
2462                 if (IS_MF_SI(bp)) {
2463                         /* maxCfg in percents of linkspeed */
2464                         vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2465                 } else /* SD modes */
2466                         /* maxCfg is absolute in 100Mb units */
2467                         vn_max_rate = maxCfg * 100;
2468         }
2469 
2470         DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2471 
2472         input->vnic_max_rate[vn] = vn_max_rate;
2473 }
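
/* Worked example (illustrative): with maxCfg == 25, an MF_SI function
 * on a 10000 Mbps link gets vn_max_rate = 10000 * 25 / 100 = 2500 Mbps,
 * while an SD-mode function gets the absolute 25 * 100 = 2500 Mbps
 * regardless of the current line speed.
 */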
2474 
2475 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2476 {
2477         if (CHIP_REV_IS_SLOW(bp))
2478                 return CMNG_FNS_NONE;
2479         if (IS_MF(bp))
2480                 return CMNG_FNS_MINMAX;
2481 
2482         return CMNG_FNS_NONE;
2483 }
2484 
2485 void bnx2x_read_mf_cfg(struct bnx2x *bp)
2486 {
2487         int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2488 
2489         if (BP_NOMCP(bp))
2490                 return; /* what should be the default value in this case? */
2491 
2492         /* For 2 port configuration the absolute function number formula
2493          * is:
2494          *      abs_func = 2 * vn + BP_PORT + BP_PATH
2495          *
2496          *      and there are 4 functions per port
2497          *
2498          * For 4 port configuration it is
2499          *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2500          *
2501          *      and there are 2 functions per port
2502          */
2503         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2504                 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2505 
2506                 if (func >= E1H_FUNC_MAX)
2507                         break;
2508 
2509                 bp->mf_config[vn] =
2510                         MF_CFG_RD(bp, func_mf_config[func].config);
2511         }
2512         if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2513                 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2514                 bp->flags |= MF_FUNC_DIS;
2515         } else {
2516                 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2517                 bp->flags &= ~MF_FUNC_DIS;
2518         }
2519 }
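
/* Worked example (illustrative): in a 2-port configuration (n == 1),
 * vn 2 on port 1 of path 0 resolves to
 * abs_func = 1 * (2 * 2 + 1) + 0 = 5, matching the
 * "abs_func = 2 * vn + BP_PORT + BP_PATH" formula above.
 */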
2520 
2521 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2522 {
2523         struct cmng_init_input input;
2524         memset(&input, 0, sizeof(struct cmng_init_input));
2525 
2526         input.port_rate = bp->link_vars.line_speed;
2527 
2528         if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2529                 int vn;
2530 
2531                 /* read mf conf from shmem */
2532                 if (read_cfg)
2533                         bnx2x_read_mf_cfg(bp);
2534 
2535                 /* vn_weight_sum and enable fairness if not 0 */
2536                 bnx2x_calc_vn_min(bp, &input);
2537 
2538                 /* calculate and set min-max rate for each vn */
2539                 if (bp->port.pmf)
2540                         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2541                                 bnx2x_calc_vn_max(bp, vn, &input);
2542 
2543                 /* always enable rate shaping and fairness */
2544                 input.flags.cmng_enables |=
2545                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2546 
2547                 bnx2x_init_cmng(&input, &bp->cmng);
2548                 return;
2549         }
2550 
2551         /* rate shaping and fairness are disabled */
2552         DP(NETIF_MSG_IFUP,
2553            "rate shaping and fairness are disabled\n");
2554 }
2555 
2556 static void storm_memset_cmng(struct bnx2x *bp,
2557                               struct cmng_init *cmng,
2558                               u8 port)
2559 {
2560         int vn;
2561         size_t size = sizeof(struct cmng_struct_per_port);
2562 
2563         u32 addr = BAR_XSTRORM_INTMEM +
2564                         XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2565 
2566         __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2567 
2568         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2569                 int func = func_by_vn(bp, vn);
2570 
2571                 addr = BAR_XSTRORM_INTMEM +
2572                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2573                 size = sizeof(struct rate_shaping_vars_per_vn);
2574                 __storm_memset_struct(bp, addr, size,
2575                                       (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2576 
2577                 addr = BAR_XSTRORM_INTMEM +
2578                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2579                 size = sizeof(struct fairness_vars_per_vn);
2580                 __storm_memset_struct(bp, addr, size,
2581                                       (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2582         }
2583 }
2584 
2585 /* init cmng mode in HW according to local configuration */
2586 void bnx2x_set_local_cmng(struct bnx2x *bp)
2587 {
2588         int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2589 
2590         if (cmng_fns != CMNG_FNS_NONE) {
2591                 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2592                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2593         } else {
2594                 /* rate shaping and fairness are disabled */
2595                 DP(NETIF_MSG_IFUP,
2596                    "single function mode without fairness\n");
2597         }
2598 }
2599 
2600 /* This function is called upon link interrupt */
2601 static void bnx2x_link_attn(struct bnx2x *bp)
2602 {
2603         /* Make sure that we are synced with the current statistics */
2604         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2605 
2606         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2607 
2608         bnx2x_init_dropless_fc(bp);
2609 
2610         if (bp->link_vars.link_up) {
2611 
2612                 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2613                         struct host_port_stats *pstats;
2614 
2615                         pstats = bnx2x_sp(bp, port_stats);
2616                         /* reset old mac stats */
2617                         memset(&(pstats->mac_stx[0]), 0,
2618                                sizeof(struct mac_stx));
2619                 }
2620                 if (bp->state == BNX2X_STATE_OPEN)
2621                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2622         }
2623 
2624         if (bp->link_vars.link_up && bp->link_vars.line_speed)
2625                 bnx2x_set_local_cmng(bp);
2626 
2627         __bnx2x_link_report(bp);
2628 
2629         if (IS_MF(bp))
2630                 bnx2x_link_sync_notify(bp);
2631 }
2632 
2633 void bnx2x__link_status_update(struct bnx2x *bp)
2634 {
2635         if (bp->state != BNX2X_STATE_OPEN)
2636                 return;
2637 
2638         /* read updated dcb configuration */
2639         if (IS_PF(bp)) {
2640                 bnx2x_dcbx_pmf_update(bp);
2641                 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2642                 if (bp->link_vars.link_up)
2643                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2644                 else
2645                         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2646                 /* indicate link status */
2647                 bnx2x_link_report(bp);
2648 
2649         } else { /* VF */
2650                 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2651                                           SUPPORTED_10baseT_Full |
2652                                           SUPPORTED_100baseT_Half |
2653                                           SUPPORTED_100baseT_Full |
2654                                           SUPPORTED_1000baseT_Full |
2655                                           SUPPORTED_2500baseX_Full |
2656                                           SUPPORTED_10000baseT_Full |
2657                                           SUPPORTED_TP |
2658                                           SUPPORTED_FIBRE |
2659                                           SUPPORTED_Autoneg |
2660                                           SUPPORTED_Pause |
2661                                           SUPPORTED_Asym_Pause);
2662                 bp->port.advertising[0] = bp->port.supported[0];
2663 
2664                 bp->link_params.bp = bp;
2665                 bp->link_params.port = BP_PORT(bp);
2666                 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2667                 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2668                 bp->link_params.req_line_speed[0] = SPEED_10000;
2669                 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2670                 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2671                 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2672                 bp->link_vars.line_speed = SPEED_10000;
2673                 bp->link_vars.link_status =
2674                         (LINK_STATUS_LINK_UP |
2675                          LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2676                 bp->link_vars.link_up = 1;
2677                 bp->link_vars.duplex = DUPLEX_FULL;
2678                 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2679                 __bnx2x_link_report(bp);
2680                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2681         }
2682 }
2683 
2684 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2685                                   u16 vlan_val, u8 allowed_prio)
2686 {
2687         struct bnx2x_func_state_params func_params = {NULL};
2688         struct bnx2x_func_afex_update_params *f_update_params =
2689                 &func_params.params.afex_update;
2690 
2691         func_params.f_obj = &bp->func_obj;
2692         func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2693 
2694         /* no need to wait for RAMROD completion, so don't
2695          * set RAMROD_COMP_WAIT flag
2696          */
2697 
2698         f_update_params->vif_id = vifid;
2699         f_update_params->afex_default_vlan = vlan_val;
2700         f_update_params->allowed_priorities = allowed_prio;
2701 
2702         /* if ramrod cannot be sent, respond to MCP immediately */
2703         if (bnx2x_func_state_change(bp, &func_params) < 0)
2704                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2705 
2706         return 0;
2707 }
2708 
2709 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2710                                           u16 vif_index, u8 func_bit_map)
2711 {
2712         struct bnx2x_func_state_params func_params = {NULL};
2713         struct bnx2x_func_afex_viflists_params *update_params =
2714                 &func_params.params.afex_viflists;
2715         int rc;
2716         u32 drv_msg_code;
2717 
2718         /* validate only LIST_SET and LIST_GET are received from switch */
2719         if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2720                 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2721                           cmd_type);
2722 
2723         func_params.f_obj = &bp->func_obj;
2724         func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2725 
2726         /* set parameters according to cmd_type */
2727         update_params->afex_vif_list_command = cmd_type;
2728         update_params->vif_list_index = vif_index;
2729         update_params->func_bit_map =
2730                 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2731         update_params->func_to_clear = 0;
2732         drv_msg_code =
2733                 (cmd_type == VIF_LIST_RULE_GET) ?
2734                 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2735                 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2736 
2737         /* if the ramrod cannot be sent, respond to the MCP immediately for
2738          * SET and GET requests (others are not triggered from MCP)
2739          */
2740         rc = bnx2x_func_state_change(bp, &func_params);
2741         if (rc < 0)
2742                 bnx2x_fw_command(bp, drv_msg_code, 0);
2743 
2744         return 0;
2745 }
2746 
2747 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2748 {
2749         struct afex_stats afex_stats;
2750         u32 func = BP_ABS_FUNC(bp);
2751         u32 mf_config;
2752         u16 vlan_val;
2753         u32 vlan_prio;
2754         u16 vif_id;
2755         u8 allowed_prio;
2756         u8 vlan_mode;
2757         u32 addr_to_write, vifid, addrs, stats_type, i;
2758 
2759         if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2760                 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2761                 DP(BNX2X_MSG_MCP,
2762                    "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2763                 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2764         }
2765 
2766         if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2767                 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2768                 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2769                 DP(BNX2X_MSG_MCP,
2770                    "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2771                    vifid, addrs);
2772                 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2773                                                addrs);
2774         }
2775 
2776         if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2777                 addr_to_write = SHMEM2_RD(bp,
2778                         afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2779                 stats_type = SHMEM2_RD(bp,
2780                         afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2781 
2782                 DP(BNX2X_MSG_MCP,
2783                    "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2784                    addr_to_write);
2785 
2786                 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2787 
2788                 /* write response to scratchpad, for MCP */
2789                 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2790                         REG_WR(bp, addr_to_write + i*sizeof(u32),
2791                                *(((u32 *)(&afex_stats))+i));
2792 
2793                 /* send ack message to MCP */
2794                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2795         }
2796 
2797         if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2798                 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2799                 bp->mf_config[BP_VN(bp)] = mf_config;
2800                 DP(BNX2X_MSG_MCP,
2801                    "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2802                    mf_config);
2803 
2804                 /* if VIF_SET is "enabled" */
2805                 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2806                         /* set rate limit directly to internal RAM */
2807                         struct cmng_init_input cmng_input;
2808                         struct rate_shaping_vars_per_vn m_rs_vn;
2809                         size_t size = sizeof(struct rate_shaping_vars_per_vn);
2810                         u32 addr = BAR_XSTRORM_INTMEM +
2811                             XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2812 
2813                         bp->mf_config[BP_VN(bp)] = mf_config;
2814 
2815                         bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2816                         m_rs_vn.vn_counter.rate =
2817                                 cmng_input.vnic_max_rate[BP_VN(bp)];
2818                         m_rs_vn.vn_counter.quota =
2819                                 (m_rs_vn.vn_counter.rate *
2820                                  RS_PERIODIC_TIMEOUT_USEC) / 8;
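                        /* Units sketch (assumption: the rate is in Mbps and
                         * RS_PERIODIC_TIMEOUT_USEC is in usec): Mbps * usec
                         * cancels the 10^6 factors, leaving bits, and the
                         * division by 8 converts bits to bytes. E.g. 10000
                         * Mbps over a hypothetical 400 usec period gives
                         * 10000 * 400 / 8 = 500000 bytes per period.
                         */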
2821 
2822                         __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2823 
2824                         /* read relevant values from mf_cfg struct in shmem */
2825                         vif_id =
2826                                 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2827                                  FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2828                                 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2829                         vlan_val =
2830                                 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2831                                  FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2832                                 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2833                         vlan_prio = (mf_config &
2834                                      FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2835                                     FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2836                         vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
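                        /* Worked example (illustrative; assumes the standard
                         * VLAN_PRIO_SHIFT of 13): VLAN ID 100 (0x064) with
                         * priority 5 yields (5 << 13) | 0x064 = 0xa064.
                         */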
2837                         vlan_mode =
2838                                 (MF_CFG_RD(bp,
2839                                            func_mf_config[func].afex_config) &
2840                                  FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2841                                 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2842                         allowed_prio =
2843                                 (MF_CFG_RD(bp,
2844                                            func_mf_config[func].afex_config) &
2845                                  FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2846                                 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2847 
2848                         /* send ramrod to FW, return in case of failure */
2849                         if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2850                                                    allowed_prio))
2851                                 return;
2852 
2853                         bp->afex_def_vlan_tag = vlan_val;
2854                         bp->afex_vlan_mode = vlan_mode;
2855                 } else {
2856                         /* notify link down because the function is disabled */
2857                         bnx2x_link_report(bp);
2858 
2859                         /* send INVALID VIF ramrod to FW */
2860                         bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2861 
2862                         /* Reset the default afex VLAN */
2863                         bp->afex_def_vlan_tag = -1;
2864                 }
2865         }
2866 }
2867 
2868 static void bnx2x_pmf_update(struct bnx2x *bp)
2869 {
2870         int port = BP_PORT(bp);
2871         u32 val;
2872 
2873         bp->port.pmf = 1;
2874         DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2875 
2876         /*
2877          * We need the barrier to ensure the ordering between writing to
2878          * bp->port.pmf here and reading it in bnx2x_periodic_task().
2879          */
2880         smp_mb();
2881 
2882         /* queue a periodic task */
2883         queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2884 
2885         bnx2x_dcbx_pmf_update(bp);
2886 
2887         /* enable nig attention */
2888         val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2889         if (bp->common.int_block == INT_BLOCK_HC) {
2890                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2891                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2892         } else if (!CHIP_IS_E1x(bp)) {
2893                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2894                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2895         }
2896 
2897         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2898 }
2899 
2900 /* end of Link */
2901 
2902 /* slow path */
2903 
2904 /*
2905  * General service functions
2906  */
2907 
2908 /* send the MCP a request, block until there is a reply */
2909 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2910 {
2911         int mb_idx = BP_FW_MB_IDX(bp);
2912         u32 seq;
2913         u32 rc = 0;
2914         u32 cnt = 1;
2915         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2916 
2917         mutex_lock(&bp->fw_mb_mutex);
2918         seq = ++bp->fw_seq;
2919         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2920         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2921 
2922         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
2923                         (command | seq), param);
2924 
2925         do {
2926                 /* let the FW do its magic ... */
2927                 msleep(delay);
2928 
2929                 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2930 
2931                 /* Give the FW up to 5 seconds (500*10ms) */
2932         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2933 
2934         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2935            cnt*delay, rc, seq);
2936 
2937         /* is this a reply to our command? */
2938         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2939                 rc &= FW_MSG_CODE_MASK;
2940         else {
2941                 /* FW BUG! */
2942                 BNX2X_ERR("FW failed to respond!\n");
2943                 bnx2x_fw_dump(bp);
2944                 rc = 0;
2945         }
2946         mutex_unlock(&bp->fw_mb_mutex);
2947 
2948         return rc;
2949 }
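
     /* Illustrative caller sketch (hypothetical, not part of the driver): the
      * return value is already masked with FW_MSG_CODE_MASK and 0 means the
      * MCP never answered, so a typical caller looks like
      *
      *      u32 resp = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
      *      if (!resp)
      *              return -EBUSY;  (no reply within ~5 seconds)
      *
      * and then compares resp against the expected FW_MSG_CODE_* reply value.
      */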
2950 
2951 static void storm_memset_func_cfg(struct bnx2x *bp,
2952                                  struct tstorm_eth_function_common_config *tcfg,
2953                                  u16 abs_fid)
2954 {
2955         size_t size = sizeof(struct tstorm_eth_function_common_config);
2956 
2957         u32 addr = BAR_TSTRORM_INTMEM +
2958                         TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
2959 
2960         __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
2961 }
2962 
2963 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2964 {
2965         if (CHIP_IS_E1x(bp)) {
2966                 struct tstorm_eth_function_common_config tcfg = {0};
2967 
2968                 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2969         }
2970 
2971         /* Enable the function in the FW */
2972         storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2973         storm_memset_func_en(bp, p->func_id, 1);
2974 
2975         /* spq */
2976         if (p->func_flgs & FUNC_FLG_SPQ) {
2977                 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2978                 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2979                        XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2980         }
2981 }
2982 
2983 /**
2984  * bnx2x_get_common_flags - Return common flags
2985  *
2986  * @bp:         device handle
2987  * @fp:         queue handle
2988  * @zero_stats: TRUE if statistics zeroing is needed
2989  *
2990  * Return the flags that are common to both Tx-only and regular connections.
2991  */
2992 static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
2993                                             struct bnx2x_fastpath *fp,
2994                                             bool zero_stats)
2995 {
2996         unsigned long flags = 0;
2997 
2998         /* PF driver will always initialize the Queue to an ACTIVE state */
2999         __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
3000 
3001         /* Tx-only connections collect statistics (on the same index as the
3002          * parent connection). The statistics are zeroed when the parent
3003          * connection is initialized.
3004          */
3005 
3006         __set_bit(BNX2X_Q_FLG_STATS, &flags);
3007         if (zero_stats)
3008                 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
3009 
3010         if (bp->flags & TX_SWITCHING)
3011                 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
3012 
3013         __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
3014         __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
3015 
3016 #ifdef BNX2X_STOP_ON_ERROR
3017         __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
3018 #endif
3019 
3020         return flags;
3021 }
3022 
3023 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3024                                        struct bnx2x_fastpath *fp,
3025                                        bool leading)
3026 {
3027         unsigned long flags = 0;
3028 
3029         /* calculate other queue flags */
3030         if (IS_MF_SD(bp))
3031                 __set_bit(BNX2X_Q_FLG_OV, &flags);
3032 
3033         if (IS_FCOE_FP(fp)) {
3034                 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
3035                 /* For FCoE - force usage of default priority (for afex) */
3036                 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3037         }
3038 
3039         if (!fp->disable_tpa) {
3040                 __set_bit(BNX2X_Q_FLG_TPA, &flags);
3041                 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3042                 if (fp->mode == TPA_MODE_GRO)
3043                         __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
3044         }
3045 
3046         if (leading) {
3047                 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3048                 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3049         }
3050 
3051         /* Always set HW VLAN stripping */
3052         __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3053 
3054         /* configure silent vlan removal */
3055         if (IS_MF_AFEX(bp))
3056                 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3057 
3058         return flags | bnx2x_get_common_flags(bp, fp, true);
3059 }
3060 
3061 static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3062         struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3063         u8 cos)
3064 {
3065         gen_init->stat_id = bnx2x_stats_id(fp);
3066         gen_init->spcl_id = fp->cl_id;
3067 
3068         /* Always use mini-jumbo MTU for FCoE L2 ring */
3069         if (IS_FCOE_FP(fp))
3070                 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3071         else
3072                 gen_init->mtu = bp->dev->mtu;
3073 
3074         gen_init->cos = cos;
3075 }
3076 
3077 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3078         struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3079         struct bnx2x_rxq_setup_params *rxq_init)
3080 {
3081         u8 max_sge = 0;
3082         u16 sge_sz = 0;
3083         u16 tpa_agg_size = 0;
3084 
3085         if (!fp->disable_tpa) {
3086                 pause->sge_th_lo = SGE_TH_LO(bp);
3087                 pause->sge_th_hi = SGE_TH_HI(bp);
3088 
3089                 /* validate the SGE ring has enough entries to cross the high threshold */
3090                 WARN_ON(bp->dropless_fc &&
3091                                 pause->sge_th_hi + FW_PREFETCH_CNT >
3092                                 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3093 
3094                 tpa_agg_size = TPA_AGG_SIZE;
3095                 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3096                         SGE_PAGE_SHIFT;
3097                 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3098                           (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3099                 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
3100         }
3101 
3102         /* pause - not for e1 */
3103         if (!CHIP_IS_E1(bp)) {
3104                 pause->bd_th_lo = BD_TH_LO(bp);
3105                 pause->bd_th_hi = BD_TH_HI(bp);
3106 
3107                 pause->rcq_th_lo = RCQ_TH_LO(bp);
3108                 pause->rcq_th_hi = RCQ_TH_HI(bp);
3109                 /*
3110                  * validate that rings have enough entries to cross
3111                  * high thresholds
3112                  */
3113                 WARN_ON(bp->dropless_fc &&
3114                                 pause->bd_th_hi + FW_PREFETCH_CNT >
3115                                 bp->rx_ring_size);
3116                 WARN_ON(bp->dropless_fc &&
3117                                 pause->rcq_th_hi + FW_PREFETCH_CNT >
3118                                 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3119 
3120                 pause->pri_map = 1;
3121         }
3122 
3123         /* rxq setup */
3124         rxq_init->dscr_map = fp->rx_desc_mapping;
3125         rxq_init->sge_map = fp->rx_sge_mapping;
3126         rxq_init->rcq_map = fp->rx_comp_mapping;
3127         rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3128 
3129         /* This should be the maximum number of data bytes that may be
3130          * placed on a BD (not including padding).
3131          */
3132         rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3133                            BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3134 
3135         rxq_init->cl_qzone_id = fp->cl_qzone_id;
3136         rxq_init->tpa_agg_sz = tpa_agg_size;
3137         rxq_init->sge_buf_sz = sge_sz;
3138         rxq_init->max_sges_pkt = max_sge;
3139         rxq_init->rss_engine_id = BP_FUNC(bp);
3140         rxq_init->mcast_engine_id = BP_FUNC(bp);
3141 
3142         /* Maximum number of simultaneous TPA aggregations for this Queue.
3143          *
3144          * For PF Clients it should be the maximum available number.
3145          * VF driver(s) may want to define it to a smaller value.
3146          */
3147         rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3148 
3149         rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3150         rxq_init->fw_sb_id = fp->fw_sb_id;
3151 
3152         if (IS_FCOE_FP(fp))
3153                 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3154         else
3155                 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3156         /* configure silent vlan removal:
3157          * if the multi-function mode is AFEX, then mask the default vlan
3158          */
3159         if (IS_MF_AFEX(bp)) {
3160                 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3161                 rxq_init->silent_removal_mask = VLAN_VID_MASK;
3162         }
3163 }
3164 
3165 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3166         struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3167         u8 cos)
3168 {
3169         txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3170         txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3171         txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3172         txq_init->fw_sb_id = fp->fw_sb_id;
3173 
3174         /*
3175          * set the tss leading client id for TX classification ==
3176          * leading RSS client id
3177          */
3178         txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3179 
3180         if (IS_FCOE_FP(fp)) {
3181                 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3182                 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3183         }
3184 }
3185 
3186 static void bnx2x_pf_init(struct bnx2x *bp)
3187 {
3188         struct bnx2x_func_init_params func_init = {0};
3189         struct event_ring_data eq_data = { {0} };
3190         u16 flags;
3191 
3192         if (!CHIP_IS_E1x(bp)) {
3193                 /* reset IGU PF statistics: MSIX + ATTN */
3194                 /* PF */
3195                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3196                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3197                            (CHIP_MODE_IS_4_PORT(bp) ?
3198                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3199                 /* ATTN */
3200                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3201                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3202                            BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3203                            (CHIP_MODE_IS_4_PORT(bp) ?
3204                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3205         }
3206 
3207         /* function setup flags */
3208         flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
3209 
3210         /* This flag is relevant for E1x only.
3211          * E2 doesn't have a TPA configuration at the function level.
3212          */
3213         flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
3214 
3215         func_init.func_flgs = flags;
3216         func_init.pf_id = BP_FUNC(bp);
3217         func_init.func_id = BP_FUNC(bp);
3218         func_init.spq_map = bp->spq_mapping;
3219         func_init.spq_prod = bp->spq_prod_idx;
3220 
3221         bnx2x_func_init(bp, &func_init);
3222 
3223         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3224 
3225         /*
3226          * Congestion management values depend on the link rate.
3227          * There is no active link yet, so the initial link rate is set
3228          * to 10 Gbps. When the link comes up, the congestion management
3229          * values are re-calculated according to the actual link rate.
3230          */
3231         bp->link_vars.line_speed = SPEED_10000;
3232         bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3233 
3234         /* Only the PMF sets the HW */
3235         if (bp->port.pmf)
3236                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3237 
3238         /* init Event Queue - PCI bus guarantees correct endianness */
3239         eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3240         eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3241         eq_data.producer = bp->eq_prod;
3242         eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3243         eq_data.sb_id = DEF_SB_ID;
3244         storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3245 }
3246 
3247 static void bnx2x_e1h_disable(struct bnx2x *bp)
3248 {
3249         int port = BP_PORT(bp);
3250 
3251         bnx2x_tx_disable(bp);
3252 
3253         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3254 }
3255 
3256 static void bnx2x_e1h_enable(struct bnx2x *bp)
3257 {
3258         int port = BP_PORT(bp);
3259 
3260         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
3261 
3262         /* Tx queues should only be re-enabled */
3263         netif_tx_wake_all_queues(bp->dev);
3264 
3265         /*
3266          * No need to call netif_carrier_on() here; it will be called,
3267          * if the link is up, when the link state is checked.
3268          */
3269 }
3270 
3271 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3272 
3273 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3274 {
3275         struct eth_stats_info *ether_stat =
3276                 &bp->slowpath->drv_info_to_mcp.ether_stat;
3277         struct bnx2x_vlan_mac_obj *mac_obj =
3278                 &bp->sp_objs->mac_obj;
3279         int i;
3280 
3281         strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3282                 ETH_STAT_INFO_VERSION_LEN);
3283 
3284         /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED MACs, placing them in the
3285          * mac_local field of the ether_stat struct. The base address is offset
3286          * by 2 bytes to account for the field being 8 bytes while a MAC address
3287          * is only 6 bytes. Likewise, the stride for the get_n_elements function
3288          * is 2 bytes, to compensate for the difference between the 6 bytes of a
3289          * MAC and the 8 bytes the ether_stat struct allocates, so the MACs will
3290          * land in their proper positions.
3291          */
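             /* Layout sketch (assumptions: MAC_PAD == 2 and ETH_ALEN == 6, so
              * each mac_local[] slot is 8 bytes wide):
              *
              *      slot i: [pad][pad][b0][b1][b2][b3][b4][b5]
              *
              * get_n_elements() below writes 6 bytes starting at offset MAC_PAD
              * of each slot, and the MAC_PAD stride skips the next slot's padding.
              */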
3292         for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3293                 memset(ether_stat->mac_local + i, 0,
3294                        sizeof(ether_stat->mac_local[0]));
3295         mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3296                                 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3297                                 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3298                                 ETH_ALEN);
3299         ether_stat->mtu_size = bp->dev->mtu;
3300         if (bp->dev->features & NETIF_F_RXCSUM)
3301                 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3302         if (bp->dev->features & NETIF_F_TSO)
3303                 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3304         ether_stat->feature_flags |= bp->common.boot_mode;
3305 
3306         ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3307 
3308         ether_stat->txq_size = bp->tx_ring_size;
3309         ether_stat->rxq_size = bp->rx_ring_size;
3310 
3311 #ifdef CONFIG_BNX2X_SRIOV
3312         ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
3313 #endif
3314 }
3315 
3316 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3317 {
3318         struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3319         struct fcoe_stats_info *fcoe_stat =
3320                 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3321 
3322         if (!CNIC_LOADED(bp))
3323                 return;
3324 
3325         memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3326 
3327         fcoe_stat->qos_priority =
3328                 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3329 
3330         /* insert FCoE stats from ramrod response */
3331         if (!NO_FCOE(bp)) {
3332                 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3333                         &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3334                         tstorm_queue_statistics;
3335 
3336                 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3337                         &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3338                         xstorm_queue_statistics;
3339 
3340                 struct fcoe_statistics_params *fw_fcoe_stat =
3341                         &bp->fw_stats_data->fcoe;
3342 
3343                 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3344                           fcoe_stat->rx_bytes_lo,
3345                           fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3346 
3347                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3348                           fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3349                           fcoe_stat->rx_bytes_lo,
3350                           fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3351 
3352                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3353                           fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3354                           fcoe_stat->rx_bytes_lo,
3355                           fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3356 
3357                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3358                           fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3359                           fcoe_stat->rx_bytes_lo,
3360                           fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3361 
3362                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3363                           fcoe_stat->rx_frames_lo,
3364                           fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3365 
3366                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3367                           fcoe_stat->rx_frames_lo,
3368                           fcoe_q_tstorm_stats->rcv_ucast_pkts);
3369 
3370                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3371                           fcoe_stat->rx_frames_lo,
3372                           fcoe_q_tstorm_stats->rcv_bcast_pkts);
3373 
3374                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3375                           fcoe_stat->rx_frames_lo,
3376                           fcoe_q_tstorm_stats->rcv_mcast_pkts);
3377 
3378                 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3379                           fcoe_stat->tx_bytes_lo,
3380                           fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3381 
3382                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3383                           fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3384                           fcoe_stat->tx_bytes_lo,
3385                           fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3386 
3387                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3388                           fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3389                           fcoe_stat->tx_bytes_lo,
3390                           fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3391 
3392                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3393                           fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3394                           fcoe_stat->tx_bytes_lo,
3395                           fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3396 
3397                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3398                           fcoe_stat->tx_frames_lo,
3399                           fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3400 
3401                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3402                           fcoe_stat->tx_frames_lo,
3403                           fcoe_q_xstorm_stats->ucast_pkts_sent);
3404 
3405                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3406                           fcoe_stat->tx_frames_lo,
3407                           fcoe_q_xstorm_stats->bcast_pkts_sent);
3408 
3409                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3410                           fcoe_stat->tx_frames_lo,
3411                           fcoe_q_xstorm_stats->mcast_pkts_sent);
3412         }
3413 
3414         /* ask L5 driver to add data to the struct */
3415         bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3416 }
3417 
3418 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3419 {
3420         struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3421         struct iscsi_stats_info *iscsi_stat =
3422                 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3423 
3424         if (!CNIC_LOADED(bp))
3425                 return;
3426 
3427         memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3428                ETH_ALEN);
3429 
3430         iscsi_stat->qos_priority =
3431                 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3432 
3433         /* ask L5 driver to add data to the struct */
3434         bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3435 }
3436 
3437 /* called due to an MCP event (on the pmf):
3438  *      reread the new bandwidth configuration
3439  *      configure the FW
3440  *      notify other functions about the change
3441  */
3442 static void bnx2x_config_mf_bw(struct bnx2x *bp)
3443 {
3444         if (bp->link_vars.link_up) {
3445                 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3446                 bnx2x_link_sync_notify(bp);
3447         }
3448         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3449 }
3450 
3451 static void bnx2x_set_mf_bw(struct bnx2x *bp)
3452 {
3453         bnx2x_config_mf_bw(bp);
3454         bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3455 }
3456 
3457 static void bnx2x_handle_eee_event(struct bnx2x *bp)
3458 {
3459         DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3460         bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3461 }
3462 
3463 static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3464 {
3465         enum drv_info_opcode op_code;
3466         u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3467 
3468         /* if drv_info version supported by MFW doesn't match - send NACK */
3469         if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3470                 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3471                 return;
3472         }
3473 
3474         op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3475                   DRV_INFO_CONTROL_OP_CODE_SHIFT;
3476 
3477         memset(&bp->slowpath->drv_info_to_mcp, 0,
3478                sizeof(union drv_info_to_mcp));
3479 
3480         switch (op_code) {
3481         case ETH_STATS_OPCODE:
3482                 bnx2x_drv_info_ether_stat(bp);
3483                 break;
3484         case FCOE_STATS_OPCODE:
3485                 bnx2x_drv_info_fcoe_stat(bp);
3486                 break;
3487         case ISCSI_STATS_OPCODE:
3488                 bnx2x_drv_info_iscsi_stat(bp);
3489                 break;
3490         default:
3491                 /* if op code isn't supported - send NACK */
3492                 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3493                 return;
3494         }
3495 
3496         /* if we got drv_info attn from MFW then these fields are defined in
3497          * shmem2 for sure
3498          */
3499         SHMEM2_WR(bp, drv_info_host_addr_lo,
3500                 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3501         SHMEM2_WR(bp, drv_info_host_addr_hi,
3502                 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3503 
3504         bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3505 }
3506 
3507 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
3508 {
3509         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
3510 
3511         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
3512 
3513                 /*
3514                  * This is the only place besides the function initialization
3515                  * where bp->flags can change, so it is done without any
3516                  * locks.
3517                  */
3518                 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3519                         DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3520                         bp->flags |= MF_FUNC_DIS;
3521 
3522                         bnx2x_e1h_disable(bp);
3523                 } else {
3524                         DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3525                         bp->flags &= ~MF_FUNC_DIS;
3526 
3527                         bnx2x_e1h_enable(bp);
3528                 }
3529                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
3530         }
3531         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
3532                 bnx2x_config_mf_bw(bp);
3533                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
3534         }
3535 
3536         /* Report results to MCP */
3537         if (dcc_event)
3538                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
3539         else
3540                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
3541 }
3542 
3543 /* must be called under the spq lock */
3544 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3545 {
3546         struct eth_spe *next_spe = bp->spq_prod_bd;
3547 
3548         if (bp->spq_prod_bd == bp->spq_last_bd) {
3549                 bp->spq_prod_bd = bp->spq;
3550                 bp->spq_prod_idx = 0;
3551                 DP(BNX2X_MSG_SP, "end of spq\n");
3552         } else {
3553                 bp->spq_prod_bd++;
3554                 bp->spq_prod_idx++;
3555         }
3556         return next_spe;
3557 }
3558 
3559 /* must be called under the spq lock */
3560 static void bnx2x_sp_prod_update(struct bnx2x *bp)
3561 {
3562         int func = BP_FUNC(bp);
3563 
3564         /*
3565          * Make sure that BD data is updated before writing the producer:
3566          * BD data is written to the memory, the producer is read from the
3567          * memory, thus we need a full memory barrier to ensure the ordering.
3568          */
3569         mb();
3570 
3571         REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3572                  bp->spq_prod_idx);
3573         mmiowb();
3574 }
3575 
3576 /**
3577  * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3578  *
3579  * @cmd:        command to check
3580  * @cmd_type:   command type
3581  */
3582 static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3583 {
3584         if ((cmd_type == NONE_CONNECTION_TYPE) ||
3585             (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3586             (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3587             (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3588             (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3589             (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3590             (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3591                 return true;
3592         else
3593                 return false;
3594 }
3595 
3596 /**
3597  * bnx2x_sp_post - place a single command on an SP ring
3598  *
3599  * @bp:         driver handle
3600  * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
3601  * @cid:        SW CID the command is related to
3602  * @data_hi:    command private data address (high 32 bits)
3603  * @data_lo:    command private data address (low 32 bits)
3604  * @cmd_type:   command type (e.g. NONE, ETH)
3605  *
3606  * SP data is handled as if it's always an address pair, thus data fields are
3607  * not swapped to little endian in upper functions. Instead this function swaps
3608  * data as if it's two u32 fields.
3609  */
3610 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3611                   u32 data_hi, u32 data_lo, int cmd_type)
3612 {
3613         struct eth_spe *spe;
3614         u16 type;
3615         bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3616 
3617 #ifdef BNX2X_STOP_ON_ERROR
3618         if (unlikely(bp->panic)) {
3619                 BNX2X_ERR("Can't post SP when there is panic\n");
3620                 return -EIO;
3621         }
3622 #endif
3623 
3624         spin_lock_bh(&bp->spq_lock);
3625 
3626         if (common) {
3627                 if (!atomic_read(&bp->eq_spq_left)) {
3628                         BNX2X_ERR("BUG! EQ ring full!\n");
3629                         spin_unlock_bh(&bp->spq_lock);
3630                         bnx2x_panic();
3631                         return -EBUSY;
3632                 }
3633         } else if (!atomic_read(&bp->cq_spq_left)) {
3634                 BNX2X_ERR("BUG! SPQ ring full!\n");
3635                 spin_unlock_bh(&bp->spq_lock);
3636                 bnx2x_panic();
3637                 return -EBUSY;
3638         }
3639 
3640         spe = bnx2x_sp_get_next(bp);
3641 
3642         /* CID needs the port number to be encoded in it */
3643         spe->hdr.conn_and_cmd_data =
3644                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3645                                     HW_CID(bp, cid));
3646 
3647         type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
3648 
3649         type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3650                  SPE_HDR_FUNCTION_ID);
3651 
3652         spe->hdr.type = cpu_to_le16(type);
3653 
3654         spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3655         spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3656 
3657         /*
3658          * It's ok if the actual decrement is issued towards the memory
3659          * somewhere between the spin_lock and spin_unlock. Thus no
3660          * more explicit memory barrier is needed.
3661          */
3662         if (common)
3663                 atomic_dec(&bp->eq_spq_left);
3664         else
3665                 atomic_dec(&bp->cq_spq_left);
3666 
3667         DP(BNX2X_MSG_SP,
3668            "SPQE[%x] (%x:%x)  (cmd, common?) (%d,%d)  hw_cid %x  data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3669            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3670            (u32)(U64_LO(bp->spq_mapping) +
3671            (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3672            HW_CID(bp, cid), data_hi, data_lo, type,
3673            atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3674 
3675         bnx2x_sp_prod_update(bp);
3676         spin_unlock_bh(&bp->spq_lock);
3677         return 0;
3678 }
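
     /* Illustrative usage sketch (hypothetical values, not part of the
      * driver): posting an ETH client-setup ramrod whose parameters sit in a
      * DMA-mapped buffer at 'mapping' might look like
      *
      *      rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, cid,
      *                         U64_HI(mapping), U64_LO(mapping),
      *                         ETH_CONNECTION_TYPE);
      *
      * with the completion later reported through the EQ or the connection's
      * completion queue.
      */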
3679 
3680 /* acquire split MCP access lock register */
3681 static int bnx2x_acquire_alr(struct bnx2x *bp)
3682 {
3683         u32 j, val;
3684         int rc = 0;
3685 
3686         might_sleep();
3687         for (j = 0; j < 1000; j++) {
3688                 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3689                 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3690                 if (val & MCPR_ACCESS_LOCK_LOCK)
3691                         break;
3692 
3693                 usleep_range(5000, 10000);
3694         }
3695         if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3696                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3697                 rc = -EBUSY;
3698         }
3699 
3700         return rc;
3701 }
3702 
3703 /* release split MCP access lock register */
3704 static void bnx2x_release_alr(struct bnx2x *bp)
3705 {
3706         REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3707 }
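
     /* Typical pairing sketch (hypothetical caller, for illustration only):
      *
      *      if (bnx2x_acquire_alr(bp))
      *              return;         (lock not acquired, -EBUSY)
      *      ...access the MCP split registers...
      *      bnx2x_release_alr(bp);
      */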
3708 
3709 #define BNX2X_DEF_SB_ATT_IDX    0x0001
3710 #define BNX2X_DEF_SB_IDX        0x0002
3711 
3712 static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3713 {
3714         struct host_sp_status_block *def_sb = bp->def_status_blk;
3715         u16 rc = 0;
3716 
3717         barrier(); /* status block is written to by the chip */
3718         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3719                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3720                 rc |= BNX2X_DEF_SB_ATT_IDX;
3721         }
3722 
3723         if (bp->def_idx != def_sb->sp_sb.running_index) {
3724                 bp->def_idx = def_sb->sp_sb.running_index;
3725                 rc |= BNX2X_DEF_SB_IDX;
3726         }
3727 
3728         /* Do not reorder: the index reads must complete before handling */
3729         barrier();
3730         return rc;
3731 }
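
     /* Illustrative consumer sketch (a simplified view of what the slow-path
      * task does with the returned bits):
      *
      *      u16 status = bnx2x_update_dsb_idx(bp);
      *      if (status & BNX2X_DEF_SB_ATT_IDX)
      *              ...handle attentions...
      *      if (status & BNX2X_DEF_SB_IDX)
      *              ...handle slow-path events...
      */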
3732 
3733 /*
3734  * slow path service functions
3735  */
3736 
3737 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3738 {
3739         int port = BP_PORT(bp);
3740         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3741                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
3742         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3743                                        NIG_REG_MASK_INTERRUPT_PORT0;
3744         u32 aeu_mask;
3745         u32 nig_mask = 0;
3746         u32 reg_addr;
3747 
3748         if (bp->attn_state & asserted)
3749                 BNX2X_ERR("IGU ERROR\n");
3750 
3751         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3752         aeu_mask = REG_RD(bp, aeu_addr);
3753 
3754         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
3755            aeu_mask, asserted);
3756         aeu_mask &= ~(asserted & 0x3ff);
3757         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3758 
3759         REG_WR(bp, aeu_addr, aeu_mask);
3760         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3761 
3762         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3763         bp->attn_state |= asserted;
3764         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3765 
3766         if (asserted & ATTN_HARD_WIRED_MASK) {
3767                 if (asserted & ATTN_NIG_FOR_FUNC) {
3768 
3769                         bnx2x_acquire_phy_lock(bp);
3770 
3771                         /* save nig interrupt mask */
3772                         nig_mask = REG_RD(bp, nig_int_mask_addr);
3773 
3774                         /* If nig_mask is not set, no need to call the update
3775                          * function.
3776                          */
3777                         if (nig_mask) {
3778                                 REG_WR(bp, nig_int_mask_addr, 0);
3779 
3780                                 bnx2x_link_attn(bp);
3781                         }
3782 
3783                         /* handle unicore attn? */
3784                 }
3785                 if (asserted & ATTN_SW_TIMER_4_FUNC)
3786                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
3787 
3788                 if (asserted & GPIO_2_FUNC)
3789                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
3790 
3791                 if (asserted & GPIO_3_FUNC)
3792                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
3793 
3794                 if (asserted & GPIO_4_FUNC)
3795                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
3796 
3797                 if (port == 0) {
3798                         if (asserted & ATTN_GENERAL_ATTN_1) {
3799                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
3800                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
3801                         }
3802                         if (asserted & ATTN_GENERAL_ATTN_2) {
3803                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
3804                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
3805                         }
3806                         if (asserted & ATTN_GENERAL_ATTN_3) {
3807                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
3808                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
3809                         }
3810                 } else {
3811                         if (asserted & ATTN_GENERAL_ATTN_4) {
3812                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
3813                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
3814                         }
3815                         if (asserted & ATTN_GENERAL_ATTN_5) {
3816                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
3817                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
3818                         }
3819                         if (asserted & ATTN_GENERAL_ATTN_6) {
3820                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
3821                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
3822                         }
3823                 }
3824 
3825         } /* if hardwired */
3826 
3827         if (bp->common.int_block == INT_BLOCK_HC)
3828                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3829                             COMMAND_REG_ATTN_BITS_SET);
3830         else
3831                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
3832 
3833         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
3834            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3835         REG_WR(bp, reg_addr, asserted);
3836 
3837         /* now set back the mask */
3838         if (asserted & ATTN_NIG_FOR_FUNC) {
3839                 /* Verify that IGU ack through BAR was written before restoring
3840                  * NIG mask. This loop should exit after 2-3 iterations max.
3841                  */
3842                 if (bp->common.int_block != INT_BLOCK_HC) {
3843                         u32 cnt = 0, igu_acked;
3844                         do {
3845                                 igu_acked = REG_RD(bp,
3846                                                    IGU_REG_ATTENTION_ACK_BITS);
3847                         } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
3848                                  (++cnt < MAX_IGU_ATTN_ACK_TO));
3849                         if (!igu_acked)
3850                                 DP(NETIF_MSG_HW,
3851                                    "Failed to verify IGU ack on time\n");
3852                         barrier();
3853                 }
3854                 REG_WR(bp, nig_int_mask_addr, nig_mask);
3855                 bnx2x_release_phy_lock(bp);
3856         }
3857 }
3858 
3859 static void bnx2x_fan_failure(struct bnx2x *bp)
3860 {
3861         int port = BP_PORT(bp);
3862         u32 ext_phy_config;
3863         /* mark the failure */
3864         ext_phy_config =
3865                 SHMEM_RD(bp,
3866                          dev_info.port_hw_config[port].external_phy_config);
3867 
3868         ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3869         ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
3870         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
3871                  ext_phy_config);
3872 
3873         /* log the failure */
3874         netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shut down the card to prevent permanent damage.\n"
3875                             "Please contact OEM Support for assistance\n");
3876 
3877         /* Schedule device reset (unload).
3878          * Some boards consume enough power while the driver is up
3879          * to overheat if the fan fails.
3880          */
3881         smp_mb__before_clear_bit();
3882         set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
3883         smp_mb__after_clear_bit();
3884         schedule_delayed_work(&bp->sp_rtnl_task, 0);
3885 }
3886 
3887 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
3888 {
3889         int port = BP_PORT(bp);
3890         int reg_offset;
3891         u32 val;
3892 
3893         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
3894                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
3895 
3896         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
3897 
3898                 val = REG_RD(bp, reg_offset);
3899                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
3900                 REG_WR(bp, reg_offset, val);
3901 
3902                 BNX2X_ERR("SPIO5 hw attention\n");
3903 
3904                 /* Fan failure attention */
3905                 bnx2x_hw_reset_phy(&bp->link_params);
3906                 bnx2x_fan_failure(bp);
3907         }
3908 
3909         if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
3910                 bnx2x_acquire_phy_lock(bp);
3911                 bnx2x_handle_module_detect_int(&bp->link_params);
3912                 bnx2x_release_phy_lock(bp);
3913         }
3914 
3915         if (attn & HW_INTERRUT_ASSERT_SET_0) {
3916 
3917                 val = REG_RD(bp, reg_offset);
3918                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3919                 REG_WR(bp, reg_offset, val);
3920 
3921                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3922                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3923                 bnx2x_panic();
3924         }
3925 }
3926 
3927 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3928 {
3929         u32 val;
3930 
3931         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3932 
3933                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3934                 BNX2X_ERR("DB hw attention 0x%x\n", val);
3935                 /* DORQ discard attention */
3936                 if (val & 0x2)
3937                         BNX2X_ERR("FATAL error from DORQ\n");
3938         }
3939 
3940         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3941 
3942                 int port = BP_PORT(bp);
3943                 int reg_offset;
3944 
3945                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3946                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3947 
3948                 val = REG_RD(bp, reg_offset);
3949                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3950                 REG_WR(bp, reg_offset, val);
3951 
3952                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3953                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3954                 bnx2x_panic();
3955         }
3956 }
3957 
3958 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3959 {
3960         u32 val;
3961 
3962         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3963 
3964                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3965                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3966                 /* CFC error attention */
3967                 if (val & 0x2)
3968                         BNX2X_ERR("FATAL error from CFC\n");
3969         }
3970 
3971         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3972                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3973                 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
3974                 /* RQ_USDMDP_FIFO_OVERFLOW */
3975                 if (val & 0x18000)
3976                         BNX2X_ERR("FATAL error from PXP\n");
3977 
3978                 if (!CHIP_IS_E1x(bp)) {
3979                         val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3980                         BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3981                 }
3982         }
3983 
3984         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3985 
3986                 int port = BP_PORT(bp);
3987                 int reg_offset;
3988 
3989                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3990                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3991 
3992                 val = REG_RD(bp, reg_offset);
3993                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3994                 REG_WR(bp, reg_offset, val);
3995 
3996                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3997                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3998                 bnx2x_panic();
3999         }
4000 }
4001 
4002 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4003 {
4004         u32 val;
4005 
4006         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4007 
4008                 if (attn & BNX2X_PMF_LINK_ASSERT) {
4009                         int func = BP_FUNC(bp);
4010 
4011                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4012                         bnx2x_read_mf_cfg(bp);
4013                         bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
4014                                         func_mf_config[BP_ABS_FUNC(bp)].config);
4015                         val = SHMEM_RD(bp,
4016                                        func_mb[BP_FW_MB_IDX(bp)].drv_status);
4017                         if (val & DRV_STATUS_DCC_EVENT_MASK)
4018                                 bnx2x_dcc_event(bp,
4019                                             (val & DRV_STATUS_DCC_EVENT_MASK));
4020 
4021                         if (val & DRV_STATUS_SET_MF_BW)
4022                                 bnx2x_set_mf_bw(bp);
4023 
4024                         if (val & DRV_STATUS_DRV_INFO_REQ)
4025                                 bnx2x_handle_drv_info_req(bp);
4026 
4027                         if (val & DRV_STATUS_VF_DISABLED)
4028                                 bnx2x_vf_handle_flr_event(bp);
4029 
4030                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4031                                 bnx2x_pmf_update(bp);
4032 
4033                         if (bp->port.pmf &&
4034                             (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
4035                                 bp->dcbx_enabled > 0)
4036                                 /* start dcbx state machine */
4037                                 bnx2x_dcbx_set_params(bp,
4038                                         BNX2X_DCBX_STATE_NEG_RECEIVED);
4039                         if (val & DRV_STATUS_AFEX_EVENT_MASK)
4040                                 bnx2x_handle_afex_cmd(bp,
4041                                         val & DRV_STATUS_AFEX_EVENT_MASK);
4042                         if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
4043                                 bnx2x_handle_eee_event(bp);
4044                         if (bp->link_vars.periodic_flags &
4045                             PERIODIC_FLAGS_LINK_EVENT) {
4046                                 /*  sync with link */
4047                                 bnx2x_acquire_phy_lock(bp);
4048                                 bp->link_vars.periodic_flags &=
4049                                         ~PERIODIC_FLAGS_LINK_EVENT;
4050                                 bnx2x_release_phy_lock(bp);
4051                                 if (IS_MF(bp))
4052                                         bnx2x_link_sync_notify(bp);
4053                                 bnx2x_link_report(bp);
4054                         }
4055                         /* Always call it here: bnx2x_link_report() will
4056                          * prevent duplicate link indications.
4057                          */
4058                         bnx2x__link_status_update(bp);
4059                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
4060 
4061                         BNX2X_ERR("MC assert!\n");
4062                         bnx2x_mc_assert(bp);
4063                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4064                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4065                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4066                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4067                         bnx2x_panic();
4068 
4069                 } else if (attn & BNX2X_MCP_ASSERT) {
4070 
4071                         BNX2X_ERR("MCP assert!\n");
4072                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4073                         bnx2x_fw_dump(bp);
4074 
4075                 } else
4076                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4077         }
4078 
4079         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4080                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4081                 if (attn & BNX2X_GRC_TIMEOUT) {
4082                         val = CHIP_IS_E1(bp) ? 0 :
4083                                         REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4084                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
4085                 }
4086                 if (attn & BNX2X_GRC_RSV) {
4087                         val = CHIP_IS_E1(bp) ? 0 :
4088                                         REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4089                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
4090                 }
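                     /* Writing to the CLR_LATCH_SIGNAL register releases the
                      * latched attention lines so subsequent events can latch
                      * again.
                      */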
4091                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4092         }
4093 }
4094 
4095 /*
4096  * Bits map:
4097  * 0-7   - Engine0 load counter.
4098  * 8-15  - Engine1 load counter.
4099  * 16    - Engine0 RESET_IN_PROGRESS bit.
4100  * 17    - Engine1 RESET_IN_PROGRESS bit.
4101  * 18    - Engine0 ONE_IS_LOADED. Set when there is at least one active
4102  *         function on the engine.
4103  * 19    - Engine1 ONE_IS_LOADED.
4104  * 20    - Chip reset flow bit. When set, a non-leader must wait for both
4105  *         engines' leaders to complete (i.e. check both RESET_IN_PROGRESS
4106  *         bits, not just the one belonging to its own engine).
4107  *
4108  */
4109 #define BNX2X_RECOVERY_GLOB_REG         MISC_REG_GENERIC_POR_1
4110 
4111 #define BNX2X_PATH0_LOAD_CNT_MASK       0x000000ff
4112 #define BNX2X_PATH0_LOAD_CNT_SHIFT      0
4113 #define BNX2X_PATH1_LOAD_CNT_MASK       0x0000ff00
4114 #define BNX2X_PATH1_LOAD_CNT_SHIFT      8
4115 #define BNX2X_PATH0_RST_IN_PROG_BIT     0x00010000
4116 #define BNX2X_PATH1_RST_IN_PROG_BIT     0x00020000
4117 #define BNX2X_GLOBAL_RESET_BIT          0x00040000
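
/* Illustrative sketch, not part of the driver: given the bit map above,
 * engine 0's per-PF load bits can be decoded from the recovery register
 * roughly like this (bnx2x_get_load_status() below does the real work for
 * either engine):
 */
static inline bool bnx2x_engine0_has_load_sketch(struct bnx2x *bp)
{
        u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

        /* each set bit in the 8-bit field marks one loaded PF */
        return ((val & BNX2X_PATH0_LOAD_CNT_MASK) >>
                BNX2X_PATH0_LOAD_CNT_SHIFT) != 0;
}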
4118 
4119 /*
4120  * Set the GLOBAL_RESET bit.
4121  *
4122  * Should be run under rtnl lock
4123  */
4124 void bnx2x_set_reset_global(struct bnx2x *bp)
4125 {
4126         u32 val;
4127         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4128         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4129         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4130         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4131 }
4132 
4133 /*
4134  * Clear the GLOBAL_RESET bit.
4135  *
4136  * Should be run under rtnl lock
4137  */
4138 static void bnx2x_clear_reset_global(struct bnx2x *bp)
4139 {
4140         u32 val;
4141         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4142         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4143         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4144         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4145 }
4146 
4147 /*
4148  * Checks the GLOBAL_RESET bit.
4149  *
4150  * should be run under rtnl lock
4151  */
4152 static bool bnx2x_reset_is_global(struct bnx2x *bp)
4153 {
4154         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4155 
4156         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4157         return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4158 }
4159 
4160 /*
4161  * Clear RESET_IN_PROGRESS bit for the current engine.
4162  *
4163  * Should be run under rtnl lock
4164  */
4165 static void bnx2x_set_reset_done(struct bnx2x *bp)
4166 {
4167         u32 val;
4168         u32 bit = BP_PATH(bp) ?
4169                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4170         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4171         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4172 
4173         /* Clear the bit */
4174         val &= ~bit;
4175         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4176 
4177         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4178 }
4179 
4180 /*
4181  * Set RESET_IN_PROGRESS for the current engine.
4182  *
4183  * should be run under rtnl lock
4184  */
4185 void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4186 {
4187         u32 val;
4188         u32 bit = BP_PATH(bp) ?
4189                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4190         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4191         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4192 
4193         /* Set the bit */
4194         val |= bit;
4195         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4196         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4197 }
4198 
4199 /*
4200  * Checks the RESET_IN_PROGRESS bit for the given engine.
4201  * should be run under rtnl lock
4202  */
4203 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4204 {
4205         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4206         u32 bit = engine ?
4207                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4208 
4209         /* return false if bit is set */
4210         return (val & bit) ? false : true;
4211 }
4212 
4213 /*
4214  * set pf load for the current pf.
4215  *
4216  * should be run under rtnl lock
4217  */
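/* Note: despite the name "load counter", each engine's 8-bit field is a
 * bitmask with one bit per PF; bnx2x_set_pf_load() sets this PF's bit and
 * bnx2x_clear_pf_load() clears it.
 */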
4218 void bnx2x_set_pf_load(struct bnx2x *bp)
4219 {
4220         u32 val1, val;
4221         u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4222                              BNX2X_PATH0_LOAD_CNT_MASK;
4223         u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4224                              BNX2X_PATH0_LOAD_CNT_SHIFT;
4225 
4226         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4227         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4228 
4229         DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
4230 
4231         /* get the current counter value */
4232         val1 = (val & mask) >> shift;
4233 
4234         /* set bit of that PF */
4235         val1 |= (1 << bp->pf_num);
4236 
4237         /* clear the old value */
4238         val &= ~mask;
4239 
4240         /* set the new one */
4241         val |= ((val1 << shift) & mask);
4242 
4243         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4244         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4245 }
4246 
4247 /**
4248  * bnx2x_clear_pf_load - clear pf load mark
4249  *
4250  * @bp:         driver handle
4251  *
4252  * Should be run under rtnl lock.
4253  * Decrements the load counter for the current engine. Returns
4254  * whether other functions are still loaded
4255  */
4256 bool bnx2x_clear_pf_load(struct bnx2x *bp)
4257 {
4258         u32 val1, val;
4259         u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4260                              BNX2X_PATH0_LOAD_CNT_MASK;
4261         u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4262                              BNX2X_PATH0_LOAD_CNT_SHIFT;
4263 
4264         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4265         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4266         DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
4267 
4268         /* get the current counter value */
4269         val1 = (val & mask) >> shift;
4270 
4271         /* clear bit of that PF */
4272         val1 &= ~(1 << bp->pf_num);
4273 
4274         /* clear the old value */
4275         val &= ~mask;
4276 
4277         /* set the new one */
4278         val |= ((val1 << shift) & mask);
4279 
4280         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4281         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4282         return val1 != 0;
4283 }
4284 
4285 /*
4286  * Read the load status for the current engine.
4287  *
4288  * should be run under rtnl lock
4289  */
4290 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4291 {
4292         u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4293                              BNX2X_PATH0_LOAD_CNT_MASK);
4294         u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4295                              BNX2X_PATH0_LOAD_CNT_SHIFT);
4296         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4297 
4298         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4299 
4300         val = (val & mask) >> shift;
4301 
4302         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4303            engine, val);
4304 
4305         return val != 0;
4306 }
4307 
4308 static void _print_parity(struct bnx2x *bp, u32 reg)
4309 {
4310         pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4311 }
4312 
4313 static void _print_next_block(int idx, const char *blk)
4314 {
4315         pr_cont("%s%s", idx ? ", " : "", blk);
4316 }
4317 
4318 static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4319                                             int *par_num, bool print)
4320 {
4321         u32 cur_bit;
4322         bool res;
4323         int i;
4324 
4325         res = false;
4326 
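             /* Walk sig bit by bit; each handled bit is cleared at the bottom
              * of the loop, so iteration ends once no asserted bits remain.
              */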
4327         for (i = 0; sig; i++) {
4328                 cur_bit = (0x1UL << i);
4329                 if (sig & cur_bit) {
4330                         res |= true; /* Each set bit is a real error! */
4331 
4332                         if (print) {
4333                                 switch (cur_bit) {
4334                                 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4335                                         _print_next_block((*par_num)++, "BRB");
4336                                         _print_parity(bp,
4337                                                       BRB1_REG_BRB1_PRTY_STS);
4338                                         break;
4339                                 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4340                                         _print_next_block((*par_num)++,
4341                                                           "PARSER");
4342                                         _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4343                                         break;
4344                                 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4345                                         _print_next_block((*par_num)++, "TSDM");
4346                                         _print_parity(bp,
4347                                                       TSDM_REG_TSDM_PRTY_STS);
4348                                         break;
4349                                 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4350                                         _print_next_block((*par_num)++,
4351                                                           "SEARCHER");
4352                                         _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4353                                         break;
4354                                 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4355                                         _print_next_block((*par_num)++, "TCM");
4356                                         _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4357                                         break;
4358                                 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4359                                         _print_next_block((*par_num)++,
4360                                                           "TSEMI");
4361                                         _print_parity(bp,
4362                                                       TSEM_REG_TSEM_PRTY_STS_0);
4363                                         _print_parity(bp,
4364                                                       TSEM_REG_TSEM_PRTY_STS_1);
4365                                         break;
4366                                 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4367                                         _print_next_block((*par_num)++, "XPB");
4368                                         _print_parity(bp, GRCBASE_XPB +
4369                                                           PB_REG_PB_PRTY_STS);
4370                                         break;
4371                                 }
4372                         }
4373 
4374                         /* Clear the bit */
4375                         sig &= ~cur_bit;
4376                 }
4377         }
4378 
4379         return res;
4380 }
4381 
4382 static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4383                                             int *par_num, bool *global,
4384                                             bool print)
4385 {
4386         u32 cur_bit;
4387         bool res;
4388         int i;
4389 
4390         res = false;
4391 
4392         for (i = 0; sig; i++) {
4393                 cur_bit = (0x1UL << i);
4394                 if (sig & cur_bit) {
4395                         res |= true; /* Each set bit is a real error! */
4396                         switch (cur_bit) {
4397                         case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4398                                 if (print) {
4399                                         _print_next_block((*par_num)++, "PBF");
4400                                         _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4401                                 }
4402                                 break;
4403                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4404                                 if (print) {
4405                                         _print_next_block((*par_num)++, "QM");
4406                                         _print_parity(bp, QM_REG_QM_PRTY_STS);
4407                                 }
4408                                 break;
4409                         case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4410                                 if (print) {
4411                                         _print_next_block((*par_num)++, "TM");
4412                                         _print_parity(bp, TM_REG_TM_PRTY_STS);
4413                                 }
4414                                 break;
4415                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4416                                 if (print) {
4417                                         _print_next_block((*par_num)++, "XSDM");
4418                                         _print_parity(bp,
4419                                                       XSDM_REG_XSDM_PRTY_STS);
4420                                 }
4421                                 break;
4422                         case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4423                                 if (print) {
4424                                         _print_next_block((*par_num)++, "XCM");
4425                                         _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4426                                 }
4427                                 break;
4428                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4429                                 if (print) {
4430                                         _print_next_block((*par_num)++,
4431                                                           "XSEMI");
4432                                         _print_parity(bp,
4433                                                       XSEM_REG_XSEM_PRTY_STS_0);
4434                                         _print_parity(bp,
4435                                                       XSEM_REG_XSEM_PRTY_STS_1);
4436                                 }
4437                                 break;
4438                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4439                                 if (print) {
4440                                         _print_next_block((*par_num)++,
4441                                                           "DOORBELLQ");
4442                                         _print_parity(bp,
4443                                                       DORQ_REG_DORQ_PRTY_STS);
4444                                 }
4445                                 break;
4446                         case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4447                                 if (print) {
4448                                         _print_next_block((*par_num)++, "NIG");
4449                                         if (CHIP_IS_E1x(bp)) {
4450                                                 _print_parity(bp,
4451                                                         NIG_REG_NIG_PRTY_STS);
4452                                         } else {
4453                                                 _print_parity(bp,
4454                                                         NIG_REG_NIG_PRTY_STS_0);
4455                                                 _print_parity(bp,
4456                                                         NIG_REG_NIG_PRTY_STS_1);
4457                                         }
4458                                 }
4459                                 break;
4460                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4461                                 if (print)
4462                                         _print_next_block((*par_num)++,
4463                                                           "VAUX PCI CORE");
4464                                 *global = true;
4465                                 break;
4466                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4467                                 if (print) {
4468                                         _print_next_block((*par_num)++,
4469                                                           "DEBUG");
4470                                         _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4471                                 }
4472                                 break;
4473                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4474                                 if (print) {
4475                                         _print_next_block((*par_num)++, "USDM");
4476                                         _print_parity(bp,
4477                                                       USDM_REG_USDM_PRTY_STS);
4478                                 }
4479                                 break;
4480                         case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4481                                 if (print) {
4482                                         _print_next_block((*par_num)++, "UCM");
4483                                         _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4484                                 }
4485                                 break;
4486                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4487                                 if (print) {
4488                                         _print_next_block((*par_num)++,
4489                                                           "USEMI");
4490                                         _print_parity(bp,
4491                                                       USEM_REG_USEM_PRTY_STS_0);
4492                                         _print_parity(bp,
4493                                                       USEM_REG_USEM_PRTY_STS_1);
4494                                 }
4495                                 break;
4496                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4497                                 if (print) {
4498                                         _print_next_block((*par_num)++, "UPB");
4499                                         _print_parity(bp, GRCBASE_UPB +
4500                                                           PB_REG_PB_PRTY_STS);
4501                                 }
4502                                 break;
4503                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4504                                 if (print) {
4505                                         _print_next_block((*par_num)++, "CSDM");
4506                                         _print_parity(bp,
4507                                                       CSDM_REG_CSDM_PRTY_STS);
4508                                 }
4509                                 break;
4510                         case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4511                                 if (print) {
4512                                         _print_next_block((*par_num)++, "CCM");
4513                                         _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4514                                 }
4515                                 break;
4516                         }
4517 
4518                         /* Clear the bit */
4519                         sig &= ~cur_bit;
4520                 }
4521         }
4522 
4523         return res;
4524 }
4525 
4526 static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4527                                             int *par_num, bool print)
4528 {
4529         u32 cur_bit;
4530         bool res;
4531         int i;
4532 
4533         res = false;
4534 
4535         for (i = 0; sig; i++) {
4536                 cur_bit = (0x1UL << i);
4537                 if (sig & cur_bit) {
4538                         res |= true; /* Each set bit is a real error! */
4539                         if (print) {
4540                                 switch (cur_bit) {
4541                                 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4542                                         _print_next_block((*par_num)++,
4543                                                           "CSEMI");
4544                                         _print_parity(bp,
4545                                                       CSEM_REG_CSEM_PRTY_STS_0);
4546                                         _print_parity(bp,
4547                                                       CSEM_REG_CSEM_PRTY_STS_1);
4548                                         break;
4549                                 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4550                                         _print_next_block((*par_num)++, "PXP");
4551                                         _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4552                                         _print_parity(bp,
4553                                                       PXP2_REG_PXP2_PRTY_STS_0);
4554                                         _print_parity(bp,
4555                                                       PXP2_REG_PXP2_PRTY_STS_1);
4556                                         break;
4557                                 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4558                                         _print_next_block((*par_num)++,
4559                                                           "PXPPCICLOCKCLIENT");
4560                                         break;
4561                                 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4562                                         _print_next_block((*par_num)++, "CFC");
4563                                         _print_parity(bp,
4564                                                       CFC_REG_CFC_PRTY_STS);
4565                                         break;
4566                                 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4567                                         _print_next_block((*par_num)++, "CDU");
4568                                         _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4569                                         break;
4570                                 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4571                                         _print_next_block((*par_num)++, "DMAE");
4572                                         _print_parity(bp,
4573                                                       DMAE_REG_DMAE_PRTY_STS);
4574                                         break;
4575                                 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4576                                         _print_next_block((*par_num)++, "IGU");
4577                                         if (CHIP_IS_E1x(bp))
4578                                                 _print_parity(bp,
4579                                                         HC_REG_HC_PRTY_STS);
4580                                         else
4581                                                 _print_parity(bp,
4582                                                         IGU_REG_IGU_PRTY_STS);
4583                                         break;
4584                                 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4585                                         _print_next_block((*par_num)++, "MISC");
4586                                         _print_parity(bp,
4587                                                       MISC_REG_MISC_PRTY_STS);
4588                                         break;
4589                                 }
4590                         }
4591 
4592                         /* Clear the bit */
4593                         sig &= ~cur_bit;
4594                 }
4595         }
4596 
4597         return res;
4598 }
4599 
4600 static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4601                                             int *par_num, bool *global,
4602                                             bool print)
4603 {
4604         bool res = false;
4605         u32 cur_bit;
4606         int i;
4607 
4608         for (i = 0; sig; i++) {
4609                 cur_bit = (0x1UL << i);
4610                 if (sig & cur_bit) {
4611                         switch (cur_bit) {
4612                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4613                                 if (print)
4614                                         _print_next_block((*par_num)++,
4615                                                           "MCP ROM");
4616                                 *global = true;
4617                                 res |= true;
4618                                 break;
4619                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4620                                 if (print)
4621                                         _print_next_block((*par_num)++,
4622                                                           "MCP UMP RX");
4623                                 *global = true;
4624                                 res |= true;
4625                                 break;
4626                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4627                                 if (print)
4628                                         _print_next_block((*par_num)++,
4629                                                           "MCP UMP TX");
4630                                 *global = true;
4631                                 res |= true;
4632                                 break;
4633                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4634                                 if (print)
4635                                         _print_next_block((*par_num)++,
4636                                                           "MCP SCPAD");
4637                                 /* clear latched SCPAD PARITY from MCP */
4638                                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4639                                        1UL << 10);
4640                                 break;
4641                         }
4642 
4643                         /* Clear the bit */
4644                         sig &= ~cur_bit;
4645                 }
4646         }
4647 
4648         return res;
4649 }
4650 
4651 static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4652                                             int *par_num, bool print)
4653 {
4654         u32 cur_bit;
4655         bool res;
4656         int i;
4657 
4658         res = false;
4659 
4660         for (i = 0; sig; i++) {
4661                 cur_bit = (0x1UL << i);
4662                 if (sig & cur_bit) {
4663                         res |= true; /* Each set bit is a real error! */
4664                         if (print) {
4665                                 switch (cur_bit) {
4666                                 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4667                                         _print_next_block((*par_num)++,
4668                                                           "PGLUE_B");
4669                                         _print_parity(bp,
4670                                                       PGLUE_B_REG_PGLUE_B_PRTY_STS);
4671                                         break;
4672                                 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4673                                         _print_next_block((*par_num)++, "ATC");
4674                                         _print_parity(bp,
4675                                                       ATC_REG_ATC_PRTY_STS);
4676                                         break;
4677                                 }
4678                         }
4679                         /* Clear the bit */
4680                         sig &= ~cur_bit;
4681                 }
4682         }
4683 
4684         return res;
4685 }
4686 
4687 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4688                               u32 *sig)
4689 {
4690         bool res = false;
4691 
4692         if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4693             (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4694             (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4695             (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4696             (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4697                 int par_num = 0;
4698                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention:\n"
4699                                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4700                           sig[0] & HW_PRTY_ASSERT_SET_0,
4701                           sig[1] & HW_PRTY_ASSERT_SET_1,
4702                           sig[2] & HW_PRTY_ASSERT_SET_2,
4703                           sig[3] & HW_PRTY_ASSERT_SET_3,
4704                           sig[4] & HW_PRTY_ASSERT_SET_4);
4705                 if (print)
4706                         netdev_err(bp->dev,
4707                                    "Parity errors detected in blocks: ");
4708                 res |= bnx2x_check_blocks_with_parity0(bp,
4709                         sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4710                 res |= bnx2x_check_blocks_with_parity1(bp,
4711                         sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
4712                 res |= bnx2x_check_blocks_with_parity2(bp,
4713                         sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
4714                 res |= bnx2x_check_blocks_with_parity3(bp,
4715                         sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
4716                 res |= bnx2x_check_blocks_with_parity4(bp,
4717                         sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
4718 
4719                 if (print)
4720                         pr_cont("\n");
4721         }
4722 
4723         return res;
4724 }
4725 
4726 /**
4727  * bnx2x_chk_parity_attn - checks for parity attentions.
4728  *
4729  * @bp:         driver handle
4730  * @global:     true if there was a global attention
4731  * @print:      show parity attention in syslog
4732  */
4733 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4734 {
4735         struct attn_route attn = { {0} };
4736         int port = BP_PORT(bp);
4737 
4738         attn.sig[0] = REG_RD(bp,
4739                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
4740                              port*4);
4741         attn.sig[1] = REG_RD(bp,
4742                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
4743                              port*4);
4744         attn.sig[2] = REG_RD(bp,
4745                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
4746                              port*4);
4747         attn.sig[3] = REG_RD(bp,
4748                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
4749                              port*4);
4750         /* Since MCP attentions can't be disabled inside the block, we need to
4751          * read AEU registers to see whether they're currently disabled
4752          */
4753         attn.sig[3] &= ((REG_RD(bp,
4754                                 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
4755                                       : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
4756                          MISC_AEU_ENABLE_MCP_PRTY_BITS) |
4757                         ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
4758 
4759         if (!CHIP_IS_E1x(bp))
4760                 attn.sig[4] = REG_RD(bp,
4761                         MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
4762                                      port*4);
4763 
4764         return bnx2x_parity_attn(bp, global, print, attn.sig);
4765 }
4766 
4767 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
4768 {
4769         u32 val;
4770         if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
4771 
4772                 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
4773                 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
4774                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
4775                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
4776                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
4777                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
4778                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
4779                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
4780                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
4781                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
4782                 if (val &
4783                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
4784                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
4785                 if (val &
4786                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
4787                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
4788                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
4789                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
4790                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
4791                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
4792                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
4793                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
4794         }
4795         if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
4796                 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
4797                 BNX2X_ERR("ATC hw attention 0x%x\n", val);
4798                 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
4799                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
4800                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
4801                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
4802                 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
4803                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
4804                 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
4805                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
4806                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
4807                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
4808                 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
4809                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
4810         }
4811 
4812         if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4813                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
4814                 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
4815                 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4816                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
4817         }
4818 }
4819 
4820 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4821 {
4822         struct attn_route attn, *group_mask;
4823         int port = BP_PORT(bp);
4824         int index;
4825         u32 reg_addr;
4826         u32 val;
4827         u32 aeu_mask;
4828         bool global = false;
4829 
4830         /* need to take HW lock because the MCP or the other port might
4831            also try to handle this event */
4832         bnx2x_acquire_alr(bp);
4833 
4834         if (bnx2x_chk_parity_attn(bp, &global, true)) {
4835 #ifndef BNX2X_STOP_ON_ERROR
4836                 bp->recovery_state = BNX2X_RECOVERY_INIT;
4837                 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4838                 /* Disable HW interrupts */
4839                 bnx2x_int_disable(bp);
4840                 /* In case of parity errors don't handle attentions so that
4841                  * the other function will also "see" the parity errors.
4842                  */
4843 #else
4844                 bnx2x_panic();
4845 #endif
4846                 bnx2x_release_alr(bp);
4847                 return;
4848         }
4849 
4850         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4851         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4852         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4853         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
4854         if (!CHIP_IS_E1x(bp))
4855                 attn.sig[4] =
4856                       REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
4857         else
4858                 attn.sig[4] = 0;
4859 
4860         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
4861            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
4862 
4863         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4864                 if (deasserted & (1 << index)) {
4865                         group_mask = &bp->attn_group[index];
4866 
4867                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
4868                            index,
4869                            group_mask->sig[0], group_mask->sig[1],
4870                            group_mask->sig[2], group_mask->sig[3],
4871                            group_mask->sig[4]);
4872 
4873                         bnx2x_attn_int_deasserted4(bp,
4874                                         attn.sig[4] & group_mask->sig[4]);
4875                         bnx2x_attn_int_deasserted3(bp,
4876                                         attn.sig[3] & group_mask->sig[3]);
4877                         bnx2x_attn_int_deasserted1(bp,
4878                                         attn.sig[1] & group_mask->sig[1]);
4879                         bnx2x_attn_int_deasserted2(bp,
4880                                         attn.sig[2] & group_mask->sig[2]);
4881                         bnx2x_attn_int_deasserted0(bp,
4882                                         attn.sig[0] & group_mask->sig[0]);
4883                 }
4884         }
4885 
4886         bnx2x_release_alr(bp);
4887 
4888         if (bp->common.int_block == INT_BLOCK_HC)
4889                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4890                             COMMAND_REG_ATTN_BITS_CLR);
4891         else
4892                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
4893 
4894         val = ~deasserted;
4895         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
4896            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4897         REG_WR(bp, reg_addr, val);
4898 
4899         if (~bp->attn_state & deasserted)
4900                 BNX2X_ERR("IGU ERROR\n");
4901 
4902         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4903                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
4904 
4905         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4906         aeu_mask = REG_RD(bp, reg_addr);
4907 
4908         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
4909            aeu_mask, deasserted);
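             /* Re-enable the attention lines that have just deasserted; the
              * 0x3ff mask limits the update to the attention input lines.
              */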
4910         aeu_mask |= (deasserted & 0x3ff);
4911         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4912 
4913         REG_WR(bp, reg_addr, aeu_mask);
4914         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4915 
4916         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4917         bp->attn_state &= ~deasserted;
4918         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4919 }
4920 
4921 static void bnx2x_attn_int(struct bnx2x *bp)
4922 {
4923         /* read local copy of bits */
4924         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
4925                                                                 attn_bits);
4926         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
4927                                                                 attn_bits_ack);
4928         u32 attn_state = bp->attn_state;
4929 
4930         /* look for changed bits */
4931         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
4932         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
4933 
4934         DP(NETIF_MSG_HW,
4935            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
4936            attn_bits, attn_ack, asserted, deasserted);
4937 
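             /* Consistency check: wherever attn_bits and attn_ack agree, the
              * cached attn_state must agree as well; a mismatch means we are
              * out of sync with the hardware.
              */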
4938         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4939                 BNX2X_ERR("BAD attention state\n");
4940 
4941         /* handle bits that were raised */
4942         if (asserted)
4943                 bnx2x_attn_int_asserted(bp, asserted);
4944 
4945         if (deasserted)
4946                 bnx2x_attn_int_deasserted(bp, deasserted);
4947 }
4948 
4949 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
4950                       u16 index, u8 op, u8 update)
4951 {
4952         u32 igu_addr = bp->igu_base_addr;
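             /* each IGU command slot is 8 bytes wide, hence the *8 scaling */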
4953         igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
4954         bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
4955                              igu_addr);
4956 }
4957 
4958 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
4959 {
4960         /* No memory barriers */
4961         storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
4962         mmiowb(); /* keep prod updates ordered */
4963 }
4964 
4965 static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
4966                                       union event_ring_elem *elem)
4967 {
4968         u8 err = elem->message.error;
4969 
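             /* If CNIC is not started, or the CID falls below the CNIC range
              * and is not the iSCSI L2 CID, this is not a CNIC completion -
              * hand it back to the caller.
              */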
4970         if (!bp->cnic_eth_dev.starting_cid  ||
4971             (cid < bp->cnic_eth_dev.starting_cid &&
4972             cid != bp->cnic_eth_dev.iscsi_l2_cid))
4973                 return 1;
4974 
4975         DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
4976 
4977         if (unlikely(err)) {
4978 
4979                 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
4980                           cid);
4981                 bnx2x_panic_dump(bp, false);
4982         }
4983         bnx2x_cnic_cfc_comp(bp, cid, err);
4984         return 0;
4985 }
4986 
4987 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
4988 {
4989         struct bnx2x_mcast_ramrod_params rparam;
4990         int rc;
4991 
4992         memset(&rparam, 0, sizeof(rparam));
4993 
4994         rparam.mcast_obj = &bp->mcast_obj;
4995 
4996         netif_addr_lock_bh(bp->dev);
4997 
4998         /* Clear pending state for the last command */
4999         bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
5000 
5001         /* If there are pending mcast commands - send them */
5002         if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
5003                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
5004                 if (rc < 0)
5005                         BNX2X_ERR("Failed to send pending mcast commands: %d\n",
5006                                   rc);
5007         }
5008 
5009         netif_addr_unlock_bh(bp->dev);
5010 }
5011 
5012 static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5013                                             union event_ring_elem *elem)
5014 {
5015         unsigned long ramrod_flags = 0;
5016         int rc = 0;
5017         u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
5018         struct bnx2x_vlan_mac_obj *vlan_mac_obj;
5019 
5020         /* Always push next commands out, don't wait here */
5021         __set_bit(RAMROD_CONT, &ramrod_flags);
5022 
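             /* The echo field packs the pending filter command type in its
              * upper bits (above BNX2X_SWCID_SHIFT) and the SW CID in the
              * lower bits.
              */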
5023         switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
5024                             >> BNX2X_SWCID_SHIFT) {
5025         case BNX2X_FILTER_MAC_PENDING:
5026                 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
5027                 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
5028                         vlan_mac_obj = &bp->iscsi_l2_mac_obj;
5029                 else
5030                         vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
5031 
5032                 break;
5033         case BNX2X_FILTER_MCAST_PENDING:
5034                 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
5035                 /* This is only relevant for 57710 where multicast MACs are
5036                  * configured as unicast MACs using the same ramrod.
5037                  */
5038                 bnx2x_handle_mcast_eqe(bp);
5039                 return;
5040         default:
5041                 BNX2X_ERR("Unsupported classification command: %d\n",
5042                           elem->message.data.eth_event.echo);
5043                 return;
5044         }
5045 
5046         rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
5047 
5048         if (rc < 0)
5049                 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
5050         else if (rc > 0)
5051                 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
5052 }
5053 
5054 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
5055 
5056 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
5057 {
5058         netif_addr_lock_bh(bp->dev);
5059 
5060         clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5061 
5062         /* Send rx_mode command again if was requested */
5063         if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
5064                 bnx2x_set_storm_rx_mode(bp);
5065         else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
5066                                     &bp->sp_state))
5067                 bnx2x_set_iscsi_eth_rx_mode(bp, true);
5068         else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
5069                                     &bp->sp_state))
5070                 bnx2x_set_iscsi_eth_rx_mode(bp, false);
5071 
5072         netif_addr_unlock_bh(bp->dev);
5073 }
5074 
5075 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
5076                                               union event_ring_elem *elem)
5077 {
5078         if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
5079                 DP(BNX2X_MSG_SP,
5080                    "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
5081                    elem->message.data.vif_list_event.func_bit_map);
5082                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
5083                         elem->message.data.vif_list_event.func_bit_map);
5084         } else if (elem->message.data.vif_list_event.echo ==
5085                    VIF_LIST_RULE_SET) {
5086                 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
5087                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
5088         }
5089 }
5090 
5091 /* called with rtnl_lock */
5092 static void bnx2x_after_function_update(struct bnx2x *bp)
5093 {
5094         int q, rc;
5095         struct bnx2x_fastpath *fp;
5096         struct bnx2x_queue_state_params queue_params = {NULL};
5097         struct bnx2x_queue_update_params *q_update_params =
5098                 &queue_params.params.update;
5099 
5100         /* Send Q update command with afex vlan removal values for all Qs */
5101         queue_params.cmd = BNX2X_Q_CMD_UPDATE;
5102 
5103         /* set silent vlan removal values according to vlan mode */
5104         __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5105                   &q_update_params->update_flags);
5106         __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
5107                   &q_update_params->update_flags);
5108         __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5109 
5110         /* In access mode, mask and value are 0 so that all vlans are stripped */
5111         if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
5112                 q_update_params->silent_removal_value = 0;
5113                 q_update_params->silent_removal_mask = 0;
5114         } else {
5115                 q_update_params->silent_removal_value =
5116                         (bp->afex_def_vlan_tag & VLAN_VID_MASK);
5117                 q_update_params->silent_removal_mask = VLAN_VID_MASK;
5118         }
5119 
5120         for_each_eth_queue(bp, q) {
5121                 /* Set the appropriate Queue object */
5122                 fp = &bp->fp[q];
5123                 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5124 
5125                 /* send the ramrod */
5126                 rc = bnx2x_queue_state_change(bp, &queue_params);
5127                 if (rc < 0)
5128                         BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5129                                   q);
5130         }
5131 
5132         if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
5133                 fp = &bp->fp[FCOE_IDX(bp)];
5134                 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5135 
5136                 /* clear pending completion bit */
5137                 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5138 
5139                 /* mark latest Q bit */
5140                 smp_mb__before_clear_bit();
5141                 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
5142                 smp_mb__after_clear_bit();
5143 
5144                 /* send Q update ramrod for FCoE Q */
5145                 rc = bnx2x_queue_state_change(bp, &queue_params);
5146                 if (rc < 0)
5147                         BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5148                                   FCOE_IDX(bp));
5149         } else {
5150                 /* If no FCoE ring - ACK MCP now */
5151                 bnx2x_link_report(bp);
5152                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5153         }
5154 }
5155 
5156 static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
5157         struct bnx2x *bp, u32 cid)
5158 {
5159         DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
5160 
5161         if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
5162                 return &bnx2x_fcoe_sp_obj(bp, q_obj);
5163         else
5164                 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
5165 }
5166 
5167 static void bnx2x_eq_int(struct bnx2x *bp)
5168 {
5169         u16 hw_cons, sw_cons, sw_prod;
5170         union event_ring_elem *elem;
5171         u8 echo;
5172         u32 cid;
5173         u8 opcode;
5174         int rc, spqe_cnt = 0;
5175         struct bnx2x_queue_sp_obj *q_obj;
5176         struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5177         struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
5178 
5179         hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5180 
5181         /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
5182          * When we reach the next-page element we need to adjust so the loop
5183          * condition below will be met. The next-page element is the size of a
5184          * regular element, hence we increment by 1.
5185          */
5186         if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
5187                 hw_cons++;
5188 
5189         /* This function may never run in parallel with itself for a
5190          * specific bp, thus there is no need in "paired" read memory
5191          * barrier here.
5192          */
5193         sw_cons = bp->eq_cons;
5194         sw_prod = bp->eq_prod;
5195 
5196         DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->eq_spq_left %x\n",
5197                         hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
5198 
5199         for (; sw_cons != hw_cons;
5200               sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
5201 
5202                 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
5203 
5204                 rc = bnx2x_iov_eq_sp_event(bp, elem);
5205                 if (!rc) {
5206                         DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
5207                            rc);
5208                         goto next_spqe;
5209                 }
5210 
5211                 /* elem CID originates from FW; actually LE */
5212                 cid = SW_CID((__force __le32)
5213                              elem->message.data.cfc_del_event.cid);
5214                 opcode = elem->message.opcode;
5215 
5216                 /* handle eq element */
5217                 switch (opcode) {
5218                 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5219                         DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
5220                         bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
5221                         continue;
5222 
5223                 case EVENT_RING_OPCODE_STAT_QUERY:
5224                         DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
5225                            "got statistics comp event %d\n",
5226                            bp->stats_comp++);
5227                         /* nothing to do with stats comp */
5228                         goto next_spqe;
5229 
5230                 case EVENT_RING_OPCODE_CFC_DEL:
5231                         /* handle according to cid range */
5232                         /*
5233                          * we may want to verify here that the bp state is
5234                          * HALTING
5235                          */
5236                         DP(BNX2X_MSG_SP,
5237                            "got delete ramrod for MULTI[%d]\n", cid);
5238 
5239                         if (CNIC_LOADED(bp) &&
5240                             !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
5241                                 goto next_spqe;
5242 
5243                         q_obj = bnx2x_cid_to_q_obj(bp, cid);
5244 
5245                         if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5246                                 break;
5247 
5248                         goto next_spqe;
5249 
5250                 case EVENT_RING_OPCODE_STOP_TRAFFIC:
5251                         DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
5252                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5253                         if (f_obj->complete_cmd(bp, f_obj,
5254                                                 BNX2X_F_CMD_TX_STOP))
5255                                 break;
5256                         goto next_spqe;
5257 
5258                 case EVENT_RING_OPCODE_START_TRAFFIC:
5259                         DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
5260                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5261                         if (f_obj->complete_cmd(bp, f_obj,
5262                                                 BNX2X_F_CMD_TX_START))
5263                                 break;
5264                         goto next_spqe;
5265 
5266                 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
5267                         echo = elem->message.data.function_update_event.echo;
5268                         if (echo == SWITCH_UPDATE) {
5269                                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5270                                    "got FUNC_SWITCH_UPDATE ramrod\n");
5271                                 if (f_obj->complete_cmd(
5272                                         bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5273                                         break;
5274 
5275                         } else {
5276                                 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5277                                    "AFEX: ramrod completed FUNCTION_UPDATE\n");
5278                                 f_obj->complete_cmd(bp, f_obj,
5279                                                     BNX2X_F_CMD_AFEX_UPDATE);
5280 
5281                                 /* We will perform the Queues update from
5282                                  * sp_rtnl task as all Queue SP operations
5283                                  * should run under rtnl_lock.
5284                                  */
5285                                 smp_mb__before_clear_bit();
5286                                 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
5287                                         &bp->sp_rtnl_state);
5288                                 smp_mb__after_clear_bit();
5289 
5290                                 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5291                         }
5292 
5293                         goto next_spqe;
5294 
5295                 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
5296                         f_obj->complete_cmd(bp, f_obj,
5297                                             BNX2X_F_CMD_AFEX_VIFLISTS);
5298                         bnx2x_after_afex_vif_lists(bp, elem);
5299                         goto next_spqe;
5300                 case EVENT_RING_OPCODE_FUNCTION_START:
5301                         DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5302                            "got FUNC_START ramrod\n");
5303                         if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5304                                 break;
5305 
5306                         goto next_spqe;
5307 
5308                 case EVENT_RING_OPCODE_FUNCTION_STOP:
5309                         DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5310                            "got FUNC_STOP ramrod\n");
5311                         if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5312                                 break;
5313 
5314                         goto next_spqe;
5315                 }
5316 
5317                 switch (opcode | bp->state) {
5318                 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5319                       BNX2X_STATE_OPEN):
5320                 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5321                       BNX2X_STATE_OPENING_WAIT4_PORT):
5322                         cid = elem->message.data.eth_event.echo &
5323                                 BNX2X_SWCID_MASK;
5324                         DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
5325                            cid);
5326                         rss_raw->clear_pending(rss_raw);
5327                         break;
5328 
5329                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
5330                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
5331                 case (EVENT_RING_OPCODE_SET_MAC |
5332                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5333                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5334                       BNX2X_STATE_OPEN):
5335                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5336                       BNX2X_STATE_DIAG):
5337                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5338                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5339                         DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
5340                         bnx2x_handle_classification_eqe(bp, elem);
5341                         break;
5342 
5343                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5344                       BNX2X_STATE_OPEN):
5345                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5346                       BNX2X_STATE_DIAG):
5347                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5348                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5349                         DP(BNX2X_MSG_SP, "got mcast ramrod\n");
5350                         bnx2x_handle_mcast_eqe(bp);
5351                         break;
5352 
5353                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5354                       BNX2X_STATE_OPEN):
5355                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5356                       BNX2X_STATE_DIAG):
5357                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5358                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5359                         DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
5360                         bnx2x_handle_rx_mode_eqe(bp);
5361                         break;
5362                 default:
5363                         /* unknown event - log an error and continue */
5364                         BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5365                                   elem->message.opcode, bp->state);
5366                 }
5367 next_spqe:
5368                 spqe_cnt++;
5369         } /* for */
5370 
5371         smp_mb__before_atomic_inc();
5372         atomic_add(spqe_cnt, &bp->eq_spq_left);
5373 
5374         bp->eq_cons = sw_cons;
5375         bp->eq_prod = sw_prod;
5376         /* Make sure the above memory writes are issued before updating the producer */
5377         smp_wmb();
5378 
5379         /* update producer */
5380         bnx2x_update_eq_prod(bp, bp->eq_prod);
5381 }
5382 
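     /* Slowpath task: if a slowpath interrupt occurred, handle HW
      * attentions and EQ completions (kicking the FCoE queue when it has
      * work) and re-enable the slowpath interrupt; then run the sriov
      * slowpath and, for afex, ack a pending VIFSET towards the MFW.
      */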
5383 static void bnx2x_sp_task(struct work_struct *work)
5384 {
5385         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5386 
5387         DP(BNX2X_MSG_SP, "sp task invoked\n");
5388 
5389         /* make sure the atomic interrupt_occurred has been written */
5390         smp_rmb();
5391         if (atomic_read(&bp->interrupt_occurred)) {
5392 
5393                 /* what work needs to be performed? */
5394                 u16 status = bnx2x_update_dsb_idx(bp);
5395 
5396                 DP(BNX2X_MSG_SP, "status %x\n", status);
5397                 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5398                 atomic_set(&bp->interrupt_occurred, 0);
5399 
5400                 /* HW attentions */
5401                 if (status & BNX2X_DEF_SB_ATT_IDX) {
5402                         bnx2x_attn_int(bp);
5403                         status &= ~BNX2X_DEF_SB_ATT_IDX;
5404                 }
5405 
5406                 /* SP events: STAT_QUERY and others */
5407                 if (status & BNX2X_DEF_SB_IDX) {
5408                         struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5409 
5410                         if (FCOE_INIT(bp) &&
5411                             (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5412                                 /* Prevent local bottom-halves from running as
5413                                  * we are going to change the local NAPI list.
5414                                  */
5415                                 local_bh_disable();
5416                                 napi_schedule(&bnx2x_fcoe(bp, napi));
5417                                 local_bh_enable();
5418                         }
5419 
5420                         /* Handle EQ completions */
5421                         bnx2x_eq_int(bp);
5422                         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5423                                      le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5424 
5425                         status &= ~BNX2X_DEF_SB_IDX;
5426                 }
5427 
5428                 /* if status is non-zero then perhaps something went wrong */
5429                 if (unlikely(status))
5430                         DP(BNX2X_MSG_SP,
5431                            "got an unknown interrupt! (status 0x%x)\n", status);
5432 
5433                 /* ack status block only if something was actually handled */
5434                 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5435                              le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5436         }
5437 
5438         /* must be called after EQ processing (since the EQ leads to sriov
5439          * ramrod completion flows).
5440          * This flow may have been scheduled by the arrival of a ramrod
5441          * completion, or by the sriov code rescheduling itself.
5442          */
5443         bnx2x_iov_sp_task(bp);
5444 
5445         /* afex - poll to check if VIFSET_ACK should be sent to MFW */
5446         if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5447                                &bp->sp_state)) {
5448                 bnx2x_link_report(bp);
5449                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5450         }
5451 }
5452 
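     /* Slowpath MSI-X handler: masks further slowpath interrupts via the
      * IGU, lets a loaded cnic run its handler and defers the real work
      * to the slowpath task.
      */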
5453 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5454 {
5455         struct net_device *dev = dev_instance;
5456         struct bnx2x *bp = netdev_priv(dev);
5457 
5458         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5459                      IGU_INT_DISABLE, 0);
5460 
5461 #ifdef BNX2X_STOP_ON_ERROR
5462         if (unlikely(bp->panic))
5463                 return IRQ_HANDLED;
5464 #endif
5465 
5466         if (CNIC_LOADED(bp)) {
5467                 struct cnic_ops *c_ops;
5468 
5469                 rcu_read_lock();
5470                 c_ops = rcu_dereference(bp->cnic_ops);
5471                 if (c_ops)
5472                         c_ops->cnic_handler(bp->cnic_data, NULL);
5473                 rcu_read_unlock();
5474         }
5475 
5476         /* schedule sp task to perform default status block work, ack
5477          * attentions and enable interrupts.
5478          */
5479         bnx2x_schedule_sp_task(bp);
5480 
5481         return IRQ_HANDLED;
5482 }
5483 
5484 /* end of slow path */
5485 
5486 void bnx2x_drv_pulse(struct bnx2x *bp)
5487 {
5488         SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5489                  bp->fw_drv_pulse_wr_seq);
5490 }
5491 
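     /* Periodic timer: maintains the driver pulse handshake with the MCP
      * (PF only), triggers a statistics update while the device is open,
      * samples the PF bulletin board for VFs and re-arms itself.
      */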
5492 static void bnx2x_timer(unsigned long data)
5493 {
5494         struct bnx2x *bp = (struct bnx2x *) data;
5495 
5496         if (!netif_running(bp->dev))
5497                 return;
5498 
5499         if (IS_PF(bp) &&
5500             !BP_NOMCP(bp)) {
5501                 int mb_idx = BP_FW_MB_IDX(bp);
5502                 u16 drv_pulse;
5503                 u16 mcp_pulse;
5504 
5505                 ++bp->fw_drv_pulse_wr_seq;
5506                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5507                 drv_pulse = bp->fw_drv_pulse_wr_seq;
5508                 bnx2x_drv_pulse(bp);
5509 
5510                 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5511                              MCP_PULSE_SEQ_MASK);
5512                 /* The delta between driver pulse and mcp response
5513                  * should not get too big. If the MFW is more than 5 pulses
5514                  * behind, we should worry about it enough to generate an error
5515                  * log.
5516                  */
5517                 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5518                         BNX2X_ERR("MFW seems hung: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5519                                   drv_pulse, mcp_pulse);
5520         }
5521 
5522         if (bp->state == BNX2X_STATE_OPEN)
5523                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5524 
5525         /* sample the pf-vf bulletin board for new posts from the pf */
5526         if (IS_VF(bp))
5527                 bnx2x_timer_sriov(bp);
5528 
5529         mod_timer(&bp->timer, jiffies + bp->current_interval);
5530 }
5531 
5532 /* end of Statistics */
5533 
5534 /* nic init */
5535 
5536 /*
5537  * nic init service functions
5538  */
5539 
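     /* Fill len bytes at addr with a pattern, using dword writes when both
      * address and length are dword-aligned and byte writes otherwise.
      */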
5540 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5541 {
5542         u32 i;
5543         if (!(len % 4) && !(addr % 4))
5544                 for (i = 0; i < len; i += 4)
5545                         REG_WR(bp, addr + i, fill);
5546         else
5547                 for (i = 0; i < len; i++)
5548                         REG_WR8(bp, addr + i, fill);
5549 }
5550 
5551 /* helper: writes fastpath SB data to FW - data_size in dwords */
5552 static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5553                                 int fw_sb_id,
5554                                 u32 *sb_data_p,
5555                                 u32 data_size)
5556 {
5557         int index;
5558         for (index = 0; index < data_size; index++)
5559                 REG_WR(bp, BAR_CSTRORM_INTMEM +
5560                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5561                         sizeof(u32)*index,
5562                         *(sb_data_p + index));
5563 }
5564 
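     /* Disable a fastpath status block: mark it SB_DISABLED in its FW data
      * and zero the status and sync blocks in CSTORM internal memory.
      */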
5565 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5566 {
5567         u32 *sb_data_p;
5568         u32 data_size = 0;
5569         struct hc_status_block_data_e2 sb_data_e2;
5570         struct hc_status_block_data_e1x sb_data_e1x;
5571 
5572         /* disable the function first */
5573         if (!CHIP_IS_E1x(bp)) {
5574                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5575                 sb_data_e2.common.state = SB_DISABLED;
5576                 sb_data_e2.common.p_func.vf_valid = false;
5577                 sb_data_p = (u32 *)&sb_data_e2;
5578                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5579         } else {
5580                 memset(&sb_data_e1x, 0,
5581                        sizeof(struct hc_status_block_data_e1x));
5582                 sb_data_e1x.common.state = SB_DISABLED;
5583                 sb_data_e1x.common.p_func.vf_valid = false;
5584                 sb_data_p = (u32 *)&sb_data_e1x;
5585                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5586         }
5587         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5588 
5589         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5590                         CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5591                         CSTORM_STATUS_BLOCK_SIZE);
5592         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5593                         CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5594                         CSTORM_SYNC_BLOCK_SIZE);
5595 }
5596 
5597 /* helper: writes SP SB data to FW */
5598 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5599                 struct hc_sp_status_block_data *sp_sb_data)
5600 {
5601         int func = BP_FUNC(bp);
5602         int i;
5603         for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5604                 REG_WR(bp, BAR_CSTRORM_INTMEM +
5605                         CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5606                         i*sizeof(u32),
5607                         *((u32 *)sp_sb_data + i));
5608 }
5609 
5610 static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5611 {
5612         int func = BP_FUNC(bp);
5613         struct hc_sp_status_block_data sp_sb_data;
5614         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5615 
5616         sp_sb_data.state = SB_DISABLED;
5617         sp_sb_data.p_func.vf_valid = false;
5618 
5619         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5620 
5621         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5622                         CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5623                         CSTORM_SP_STATUS_BLOCK_SIZE);
5624         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5625                         CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5626                         CSTORM_SP_SYNC_BLOCK_SIZE);
5627 }
5628 
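     /* Attach a status block state machine to its IGU SB and segment,
      * parking its timer at the maximal values.
      */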
5629 static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5630                                            int igu_sb_id, int igu_seg_id)
5631 {
5632         hc_sm->igu_sb_id = igu_sb_id;
5633         hc_sm->igu_seg_id = igu_seg_id;
5634         hc_sm->timer_value = 0xFF;
5635         hc_sm->time_to_expire = 0xFFFFFFFF;
5636 }
5637 
5638 /* maps each status block index to its RX or TX state machine id */
5639 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5640 {
5641         /* zero out state machine indices */
5642         /* rx indices */
5643         index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5644 
5645         /* tx indices */
5646         index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5647         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5648         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5649         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5650 
5651         /* map indices */
5652         /* rx indices */
5653         index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5654                 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5655 
5656         /* tx indices */
5657         index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5658                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5659         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5660                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5661         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5662                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5663         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5664                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5665 }
5666 
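     /* Initialize a fastpath status block: disable it first, then build
      * the chip-specific FW data (owner function, host address, state
      * machine mapping) and write it to CSTORM internal memory.
      */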
5667 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5668                           u8 vf_valid, int fw_sb_id, int igu_sb_id)
5669 {
5670         int igu_seg_id;
5671 
5672         struct hc_status_block_data_e2 sb_data_e2;
5673         struct hc_status_block_data_e1x sb_data_e1x;
5674         struct hc_status_block_sm  *hc_sm_p;
5675         int data_size;
5676         u32 *sb_data_p;
5677 
5678         if (CHIP_INT_MODE_IS_BC(bp))
5679                 igu_seg_id = HC_SEG_ACCESS_NORM;
5680         else
5681                 igu_seg_id = IGU_SEG_ACCESS_NORM;
5682 
5683         bnx2x_zero_fp_sb(bp, fw_sb_id);
5684 
5685         if (!CHIP_IS_E1x(bp)) {
5686                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5687                 sb_data_e2.common.state = SB_ENABLED;
5688                 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5689                 sb_data_e2.common.p_func.vf_id = vfid;
5690                 sb_data_e2.common.p_func.vf_valid = vf_valid;
5691                 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5692                 sb_data_e2.common.same_igu_sb_1b = true;
5693                 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5694                 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5695                 hc_sm_p = sb_data_e2.common.state_machine;
5696                 sb_data_p = (u32 *)&sb_data_e2;
5697                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5698                 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5699         } else {
5700                 memset(&sb_data_e1x, 0,
5701                        sizeof(struct hc_status_block_data_e1x));
5702                 sb_data_e1x.common.state = SB_ENABLED;
5703                 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5704                 sb_data_e1x.common.p_func.vf_id = 0xff;
5705                 sb_data_e1x.common.p_func.vf_valid = false;
5706                 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5707                 sb_data_e1x.common.same_igu_sb_1b = true;
5708                 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5709                 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5710                 hc_sm_p = sb_data_e1x.common.state_machine;
5711                 sb_data_p = (u32 *)&sb_data_e1x;
5712                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5713                 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5714         }
5715 
5716         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5717                                        igu_sb_id, igu_seg_id);
5718         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
5719                                        igu_sb_id, igu_seg_id);
5720 
5721         DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
5722 
5723         /* write indices to HW - PCI guarantees endianness of regpairs */
5724         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5725 }
5726 
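     /* Program the RX and per-CoS TX interrupt coalescing timeouts of a
      * single status block.
      */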
5727 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
5728                                      u16 tx_usec, u16 rx_usec)
5729 {
5730         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
5731                                     false, rx_usec);
5732         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5733                                        HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
5734                                        tx_usec);
5735         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5736                                        HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
5737                                        tx_usec);
5738         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5739                                        HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
5740                                        tx_usec);
5741 }
5742 
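     /* Initialize the default (slowpath) status block: set up the
      * attention section, read the dynamic attention group masks and
      * write the SP section, then enable the default SB in the IGU.
      */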
5743 static void bnx2x_init_def_sb(struct bnx2x *bp)
5744 {
5745         struct host_sp_status_block *def_sb = bp->def_status_blk;
5746         dma_addr_t mapping = bp->def_status_blk_mapping;
5747         int igu_sp_sb_index;
5748         int igu_seg_id;
5749         int port = BP_PORT(bp);
5750         int func = BP_FUNC(bp);
5751         int reg_offset, reg_offset_en5;
5752         u64 section;
5753         int index;
5754         struct hc_sp_status_block_data sp_sb_data;
5755         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5756 
5757         if (CHIP_INT_MODE_IS_BC(bp)) {
5758                 igu_sp_sb_index = DEF_SB_IGU_ID;
5759                 igu_seg_id = HC_SEG_ACCESS_DEF;
5760         } else {
5761                 igu_sp_sb_index = bp->igu_dsb_id;
5762                 igu_seg_id = IGU_SEG_ACCESS_DEF;
5763         }
5764 
5765         /* ATTN */
5766         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
5767                                             atten_status_block);
5768         def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
5769 
5770         bp->attn_state = 0;
5771 
5772         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5773                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5774         reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
5775                                  MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
5776         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5777                 int sindex;
5778                 /* take care of sig[0]..sig[4] */
5779                 for (sindex = 0; sindex < 4; sindex++)
5780                         bp->attn_group[index].sig[sindex] =
5781                            REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
5782 
5783                 if (!CHIP_IS_E1x(bp))
5784                         /*
5785                          * enable5 is separate from the other enable
5786                          * registers, so the address stride between
5787                          * groups is 4 rather than 16
5788                          */
5789                         bp->attn_group[index].sig[4] = REG_RD(bp,
5790                                         reg_offset_en5 + 0x4*index);
5791                 else
5792                         bp->attn_group[index].sig[4] = 0;
5793         }
5794 
5795         if (bp->common.int_block == INT_BLOCK_HC) {
5796                 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5797                                      HC_REG_ATTN_MSG0_ADDR_L);
5798 
5799                 REG_WR(bp, reg_offset, U64_LO(section));
5800                 REG_WR(bp, reg_offset + 4, U64_HI(section));
5801         } else if (!CHIP_IS_E1x(bp)) {
5802                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
5803                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
5804         }
5805 
5806         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
5807                                             sp_sb);
5808 
5809         bnx2x_zero_sp_sb(bp);
5810 
5811         /* PCI guarantees endianness of regpairs */
5812         sp_sb_data.state                = SB_ENABLED;
5813         sp_sb_data.host_sb_addr.lo      = U64_LO(section);
5814         sp_sb_data.host_sb_addr.hi      = U64_HI(section);
5815         sp_sb_data.igu_sb_id            = igu_sp_sb_index;
5816         sp_sb_data.igu_seg_id           = igu_seg_id;
5817         sp_sb_data.p_func.pf_id         = func;
5818         sp_sb_data.p_func.vnic_id       = BP_VN(bp);
5819         sp_sb_data.p_func.vf_id         = 0xff;
5820 
5821         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5822 
5823         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
5824 }
5825 
5826 void bnx2x_update_coalesce(struct bnx2x *bp)
5827 {
5828         int i;
5829 
5830         for_each_eth_queue(bp, i)
5831                 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
5832                                          bp->tx_ticks, bp->rx_ticks);
5833 }
5834 
5835 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5836 {
5837         spin_lock_init(&bp->spq_lock);
5838         atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
5839 
5840         bp->spq_prod_idx = 0;
5841         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5842         bp->spq_prod_bd = bp->spq;
5843         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5844 }
5845 
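     /* Chain the EQ pages into a ring through their next-page elements
      * and reset the ring's producer and consumer indices.
      */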
5846 static void bnx2x_init_eq_ring(struct bnx2x *bp)
5847 {
5848         int i;
5849         for (i = 1; i <= NUM_EQ_PAGES; i++) {
5850                 union event_ring_elem *elem =
5851                         &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
5852 
5853                 elem->next_page.addr.hi =
5854                         cpu_to_le32(U64_HI(bp->eq_mapping +
5855                                    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
5856                 elem->next_page.addr.lo =
5857                         cpu_to_le32(U64_LO(bp->eq_mapping +
5858                                    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
5859         }
5860         bp->eq_cons = 0;
5861         bp->eq_prod = NUM_EQ_DESC;
5862         bp->eq_cons_sb = BNX2X_EQ_INDEX;
5863         /* leave some slack so we get a warning before the EQ actually fills up */
5864         atomic_set(&bp->eq_spq_left,
5865                 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
5866 }
5867 
5868 /* called with netif_addr_lock_bh() */
5869 static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
5870                                unsigned long rx_mode_flags,
5871                                unsigned long rx_accept_flags,
5872                                unsigned long tx_accept_flags,
5873                                unsigned long ramrod_flags)
5874 {
5875         struct bnx2x_rx_mode_ramrod_params ramrod_param;
5876         int rc;
5877 
5878         memset(&ramrod_param, 0, sizeof(ramrod_param));
5879 
5880         /* Prepare ramrod parameters */
5881         ramrod_param.cid = 0;
5882         ramrod_param.cl_id = cl_id;
5883         ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
5884         ramrod_param.func_id = BP_FUNC(bp);
5885 
5886         ramrod_param.pstate = &bp->sp_state;
5887         ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
5888 
5889         ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
5890         ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
5891 
5892         set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5893 
5894         ramrod_param.ramrod_flags = ramrod_flags;
5895         ramrod_param.rx_mode_flags = rx_mode_flags;
5896 
5897         ramrod_param.rx_accept_flags = rx_accept_flags;
5898         ramrod_param.tx_accept_flags = tx_accept_flags;
5899 
5900         rc = bnx2x_config_rx_mode(bp, &ramrod_param);
5901         if (rc < 0) {
5902                 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
5903                 return rc;
5904         }
5905 
5906         return 0;
5907 }
5908 
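     /* Translate a BNX2X_RX_MODE_* value into the RX and TX (internal
      * switching) accept-flag masks used by the rx_mode ramrod.
      */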
5909 static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
5910                                    unsigned long *rx_accept_flags,
5911                                    unsigned long *tx_accept_flags)
5912 {
5913         /* Clear the flags first */
5914         *rx_accept_flags = 0;
5915         *tx_accept_flags = 0;
5916 
5917         switch (rx_mode) {
5918         case BNX2X_RX_MODE_NONE:
5919                 /*
5920                  * 'drop all' supersedes any accept flags that may have been
5921                  * passed to the function.
5922                  */
5923                 break;
5924         case BNX2X_RX_MODE_NORMAL:
5925                 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
5926                 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
5927                 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
5928 
5929                 /* internal switching mode */
5930                 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
5931                 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
5932                 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
5933 
5934                 break;
5935         case BNX2X_RX_MODE_ALLMULTI:
5936                 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
5937                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
5938                 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
5939 
5940                 /* internal switching mode */
5941                 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
5942                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
5943                 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
5944 
5945                 break;
5946         case BNX2X_RX_MODE_PROMISC:
5947                 /* According to definition of SI mode, iface in promisc mode
5948                  * should receive matched and unmatched (in resolution of port)
5949                  * unicast packets.
5950                  */
5951                 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
5952                 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
5953                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
5954                 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
5955 
5956                 /* internal switching mode */
5957                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
5958                 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
5959 
5960                 if (IS_MF_SI(bp))
5961                         __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
5962                 else
5963                         __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
5964 
5965                 break;
5966         default:
5967                 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
5968                 return -EINVAL;
5969         }
5970 
5971         /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
5972         if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
5973                 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
5974                 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
5975         }
5976 
5977         return 0;
5978 }
5979 
5980 /* called with netif_addr_lock_bh() */
5981 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5982 {
5983         unsigned long rx_mode_flags = 0, ramrod_flags = 0;
5984         unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
5985         int rc;
5986 
5987         if (!NO_FCOE(bp))
5988                 /* Configure rx_mode of FCoE Queue */
5989                 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
5990 
5991         rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
5992                                      &tx_accept_flags);
5993         if (rc)
5994                 return rc;
5995 
5996         __set_bit(RAMROD_RX, &ramrod_flags);
5997         __set_bit(RAMROD_TX, &ramrod_flags);
5998 
5999         return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
6000                                    rx_accept_flags, tx_accept_flags,
6001                                    ramrod_flags);
6002 }
6003 
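     /* One-time internal memory setup performed by the common-load
      * function: classification-failure policy, USTORM aggregation data
      * and the IGU (back-compatible or normal) mode.
      */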
6004 static void bnx2x_init_internal_common(struct bnx2x *bp)
6005 {
6006         int i;
6007 
6008         if (IS_MF_SI(bp))
6009                 /*
6010                  * In switch independent mode, the TSTORM needs to accept
6011                  * packets that failed classification, since approximate match
6012                  * mac addresses aren't written to NIG LLH
6013                  */
6014                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
6015                             TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
6016         else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
6017                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
6018                             TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
6019 
6020         /* Zero this manually as its initialization is
6021            currently missing in the initTool */
6022         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
6023                 REG_WR(bp, BAR_USTRORM_INTMEM +
6024                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
6025         if (!CHIP_IS_E1x(bp)) {
6026                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
6027                         CHIP_INT_MODE_IS_BC(bp) ?
6028                         HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
6029         }
6030 }
6031 
6032 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
6033 {
6034         switch (load_code) {
6035         case FW_MSG_CODE_DRV_LOAD_COMMON:
6036         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
6037                 bnx2x_init_internal_common(bp);
6038                 /* no break */
6039 
6040         case FW_MSG_CODE_DRV_LOAD_PORT:
6041                 /* nothing to do */
6042                 /* no break */
6043 
6044         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6045                 /* internal memory per function is
6046                    initialized inside bnx2x_pf_init */
6047                 break;
6048 
6049         default:
6050                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6051                 break;
6052         }
6053 }
6054 
6055 static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
6056 {
6057         return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
6058 }
6059 
6060 static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
6061 {
6062         return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
6063 }
6064 
6065 static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
6066 {
6067         if (CHIP_IS_E1x(fp->bp))
6068                 return BP_L_ID(fp->bp) + fp->index;
6069         else    /* We want Client ID to be the same as IGU SB ID for 57712 */
6070                 return bnx2x_fp_igu_sb_id(fp);
6071 }
6072 
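     /* Set up one ethernet fastpath: its ids and SB shortcuts, per-CoS TX
      * data and, for a PF, the status block, queue state object and
      * classification objects.
      */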
6073 static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
6074 {
6075         struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
6076         u8 cos;
6077         unsigned long q_type = 0;
6078         u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
6079         fp->rx_queue = fp_idx;
6080         fp->cid = fp_idx;
6081         fp->cl_id = bnx2x_fp_cl_id(fp);
6082         fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
6083         fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
6084         /* qZone id equals to FW (per path) client id */
6085         fp->cl_qzone_id  = bnx2x_fp_qzone_id(fp);
6086 
6087         /* init shortcut */
6088         fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
6089 
6090         /* Setup SB indices */
6091         fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
6092 
6093         /* Configure Queue State object */
6094         __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6095         __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6096 
6097         BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
6098 
6099         /* init tx data */
6100         for_each_cos_in_tx_queue(fp, cos) {
6101                 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
6102                                   CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
6103                                   FP_COS_TO_TXQ(fp, cos, bp),
6104                                   BNX2X_TX_SB_INDEX_BASE + cos, fp);
6105                 cids[cos] = fp->txdata_ptr[cos]->cid;
6106         }
6107 
6108         /* nothing more for vf to do here */
6109         if (IS_VF(bp))
6110                 return;
6111 
6112         bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
6113                       fp->fw_sb_id, fp->igu_sb_id);
6114         bnx2x_update_fpsb_idx(fp);
6115         bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
6116                              fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6117                              bnx2x_sp_mapping(bp, q_rdata), q_type);
6118 
6119         /*
6120          * Configure classification DBs: Always enable Tx switching
6121          */
6122         bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
6123 
6124         DP(NETIF_MSG_IFUP,
6125            "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  fw_sb %d  igu_sb %d\n",
6126            fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6127            fp->igu_sb_id);
6128 }
6129 
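     /* Link the TX BD pages into a ring through their next_bd elements
      * and reset the doorbell data and all producer/consumer indices.
      */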
6130 static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
6131 {
6132         int i;
6133 
6134         for (i = 1; i <= NUM_TX_RINGS; i++) {
6135                 struct eth_tx_next_bd *tx_next_bd =
6136                         &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
6137 
6138                 tx_next_bd->addr_hi =
6139                         cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
6140                                     BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6141                 tx_next_bd->addr_lo =
6142                         cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
6143                                     BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6144         }
6145 
6146         *txdata->tx_cons_sb = cpu_to_le16(0);
6147 
6148         SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
6149         txdata->tx_db.data.zero_fill1 = 0;
6150         txdata->tx_db.data.prod = 0;
6151 
6152         txdata->tx_pkt_prod = 0;
6153         txdata->tx_pkt_cons = 0;
6154         txdata->tx_bd_prod = 0;
6155         txdata->tx_bd_cons = 0;
6156         txdata->tx_pkt = 0;
6157 }
6158 
6159 static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
6160 {
6161         int i;
6162 
6163         for_each_tx_queue_cnic(bp, i)
6164                 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
6165 }
6166 
6167 static void bnx2x_init_tx_rings(struct bnx2x *bp)
6168 {
6169         int i;
6170         u8 cos;
6171 
6172         for_each_eth_queue(bp, i)
6173                 for_each_cos_in_tx_queue(&bp->fp[i], cos)
6174                         bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
6175 }
6176 
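     /* Set up the FCoE L2 fastpath: it shares the default status block
      * and is limited to a single CoS, but gets its own client id, cid
      * and queue state object.
      */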
6177 static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
6178 {
6179         struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
6180         unsigned long q_type = 0;
6181 
6182         bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
6183         bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
6184                                                      BNX2X_FCOE_ETH_CL_ID_IDX);
6185         bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
6186         bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
6187         bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
6188         bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
6189         bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
6190                           fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
6191                           fp);
6192 
6193         DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
6194 
6195         /* qZone id equals to FW (per path) client id */
6196         bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
6197         /* init shortcut */
6198         bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
6199                 bnx2x_rx_ustorm_prods_offset(fp);
6200 
6201         /* Configure Queue State object */
6202         __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6203         __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6204 
6205         /* No multi-CoS for FCoE L2 client */
6206         BUG_ON(fp->max_cos != 1);
6207 
6208         bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
6209                              &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6210                              bnx2x_sp_mapping(bp, q_rdata), q_type);
6211 
6212         DP(NETIF_MSG_IFUP,
6213            "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6214            fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6215            fp->igu_sb_id);
6216 }
6217 
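     /* cnic-side init: the FCoE fastpath (when available), the cnic
      * status block and the cnic RX/TX rings.
      */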
6218 void bnx2x_nic_init_cnic(struct bnx2x *bp)
6219 {
6220         if (!NO_FCOE(bp))
6221                 bnx2x_init_fcoe_fp(bp);
6222 
6223         bnx2x_init_sb(bp, bp->cnic_sb_mapping,
6224                       BNX2X_VF_ID_INVALID, false,
6225                       bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
6226 
6227         /* ensure status block indices were read */
6228         rmb();
6229         bnx2x_init_rx_rings_cnic(bp);
6230         bnx2x_init_tx_rings_cnic(bp);
6231 
6232         /* flush all */
6233         mb();
6234         mmiowb();
6235 }
6236 
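     /* Init performed before IRQs are requested: ethernet fastpaths and
      * their RX/TX rings; a PF also initializes MOD_ABS interrupts, the
      * default status block and the SP ring.
      */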
6237 void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
6238 {
6239         int i;
6240 
6241         /* Setup NIC internals and enable interrupts */
6242         for_each_eth_queue(bp, i)
6243                 bnx2x_init_eth_fp(bp, i);
6244 
6245         /* ensure status block indices were read */
6246         rmb();
6247         bnx2x_init_rx_rings(bp);
6248         bnx2x_init_tx_rings(bp);
6249 
6250         if (IS_PF(bp)) {
6251                 /* Initialize MOD_ABS interrupts */
6252                 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
6253                                        bp->common.shmem_base,
6254                                        bp->common.shmem2_base, BP_PORT(bp));
6255 
6256                 /* initialize the default status block and sp ring */
6257                 bnx2x_init_def_sb(bp);
6258                 bnx2x_update_dsb_idx(bp);
6259                 bnx2x_init_sp_ring(bp);
6260         } else {
6261                 bnx2x_memset_stats(bp);
6262         }
6263 }
6264 
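     /* Init performed after IRQs are requested: EQ ring, load-code
      * dependent internal memory, PF and statistics state; finally
      * enables interrupts and checks for a pending SPIO5 attention.
      */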
6265 void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
6266 {
6267         bnx2x_init_eq_ring(bp);
6268         bnx2x_init_internal(bp, load_code);
6269         bnx2x_pf_init(bp);
6270         bnx2x_stats_init(bp);
6271 
6272         /* flush all before enabling interrupts */
6273         mb();
6274         mmiowb();
6275 
6276         bnx2x_int_enable(bp);
6277 
6278         /* Check for SPIO5 */
6279         bnx2x_attn_int_deasserted0(bp,
6280                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6281                                    AEU_INPUTS_ATTN_BITS_SPIO5);
6282 }
6283 
6284 /* gzip service functions */
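     /* Allocate the DMA buffer and zlib stream used to decompress FW
      * data, unwinding the allocations on failure.
      */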
6285 static int bnx2x_gunzip_init(struct bnx2x *bp)
6286 {
6287         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6288                                             &bp->gunzip_mapping, GFP_KERNEL);
6289         if (bp->gunzip_buf == NULL)
6290                 goto gunzip_nomem1;
6291 
6292         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6293         if (bp->strm == NULL)
6294                 goto gunzip_nomem2;
6295 
6296         bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
6297         if (bp->strm->workspace == NULL)
6298                 goto gunzip_nomem3;
6299 
6300         return 0;
6301 
6302 gunzip_nomem3:
6303         kfree(bp->strm);
6304         bp->strm = NULL;
6305 
6306 gunzip_nomem2:
6307         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6308                           bp->gunzip_mapping);
6309         bp->gunzip_buf = NULL;
6310 
6311 gunzip_nomem1:
6312         BNX2X_ERR("Cannot allocate firmware buffer for decompression\n");
6313         return -ENOMEM;
6314 }
6315 
6316 static void bnx2x_gunzip_end(struct bnx2x *bp)
6317 {
6318         if (bp->strm) {
6319                 vfree(bp->strm->workspace);
6320                 kfree(bp->strm);
6321                 bp->strm = NULL;
6322         }
6323 
6324         if (bp->gunzip_buf) {
6325                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6326                                   bp->gunzip_mapping);
6327                 bp->gunzip_buf = NULL;
6328         }
6329 }
6330 
6331 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6332 {
6333         int n, rc;
6334 
6335         /* check gzip header */
6336         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6337                 BNX2X_ERR("Bad gzip header\n");