Version:  2.0.40 2.2.26 2.4.37 2.6.39 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9 3.10 3.11 3.12 3.13 3.14 3.15

Linux/drivers/scsi/lpfc/lpfc_init.c

  1 /*******************************************************************
  2  * This file is part of the Emulex Linux Device Driver for         *
  3  * Fibre Channel Host Bus Adapters.                                *
  4  * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  5  * EMULEX and SLI are trademarks of Emulex.                        *
  6  * www.emulex.com                                                  *
  7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
  8  *                                                                 *
  9  * This program is free software; you can redistribute it and/or   *
 10  * modify it under the terms of version 2 of the GNU General       *
 11  * Public License as published by the Free Software Foundation.    *
 12  * This program is distributed in the hope that it will be useful. *
 13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 18  * more details, a copy of which can be found in the file COPYING  *
 19  * included with this package.                                     *
 20  *******************************************************************/
 21 
 22 #include <linux/blkdev.h>
 23 #include <linux/delay.h>
 24 #include <linux/dma-mapping.h>
 25 #include <linux/idr.h>
 26 #include <linux/interrupt.h>
 27 #include <linux/module.h>
 28 #include <linux/kthread.h>
 29 #include <linux/pci.h>
 30 #include <linux/spinlock.h>
 31 #include <linux/ctype.h>
 32 #include <linux/aer.h>
 33 #include <linux/slab.h>
 34 #include <linux/firmware.h>
 35 #include <linux/miscdevice.h>
 36 #include <linux/percpu.h>
 37 
 38 #include <scsi/scsi.h>
 39 #include <scsi/scsi_device.h>
 40 #include <scsi/scsi_host.h>
 41 #include <scsi/scsi_transport_fc.h>
 42 
 43 #include "lpfc_hw4.h"
 44 #include "lpfc_hw.h"
 45 #include "lpfc_sli.h"
 46 #include "lpfc_sli4.h"
 47 #include "lpfc_nl.h"
 48 #include "lpfc_disc.h"
 49 #include "lpfc_scsi.h"
 50 #include "lpfc.h"
 51 #include "lpfc_logmsg.h"
 52 #include "lpfc_crtn.h"
 53 #include "lpfc_vport.h"
 54 #include "lpfc_version.h"
 55 
/* Scratch dump buffers shared across the driver; presumably used to capture
 * data/DIF payloads for debugging — confirm against lpfc_scsi.c users.
 * The *_order values record the page-allocation order so the buffers can be
 * freed correctly; _dump_buf_lock serializes access to all four.
 */
char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Used when mapping IRQ vectors in a driver centric manner */
uint16_t *lpfc_used_cpu;
uint32_t lpfc_present_cpu;

/* Forward declarations for file-local helpers defined later in this file */
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);

/* FC transport templates registered at module init; shared by all HBAs */
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
/* Allocator for unique per-HBA instance numbers (brd_no) */
static DEFINE_IDR(lpfc_hba_index);
 88 
 89 /**
 90  * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 91  * @phba: pointer to lpfc hba data structure.
 92  *
 93  * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 94  * mailbox command. It retrieves the revision information from the HBA and
 95  * collects the Vital Product Data (VPD) about the HBA for preparing the
 96  * configuration of the HBA.
 97  *
 98  * Return codes:
 99  *   0 - success.
100  *   -ERESTART - requests the SLI layer to reset the HBA and try again.
101  *   Any other value - indicates an error.
102  **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	/* Unlock key written to LC-class HBAs via READ_NVPARM below */
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	/* The key is byte-swapped in place exactly once per module lifetime */
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			/* Convert the key to the HBA's big-endian word order */
			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		/* Issue READ_NVPARM with the license key in the rsvd3 field */
		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		/* Cache WWNN/WWPN returned by the adapter */
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/* SLI-3 was requested but the firmware did not respond in SLI-3 mode */
	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less then 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	/* Pull the VPD region out of the adapter in DUMP-sized pieces */
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		/* Clamp so the copy below never overruns lpfc_vpd_data */
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	/* A VPD allocation failure is not fatal; still report success */
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
253 
254 /**
255  * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
256  * @phba: pointer to lpfc hba data structure.
257  * @pmboxq: pointer to the driver internal queue element for mailbox command.
258  *
259  * This is the completion handler for driver's configuring asynchronous event
260  * mailbox command to the device. If the mailbox command returns successfully,
261  * it will set internal async event support flag to 1; otherwise, it will
262  * set internal async event support flag to 0.
263  **/
264 static void
265 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
266 {
267         if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
268                 phba->temp_sensor_support = 1;
269         else
270                 phba->temp_sensor_support = 0;
271         mempool_free(pmboxq, phba->mbox_mem_pool);
272         return;
273 }
274 
275 /**
276  * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
277  * @phba: pointer to lpfc hba data structure.
278  * @pmboxq: pointer to the driver internal queue element for mailbox command.
279  *
280  * This is the completion handler for dump mailbox command for getting
281  * wake up parameters. When this command complete, the response contain
282  * Option rom version of the HBA. This function translate the version number
283  * into a human readable string and store it in OptionROMVersion.
284  **/
285 static void
286 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
287 {
288         struct prog_id *prg;
289         uint32_t prog_id_word;
290         char dist = ' ';
291         /* character array used for decoding dist type. */
292         char dist_char[] = "nabx";
293 
294         if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
295                 mempool_free(pmboxq, phba->mbox_mem_pool);
296                 return;
297         }
298 
299         prg = (struct prog_id *) &prog_id_word;
300 
301         /* word 7 contain option rom version */
302         prog_id_word = pmboxq->u.mb.un.varWords[7];
303 
304         /* Decode the Option rom version word to a readable string */
305         if (prg->dist < 4)
306                 dist = dist_char[prg->dist];
307 
308         if ((prg->dist == 3) && (prg->num == 0))
309                 sprintf(phba->OptionROMVersion, "%d.%d%d",
310                         prg->ver, prg->rev, prg->lev);
311         else
312                 sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
313                         prg->ver, prg->rev, prg->lev,
314                         dist, prg->num);
315         mempool_free(pmboxq, phba->mbox_mem_pool);
316         return;
317 }
318 
319 /**
320  * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
321  *      cfg_soft_wwnn, cfg_soft_wwpn
322  * @vport: pointer to lpfc vport data structure.
323  *
324  *
325  * Return codes
326  *   None.
327  **/
328 void
329 lpfc_update_vport_wwn(struct lpfc_vport *vport)
330 {
331         /* If the soft name exists then update it using the service params */
332         if (vport->phba->cfg_soft_wwnn)
333                 u64_to_wwn(vport->phba->cfg_soft_wwnn,
334                            vport->fc_sparam.nodeName.u.wwn);
335         if (vport->phba->cfg_soft_wwpn)
336                 u64_to_wwn(vport->phba->cfg_soft_wwpn,
337                            vport->fc_sparam.portName.u.wwn);
338 
339         /*
340          * If the name is empty or there exists a soft name
341          * then copy the service params name, otherwise use the fc name
342          */
343         if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
344                 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
345                         sizeof(struct lpfc_name));
346         else
347                 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
348                         sizeof(struct lpfc_name));
349 
350         if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
351                 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
352                         sizeof(struct lpfc_name));
353         else
354                 memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
355                         sizeof(struct lpfc_name));
356 }
357 
358 /**
359  * lpfc_config_port_post - Perform lpfc initialization after config port
360  * @phba: pointer to lpfc hba data structure.
361  *
362  * This routine will do LPFC initialization after the CONFIG_PORT mailbox
363  * command call. It performs all internal resource and state setups on the
364  * port: post IOCB buffers, enable appropriate host interrupt attentions,
365  * ELS ring timers, etc.
366  *
367  * Return codes
368  *   0 - success.
369  *   Any other value - error.
370  **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		/* Grab the DMA buffer pointer before pmb goes back to the
		 * pool, then release both.
		 */
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	/* Copy the just-read service parameters and free the DMA buffer;
	 * pmb itself is reused for the commands below.
	 */
	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		/* Emit two lowercase-hex characters per WWNN byte:
		 * 0x30 ('0') base for digits, 0x61 ('a') base for 10-15.
		 */
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	i = (mb->un.varRdConfig.max_xri + 1);
	if (phba->cfg_hba_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, i);
		phba->cfg_hba_queue_depth = i;
	}

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */
	i = (mb->un.varRdConfig.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3360 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, i);
		phba->pport->cfg_lun_queue_depth = i;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		/* NOTE(review): pmb is not freed on this path — looks like a
		 * leak; confirm before changing since mailbox ownership rules
		 * elsewhere in this file are subtle.
		 */
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	/* When polling the FCP ring, keep its attention interrupt masked */
	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		/* Issue DOWN_LINK asynchronously; the default completion
		 * handler frees pmb on success/busy.
		 */
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		/* Link bring-up allocates its own mailbox; free ours first */
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	/* Enable async event reporting on the ELS ring; completion handler
	 * (lpfc_config_async_cmpl) records temp-sensor support and frees pmb.
	 */
	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	/* Completion handler decodes the version string and frees pmb */
	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}
637 
638 /**
639  * lpfc_hba_init_link - Initialize the FC link
640  * @phba: pointer to lpfc hba data structure.
641  * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
642  *
643  * This routine will issue the INIT_LINK mailbox command call.
644  * It is available to other drivers through the lpfc_hba data
645  * structure for use as a delayed link up mechanism with the
646  * module parameter lpfc_suppress_link_up.
647  *
648  * Return code
649  *              0 - success
650  *              Any other value - error
651  **/
652 int
653 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
654 {
655         return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
656 }
657 
658 /**
659  * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
660  * @phba: pointer to lpfc hba data structure.
661  * @fc_topology: desired fc topology.
662  * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
663  *
664  * This routine will issue the INIT_LINK mailbox command call.
665  * It is available to other drivers through the lpfc_hba data
666  * structure for use as a delayed link up mechanism with the
667  * module parameter lpfc_suppress_link_up.
668  *
669  * Return code
670  *              0 - success
671  *              Any other value - error
672  **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	/* Validate the requested link speed against the speeds this board
	 * actually supports (lmt mask read earlier from READ_CONFIG); fall
	 * back to auto-negotiation on any mismatch.
	 */
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"1302 Invalid speed for this board:%d "
			"Reset link speed to auto.\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		/* If the command is BUSY in NOWAIT mode, ownership of pmb
		 * stays with the mailbox layer; otherwise free it here.
		 */
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	/* In polled mode the completion handler never runs, so the caller
	 * must release the mailbox explicitly.
	 */
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
739 
740 /**
741  * lpfc_hba_down_link - this routine downs the FC link
742  * @phba: pointer to lpfc hba data structure.
743  * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
744  *
745  * This routine will issue the DOWN_LINK mailbox command call.
746  * It is available to other drivers through the lpfc_hba data
747  * structure for use to stop the link.
748  *
749  * Return code
750  *              0 - success
751  *              Any other value - error
752  **/
753 int
754 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
755 {
756         LPFC_MBOXQ_t *pmb;
757         int rc;
758 
759         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
760         if (!pmb) {
761                 phba->link_state = LPFC_HBA_ERROR;
762                 return -ENOMEM;
763         }
764 
765         lpfc_printf_log(phba,
766                 KERN_ERR, LOG_INIT,
767                 "0491 Adapter Link is disabled.\n");
768         lpfc_down_link(phba, pmb);
769         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
770         rc = lpfc_sli_issue_mbox(phba, pmb, flag);
771         if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
772                 lpfc_printf_log(phba,
773                 KERN_ERR, LOG_INIT,
774                 "2522 Adapter failed to issue DOWN_LINK"
775                 " mbox command rc 0x%x\n", rc);
776 
777                 mempool_free(pmb, phba->mbox_mem_pool);
778                 return -EIO;
779         }
780         if (flag == MBX_POLL)
781                 mempool_free(pmb, phba->mbox_mem_pool);
782 
783         return 0;
784 }
785 
786 /**
787  * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
788  * @phba: pointer to lpfc HBA data structure.
789  *
790  * This routine will do LPFC uninitialization before the HBA is reset when
791  * bringing down the SLI Layer.
792  *
793  * Return codes
794  *   0 - success.
795  *   Any other value - error.
796  **/
797 int
798 lpfc_hba_down_prep(struct lpfc_hba *phba)
799 {
800         struct lpfc_vport **vports;
801         int i;
802 
803         if (phba->sli_rev <= LPFC_SLI_REV3) {
804                 /* Disable interrupts */
805                 writel(0, phba->HCregaddr);
806                 readl(phba->HCregaddr); /* flush */
807         }
808 
809         if (phba->pport->load_flag & FC_UNLOADING)
810                 lpfc_cleanup_discovery_resources(phba->pport);
811         else {
812                 vports = lpfc_create_vport_work_array(phba);
813                 if (vports != NULL)
814                         for (i = 0; i <= phba->max_vports &&
815                                 vports[i] != NULL; i++)
816                                 lpfc_cleanup_discovery_resources(vports[i]);
817                 lpfc_destroy_vport_work_array(phba, vports);
818         }
819         return 0;
820 }
821 
822 /**
823  * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
824  * @phba: pointer to lpfc HBA data structure.
825  *
826  * This routine will do uninitialization after the HBA is reset when bring
827  * down the SLI Layer.
828  *
829  * Return codes
830  *   0 - success.
831  *   Any other value - error.
832  **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	/* Release posted receive buffers: HBQ buffers when HBQs are in
	 * use, otherwise the buffers preposted on the ELS ring.
	 */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	/* Flush every ring's txcmplq. Note hbalock is released across the
	 * cancel/abort calls for each ring and re-taken before splicing
	 * the next ring's list.
	 */
	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}
876 
877 /**
878  * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
879  * @phba: pointer to lpfc HBA data structure.
880  *
881  * This routine will do uninitialization after the HBA is reset when bring
882  * down the SLI Layer.
883  *
884  * Return codes
885  *   0 - success.
886  *   Any other value - error.
887  **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	/* SLI4 first performs the common SLI3 ring cleanup */
	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsl_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	/* Mark every aborted ELS sglq as free before moving it back */
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Reset each aborted SCSI buffer before returning it to the
	 * free-buffer put list; no command is attached any longer.
	 */
	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
	return 0;
}
937 
938 /**
939  * lpfc_hba_down_post - Wrapper func for hba down post routine
940  * @phba: pointer to lpfc HBA data structure.
941  *
942  * This routine wraps the actual SLI3 or SLI4 routine for performing
943  * uninitialization after the HBA is reset when bring down the SLI Layer.
944  *
945  * Return codes
946  *   0 - success.
947  *   Any other value - error.
948  **/
949 int
950 lpfc_hba_down_post(struct lpfc_hba *phba)
951 {
952         return (*phba->lpfc_hba_down_post)(phba);
953 }
954 
955 /**
956  * lpfc_hb_timeout - The HBA-timer timeout handler
957  * @ptr: unsigned long holds the pointer to lpfc hba data structure.
958  *
959  * This is the HBA-timer timeout handler registered to the lpfc driver. When
960  * this timer fires, a HBA timeout event shall be posted to the lpfc driver
961  * work-port-events bitmap and the worker thread is notified. This timeout
962  * event will be used by the worker thread to invoke the actual timeout
963  * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
964  * be performed in the timeout handler and the HBA timeout event bit shall
965  * be cleared by the worker thread after it has taken the event bitmap out.
966  **/
967 static void
968 lpfc_hb_timeout(unsigned long ptr)
969 {
970         struct lpfc_hba *phba;
971         uint32_t tmo_posted;
972         unsigned long iflag;
973 
974         phba = (struct lpfc_hba *)ptr;
975 
976         /* Check for heart beat timeout conditions */
977         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
978         tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
979         if (!tmo_posted)
980                 phba->pport->work_port_events |= WORKER_HB_TMO;
981         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
982 
983         /* Tell the worker thread there is work to do */
984         if (!tmo_posted)
985                 lpfc_worker_wake_up(phba);
986         return;
987 }
988 
989 /**
990  * lpfc_rrq_timeout - The RRQ-timer timeout handler
991  * @ptr: unsigned long holds the pointer to lpfc hba data structure.
992  *
993  * This is the RRQ-timer timeout handler registered to the lpfc driver. When
994  * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
995  * work-port-events bitmap and the worker thread is notified. This timeout
996  * event will be used by the worker thread to invoke the actual timeout
997  * handler routine, lpfc_rrq_handler. Any periodical operations will
998  * be performed in the timeout handler and the RRQ timeout event bit shall
999  * be cleared by the worker thread after it has taken the event bitmap out.
1000  **/
1001 static void
1002 lpfc_rrq_timeout(unsigned long ptr)
1003 {
1004         struct lpfc_hba *phba;
1005         unsigned long iflag;
1006 
1007         phba = (struct lpfc_hba *)ptr;
1008         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1009         if (!(phba->pport->load_flag & FC_UNLOADING))
1010                 phba->hba_flag |= HBA_RRQ_ACTIVE;
1011         else
1012                 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1013         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1014 
1015         if (!(phba->pport->load_flag & FC_UNLOADING))
1016                 lpfc_worker_wake_up(phba);
1017 }
1018 
1019 /**
1020  * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
1021  * @phba: pointer to lpfc hba data structure.
1022  * @pmboxq: pointer to the driver internal queue element for mailbox command.
1023  *
1024  * This is the callback function to the lpfc heart-beat mailbox command.
1025  * If configured, the lpfc driver issues the heart-beat mailbox command to
1026  * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
1027  * heart-beat mailbox command is issued, the driver shall set up heart-beat
1028  * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
1029  * heart-beat outstanding state. Once the mailbox command comes back and
1030  * no error conditions detected, the heart-beat mailbox command timer is
1031  * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
1032  * state is cleared for the next heart-beat. If the timer expired with the
1033  * heart-beat outstanding state set, the driver will put the HBA offline.
1034  **/
1035 static void
1036 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
1037 {
1038         unsigned long drvr_flag;
1039 
1040         spin_lock_irqsave(&phba->hbalock, drvr_flag);
1041         phba->hb_outstanding = 0;
1042         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1043 
1044         /* Check and reset heart-beat timer is necessary */
1045         mempool_free(pmboxq, phba->mbox_mem_pool);
1046         if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
1047                 !(phba->link_state == LPFC_HBA_ERROR) &&
1048                 !(phba->pport->load_flag & FC_UNLOADING))
1049                 mod_timer(&phba->hb_tmofunc,
1050                           jiffies +
1051                           msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1052         return;
1053 }
1054 
1055 /**
1056  * lpfc_hb_timeout_handler - The HBA-timer timeout handler
1057  * @phba: pointer to lpfc hba data structure.
1058  *
1059  * This is the actual HBA-timer timeout handler to be invoked by the worker
1060  * thread whenever the HBA timer fired and HBA-timeout event posted. This
1061  * handler performs any periodic operations needed for the device. If such
1062  * periodic event has already been attended to either in the interrupt handler
1063  * or by processing slow-ring or fast-ring events within the HBA-timer
1064  * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
1065  * the timer for the next timeout period. If lpfc heart-beat mailbox command
1066  * is configured and there is no heart-beat mailbox command outstanding, a
1067  * heart-beat mailbox is issued and timer set properly. Otherwise, if there
1068  * has been a heart-beat mailbox command outstanding, the HBA shall be put
1069  * to offline.
1070  **/
1071 void
1072 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1073 {
1074         struct lpfc_vport **vports;
1075         LPFC_MBOXQ_t *pmboxq;
1076         struct lpfc_dmabuf *buf_ptr;
1077         int retval, i;
1078         struct lpfc_sli *psli = &phba->sli;
1079         LIST_HEAD(completions);
1080 
1081         vports = lpfc_create_vport_work_array(phba);
1082         if (vports != NULL)
1083                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
1084                         lpfc_rcv_seq_check_edtov(vports[i]);
1085         lpfc_destroy_vport_work_array(phba, vports);
1086 
1087         if ((phba->link_state == LPFC_HBA_ERROR) ||
1088                 (phba->pport->load_flag & FC_UNLOADING) ||
1089                 (phba->pport->fc_flag & FC_OFFLINE_MODE))
1090                 return;
1091 
1092         spin_lock_irq(&phba->pport->work_port_lock);
1093 
1094         if (time_after(phba->last_completion_time +
1095                         msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1096                         jiffies)) {
1097                 spin_unlock_irq(&phba->pport->work_port_lock);
1098                 if (!phba->hb_outstanding)
1099                         mod_timer(&phba->hb_tmofunc,
1100                                 jiffies +
1101                                 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1102                 else
1103                         mod_timer(&phba->hb_tmofunc,
1104                                 jiffies +
1105                                 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1106                 return;
1107         }
1108         spin_unlock_irq(&phba->pport->work_port_lock);
1109 
1110         if (phba->elsbuf_cnt &&
1111                 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1112                 spin_lock_irq(&phba->hbalock);
1113                 list_splice_init(&phba->elsbuf, &completions);
1114                 phba->elsbuf_cnt = 0;
1115                 phba->elsbuf_prev_cnt = 0;
1116                 spin_unlock_irq(&phba->hbalock);
1117 
1118                 while (!list_empty(&completions)) {
1119                         list_remove_head(&completions, buf_ptr,
1120                                 struct lpfc_dmabuf, list);
1121                         lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1122                         kfree(buf_ptr);
1123                 }
1124         }
1125         phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1126 
1127         /* If there is no heart beat outstanding, issue a heartbeat command */
1128         if (phba->cfg_enable_hba_heartbeat) {
1129                 if (!phba->hb_outstanding) {
1130                         if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1131                                 (list_empty(&psli->mboxq))) {
1132                                 pmboxq = mempool_alloc(phba->mbox_mem_pool,
1133                                                         GFP_KERNEL);
1134                                 if (!pmboxq) {
1135                                         mod_timer(&phba->hb_tmofunc,
1136                                                  jiffies +
1137                                                  msecs_to_jiffies(1000 *
1138                                                  LPFC_HB_MBOX_INTERVAL));
1139                                         return;
1140                                 }
1141 
1142                                 lpfc_heart_beat(phba, pmboxq);
1143                                 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1144                                 pmboxq->vport = phba->pport;
1145                                 retval = lpfc_sli_issue_mbox(phba, pmboxq,
1146                                                 MBX_NOWAIT);
1147 
1148                                 if (retval != MBX_BUSY &&
1149                                         retval != MBX_SUCCESS) {
1150                                         mempool_free(pmboxq,
1151                                                         phba->mbox_mem_pool);
1152                                         mod_timer(&phba->hb_tmofunc,
1153                                                 jiffies +
1154                                                 msecs_to_jiffies(1000 *
1155                                                 LPFC_HB_MBOX_INTERVAL));
1156                                         return;
1157                                 }
1158                                 phba->skipped_hb = 0;
1159                                 phba->hb_outstanding = 1;
1160                         } else if (time_before_eq(phba->last_completion_time,
1161                                         phba->skipped_hb)) {
1162                                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1163                                         "2857 Last completion time not "
1164                                         " updated in %d ms\n",
1165                                         jiffies_to_msecs(jiffies
1166                                                  - phba->last_completion_time));
1167                         } else
1168                                 phba->skipped_hb = jiffies;
1169 
1170                         mod_timer(&phba->hb_tmofunc,
1171                                  jiffies +
1172                                  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1173                         return;
1174                 } else {
1175                         /*
1176                         * If heart beat timeout called with hb_outstanding set
1177                         * we need to give the hb mailbox cmd a chance to
1178                         * complete or TMO.
1179                         */
1180                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1181                                         "0459 Adapter heartbeat still out"
1182                                         "standing:last compl time was %d ms.\n",
1183                                         jiffies_to_msecs(jiffies
1184                                                  - phba->last_completion_time));
1185                         mod_timer(&phba->hb_tmofunc,
1186                                 jiffies +
1187                                 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1188                 }
1189         }
1190 }
1191 
1192 /**
1193  * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1194  * @phba: pointer to lpfc hba data structure.
1195  *
1196  * This routine is called to bring the HBA offline when HBA hardware error
1197  * other than Port Error 6 has been detected.
1198  **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli   *psli = &phba->sli;

	/* Mark the SLI layer inactive before taking the port down */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	/* Take the port offline, then reset the board; brdreset is
	 * called under hbalock.
	 */
	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	/* Leave the HBA in error state; recovery requires explicit action */
	phba->link_state = LPFC_HBA_ERROR;
	return;
}
1220 
1221 /**
1222  * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1223  * @phba: pointer to lpfc hba data structure.
1224  *
1225  * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1226  * other than Port Error 6 has been detected.
1227  **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	/* Take the port offline, reset the SLI4 board, then perform
	 * post-reset cleanup and a port status check.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	/* Leave the HBA in error state; recovery requires explicit action */
	phba->link_state = LPFC_HBA_ERROR;
}
1239 
1240 /**
1241  * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1242  * @phba: pointer to lpfc hba data structure.
1243  *
1244  * This routine is invoked to handle the deferred HBA hardware error
1245  * conditions. This type of error is indicated by HBA by setting ER1
1246  * and another ER bit in the host status register. The driver will
1247  * wait until the ER1 bit clears before handling the error condition.
1248  **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring  *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		/* A failed register read means the adapter is gone */
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR ;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * first write to the host attention register clear the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	/* Capture the latest adapter status words from SLIM */
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
1320 
1321 static void
1322 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1323 {
1324         struct lpfc_board_event_header board_event;
1325         struct Scsi_Host *shost;
1326 
1327         board_event.event_type = FC_REG_BOARD_EVENT;
1328         board_event.subcategory = LPFC_EVENT_PORTINTERR;
1329         shost = lpfc_shost_from_vport(phba->pport);
1330         fc_host_post_vendor_event(shost, fc_get_event_number(),
1331                                   sizeof(board_event),
1332                                   (char *) &board_event,
1333                                   LPFC_NL_VENDOR_ID);
1334 }
1335 
1336 /**
1337  * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1338  * @phba: pointer to lpfc hba data structure.
1339  *
1340  * This routine is invoked to handle the following HBA hardware error
1341  * conditions:
1342  * 1 - HBA error attention interrupt
1343  * 2 - DMA ring index out of range
1344  * 3 - Mailbox command came back as unknown
1345  **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host  *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* A deferred error attention takes its own recovery path */
	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		* Firmware stops when it triggered erratt with HS_FFER6.
		* That could cause the I/Os dropped by the firmware.
		* Error iocb (I/O) on txcmplq and let the SCSI layer
		* retry it after re-establishing link.
		*/
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error.  Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		/* Critical over-temperature: report it and take the port
		 * offline; no restart is attempted.
		 */
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}
1464 
1465 /**
1466  * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1467  * @phba: pointer to lpfc hba data structure.
1468  * @mbx_action: flag for mailbox shutdown action.
1469  *
1470  * This routine is invoked to perform an SLI4 port PCI function reset in
1471  * response to port status register polling attention. It waits for port
1472  * status register (ERR, RDY, RN) bits before proceeding with function reset.
1473  * During this process, interrupt vectors are freed and later requested
1474  * for handling possible port resource change.
1475  **/
1476 static int
1477 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1478                             bool en_rn_msg)
1479 {
1480         int rc;
1481         uint32_t intr_mode;
1482 
1483         /*
1484          * On error status condition, driver need to wait for port
1485          * ready before performing reset.
1486          */
1487         rc = lpfc_sli4_pdev_status_reg_wait(phba);
1488         if (!rc) {
1489                 /* need reset: attempt for port recovery */
1490                 if (en_rn_msg)
1491                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1492                                         "2887 Reset Needed: Attempting Port "
1493                                         "Recovery...\n");
1494                 lpfc_offline_prep(phba, mbx_action);
1495                 lpfc_offline(phba);
1496                 /* release interrupt for possible resource change */
1497                 lpfc_sli4_disable_intr(phba);
1498                 lpfc_sli_brdrestart(phba);
1499                 /* request and enable interrupt */
1500                 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1501                 if (intr_mode == LPFC_INTR_ERROR) {
1502                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1503                                         "3175 Failed to enable interrupt\n");
1504                         return -EIO;
1505                 } else {
1506                         phba->intr_mode = intr_mode;
1507                 }
1508                 rc = lpfc_online(phba);
1509                 if (rc == 0)
1510                         lpfc_unblock_mgmt_io(phba);
1511         }
1512         return rc;
1513 }
1514 
/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions. Depending on the SLI interface type it either takes the port
 * offline directly (if_type 0) or attempts port recovery via a function
 * reset (if_type 2), then reports a dump event to the management
 * application.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	int rc;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		/* if_type 0 has no recovery path here; take the port offline */
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		/* Overtemp: record the state and take the port offline */
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			break;
		}
		/* Decode the error registers to pick the log message; a
		 * firmware-initiated restart suppresses the generic
		 * "Reset Needed" message during the function reset below.
		 */
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3145 Port Down: Provisioning\n");

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
				en_rn_msg);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3152 Unrecoverable error, bring the port "
				"offline\n");
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	/* Reached on successful recovery (non-dump case) or after the port
	 * was taken offline: report the dump event to the upper layer.
	 */
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}
1630 
/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 *
 * Note: returns nothing; the previously documented "return codes" did not
 * apply to this void wrapper.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	/* Dispatch to the SLI-rev specific handler installed in the
	 * lpfc_hba API jump table.
	 */
	(*phba->lpfc_handle_eratt)(phba);
}
1647 
/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event. It issues a READ_TOPOLOGY mailbox command to fetch
 * the new link state; on any failure it tears down the allocated resources,
 * re-enables link attention interrupts, forces a link-down, and marks the
 * HBA as being in error state.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;	/* values 1-4 identify the failing step; logged on exit */

	/* Mailbox command plus DMA buffer to receive the topology data */
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Success: pmb/mp are now owned by the mailbox completion handler. */
	return;

	/* Error unwind: release resources in reverse order of acquisition. */
lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}
1734 
1735 /**
1736  * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1737  * @phba: pointer to lpfc hba data structure.
1738  * @vpd: pointer to the vital product data.
1739  * @len: length of the vital product data in bytes.
1740  *
1741  * This routine parses the Vital Product Data (VPD). The VPD is treated as
1742  * an array of characters. In this routine, the ModelName, ProgramType, and
1743  * ModelDesc, etc. fields of the phba data structure will be populated.
1744  *
1745  * Return codes
1746  *   0 - pointer to the VPD passed in is NULL
1747  *   1 - success
1748  **/
1749 int
1750 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1751 {
1752         uint8_t lenlo, lenhi;
1753         int Length;
1754         int i, j;
1755         int finished = 0;
1756         int index = 0;
1757 
1758         if (!vpd)
1759                 return 0;
1760 
1761         /* Vital Product */
1762         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1763                         "0455 Vital Product Data: x%x x%x x%x x%x\n",
1764                         (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1765                         (uint32_t) vpd[3]);
1766         while (!finished && (index < (len - 4))) {
1767                 switch (vpd[index]) {
1768                 case 0x82:
1769                 case 0x91:
1770                         index += 1;
1771                         lenlo = vpd[index];
1772                         index += 1;
1773                         lenhi = vpd[index];
1774                         index += 1;
1775                         i = ((((unsigned short)lenhi) << 8) + lenlo);
1776                         index += i;
1777                         break;
1778                 case 0x90:
1779                         index += 1;
1780                         lenlo = vpd[index];
1781                         index += 1;
1782                         lenhi = vpd[index];
1783                         index += 1;
1784                         Length = ((((unsigned short)lenhi) << 8) + lenlo);
1785                         if (Length > len - index)
1786                                 Length = len - index;
1787                         while (Length > 0) {
1788                         /* Look for Serial Number */
1789                         if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1790                                 index += 2;
1791                                 i = vpd[index];
1792                                 index += 1;
1793                                 j = 0;
1794                                 Length -= (3+i);
1795                                 while(i--) {
1796                                         phba->SerialNumber[j++] = vpd[index++];
1797                                         if (j == 31)
1798                                                 break;
1799                                 }
1800                                 phba->SerialNumber[j] = 0;
1801                                 continue;
1802                         }
1803                         else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1804                                 phba->vpd_flag |= VPD_MODEL_DESC;
1805                                 index += 2;
1806                                 i = vpd[index];
1807                                 index += 1;
1808                                 j = 0;
1809                                 Length -= (3+i);
1810                                 while(i--) {
1811                                         phba->ModelDesc[j++] = vpd[index++];
1812                                         if (j == 255)
1813                                                 break;
1814                                 }
1815                                 phba->ModelDesc[j] = 0;
1816                                 continue;
1817                         }
1818                         else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1819                                 phba->vpd_flag |= VPD_MODEL_NAME;
1820                                 index += 2;
1821                                 i = vpd[index];
1822                                 index += 1;
1823                                 j = 0;
1824                                 Length -= (3+i);
1825                                 while(i--) {
1826                                         phba->ModelName[j++] = vpd[index++];
1827                                         if (j == 79)
1828                                                 break;
1829                                 }
1830                                 phba->ModelName[j] = 0;
1831                                 continue;
1832                         }
1833                         else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1834                                 phba->vpd_flag |= VPD_PROGRAM_TYPE;
1835                                 index += 2;
1836                                 i = vpd[index];
1837                                 index += 1;
1838                                 j = 0;
1839                                 Length -= (3+i);
1840                                 while(i--) {
1841                                         phba->ProgramType[j++] = vpd[index++];
1842                                         if (j == 255)
1843                                                 break;
1844                                 }
1845                                 phba->ProgramType[j] = 0;
1846                                 continue;
1847                         }
1848                         else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1849                                 phba->vpd_flag |= VPD_PORT;
1850                                 index += 2;
1851                                 i = vpd[index];
1852                                 index += 1;
1853                                 j = 0;
1854                                 Length -= (3+i);
1855                                 while(i--) {
1856                                         if ((phba->sli_rev == LPFC_SLI_REV4) &&
1857                                             (phba->sli4_hba.pport_name_sta ==
1858                                              LPFC_SLI4_PPNAME_GET)) {
1859                                                 j++;
1860                                                 index++;
1861                                         } else
1862                                                 phba->Port[j++] = vpd[index++];
1863                                         if (j == 19)
1864                                                 break;
1865                                 }
1866                                 if ((phba->sli_rev != LPFC_SLI_REV4) ||
1867                                     (phba->sli4_hba.pport_name_sta ==
1868                                      LPFC_SLI4_PPNAME_NON))
1869                                         phba->Port[j] = 0;
1870                                 continue;
1871                         }
1872                         else {
1873                                 index += 2;
1874                                 i = vpd[index];
1875                                 index += 1;
1876                                 index += i;
1877                                 Length -= (3 + i);
1878                         }
1879                 }
1880                 finished = 0;
1881                 break;
1882                 case 0x78:
1883                         finished = 1;
1884                         break;
1885                 default:
1886                         index ++;
1887                         break;
1888                 }
1889         }
1890 
1891         return(1);
1892 }
1893 
/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 * If both @mdp and @descp are already non-empty, the routine does nothing.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;		/* when set, speed suffix is "GE" not "Gb" */
	int oneConnect = 0; /* default is not a oneConnect */
	/* Per-device strings selected by the switch below */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	/* Nothing to do if both output strings are already populated */
	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	/* Derive the maximum link speed from the link-speed capability mask */
	if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else if (phba->lmt & LMT_1Gb)
		max_speed = 1;
	else
		max_speed = 0;

	vp = &phba->vpd;

	/* Map the PCI device ID to model name, bus type, and function */
	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000",  "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_SKYHAWK:
	case PCI_DEVICE_ID_SKYHAWK_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	/* NOTE(review): snprintf NUL-terminates within the size given, so
	 * 79/255 leaves the last byte of the 80/256-byte buffers unused;
	 * harmless, but the full sizes could be passed instead.
	 */
	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79,"%s", m.name);
	/*
	 * oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, %s Initiator %s",
				m.name, m.function,
				phba->Port);
		else if (max_speed == 0)
			snprintf(descp, 255,
				"Emulex %s %s %s ",
				m.name, m.bus, m.function);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ? "GE" : "Gb",
				m.bus, m.function);
	}
}
2136 
2137 /**
2138  * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
2139  * @phba: pointer to lpfc hba data structure.
2140  * @pring: pointer to a IOCB ring.
2141  * @cnt: the number of IOCBs to be posted to the IOCB ring.
2142  *
2143  * This routine posts a given number of IOCBs with the associated DMA buffer
2144  * descriptors specified by the cnt argument to the given IOCB ring.
2145  *
2146  * Return codes
2147  *   The number of IOCBs NOT able to be posted to the IOCB ring.
2148  **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	/* Retry any buffers that could not be posted on a prior call */
	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for  command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			/* Out of iocbs: record the shortfall and report it */
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
		    mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			/* kfree(NULL) is a no-op, so this covers both the
			 * kmalloc and the mbuf allocation failing.
			 */
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				/* Unwind mp1 as well before bailing out */
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		/* Describe the buffer(s) in the command's 64-bit buffer
		 * descriptor entries; cnt is decremented per buffer posted.
		 */
		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			/* Issue failed: free the buffers and give back the
			 * counts decremented above before reporting.
			 */
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		/* Track posted buffers so they can be looked up when the
		 * adapter hands them back.
		 */
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}
2238 
2239 /**
2240  * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2241  * @phba: pointer to lpfc hba data structure.
2242  *
2243  * This routine posts initial receive IOCB buffers to the ELS ring. The
2244  * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2245  * set to 64 IOCBs.
2246  *
2247  * Return codes
2248  *   0 - success (currently always success)
2249  **/
2250 static int
2251 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2252 {
2253         struct lpfc_sli *psli = &phba->sli;
2254 
2255         /* Ring 0, ELS / CT buffers */
2256         lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2257         /* Ring 2 - FCP no buffers needed */
2258 
2259         return 0;
2260 }
2261 
/* Rotate the 32-bit value V left by N bits (callers use N = 1, 5, 30;
 * N must be in 1..31 or the right shift is undefined).
 */
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2263 
2264 /**
2265  * lpfc_sha_init - Set up initial array of hash table entries
2266  * @HashResultPointer: pointer to an array as hash table.
2267  *
2268  * This routine sets up the initial values to the array of hash table entries
2269  * for the LC HBAs.
2270  **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	/* Standard SHA-1 initial digest constants (FIPS 180) */
	static const uint32_t sha1_init[5] = {
		0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
	};
	int i;

	for (i = 0; i < 5; i++)
		HashResultPointer[i] = sha1_init[i];
}
2280 
2281 /**
2282  * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2283  * @HashResultPointer: pointer to an initial/result hash table.
2284  * @HashWorkingPointer: pointer to an working hash table.
2285  *
2286  * This routine iterates an initial hash table pointed by @HashResultPointer
2287  * with the values from the working hash table pointeed by @HashWorkingPointer.
2288  * The results are putting back to the initial hash table, returned through
2289  * the @HashResultPointer as the result hash table.
2290  **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	uint32_t a, b, c, d, e;
	uint32_t tmp;
	int t;

	/* Message-schedule expansion: each of words 16..79 is a one-bit
	 * left rotation of the XOR of four earlier words.
	 */
	for (t = 16; t <= 79; t++) {
		tmp = HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16];
		HashWorkingPointer[t] = (tmp << 1) | (tmp >> 31);
	}

	a = HashResultPointer[0];
	b = HashResultPointer[1];
	c = HashResultPointer[2];
	d = HashResultPointer[3];
	e = HashResultPointer[4];

	/* 80 compression rounds: four groups of 20, each with its own
	 * round function and additive constant.
	 */
	for (t = 0; t <= 79; t++) {
		if (t < 20)
			tmp = ((b & c) | ((~b) & d)) + 0x5A827999;
		else if (t < 40)
			tmp = (b ^ c ^ d) + 0x6ED9EBA1;
		else if (t < 60)
			tmp = ((b & c) | (b & d) | (c & d)) + 0x8F1BBCDC;
		else
			tmp = (b ^ c ^ d) + 0xCA62C1D6;
		/* rotl(a, 5) + e + schedule word */
		tmp += ((a << 5) | (a >> 27)) + e + HashWorkingPointer[t];
		e = d;
		d = c;
		c = (b << 30) | (b >> 2);	/* rotl(b, 30) */
		b = a;
		a = tmp;
	}

	/* Fold the round results back into the running digest */
	HashResultPointer[0] += a;
	HashResultPointer[1] += b;
	HashResultPointer[2] += c;
	HashResultPointer[3] += d;
	HashResultPointer[4] += e;
}
2337 
2338 /**
2339  * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2340  * @RandomChallenge: pointer to the entry of host challenge random number array.
2341  * @HashWorking: pointer to the entry of the working hash array.
2342  *
2343  * This routine calculates the working hash array referred by @HashWorking
2344  * from the challenge random numbers associated with the host, referred by
2345  * @RandomChallenge. The result is put into the entry of the working hash
2346  * array and returned by reference through @HashWorking.
2347  **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	/* Fold one challenge word into the matching working-hash word */
	*HashWorking ^= *RandomChallenge;
}
2353 
2354 /**
2355  * lpfc_hba_init - Perform special handling for LC HBA initialization
2356  * @phba: pointer to lpfc hba data structure.
2357  * @hbainit: pointer to an array of unsigned 32-bit integers.
2358  *
2359  * This routine performs the special handling for LC HBA initialization.
2360  **/
2361 void
2362 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2363 {
2364         int t;
2365         uint32_t *HashWorking;
2366         uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2367 
2368         HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2369         if (!HashWorking)
2370                 return;
2371 
2372         HashWorking[0] = HashWorking[78] = *pwwnn++;
2373         HashWorking[1] = HashWorking[79] = *pwwnn;
2374 
2375         for (t = 0; t < 7; t++)
2376                 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2377 
2378         lpfc_sha_init(hbainit);
2379         lpfc_sha_iterate(hbainit, HashWorking);
2380         kfree(HashWorking);
2381 }
2382 
2383 /**
2384  * lpfc_cleanup - Performs vport cleanups before deleting a vport
2385  * @vport: pointer to a virtual N_Port data structure.
2386  *
2387  * This routine performs the necessary cleanups before deleting the @vport.
2388  * It invokes the discovery state machine to perform necessary state
2389  * transitions and to release the ndlps associated with the @vport. Note,
2390  * the physical port is treated as @vport 0.
2391  **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;

	/* Report the link failure first so logins get torn down */
	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp)) {
			/* Inactive node: revive it just enough to mark it
			 * for freeing, then drop the reference.
			 */
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				continue;
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Trigger the release of the ndlp memory */
			lpfc_nlp_put(ndlp);
			continue;
		}
		spin_lock_irq(&phba->ndlp_lock);
		if (NLP_CHK_FREE_REQ(ndlp)) {
			/* The ndlp should not be in memory free mode already */
			spin_unlock_irq(&phba->ndlp_lock);
			continue;
		} else
			/* Indicate request for freeing ndlp memory */
			NLP_SET_FREE_REQ(ndlp);
		spin_unlock_irq(&phba->ndlp_lock);

		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		/* take care of nodes in unused state before the state
		 * machine taking action.
		 */
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
			lpfc_nlp_put(ndlp);
			continue;
		}

		/* Fabric nodes get a recovery event first so the state
		 * machine can unwind outstanding work before removal.
		 */
		if (ndlp->nlp_type & NLP_FABRIC)
			lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					     NLP_EVT_DEVICE_RM);
	}

	/* At this point, ALL ndlp's should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Lets wait for this to happen, if needed.
	 */
	while (!list_empty(&vport->fc_nodes)) {
		/* ~30 seconds of 10ms sleeps before giving up and
		 * dumping the remaining nodes.
		 */
		if (i++ > 3000) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				"0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
						&vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						LOG_NODE,
						"0282 did:x%x ndlp:x%p "
						"usgmap:x%x refcnt:%d\n",
						ndlp->nlp_DID, (void *)ndlp,
						ndlp->nlp_usg_map,
						atomic_read(
							&ndlp->kref.refcount));
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
	lpfc_cleanup_vports_rrqs(vport, NULL);
}
2475 
2476 /**
2477  * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2478  * @vport: pointer to a virtual N_Port data structure.
2479  *
2480  * This routine stops all the timers associated with a @vport. This function
2481  * is invoked before disabling or deleting a @vport. Note that the physical
2482  * port is treated as @vport 0.
2483  **/
2484 void
2485 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2486 {
2487         del_timer_sync(&vport->els_tmofunc);
2488         del_timer_sync(&vport->fc_fdmitmo);
2489         del_timer_sync(&vport->delayed_disc_tmo);
2490         lpfc_can_disctmo(vport);
2491         return;
2492 }
2493 
2494 /**
2495  * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2496  * @phba: pointer to lpfc hba data structure.
2497  *
2498  * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2499  * caller of this routine should already hold the host lock.
2500  **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Caller must already hold the host lock (double-underscore
	 * variant; the locked wrapper is below).
	 */
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}
2510 
2511 /**
2512  * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2513  * @phba: pointer to lpfc hba data structure.
2514  *
2515  * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2516  * checks whether the FCF rediscovery wait timer is pending with the host
2517  * lock held before proceeding with disabling the timer and clearing the
2518  * wait timer pendig flag.
2519  **/
2520 void
2521 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2522 {
2523         spin_lock_irq(&phba->hbalock);
2524         if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2525                 /* FCF rediscovery timer already fired or stopped */
2526                 spin_unlock_irq(&phba->hbalock);
2527                 return;
2528         }
2529         __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2530         /* Clear failover in progress flags */
2531         phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2532         spin_unlock_irq(&phba->hbalock);
2533 }
2534 
2535 /**
2536  * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2537  * @phba: pointer to lpfc hba data structure.
2538  *
2539  * This routine stops all the timers associated with a HBA. This function is
2540  * invoked before either putting a HBA offline or unloading the driver.
2541  **/
2542 void
2543 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2544 {
2545         lpfc_stop_vport_timers(phba->pport);
2546         del_timer_sync(&phba->sli.mbox_tmo);
2547         del_timer_sync(&phba->fabric_block_timer);
2548         del_timer_sync(&phba->eratt_poll);
2549         del_timer_sync(&phba->hb_tmofunc);
2550         if (phba->sli_rev == LPFC_SLI_REV4) {
2551                 del_timer_sync(&phba->rrq_tmr);
2552                 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2553         }
2554         phba->hb_outstanding = 0;
2555 
2556         switch (phba->pci_dev_grp) {
2557         case LPFC_PCI_DEV_LP:
2558                 /* Stop any LightPulse device specific driver timers */
2559                 del_timer_sync(&phba->fcp_poll_timer);
2560                 break;
2561         case LPFC_PCI_DEV_OC:
2562                 /* Stop any OneConnect device sepcific driver timers */
2563                 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2564                 break;
2565         default:
2566                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2567                                 "0297 Invalid device group (x%x)\n",
2568                                 phba->pci_dev_grp);
2569                 break;
2570         }
2571         return;
2572 }
2573 
/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: LPFC_MBX_NO_WAIT to return immediately after blocking,
 *              otherwise wait for any active mailbox command to finish.
 *
 * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all the user space access to
 * the HBA, whether they are from sysfs interface or libdfc interface will
 * all be blocked. The HBA is set to block the management interface when the
 * driver prepares the HBA interface for online or offline.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
	unsigned long iflag;
	/* Default shown in the timeout message if no mailbox is active */
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	/* Mark the management interface blocked */
	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (mbx_action == LPFC_MBX_NO_WAIT)
		return;
	/* Default wait budget; refined below if a mailbox is active */
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timed out: proceed blocked, but log the command
			 * that is still outstanding.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2813 Mgmt IO is Blocked %x "
				"- mbox cmd %x still active\n",
				phba->sli.sli_flag, actcmd);
			break;
		}
	}
}
2621 
2622 /**
2623  * lpfc_sli4_node_prep - Assign RPIs for active nodes.
2624  * @phba: pointer to lpfc hba data structure.
2625  *
2626  * Allocate RPIs for all active remote nodes. This is needed whenever
2627  * an SLI4 adapter is reset and the driver is not unloading. Its purpose
2628  * is to fixup the temporary rpi assignments.
2629  **/
2630 void
2631 lpfc_sli4_node_prep(struct lpfc_hba *phba)
2632 {
2633         struct lpfc_nodelist  *ndlp, *next_ndlp;
2634         struct lpfc_vport **vports;
2635         int i;
2636 
2637         if (phba->sli_rev != LPFC_SLI_REV4)
2638                 return;
2639 
2640         vports = lpfc_create_vport_work_array(phba);
2641         if (vports != NULL) {
2642                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2643                         if (vports[i]->load_flag & FC_UNLOADING)
2644                                 continue;
2645 
2646                         list_for_each_entry_safe(ndlp, next_ndlp,
2647                                                  &vports[i]->fc_nodes,
2648                                                  nlp_listp) {
2649                                 if (NLP_CHK_NODE_ACT(ndlp))
2650                                         ndlp->nlp_rpi =
2651                                                 lpfc_sli4_alloc_rpi(phba);
2652                         }
2653                 }
2654         }
2655         lpfc_destroy_vport_work_array(phba, vports);
2656 }
2657 
2658 /**
2659  * lpfc_online - Initialize and bring a HBA online
2660  * @phba: pointer to lpfc hba data structure.
2661  *
2662  * This routine initializes the HBA and brings a HBA online. During this
2663  * process, the management interface is blocked to prevent user space access
2664  * to the HBA interfering with the driver initialization.
2665  *
2666  * Return codes
2667  *   0 - successful
2668  *   1 - failed
2669  **/
2670 int
2671 lpfc_online(struct lpfc_hba *phba)
2672 {
2673         struct lpfc_vport *vport;
2674         struct lpfc_vport **vports;
2675         int i;
2676         bool vpis_cleared = false;
2677 
2678         if (!phba)
2679                 return 0;
2680         vport = phba->pport;
2681 
2682         if (!(vport->fc_flag & FC_OFFLINE_MODE))
2683                 return 0;
2684 
2685         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2686                         "0458 Bring Adapter online\n");
2687 
2688         lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
2689 
2690         if (!lpfc_sli_queue_setup(phba)) {
2691                 lpfc_unblock_mgmt_io(phba);
2692                 return 1;
2693         }
2694 
2695         if (phba->sli_rev == LPFC_SLI_REV4) {
2696                 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2697                         lpfc_unblock_mgmt_io(phba);
2698                         return 1;
2699                 }
2700                 spin_lock_irq(&phba->hbalock);
2701                 if (!phba->sli4_hba.max_cfg_param.vpi_used)
2702                         vpis_cleared = true;
2703                 spin_unlock_irq(&phba->hbalock);
2704         } else {
2705                 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2706                         lpfc_unblock_mgmt_io(phba);
2707                         return 1;
2708                 }
2709         }
2710 
2711         vports = lpfc_create_vport_work_array(phba);
2712         if (vports != NULL)
2713                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2714                         struct Scsi_Host *shost;
2715                         shost = lpfc_shost_from_vport(vports[i]);
2716                         spin_lock_irq(shost->host_lock);
2717                         vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2718                         if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2719                                 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2720                         if (phba->sli_rev == LPFC_SLI_REV4) {
2721                                 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2722                                 if ((vpis_cleared) &&
2723                                     (vports[i]->port_type !=
2724                                         LPFC_PHYSICAL_PORT))
2725                                         vports[i]->vpi = 0;
2726                         }
2727                         spin_unlock_irq(shost->host_lock);
2728                 }
2729                 lpfc_destroy_vport_work_array(phba, vports);
2730 
2731         lpfc_unblock_mgmt_io(phba);
2732         return 0;
2733 }
2734 
2735 /**
2736  * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
2737  * @phba: pointer to lpfc hba data structure.
2738  *
2739  * This routine marks a HBA's management interface as not blocked. Once the
2740  * HBA's management interface is marked as not blocked, all the user space
2741  * access to the HBA, whether they are from sysfs interface or libdfc
2742  * interface will be allowed. The HBA is set to block the management interface
2743  * when the driver prepares the HBA interface for online or offline and then
2744  * set to unblock the management interface afterwards.
2745  **/
2746 void
2747 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2748 {
2749         unsigned long iflag;
2750 
2751         spin_lock_irqsave(&phba->hbalock, iflag);
2752         phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2753         spin_unlock_irqrestore(&phba->hbalock, iflag);
2754 }
2755 
2756 /**
2757  * lpfc_offline_prep - Prepare a HBA to be brought offline
2758  * @phba: pointer to lpfc hba data structure.
2759  *
2760  * This routine is invoked to prepare a HBA to be brought offline. It performs
2761  * unregistration login to all the nodes on all vports and flushes the mailbox
2762  * queue to make it ready to be brought offline.
2763  **/
2764 void
2765 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
2766 {
2767         struct lpfc_vport *vport = phba->pport;
2768         struct lpfc_nodelist  *ndlp, *next_ndlp;
2769         struct lpfc_vport **vports;
2770         struct Scsi_Host *shost;
2771         int i;
2772 
2773         if (vport->fc_flag & FC_OFFLINE_MODE)
2774                 return;
2775 
2776         lpfc_block_mgmt_io(phba, mbx_action);
2777 
2778         lpfc_linkdown(phba);
2779 
2780         /* Issue an unreg_login to all nodes on all vports */
2781         vports = lpfc_create_vport_work_array(phba);
2782         if (vports != NULL) {
2783                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2784                         if (vports[i]->load_flag & FC_UNLOADING)
2785                                 continue;
2786                         shost = lpfc_shost_from_vport(vports[i]);
2787                         spin_lock_irq(shost->host_lock);
2788                         vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2789                         vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2790                         vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2791                         spin_unlock_irq(shost->host_lock);
2792 
2793                         shost = lpfc_shost_from_vport(vports[i]);
2794                         list_for_each_entry_safe(ndlp, next_ndlp,
2795                                                  &vports[i]->fc_nodes,
2796                                                  nlp_listp) {
2797                                 if (!NLP_CHK_NODE_ACT(ndlp))
2798                                         continue;
2799                                 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2800                                         continue;
2801                                 if (ndlp->nlp_type & NLP_FABRIC) {
2802                                         lpfc_disc_state_machine(vports[i], ndlp,
2803                                                 NULL, NLP_EVT_DEVICE_RECOVERY);
2804                                         lpfc_disc_state_machine(vports[i], ndlp,
2805                                                 NULL, NLP_EVT_DEVICE_RM);
2806                                 }
2807                                 spin_lock_irq(shost->host_lock);
2808                                 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2809                                 spin_unlock_irq(shost->host_lock);
2810                                 /*
2811                                  * Whenever an SLI4 port goes offline, free the
2812                                  * RPI. Get a new RPI when the adapter port
2813                                  * comes back online.
2814                                  */
2815                                 if (phba->sli_rev == LPFC_SLI_REV4)
2816                                         lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
2817                                 lpfc_unreg_rpi(vports[i], ndlp);
2818                         }
2819                 }
2820         }
2821         lpfc_destroy_vport_work_array(phba, vports);
2822 
2823         lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
2824 }
2825 
2826 /**
2827  * lpfc_offline - Bring a HBA offline
2828  * @phba: pointer to lpfc hba data structure.
2829  *
2830  * This routine actually brings a HBA offline. It stops all the timers
2831  * associated with the HBA, brings down the SLI layer, and eventually
2832  * marks the HBA as in offline state for the upper layer protocol.
2833  **/
2834 void
2835 lpfc_offline(struct lpfc_hba *phba)
2836 {
2837         struct Scsi_Host  *shost;
2838         struct lpfc_vport **vports;
2839         int i;
2840 
2841         if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2842                 return;
2843 
2844         /* stop port and all timers associated with this hba */
2845         lpfc_stop_port(phba);
2846         vports = lpfc_create_vport_work_array(phba);
2847         if (vports != NULL)
2848                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2849                         lpfc_stop_vport_timers(vports[i]);
2850         lpfc_destroy_vport_work_array(phba, vports);
2851         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2852                         "0460 Bring Adapter offline\n");
2853         /* Bring down the SLI Layer and cleanup.  The HBA is offline
2854            now.  */
2855         lpfc_sli_hba_down(phba);
2856         spin_lock_irq(&phba->hbalock);
2857         phba->work_ha = 0;
2858         spin_unlock_irq(&phba->hbalock);
2859         vports = lpfc_create_vport_work_array(phba);
2860         if (vports != NULL)
2861                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2862                         shost = lpfc_shost_from_vport(vports[i]);
2863                         spin_lock_irq(shost->host_lock);
2864                         vports[i]->work_port_events = 0;
2865                         vports[i]->fc_flag |= FC_OFFLINE_MODE;
2866                         spin_unlock_irq(shost->host_lock);
2867                 }
2868         lpfc_destroy_vport_work_array(phba, vports);
2869 }
2870 
/**
 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the SCSI buffers and IOCBs from the driver
 * list back to kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 *
 * Locking: takes the irq-disabling hbalock for the whole teardown and
 * nests the per-list put/get spinlocks inside it, matching the lock
 * ordering used elsewhere in this file (get lock before put lock is not
 * required here since each is taken alone under hbalock).
 **/
static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *sb, *sb_next;
	struct lpfc_iocbq *io, *io_next;

	spin_lock_irq(&phba->hbalock);

	/* Release all the lpfc_scsi_bufs maintained by this host. */

	/* Drain the "put" list first: return each DMA data buffer to its
	 * pool, then free the tracking structure and drop the count.
	 */
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
				 list) {
		list_del(&sb->list);
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_put_lock);

	/* Same treatment for the "get" list. */
	spin_lock(&phba->scsi_buf_list_get_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
				 list) {
		list_del(&sb->list);
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_get_lock);

	/* Release all the lpfc_iocbq entries maintained by this host. */
	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
		list_del(&io->list);
		kfree(io);
		phba->total_iocbq_bufs--;
	}

	spin_unlock_irq(&phba->hbalock);
}
2920 
/**
 * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the sizes of the current els and allocated
 * scsi sgl lists, and then goes through all sgls to updates the physical
 * XRIs assigned due to port function reset. During port initialization, the
 * current els and allocated scsi sgl lists are 0s.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 *   -ENOMEM - sgl/mbuf allocation or XRI assignment failed; all ELS sgls
 *             and SCSI buffers are released on this path.
 **/
int
lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
	LIST_HEAD(els_sgl_list);
	LIST_HEAD(scsi_sgl_list);
	int rc;

	/*
	 * update on pci function's els xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl expanded */
		xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3157 ELS xri-sgl count increased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		/* allocate the additional els sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"2562 Failure to allocate an "
						"ELS sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = GEN_BUFF_TYPE;
			sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
							   &sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"2563 Failure to allocate an "
						"ELS mbuf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
			sglq_entry->state = SGL_FREED;
			/* Stage on a local list; splice under hbalock below
			 * so the shared sgl list is never seen half-built.
			 */
			list_add_tail(&sglq_entry->list, &els_sgl_list);
		}
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
		spin_unlock_irq(&phba->hbalock);
	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl shrinked */
		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3158 ELS xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		/* Move the whole shared list onto a local list, trim the
		 * excess entries without holding the lock, then splice the
		 * remainder back.
		 */
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
		spin_unlock_irq(&phba->hbalock);
		/* release extra els sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&els_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				lpfc_mbuf_free(phba, sglq_entry->virt,
					       sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3163 ELS xri-sgl count unchanged: %d\n",
				els_xri_cnt);
	phba->sli4_hba.els_xri_cnt = els_xri_cnt;

	/* update xris to els sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_sgl_list, list) {
		/* Re-assign a fresh physical XRI to every ELS sgl after the
		 * port function reset invalidated the old mappings.
		 */
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2400 Failed to allocate xri for "
					"ELS sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}

	/*
	 * update on pci function's allocated scsi xri-sgl list
	 */
	phba->total_scsi_bufs = 0;

	/* maximum number of xris available for scsi buffers */
	phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
				      els_xri_cnt;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2401 Current allocated SCSI xri-sgl count:%d, "
			"maximum  SCSI xri count:%d\n",
			phba->sli4_hba.scsi_xri_cnt,
			phba->sli4_hba.scsi_xri_max);

	/* Collect both SCSI buffer lists onto one local list; get lock is
	 * taken (irq-disabling) before the put lock, matching the nesting
	 * used at the end of this routine.
	 */
	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
	list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
		/* max scsi xri shrinked below the allocated scsi buffers */
		scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
					phba->sli4_hba.scsi_xri_max;
		/* release the extra allocated scsi buffers */
		for (i = 0; i < scsi_xri_cnt; i++) {
			list_remove_head(&scsi_sgl_list, psb,
					 struct lpfc_scsi_buf, list);
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data,
				      psb->dma_handle);
			kfree(psb);
		}
		spin_lock_irq(&phba->scsi_buf_list_get_lock);
		phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
	}

	/* update xris associated to remaining allocated scsi buffers */
	psb = NULL;
	psb_next = NULL;
	list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2560 Failed to allocate xri for "
					"scsi buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	/* Everything surviving goes back on the get list; the put list was
	 * spliced (not splice_init) above, so reinitialize it here.
	 */
	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	return 0;

out_free_mem:
	lpfc_free_els_sgl_list(phba);
	lpfc_scsi_free(phba);
	return rc;
}
3098 
3099 /**
3100  * lpfc_create_port - Create an FC port
3101  * @phba: pointer to lpfc hba data structure.
3102  * @instance: a unique integer ID to this FC port.
3103  * @dev: pointer to the device data structure.
3104  *
3105  * This routine creates a FC port for the upper layer protocol. The FC port
3106  * can be created on top of either a physical port or a virtual port provided
3107  * by the HBA. This routine also allocates a SCSI host data structure (shost)
3108  * and associates the FC port created before adding the shost into the SCSI
3109  * layer.
3110  *
3111  * Return codes
3112  *   @vport - pointer to the virtual N_Port data structure.
3113  *   NULL - port create failed.
3114  **/
3115 struct lpfc_vport *
3116 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
3117 {
3118         struct lpfc_vport *vport;
3119         struct Scsi_Host  *shost;
3120         int error = 0;
3121 
3122         if (dev != &phba->pcidev->dev)
3123                 shost = scsi_host_alloc(&lpfc_vport_template,
3124                                         sizeof(struct lpfc_vport));
3125         else
3126                 shost = scsi_host_alloc(&lpfc_template,
3127                                         sizeof(struct lpfc_vport));
3128         if (!shost)
3129                 goto out;
3130 
3131         vport = (struct lpfc_vport *) shost->hostdata;
3132         vport->phba = phba;
3133         vport->load_flag |= FC_LOADING;
3134         vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3135         vport->fc_rscn_flush = 0;
3136 
3137         lpfc_get_vport_cfgparam(vport);
3138         shost->unique_id = instance;
3139         shost->max_id = LPFC_MAX_TARGET;
3140         shost->max_lun = vport->cfg_max_luns;
3141         shost->this_id = -1;
3142         shost->max_cmd_len = 16;
3143         if (phba->sli_rev == LPFC_SLI_REV4) {
3144                 shost->dma_boundary =
3145                         phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
3146                 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
3147         }
3148 
3149         /*
3150          * Set initial can_queue value since 0 is no longer supported and
3151          * scsi_add_host will fail. This will be adjusted later based on the
3152          * max xri value determined in hba setup.
3153          */
3154         shost->can_queue = phba->cfg_hba_queue_depth - 10;
3155         if (dev != &phba->pcidev->dev) {
3156                 shost->transportt = lpfc_vport_transport_template;
3157                 vport->port_type = LPFC_NPIV_PORT;
3158         } else {
3159                 shost->transportt = lpfc_transport_template;
3160                 vport->port_type = LPFC_PHYSICAL_PORT;
3161         }
3162 
3163         /* Initialize all internally managed lists. */
3164         INIT_LIST_HEAD(&vport->fc_nodes);
3165         INIT_LIST_HEAD(&vport->rcv_buffer_list);
3166         spin_lock_init(&vport->work_port_lock);
3167 
3168         init_timer(&vport->fc_disctmo);
3169         vport->fc_disctmo.function = lpfc_disc_timeout;
3170         vport->fc_disctmo.data = (unsigned long)vport;
3171 
3172         init_timer(&vport->fc_fdmitmo);
3173         vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
3174         vport->fc_fdmitmo.data = (unsigned long)vport;
3175 
3176         init_timer(&vport->els_tmofunc);
3177         vport->els_tmofunc.function = lpfc_els_timeout;
3178         vport->els_tmofunc.data = (unsigned long)vport;
3179 
3180         init_timer(&vport->delayed_disc_tmo);
3181         vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
3182         vport->delayed_disc_tmo.data = (unsigned long)vport;
3183 
3184         error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
3185         if (error)
3186                 goto out_put_shost;
3187 
3188         spin_lock_irq(&phba->hbalock);
3189         list_add_tail(&vport->listentry, &phba->port_list);
3190         spin_unlock_irq(&phba->hbalock);
3191         return vport;
3192 
3193 out_put_shost:
3194         scsi_host_put(shost);
3195 out:
3196         return NULL;
3197 }
3198 
/**
 * destroy_port -  destroy an FC port
 * @vport: pointer to an lpfc virtual N_Port data structure.
 *
 * This routine destroys a FC port from the upper layer protocol. All the
 * resources associated with the port are released.
 **/
void
destroy_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	/* Detach in top-down order: debugfs first, then the FC transport
	 * attributes, then the SCSI midlayer host itself.
	 */
	lpfc_debugfs_terminate(vport);
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Unlink the vport from the HBA's port list under hbalock */
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	/* Release the remaining vport-owned resources */
	lpfc_cleanup(vport);
	return;
}
3223 
3224 /**
3225  * lpfc_get_instance - Get a unique integer ID
3226  *
3227  * This routine allocates a unique integer ID from lpfc_hba_index pool. It
3228  * uses the kernel idr facility to perform the task.
3229  *
3230  * Return codes:
3231  *   instance - a unique integer ID allocated as the new instance.
3232  *   -1 - lpfc get instance failed.
3233  **/
3234 int
3235 lpfc_get_instance(void)
3236 {
3237         int ret;
3238 
3239         ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
3240         return ret < 0 ? -1 : ret;
3241 }
3242 
3243 /**
3244  * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
3245  * @shost: pointer to SCSI host data structure.
3246  * @time: elapsed time of the scan in jiffies.
3247  *
3248  * This routine is called by the SCSI layer with a SCSI host to determine
3249  * whether the scan host is finished.
3250  *
3251  * Note: there is no scan_start function as adapter initialization will have
3252  * asynchronously kicked off the link initialization.
3253  *
3254  * Return codes
3255  *   0 - SCSI host scan is not over yet.
3256  *   1 - SCSI host scan is over.
3257  **/
3258 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3259 {
3260         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3261         struct lpfc_hba   *phba = vport->phba;
3262         int stat = 0;
3263 
3264         spin_lock_irq(shost->host_lock);
3265 
3266         if (vport->load_flag & FC_UNLOADING) {
3267                 stat = 1;
3268                 goto finished;
3269         }
3270         if (time >= msecs_to_jiffies(30 * 1000)) {
3271                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3272                                 "0461 Scanning longer than 30 "
3273                                 "seconds.  Continuing initialization\n");
3274                 stat = 1;
3275                 goto finished;
3276         }
3277         if (time >= msecs_to_jiffies(15 * 1000) &&
3278             phba->link_state <= LPFC_LINK_DOWN) {
3279                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3280                                 "0465 Link down longer than 15 "
3281                                 "seconds.  Continuing initialization\n");
3282                 stat = 1;
3283                 goto finished;
3284         }
3285 
3286         if (vport->port_state != LPFC_VPORT_READY)
3287                 goto finished;
3288         if (vport->num_disc_nodes || vport->fc_prli_sent)
3289                 goto finished;
3290         if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
3291                 goto finished;
3292         if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
3293                 goto finished;
3294 
3295         stat = 1;
3296 
3297 finished:
3298         spin_unlock_irq(shost->host_lock);
3299         return stat;
3300 }
3301 
3302 /**
3303  * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
3304  * @shost: pointer to SCSI host data structure.
3305  *
3306  * This routine initializes a given SCSI host attributes on a FC port. The
3307  * SCSI host can be either on top of a physical port or a virtual port.
3308  **/
3309 void lpfc_host_attrib_init(struct Scsi_Host *shost)
3310 {
3311         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3312         struct lpfc_hba   *phba = vport->phba;
3313         /*
3314          * Set fixed host attributes.  Must done after lpfc_sli_hba_setup().
3315          */
3316 
3317         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
3318         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3319         fc_host_supported_classes(shost) = FC_COS_CLASS3;
3320 
3321         memset(fc_host_supported_fc4s(shost), 0,
3322                sizeof(fc_host_supported_fc4s(shost)));
3323         fc_host_supported_fc4s(shost)[2] = 1;
3324         fc_host_supported_fc4s(shost)[7] = 1;
3325 
3326         lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
3327                                  sizeof fc_host_symbolic_name(shost));
3328 
3329         fc_host_supported_speeds(shost) = 0;
3330         if (phba->lmt & LMT_16Gb)
3331                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
3332         if (phba->lmt & LMT_10Gb)
3333                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
3334         if (phba->lmt & LMT_8Gb)
3335                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
3336         if (phba->lmt & LMT_4Gb)
3337                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
3338         if (phba->lmt & LMT_2Gb)
3339                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
3340         if (phba->lmt & LMT_1Gb)
3341                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
3342 
3343         fc_host_maxframe_size(shost) =
3344                 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
3345                 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
3346 
3347         fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
3348 
3349         /* This value is also unchanging */
3350         memset(fc_host_active_fc4s(shost), 0,
3351                sizeof(fc_host_active_fc4s(shost)));
3352         fc_host_active_fc4s(shost)[2] = 1;
3353         fc_host_active_fc4s(shost)[7] = 1;
3354 
3355         fc_host_max_npiv_vports(shost) = phba->max_vpi;
3356         spin_lock_irq(shost->host_lock);
3357         vport->load_flag &= ~FC_LOADING;
3358         spin_unlock_irq(shost->host_lock);
3359 }
3360 
/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI3 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 *
 * NOTE: the writel/readl pairs below are order-sensitive; each readl
 * flushes the preceding posted PCI write before proceeding.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}
3383 
/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI4 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device. Unlike the SLI3 variant there is no register banging here; the
 * interrupt state is tracked via the intr_enable software flag.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}
3400 
/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
 * the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	/* Dispatch to lpfc_stop_port_s3 or _s4 via the jump table */
	phba->lpfc_stop_port(phba);
}
3413 
3414 /**
3415  * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
3416  * @phba: Pointer to hba for which this call is being executed.
3417  *
3418  * This routine starts the timer waiting for the FCF rediscovery to complete.
3419  **/
3420 void
3421 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
3422 {
3423         unsigned long fcf_redisc_wait_tmo =
3424                 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
3425         /* Start fcf rediscovery wait period timer */
3426         mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
3427         spin_lock_irq(&phba->hbalock);
3428         /* Allow action to new fcf asynchronous event */
3429         phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
3430         /* Mark the FCF rediscovery pending state */
3431         phba->fcf.fcf_flag |= FCF_REDISC_PEND;
3432         spin_unlock_irq(&phba->hbalock);
3433 }
3434 
3435 /**
3436  * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
3437  * @ptr: Map to lpfc_hba data structure pointer.
3438  *
3439  * This routine is invoked when waiting for FCF table rediscover has been
3440  * timed out. If new FCF record(s) has (have) been discovered during the
3441  * wait period, a new FCF event shall be added to the FCOE async event
3442  * list, and then worker thread shall be waked up for processing from the
3443  * worker thread context.
3444  **/
3445 void
3446 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
3447 {
3448         struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
3449 
3450         /* Don't send FCF rediscovery event if timer cancelled */
3451         spin_lock_irq(&phba->hbalock);
3452         if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3453                 spin_unlock_irq(&phba->hbalock);
3454                 return;
3455         }
3456         /* Clear FCF rediscovery timer pending flag */
3457         phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3458         /* FCF rediscovery event to worker thread */
3459         phba->fcf.fcf_flag |= FCF_REDISC_EVT;
3460         spin_unlock_irq(&phba->hbalock);
3461         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3462                         "2776 FCF rediscover quiescent timer expired\n");
3463         /* wake up worker thread */
3464         lpfc_worker_wake_up(phba);
3465 }
3466 
3467 /**
3468  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3469  * @phba: pointer to lpfc hba data structure.
3470  * @acqe_link: pointer to the async link completion queue entry.
3471  *
3472  * This routine is to parse the SLI4 link-attention link fault code and
3473  * translate it into the base driver's read link attention mailbox command
3474  * status.
3475  *
3476  * Return: Link-attention status in terms of base driver's coding.
3477  **/
3478 static uint16_t
3479 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3480                            struct lpfc_acqe_link *acqe_link)
3481 {
3482         uint16_t latt_fault;
3483 
3484         switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3485         case LPFC_ASYNC_LINK_FAULT_NONE:
3486         case LPFC_ASYNC_LINK_FAULT_LOCAL:
3487         case LPFC_ASYNC_LINK_FAULT_REMOTE:
3488                 latt_fault = 0;
3489                 break;
3490         default:
3491                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3492                                 "0398 Invalid link fault code: x%x\n",
3493                                 bf_get(lpfc_acqe_link_fault, acqe_link));
3494                 latt_fault = MBXERR_ERROR;
3495                 break;
3496         }
3497         return latt_fault;
3498 }
3499 
3500 /**
3501  * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3502  * @phba: pointer to lpfc hba data structure.
3503  * @acqe_link: pointer to the async link completion queue entry.
3504  *
3505  * This routine is to parse the SLI4 link attention type and translate it
3506  * into the base driver's link attention type coding.
3507  *
3508  * Return: Link attention type in terms of base driver's coding.
3509  **/
3510 static uint8_t
3511 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3512                           struct lpfc_acqe_link *acqe_link)
3513 {
3514         uint8_t att_type;
3515 
3516         switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3517         case LPFC_ASYNC_LINK_STATUS_DOWN:
3518         case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3519                 att_type = LPFC_ATT_LINK_DOWN;
3520                 break;
3521         case LPFC_ASYNC_LINK_STATUS_UP:
3522                 /* Ignore physical link up events - wait for logical link up */
3523                 att_type = LPFC_ATT_RESERVED;
3524                 break;
3525         case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3526                 att_type = LPFC_ATT_LINK_UP;
3527                 break;
3528         default:
3529                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3530                                 "0399 Invalid link attention type: x%x\n",
3531                                 bf_get(lpfc_acqe_link_status, acqe_link));
3532                 att_type = LPFC_ATT_RESERVED;
3533                 break;
3534         }
3535         return att_type;
3536 }
3537 
3538 /**
3539  * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3540  * @phba: pointer to lpfc hba data structure.
3541  * @acqe_link: pointer to the async link completion queue entry.
3542  *
3543  * This routine is to parse the SLI4 link-attention link speed and translate
3544  * it into the base driver's link-attention link speed coding.
3545  *
3546  * Return: Link-attention link speed in terms of base driver's coding.
3547  **/
3548 static uint8_t
3549 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3550                                 struct lpfc_acqe_link *acqe_link)
3551 {
3552         uint8_t link_speed;
3553 
3554         switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3555         case LPFC_ASYNC_LINK_SPEED_ZERO:
3556         case LPFC_ASYNC_LINK_SPEED_10MBPS:
3557         case LPFC_ASYNC_LINK_SPEED_100MBPS:
3558                 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3559                 break;
3560         case LPFC_ASYNC_LINK_SPEED_1GBPS:
3561                 link_speed = LPFC_LINK_SPEED_1GHZ;
3562                 break;
3563         case LPFC_ASYNC_LINK_SPEED_10GBPS:
3564                 link_speed = LPFC_LINK_SPEED_10GHZ;
3565                 break;
3566         default:
3567                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3568                                 "0483 Invalid link-attention link speed: x%x\n",
3569                                 bf_get(lpfc_acqe_link_speed, acqe_link));
3570                 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3571                 break;
3572         }
3573         return link_speed;
3574 }
3575 
3576 /**
3577  * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
3578  * @phba: pointer to lpfc hba data structure.
3579  *
3580  * This routine is to get an SLI3 FC port's link speed in Mbps.
3581  *
3582  * Return: link speed in terms of Mbps.
3583  **/
3584 uint32_t
3585 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
3586 {
3587         uint32_t link_speed;
3588 
3589         if (!lpfc_is_link_up(phba))
3590                 return 0;
3591 
3592         switch (phba->fc_linkspeed) {
3593         case LPFC_LINK_SPEED_1GHZ:
3594                 link_speed = 1000;
3595                 break;
3596         case LPFC_LINK_SPEED_2GHZ:
3597                 link_speed = 2000;
3598                 break;
3599         case LPFC_LINK_SPEED_4GHZ:
3600                 link_speed = 4000;
3601                 break;
3602         case LPFC_LINK_SPEED_8GHZ:
3603                 link_speed = 8000;
3604                 break;
3605         case LPFC_LINK_SPEED_10GHZ:
3606                 link_speed = 10000;
3607                 break;
3608         case LPFC_LINK_SPEED_16GHZ:
3609                 link_speed = 16000;
3610                 break;
3611         default:
3612                 link_speed = 0;
3613         }
3614         return link_speed;
3615 }
3616 
3617 /**
3618  * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
3619  * @phba: pointer to lpfc hba data structure.
3620  * @evt_code: asynchronous event code.
3621  * @speed_code: asynchronous event link speed code.
3622  *
3623  * This routine is to parse the giving SLI4 async event link speed code into
3624  * value of Mbps for the link speed.
3625  *
3626  * Return: link speed in terms of Mbps.
3627  **/
3628 static uint32_t
3629 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
3630                            uint8_t speed_code)
3631 {
3632         uint32_t port_speed;
3633 
3634         switch (evt_code) {
3635         case LPFC_TRAILER_CODE_LINK:
3636                 switch (speed_code) {
3637                 case LPFC_EVT_CODE_LINK_NO_LINK:
3638                         port_speed = 0;
3639                         break;
3640                 case LPFC_EVT_CODE_LINK_10_MBIT:
3641                         port_speed = 10;
3642                         break;
3643                 case LPFC_EVT_CODE_LINK_100_MBIT:
3644                         port_speed = 100;
3645                         break;
3646                 case LPFC_EVT_CODE_LINK_1_GBIT:
3647                         port_speed = 1000;
3648                         break;
3649                 case LPFC_EVT_CODE_LINK_10_GBIT:
3650                         port_speed = 10000;
3651                         break;
3652                 default:
3653                         port_speed = 0;
3654                 }
3655                 break;
3656         case LPFC_TRAILER_CODE_FC:
3657                 switch (speed_code) {
3658                 case LPFC_EVT_CODE_FC_NO_LINK:
3659                         port_speed = 0;
3660                         break;
3661                 case LPFC_EVT_CODE_FC_1_GBAUD:
3662                         port_speed = 1000;
3663                         break;
3664                 case LPFC_EVT_CODE_FC_2_GBAUD:
3665                         port_speed = 2000;
3666                         break;
3667                 case LPFC_EVT_CODE_FC_4_GBAUD:
3668                         port_speed = 4000;
3669                         break;
3670                 case LPFC_EVT_CODE_FC_8_GBAUD:
3671                         port_speed = 8000;
3672                         break;
3673                 case LPFC_EVT_CODE_FC_10_GBAUD:
3674                         port_speed = 10000;
3675                         break;
3676                 case LPFC_EVT_CODE_FC_16_GBAUD:
3677                         port_speed = 16000;
3678                         break;
3679                 default:
3680                         port_speed = 0;
3681                 }
3682                 break;
3683         default:
3684                 port_speed = 0;
3685         }
3686         return port_speed;
3687 }
3688 
3689 /**
3690  * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
3691  * @phba: pointer to lpfc hba data structure.
3692  * @acqe_link: pointer to the async link completion queue entry.
3693  *
3694  * This routine is to handle the SLI4 asynchronous FCoE link event.
3695  **/
3696 static void
3697 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3698                          struct lpfc_acqe_link *acqe_link)
3699 {
3700         struct lpfc_dmabuf *mp;
3701         LPFC_MBOXQ_t *pmb;
3702         MAILBOX_t *mb;
3703         struct lpfc_mbx_read_top *la;
3704         uint8_t att_type;
3705         int rc;
3706 
3707         att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3708         if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
3709                 return;
3710         phba->fcoe_eventtag = acqe_link->event_tag;
3711         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3712         if (!pmb) {
3713                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3714                                 "0395 The mboxq allocation failed\n");
3715                 return;
3716         }
3717         mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3718         if (!mp) {
3719                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3720                                 "0396 The lpfc_dmabuf allocation failed\n");
3721                 goto out_free_pmb;
3722         }
3723         mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3724         if (!mp->virt) {
3725                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3726                                 "0397 The mbuf allocation failed\n");
3727                 goto out_free_dmabuf;
3728         }
3729 
3730         /* Cleanup any outstanding ELS commands */
3731         lpfc_els_flush_all_cmd(phba);
3732 
3733         /* Block ELS IOCBs until we have done process link event */
3734         phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3735 
3736         /* Update link event statistics */
3737         phba->sli.slistat.link_event++;
3738 
3739         /* Create lpfc_handle_latt mailbox command from link ACQE */
3740         lpfc_read_topology(phba, pmb, mp);
3741         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3742         pmb->vport = phba->pport;
3743 
3744         /* Keep the link status for extra SLI4 state machine reference */
3745         phba->sli4_hba.link_state.speed =
3746                         lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
3747                                 bf_get(lpfc_acqe_link_speed, acqe_link));
3748         phba->sli4_hba.link_state.duplex =
3749                                 bf_get(lpfc_acqe_link_duplex, acqe_link);
3750         phba->sli4_hba.link_state.status =
3751                                 bf_get(lpfc_acqe_link_status, acqe_link);
3752         phba->sli4_hba.link_state.type =
3753                                 bf_get(lpfc_acqe_link_type, acqe_link);
3754         phba->sli4_hba.link_state.number =
3755                                 bf_get(lpfc_acqe_link_number, acqe_link);
3756         phba->sli4_hba.link_state.fault =
3757                                 bf_get(lpfc_acqe_link_fault, acqe_link);
3758         phba->sli4_hba.link_state.logical_speed =
3759                         bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
3760 
3761         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3762                         "2900 Async FC/FCoE Link event - Speed:%dGBit "
3763                         "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
3764                         "Logical speed:%dMbps Fault:%d\n",
3765                         phba->sli4_hba.link_state.speed,
3766                         phba->sli4_hba.link_state.topology,
3767                         phba->sli4_hba.link_state.status,
3768                         phba->sli4_hba.link_state.type,
3769                         phba->sli4_hba.link_state.number,
3770                         phba->sli4_hba.link_state.logical_speed,
3771                         phba->sli4_hba.link_state.fault);
3772         /*
3773          * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
3774          * topology info. Note: Optional for non FC-AL ports.
3775          */
3776         if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3777                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3778                 if (rc == MBX_NOT_FINISHED)
3779                         goto out_free_dmabuf;
3780                 return;
3781         }
3782         /*
3783          * For FCoE Mode: fill in all the topology information we need and call
3784          * the READ_TOPOLOGY completion routine to continue without actually
3785          * sending the READ_TOPOLOGY mailbox command to the port.
3786          */
3787         /* Parse and translate status field */
3788         mb = &pmb->u.mb;
3789         mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3790 
3791         /* Parse and translate link attention fields */
3792         la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3793         la->eventTag = acqe_link->event_tag;
3794         bf_set(lpfc_mbx_read_top_att_type, la, att_type);
3795         bf_set(lpfc_mbx_read_top_link_spd, la,
3796                lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
3797 
3798         /* Fake the the following irrelvant fields */
3799         bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
3800         bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
3801         bf_set(lpfc_mbx_read_top_il, la, 0);
3802         bf_set(lpfc_mbx_read_top_pb, la, 0);
3803         bf_set(lpfc_mbx_read_top_fa, la, 0);
3804         bf_set(lpfc_mbx_read_top_mm, la, 0);
3805 
3806         /* Invoke the lpfc_handle_latt mailbox command callback function */
3807         lpfc_mbx_cmpl_read_topology(phba, pmb);
3808 
3809         return;
3810 
3811 out_free_dmabuf:
3812         kfree(mp);
3813 out_free_pmb:
3814         mempool_free(pmb, phba->mbox_mem_pool);
3815 }
3816 
3817 /**
3818  * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
3819  * @phba: pointer to lpfc hba data structure.
3820  * @acqe_fc: pointer to the async fc completion queue entry.
3821  *
3822  * This routine is to handle the SLI4 asynchronous FC event. It will simply log
3823  * that the event was received and then issue a read_topology mailbox command so
3824  * that the rest of the driver will treat it the same as SLI3.
3825  **/
3826 static void
3827 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3828 {
3829         struct lpfc_dmabuf *mp;
3830         LPFC_MBOXQ_t *pmb;
3831         int rc;
3832 
3833         if (bf_get(lpfc_trailer_type, acqe_fc) !=
3834             LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
3835                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3836                                 "2895 Non FC link Event detected.(%d)\n",
3837                                 bf_get(lpfc_trailer_type, acqe_fc));
3838                 return;
3839         }
3840         /* Keep the link status for extra SLI4 state machine reference */
3841         phba->sli4_hba.link_state.speed =
3842                         lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
3843                                 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
3844         phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
3845         phba->sli4_hba.link_state.topology =
3846                                 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
3847         phba->sli4_hba.link_state.status =
3848                                 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
3849         phba->sli4_hba.link_state.type =
3850                                 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
3851         phba->sli4_hba.link_state.number =
3852                                 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
3853         phba->sli4_hba.link_state.fault =
3854                                 bf_get(lpfc_acqe_link_fault, acqe_fc);
3855         phba->sli4_hba.link_state.logical_speed =
3856                                 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
3857         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3858                         "2896 Async FC event - Speed:%dGBaud Topology:x%x "
3859                         "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
3860                         "%dMbps Fault:%d\n",
3861                         phba->sli4_hba.link_state.speed,
3862                         phba->sli4_hba.link_state.topology,
3863                         phba->sli4_hba.link_state.status,
3864                         phba->sli4_hba.link_state.type,
3865                         phba->sli4_hba.link_state.number,
3866                         phba->sli4_hba.link_state.logical_speed,
3867                         phba->sli4_hba.link_state.fault);
3868         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3869         if (!pmb) {
3870                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3871                                 "2897 The mboxq allocation failed\n");
3872                 return;
3873         }
3874         mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3875         if (!mp) {
3876                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3877                                 "2898 The lpfc_dmabuf allocation failed\n");
3878                 goto out_free_pmb;
3879         }
3880         mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3881         if (!mp->virt) {
3882                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3883                                 "2899 The mbuf allocation failed\n");
3884                 goto out_free_dmabuf;
3885         }
3886 
3887         /* Cleanup any outstanding ELS commands */
3888         lpfc_els_flush_all_cmd(phba);
3889 
3890         /* Block ELS IOCBs until we have done process link event */
3891         phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3892 
3893         /* Update link event statistics */
3894         phba->sli.slistat.link_event++;
3895 
3896         /* Create lpfc_handle_latt mailbox command from link ACQE */
3897         lpfc_read_topology(phba, pmb, mp);
3898         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3899         pmb->vport = phba->pport;
3900 
3901         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3902         if (rc == MBX_NOT_FINISHED)
3903                 goto out_free_dmabuf;
3904         return;
3905 
3906 out_free_dmabuf:
3907         kfree(mp);
3908 out_free_pmb:
3909         mempool_free(pmb, phba->mbox_mem_pool);
3910 }
3911 
3912 /**
3913  * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
3914  * @phba: pointer to lpfc hba data structure.
3915  * @acqe_fc: pointer to the async SLI completion queue entry.
3916  *
3917  * This routine is to handle the SLI4 asynchronous SLI events.
3918  **/
3919 static void
3920 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
3921 {
3922         char port_name;
3923         char message[128];
3924         uint8_t status;
3925         struct lpfc_acqe_misconfigured_event *misconfigured;
3926 
3927         /* special case misconfigured event as it contains data for all ports */
3928         if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3929                  LPFC_SLI_INTF_IF_TYPE_2) ||
3930                 (bf_get(lpfc_trailer_type, acqe_sli) !=
3931                         LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) {
3932                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3933                                 "2901 Async SLI event - Event Data1:x%08x Event Data2:"
3934                                 "x%08x SLI Event Type:%d\n",
3935                                 acqe_sli->event_data1, acqe_sli->event_data2,
3936                                 bf_get(lpfc_trailer_type, acqe_sli));
3937                 return;
3938         }
3939 
3940         port_name = phba->Port[0];
3941         if (port_name == 0x00)
3942                 port_name = '?'; /* get port name is empty */
3943 
3944         misconfigured = (struct lpfc_acqe_misconfigured_event *)
3945                                         &acqe_sli->event_data1;
3946 
3947         /* fetch the status for this port */
3948         switch (phba->sli4_hba.lnk_info.lnk_no) {
3949         case LPFC_LINK_NUMBER_0:
3950                 status = bf_get(lpfc_sli_misconfigured_port0,
3951                                         &misconfigured->theEvent);
3952                 break;
3953         case LPFC_LINK_NUMBER_1:
3954                 status = bf_get(lpfc_sli_misconfigured_port1,
3955                                         &misconfigured->theEvent);
3956                 break;
3957         case LPFC_LINK_NUMBER_2:
3958                 status = bf_get(lpfc_sli_misconfigured_port2,
3959                                         &misconfigured->theEvent);
3960                 break;
3961         case LPFC_LINK_NUMBER_3:
3962                 status = bf_get(lpfc_sli_misconfigured_port3,
3963                                         &misconfigured->theEvent);
3964                 break;
3965         default:
3966                 status = ~LPFC_SLI_EVENT_STATUS_VALID;
3967                 break;
3968         }
3969 
3970         switch (status) {
3971         case LPFC_SLI_EVENT_STATUS_VALID:
3972                 return; /* no message if the sfp is okay */
3973         case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
3974                 sprintf(message, "Optics faulted/incorrectly installed/not " \
3975                                 "installed - Reseat optics, if issue not "
3976                                 "resolved, replace.");
3977                 break;
3978         case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
3979                 sprintf(message,
3980                         "Optics of two types installed - Remove one optic or " \
3981                         "install matching pair of optics.");
3982                 break;
3983         case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
3984                 sprintf(message, "Incompatible optics - Replace with " \
3985                                 "compatible optics for card to function.");
3986                 break;
3987         default:
3988                 /* firmware is reporting a status we don't know about */
3989                 sprintf(message, "Unknown event status x%02x", status);
3990                 break;
3991         }
3992 
3993         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3994                         "3176 Misconfigured Physical Port - "
3995                         "Port Name %c %s\n", port_name, message);
3996 }
3997 
3998 /**
3999  * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
4000  * @vport: pointer to vport data structure.
4001  *
4002  * This routine is to perform Clear Virtual Link (CVL) on a vport in
4003  * response to a CVL event.
4004  *
4005  * Return the pointer to the ndlp with the vport if successful, otherwise
4006  * return NULL.
4007  **/
4008 static struct lpfc_nodelist *
4009 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
4010 {
4011         struct lpfc_nodelist *ndlp;
4012         struct Scsi_Host *shost;
4013         struct lpfc_hba *phba;
4014 
4015         if (!vport)
4016                 return NULL;
4017         phba = vport->phba;
4018         if (!phba)
4019                 return NULL;
4020         ndlp = lpfc_findnode_did(vport, Fabric_DID);
4021         if (!ndlp) {
4022                 /* Cannot find existing Fabric ndlp, so allocate a new one */
4023                 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4024                 if (!ndlp)
4025                         return 0;
4026                 lpfc_nlp_init(vport, ndlp, Fabric_DID);
4027                 /* Set the node type */
4028                 ndlp->nlp_type |= NLP_FABRIC;
4029                 /* Put ndlp onto node list */
4030                 lpfc_enqueue_node(vport, ndlp);
4031         } else if (!NLP_CHK_NODE_ACT(ndlp)) {
4032                 /* re-setup ndlp without removing from node list */
4033                 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
4034                 if (!ndlp)
4035                         return 0;
4036         }
4037         if ((phba->pport->port_state < LPFC_FLOGI) &&
4038                 (phba->pport->port_state != LPFC_VPORT_FAILED))
4039                 return NULL;
4040         /* If virtual link is not yet instantiated ignore CVL */
4041         if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
4042                 && (vport->port_state != LPFC_VPORT_FAILED))
4043                 return NULL;
4044         shost = lpfc_shost_from_vport(vport);
4045         if (!shost)
4046                 return NULL;
4047         lpfc_linkdown_port(vport);
4048         lpfc_cleanup_pending_mbox(vport);
4049         spin_lock_irq(shost->host_lock);
4050         vport->fc_flag |= FC_VPORT_CVL_RCVD;
4051         spin_unlock_irq(shost->host_lock);
4052 
4053         return ndlp;
4054 }
4055 
4056 /**
4057  * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
4058  * @vport: pointer to lpfc hba data structure.
4059  *
4060  * This routine is to perform Clear Virtual Link (CVL) on all vports in
4061  * response to a FCF dead event.
4062  **/
4063 static void
4064 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
4065 {
4066         struct lpfc_vport **vports;
4067         int i;
4068 
4069         vports = lpfc_create_vport_work_array(phba);
4070         if (vports)
4071                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
4072                         lpfc_sli4_perform_vport_cvl(vports[i]);
4073         lpfc_destroy_vport_work_array(phba, vports);
4074 }
4075 
/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async fcoe completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous fcoe event.  Events
 * handled: new-FCF / FCF-param-modified (kick off an FCF table scan unless
 * discovery is already in progress or done), FCF-table-full (log only),
 * FCF-dead and Clear-Virtual-Link (both start a fast FCF rediscovery /
 * failover path).  All fcf_flag manipulation is done under hbalock.
 **/
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host  *shost;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	/* Record the latest event tag for both FC and FCoE bookkeeping */
	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		/* Both events share the scan logic; only the log differs */
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			/* NOTE(review): rc is not checked here, so a read
			 * failure is silently ignored — confirm intended */
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has been in discovered state, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		/* Table full is informational only; no recovery action */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2548 FCF Table full count 0x%x tag 0x%x\n",
			bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
			acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
			"2549 FCF (x%x) disconnected from network, "
			"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			/* Rediscover request failed: undo the in-progress
			 * flag and fall back to a link-down style recovery */
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2772 Issue FCF rediscover mabilbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equalivant to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
			"2718 Clear Virtual Link Received for VPI 0x%x"
			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);

		/* CVL targets a single vport, identified by VPI */
		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		/* Check whether any other vport still has an active vlink */
		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		if (active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				/* Rediscover failed: undo flag and retry on
				 * the currently registered FCF instead */
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_DISCOVERY,
						"2774 Issue FCF rediscover "
						"mabilbox command failed, "
						"through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * the current registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0288 Unknown FCoE event type 0x%x event tag "
			"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}
4320 
/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event. DCBX event
 * handling is not implemented; the routine only records the event tag on
 * the HBA and logs an error message.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	/* Remember the latest async event tag for diagnostics */
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}
4337 
/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change.  The Port
 * reports the logical link speed in units of 10Mbps.
 **/
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	/* Keep the old speed only for the log message below */
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	/* Port reports speed in 10Mbps units; convert to Mbps */
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", prev_ll_spd,
			phba->sli4_hba.link_state.logical_speed);
}
4363 
4364 /**
4365  * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
4366  * @phba: pointer to lpfc hba data structure.
4367  *
4368  * This routine is invoked by the worker thread to process all the pending
4369  * SLI4 asynchronous events.
4370  **/
4371 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
4372 {
4373         struct lpfc_cq_event *cq_event;
4374 
4375         /* First, declare the async event has been handled */
4376         spin_lock_irq(&phba->hbalock);
4377         phba->hba_flag &= ~ASYNC_EVENT;
4378         spin_unlock_irq(&phba->hbalock);
4379         /* Now, handle all the async events */
4380         while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
4381                 /* Get the first event from the head of the event queue */
4382                 spin_lock_irq(&phba->hbalock);
4383                 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
4384                                  cq_event, struct lpfc_cq_event, list);
4385                 spin_unlock_irq(&phba->hbalock);
4386                 /* Process the asynchronous event */
4387                 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
4388                 case LPFC_TRAILER_CODE_LINK:
4389                         lpfc_sli4_async_link_evt(phba,
4390                                                  &cq_event->cqe.acqe_link);
4391                         break;
4392                 case LPFC_TRAILER_CODE_FCOE:
4393                         lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
4394                         break;
4395                 case LPFC_TRAILER_CODE_DCBX:
4396                         lpfc_sli4_async_dcbx_evt(phba,
4397                                                  &cq_event->cqe.acqe_dcbx);
4398                         break;
4399                 case LPFC_TRAILER_CODE_GRP5:
4400                         lpfc_sli4_async_grp5_evt(phba,
4401                                                  &cq_event->cqe.acqe_grp5);
4402                         break;
4403                 case LPFC_TRAILER_CODE_FC:
4404                         lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
4405                         break;
4406                 case LPFC_TRAILER_CODE_SLI:
4407                         lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
4408                         break;
4409                 default:
4410                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4411                                         "1804 Invalid asynchrous event code: "
4412                                         "x%x\n", bf_get(lpfc_trailer_code,
4413                                         &cq_event->cqe.mcqe_cmpl));
4414                         break;
4415                 }
4416                 /* Free the completion event processed to the free pool */
4417                 lpfc_sli4_cq_event_release(phba, cq_event);
4418         }
4419 }
4420 
4421 /**
4422  * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
4423  * @phba: pointer to lpfc hba data structure.
4424  *
4425  * This routine is invoked by the worker thread to process FCF table
4426  * rediscovery pending completion event.
4427  **/
4428 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
4429 {
4430         int rc;
4431 
4432         spin_lock_irq(&phba->hbalock);
4433         /* Clear FCF rediscovery timeout event */
4434         phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
4435         /* Clear driver fast failover FCF record flag */
4436         phba->fcf.failover_rec.flag = 0;
4437         /* Set state for FCF fast failover */
4438         phba->fcf.fcf_flag |= FCF_REDISC_FOV;
4439         spin_unlock_irq(&phba->hbalock);
4440 
4441         /* Scan FCF table from the first entry to re-discover SAN */
4442         lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4443                         "2777 Start post-quiescent FCF table scan\n");
4444         rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
4445         if (rc)
4446                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4447                                 "2747 Issue FCF scan read FCF mailbox "
4448                                 "command failed 0x%x\n", rc);
4449 }
4450 
4451 /**
4452  * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
4453  * @phba: pointer to lpfc hba data structure.
4454  * @dev_grp: The HBA PCI-Device group number.
4455  *
4456  * This routine is invoked to set up the per HBA PCI-Device group function
4457  * API jump table entries.
4458  *
4459  * Return: 0 if success, otherwise -ENODEV
4460  **/
4461 int
4462 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4463 {
4464         int rc;
4465 
4466         /* Set up lpfc PCI-device group */
4467         phba->pci_dev_grp = dev_grp;
4468 
4469         /* The LPFC_PCI_DEV_OC uses SLI4 */
4470         if (dev_grp == LPFC_PCI_DEV_OC)
4471                 phba->sli_rev = LPFC_SLI_REV4;
4472 
4473         /* Set up device INIT API function jump table */
4474         rc = lpfc_init_api_table_setup(phba, dev_grp);
4475         if (rc)
4476                 return -ENODEV;
4477         /* Set up SCSI API function jump table */
4478         rc = lpfc_scsi_api_table_setup(phba, dev_grp);
4479         if (rc)
4480                 return -ENODEV;
4481         /* Set up SLI API function jump table */
4482         rc = lpfc_sli_api_table_setup(phba, dev_grp);
4483         if (rc)
4484                 return -ENODEV;
4485         /* Set up MBOX API function jump table */
4486         rc = lpfc_mbox_api_table_setup(phba, dev_grp);
4487         if (rc)
4488                 return -ENODEV;
4489 
4490         return 0;
4491 }
4492 
4493 /**
4494  * lpfc_log_intr_mode - Log the active interrupt mode
4495  * @phba: pointer to lpfc hba data structure.
4496  * @intr_mode: active interrupt mode adopted.
4497  *
4498  * This routine it invoked to log the currently used active interrupt mode
4499  * to the device.
4500  **/
4501 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
4502 {
4503         switch (intr_mode) {
4504         case 0:
4505                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4506                                 "0470 Enable INTx interrupt mode.\n");
4507                 break;
4508         case 1:
4509                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4510                                 "0481 Enabled MSI interrupt mode.\n");
4511                 break;
4512         case 2:
4513                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4514                                 "0480 Enabled MSI-X interrupt mode.\n");
4515                 break;
4516         default:
4517                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4518                                 "0482 Illegal interrupt mode.\n");
4519                 break;
4520         }
4521         return;
4522 }
4523 
4524 /**
4525  * lpfc_enable_pci_dev - Enable a generic PCI device.
4526  * @phba: pointer to lpfc hba data structure.
4527  *
4528  * This routine is invoked to enable the PCI device that is common to all
4529  * PCI devices.
4530  *
4531  * Return codes
4532  *      0 - successful
4533  *      other values - error
4534  **/
4535 static int
4536 lpfc_enable_pci_dev(struct lpfc_hba *phba)
4537 {
4538         struct pci_dev *pdev;
4539         int bars = 0;
4540 
4541         /* Obtain PCI device reference */
4542         if (!phba->pcidev)
4543                 goto out_error;
4544         else
4545                 pdev = phba->pcidev;
4546         /* Select PCI BARs */
4547         bars = pci_select_bars(pdev, IORESOURCE_MEM);
4548         /* Enable PCI device */
4549         if (pci_enable_device_mem(pdev))
4550                 goto out_error;
4551         /* Request PCI resource for the device */
4552         if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
4553                 goto out_disable_device;
4554         /* Set up device as PCI master and save state for EEH */
4555         pci_set_master(pdev);
4556         pci_try_set_mwi(pdev);
4557         pci_save_state(pdev);
4558 
4559         /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
4560         if (pci_is_pcie(pdev))
4561                 pdev->needs_freset = 1;
4562 
4563         return 0;
4564 
4565 out_disable_device:
4566         pci_disable_device(pdev);
4567 out_error:
4568         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4569                         "1401 Failed to enable pci device, bars:x%x\n", bars);
4570         return -ENODEV;
4571 }
4572 
4573 /**
4574  * lpfc_disable_pci_dev - Disable a generic PCI device.
4575  * @phba: pointer to lpfc hba data structure.
4576  *
4577  * This routine is invoked to disable the PCI device that is common to all
4578  * PCI devices.
4579  **/
4580 static void
4581 lpfc_disable_pci_dev(struct lpfc_hba *phba)
4582 {
4583         struct pci_dev *pdev;
4584         int bars;
4585 
4586         /* Obtain PCI device reference */
4587         if (!phba->pcidev)
4588                 return;
4589         else
4590                 pdev = phba->pcidev;
4591         /* Select PCI BARs */
4592         bars = pci_select_bars(pdev, IORESOURCE_MEM);
4593         /* Release PCI resource and disable PCI device */
4594         pci_release_selected_regions(pdev, bars);
4595         pci_disable_device(pdev);
4596 
4597         return;
4598 }
4599 
/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * on outstanding mailbox commands.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	/* Quiesce the HBA (waiting for outstanding mailbox commands),
	 * take it offline, restart the board, and bring it back up.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}
4623 
/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads the TotalVFs field from the PCI SR-IOV extended
 * capability of the device to report the maximum number of virtual
 * functions the physical function can support.  It does not enable any
 * virtual functions itself.
 *
 * Return: the maximum number of virtual functions supported, or 0 when
 * the device has no SR-IOV extended capability.
 **/
uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	/* No SR-IOV capability means no virtual functions */
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}
4648 
/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables the PCI SR-IOV virtual functions to a physical
 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
 * enable the number of virtual functions to the physical function. As
 * not all devices support SR-IOV, the return code from the
 * pci_enable_sriov() API call is not considered an error condition for
 * most of the devices.
 **/
int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	/* Reject requests beyond what the device's SR-IOV capability
	 * advertises (TotalVFs).
	 */
	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successful enable sriov on this device "
				"with vfn number nr_vf:%d\n", nr_vfn);
	return rc;
}
4687 
/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it attached to: driver timers, host attention
 * work mask, module parameters, the SLI ring array, scatter-gather DMA buffer
 * sizing, the SLI layer, driver memory pools, and (optionally) SR-IOV
 * virtual functions.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	int rc;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* FCP polling mode timer */
	init_timer(&phba->fcp_poll_timer);
	phba->fcp_poll_timer.function = lpfc_poll_timeout;
	phba->fcp_poll_timer.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	/* Allocate the SLI3 ring array once; kept across resets */
	if (!phba->sli.ring)
		phba->sli.ring = (struct lpfc_sli_ring *)
			kzalloc(LPFC_SLI3_MAX_RING *
			sizeof(struct lpfc_sli_ring), GFP_KERNEL);
	if (!phba->sli.ring)
		return -ENOMEM;

	/*
	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	/* Initialize the host templates the configured values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), we just allocate enough BDEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a BDE for each, and a BDE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.  A failure here is logged but
	 * does not fail resource setup; SR-IOV is simply left disabled.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						 phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;
}
4833 
/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it attached to.
 **/
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Release all device driver memory previously allocated */
	lpfc_mem_free_all(phba);
}
4849 
4850 /**
4851  * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
4852  * @phba: pointer to lpfc hba data structure.
4853  *
4854  * This routine is invoked to set up the driver internal resources specific to
4855  * support the SLI-4 HBA device it attached to.
4856  *
4857  * Return codes
4858  *      0 - successful
4859  *      other values - error
4860  **/
4861 static int
4862 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4863 {
4864         struct lpfc_vector_map_info *cpup;
4865         struct lpfc_sli *psli;
4866         LPFC_MBOXQ_t *mboxq;
4867         int rc, i, hbq_count, max_buf_size;
4868         uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4869         struct lpfc_mqe *mqe;
4870         int longs;
4871         int fof_vectors = 0;
4872 
4873         /* Get all the module params for configuring this host */
4874         lpfc_get_cfgparam(phba);
4875 
4876         /* Before proceed, wait for POST done and device ready */
4877         rc = lpfc_sli4_post_status_check(phba);
4878         if (rc)
4879                 return -ENODEV;
4880 
4881         /*
4882          * Initialize timers used by driver
4883          */
4884 
4885         /* Heartbeat timer */
4886         init_timer(&phba->hb_tmofunc);
4887         phba->hb_tmofunc.function = lpfc_hb_timeout;
4888         phba->hb_tmofunc.data = (unsigned long)phba;
4889         init_timer(&phba->rrq_tmr);
4890         phba->rrq_tmr.function = lpfc_rrq_timeout;
4891         phba->rrq_tmr.data = (unsigned long)phba;
4892 
4893         psli = &phba->sli;
4894         /* MBOX heartbeat timer */
4895         init_timer(&psli->mbox_tmo);
4896         psli->mbox_tmo.function = lpfc_mbox_timeout;
4897         psli->mbox_tmo.data = (unsigned long) phba;
4898         /* Fabric block timer */
4899         init_timer(&phba->fabric_block_timer);
4900         phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4901         phba->fabric_block_timer.data = (unsigned long) phba;
4902         /* EA polling mode timer */
4903         init_timer(&phba->eratt_poll);
4904         phba->eratt_poll.function = lpfc_poll_eratt;
4905         phba->eratt_poll.data = (unsigned long) phba;
4906         /* FCF rediscover timer */
4907         init_timer(&phba->fcf.redisc_wait);
4908         phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4909         phba->fcf.redisc_wait.data = (unsigned long)phba;
4910 
4911         /*
4912          * Control structure for handling external multi-buffer mailbox
4913          * command pass-through.
4914          */
4915         memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
4916                 sizeof(struct lpfc_mbox_ext_buf_ctx));
4917         INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4918 
4919         phba->max_vpi = LPFC_MAX_VPI;
4920 
4921         /* This will be set to correct value after the read_config mbox */
4922         phba->max_vports = 0;
4923 
4924         /* Program the default value of vlan_id and fc_map */
4925         phba->valid_vlan = 0;
4926         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4927         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4928         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4929 
4930         /*
4931          * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
4932          * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
4933          */
4934         if (!phba->sli.ring)
4935                 phba->sli.ring = kzalloc(
4936                         (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
4937                         sizeof(struct lpfc_sli_ring), GFP_KERNEL);
4938         if (!phba->sli.ring)
4939                 return -ENOMEM;
4940 
4941         /*
4942          * It doesn't matter what family our adapter is in, we are
4943          * limited to 2 Pages, 512 SGEs, for our SGL.
4944          * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
4945          */
4946         max_buf_size = (2 * SLI4_PAGE_SIZE);
4947         if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
4948                 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
4949 
4950         /*
4951          * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
4952          * used to create the sg_dma_buf_pool must be dynamically calculated.
4953          */
4954 
4955         if (phba->cfg_enable_bg) {
4956                 /*
4957                  * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
4958                  * the FCP rsp, and a SGE for each. Sice we have no control
4959                  * over how many protection data segments the SCSI Layer
4960                  * will hand us (ie: there could be one for every block
4961                  * in the IO), we just allocate enough SGEs to accomidate
4962                  * our max amount and we need to limit lpfc_sg_seg_cnt to
4963                  * minimize the risk of running out.
4964                  */
4965                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4966                         sizeof(struct fcp_rsp) + max_buf_size;
4967 
4968                 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
4969                 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
4970 
4971                 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
4972                         phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
4973         } else {
4974                 /*
4975                  * The scsi_buf for a regular I/O will hold the FCP cmnd,
4976                  * the FCP rsp, a SGE for each, and a SGE for up to
4977                  * cfg_sg_seg_cnt data segments.
4978                  */
4979                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4980                         sizeof(struct fcp_rsp) +
4981                         ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
4982 
4983                 /* Total SGEs for scsi_sg_list */
4984                 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
4985                 /*
4986                  * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
4987                  * to post 1 page for the SGL.
4988                  */
4989         }
4990 
4991         /* Initialize the host templates with the updated values. */
4992         lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4993         lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4994 
4995         if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
4996                 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
4997         else
4998                 phba->cfg_sg_dma_buf_size =
4999                         SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
5000 
5001         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5002                         "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
5003                         phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5004                         phba->cfg_total_seg_cnt);
5005 
5006         /* Initialize buffer queue management fields */
5007         hbq_count = lpfc_sli_hbq_count();
5008         for (i = 0; i < hbq_count; ++i)
5009                 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5010         INIT_LIST_HEAD(&phba->rb_pend_list);
5011         phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
5012         phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
5013 
5014         /*
5015          * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
5016          */
5017         /* Initialize the Abort scsi buffer list used by driver */
5018         spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
5019         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
5020         /* This abort list used by worker thread */
5021         spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
5022 
5023         /*
5024          * Initialize driver internal slow-path work queues
5025          */
5026 
5027         /* Driver internel slow-path CQ Event pool */
5028         INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
5029         /* Response IOCB work queue list */
5030         INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
5031         /* Asynchronous event CQ Event work queue list */
5032         INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
5033         /* Fast-path XRI aborted CQ Event work queue list */
5034         INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
5035         /* Slow-path XRI aborted CQ Event work queue list */
5036         INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
5037         /* Receive queue CQ Event work queue list */
5038         INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
5039 
5040         /* Initialize extent block lists. */
5041         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
5042         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
5043         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
5044         INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
5045 
5046         /* Initialize the driver internal SLI layer lists. */
5047         lpfc_sli_setup(phba);
5048         lpfc_sli_queue_setup(phba);
5049 
5050         /* Allocate device driver memory */
5051         rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
5052         if (rc)
5053                 return -ENOMEM;
5054 
5055         /* IF Type 2 ports get initialized now. */
5056         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
5057             LPFC_SLI_INTF_IF_TYPE_2) {
5058                 rc = lpfc_pci_function_reset(phba);
5059                 if (unlikely(rc))
5060                         return -ENODEV;
5061         }
5062 
5063         /* Create the bootstrap mailbox command */
5064         rc = lpfc_create_bootstrap_mbox(phba);
5065         if (unlikely(rc))
5066                 goto out_free_mem;
5067 
5068         /* Set up the host's endian order with the device. */
5069         rc = lpfc_setup_endian_order(phba);
5070         if (unlikely(rc))
5071                 goto out_free_bsmbx;
5072 
5073         /* Set up the hba's configuration parameters. */
5074         rc = lpfc_sli4_read_config(phba);
5075         if (unlikely(rc))
5076                 goto out_free_bsmbx;
5077         rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
5078         if (unlikely(rc))
5079                 goto out_free_bsmbx;
5080 
5081         /* IF Type 0 ports get initialized now. */
5082         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
5083             LPFC_SLI_INTF_IF_TYPE_0) {
5084                 rc = lpfc_pci_function_reset(phba);
5085                 if (unlikely(rc))
5086                         goto out_free_bsmbx;
5087         }
5088 
5089         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
5090                                                        GFP_KERNEL);
5091         if (!mboxq) {
5092                 rc = -ENOMEM;
5093                 goto out_free_bsmbx;
5094         }
5095 
5096         /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
5097         lpfc_supported_pages(mboxq);
5098         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5099         if (!rc) {
5100                 mqe = &mboxq->u.mqe;
5101                 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
5102                        LPFC_MAX_SUPPORTED_PAGES);
5103                 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
5104                         switch (pn_page[i]) {
5105                         case LPFC_SLI4_PARAMETERS:
5106                                 phba->sli4_hba.pc_sli4_params.supported = 1;
5107                                 break;
5108                         default:
5109                                 break;
5110                         }
5111                 }
5112                 /* Read the port's SLI4 Parameters capabilities if supported. */
5113                 if (phba->sli4_hba.pc_sli4_params.supported)
5114                         rc = lpfc_pc_sli4_params_get(phba, mboxq);
5115                 if (rc) {
5116                         mempool_free(mboxq, phba->mbox_mem_pool);
5117                         rc = -EIO;
5118                         goto out_free_bsmbx;
5119                 }
5120         }
5121         /*
5122          * Get sli4 parameters that override parameters from Port capabilities.
5123          * If this call fails, it isn't critical unless the SLI4 parameters come
5124          * back in conflict.
5125          */
5126         rc = lpfc_get_sli4_parameters(phba, mboxq);
5127         if (rc) {
5128                 if (phba->sli4_hba.extents_in_use &&
5129                     phba->sli4_hba.rpi_hdrs_in_use) {
5130                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5131                                 "2999 Unsupported SLI4 Parameters "
5132                                 "Extents and RPI headers enabled.\n");
5133                         goto out_free_bsmbx;
5134                 }
5135         }
5136         mempool_free(mboxq, phba->mbox_mem_pool);
5137 
5138         /* Verify OAS is supported */
5139         lpfc_sli4_oas_verify(phba);
5140         if (phba->cfg_fof)
5141                 fof_vectors = 1;
5142 
5143         /* Verify all the SLI4 queues */
5144         rc = lpfc_sli4_queue_verify(phba);
5145         if (rc)
5146                 goto out_free_bsmbx;
5147 
5148         /* Create driver internal CQE event pool */
5149         rc = lpfc_sli4_cq_event_pool_create(phba);
5150         if (rc)
5151                 goto out_free_bsmbx;
5152 
5153         /* Initialize sgl lists per host */
5154         lpfc_init_sgl_list(phba);
5155 
5156         /* Allocate and initialize active sgl array */
5157         rc = lpfc_init_active_sgl_array(phba);
5158         if (rc) {
5159                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5160                                 "1430 Failed to initialize sgl list.\n");
5161                 goto out_destroy_cq_event_pool;
5162         }
5163         rc = lpfc_sli4_init_rpi_hdrs(phba);
5164         if (rc) {
5165                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5166                                 "1432 Failed to initialize rpi headers.\n");
5167                 goto out_free_active_sgl;
5168         }
5169 
5170         /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
5171         longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
5172         phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
5173                                          GFP_KERNEL);
5174         if (!phba->fcf.fcf_rr_bmask) {
5175                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5176                                 "2759 Failed allocate memory for FCF round "
5177                                 "robin failover bmask\n");
5178                 rc = -ENOMEM;
5179                 goto out_remove_rpi_hdrs;
5180         }
5181 
5182         phba->sli4_hba.fcp_eq_hdl =
5183                         kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
5184                             (fof_vectors + phba->cfg_fcp_io_channel)),
5185                             GFP_KERNEL);
5186         if (!phba->sli4_hba.fcp_eq_hdl) {
5187                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5188                                 "2572 Failed allocate memory for "
5189                                 "fast-path per-EQ handle array\n");
5190                 rc = -ENOMEM;
5191                 goto out_free_fcf_rr_bmask;
5192         }
5193 
5194         phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
5195                                   (fof_vectors +
5196                                    phba->cfg_fcp_io_channel)), GFP_KERNEL);
5197         if (!phba->sli4_hba.msix_entries) {
5198                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5199                                 "2573 Failed allocate memory for msi-x "
5200                                 "interrupt vector entries\n");
5201                 rc = -ENOMEM;
5202                 goto out_free_fcp_eq_hdl;
5203         }
5204 
5205         phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
5206                                          phba->sli4_hba.num_present_cpu),
5207                                          GFP_KERNEL);
5208         if (!phba->sli4_hba.cpu_map) {
5209                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5210                                 "3327 Failed allocate memory for msi-x "
5211                                 "interrupt vector mapping\n");
5212                 rc = -ENOMEM;
5213                 goto out_free_msix;
5214         }
5215         if (lpfc_used_cpu == NULL) {
5216                 lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu),
5217                                          GFP_KERNEL);
5218                 if (!lpfc_used_cpu) {
5219                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5220                                         "3335 Failed allocate memory for msi-x "
5221                                         "interrupt vector mapping\n");
5222                         kfree(phba->sli4_hba.cpu_map);
5223                         rc = -ENOMEM;
5224                         goto out_free_msix;
5225                 }
5226                 for (i = 0; i < lpfc_present_cpu; i++)
5227                         lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
5228         }
5229 
5230         /* Initialize io channels for round robin */
5231         cpup = phba->sli4_hba.cpu_map;
5232         rc = 0;
5233         for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
5234                 cpup->channel_id = rc;
5235                 rc++;
5236                 if (rc >= phba->cfg_fcp_io_channel)
5237                         rc = 0;
5238         }
5239 
5240         /*
5241          * Enable sr-iov virtual functions if supported and configured
5242          * through the module parameter.
5243          */
5244         if (phba->cfg_sriov_nr_virtfn > 0) {
5245                 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
5246                                                  phba->cfg_sriov_nr_virtfn);
5247                 if (rc) {
5248                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5249                                         "3020 Requested number of SR-IOV "
5250                                         "virtual functions (%d) is not "
5251                                         "supported\n",
5252                                         phba->cfg_sriov_nr_virtfn);
5253                         phba->cfg_sriov_nr_virtfn = 0;
5254                 }
5255         }
5256 
5257         return 0;
5258 
5259 out_free_msix:
5260         kfree(phba->sli4_hba.msix_entries);
5261 out_free_fcp_eq_hdl:
5262         kfree(phba->sli4_hba.fcp_eq_hdl);
5263 out_free_fcf_rr_bmask:
5264         kfree(phba->fcf.fcf_rr_bmask);
5265 out_remove_rpi_hdrs:
5266         lpfc_sli4_remove_rpi_hdrs(phba);
5267 out_free_active_sgl:
5268         lpfc_free_active_sgl(phba);
5269 out_destroy_cq_event_pool:
5270         lpfc_sli4_cq_event_pool_destroy(phba);
5271 out_free_bsmbx:
5272         lpfc_destroy_bootstrap_mbox(phba);
5273 out_free_mem:
5274         lpfc_mem_free(phba);
5275         return rc;
5276 }
5277 
5278 /**
5279  * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
5280  * @phba: pointer to lpfc hba data structure.
5281  *
5282  * This routine is invoked to unset the driver internal resources set up
5283  * specific for supporting the SLI-4 HBA device it attached to.
5284  **/
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	/* NOTE(review): this teardown appears to mirror the SLI4 resource
	 * setup path in reverse; preserve the ordering since later frees
	 * (e.g. lpfc_mem_free_all) may depend on earlier ones completing.
	 */

	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
	kfree(phba->sli4_hba.cpu_map);
	/* Reset CPU bookkeeping so a stale map size is never trusted */
	phba->sli4_hba.num_present_cpu = 0;
	phba->sli4_hba.num_online_cpu = 0;
	phba->sli4_hba.curr_disp_cpu = 0;

	/* Free memory allocated for msi-x interrupt vector entries */
	kfree(phba->sli4_hba.msix_entries);

	/* Free memory allocated for fast-path work queue handles */
	kfree(phba->sli4_hba.fcp_eq_hdl);

	/* Free the allocated rpi headers. */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free eligible FCF index bmask */
	kfree(phba->fcf.fcf_rr_bmask);

	/* Free the ELS sgl list */
	lpfc_free_active_sgl(phba);
	lpfc_free_els_sgl_list(phba);

	/* Free the completion queue EQ event pool */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Release resource identifiers. */
	lpfc_sli4_dealloc_resource_identifiers(phba);

	/* Free the bsmbx region. */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI Layer memory with SLI4 HBAs */
	lpfc_mem_free_all(phba);

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	return;
}
5335 
5336 /**
5337  * lpfc_init_api_table_setup - Set up init api function jump table
5338  * @phba: The hba struct for which this call is being executed.
5339  * @dev_grp: The HBA PCI-Device group number.
5340  *
5341  * This routine sets up the device INIT interface API function jump table
5342  * in @phba struct.
5343  *
5344  * Returns: 0 - success, -ENODEV - failure.
5345  **/
5346 int
5347 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5348 {
5349         phba->lpfc_hba_init_link = lpfc_hba_init_link;
5350         phba->lpfc_hba_down_link = lpfc_hba_down_link;
5351         phba->lpfc_selective_reset = lpfc_selective_reset;
5352         switch (dev_grp) {
5353         case LPFC_PCI_DEV_LP:
5354                 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
5355                 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
5356                 phba->lpfc_stop_port = lpfc_stop_port_s3;
5357                 break;
5358         case LPFC_PCI_DEV_OC:
5359                 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
5360                 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
5361                 phba->lpfc_stop_port = lpfc_stop_port_s4;
5362                 break;
5363         default:
5364                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5365                                 "1431 Invalid HBA PCI-device group: 0x%x\n",
5366                                 dev_grp);
5367                 return -ENODEV;
5368                 break;
5369         }
5370         return 0;
5371 }
5372 
5373 /**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
5375  * @phba: pointer to lpfc hba data structure.
5376  *
5377  * This routine is invoked to set up the driver internal resources before the
5378  * device specific resource setup to support the HBA device it attached to.
5379  *
5380  * Return codes
5381  *      0 - successful
5382  *      other values - error
5383  **/
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	/* Main HBA lock protecting most shared driver state */
	spin_lock_init(&phba->hbalock);

	/* Initialize ndlp management spinlock */
	spin_lock_init(&phba->ndlp_lock);

	INIT_LIST_HEAD(&phba->port_list);
	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	/* Initialize the scsi buffer list used by driver for scsi IO */
	/* Separate get/put lists (each with its own lock) reduce
	 * contention between allocation and release of scsi buffers.
	 */
	spin_lock_init(&phba->scsi_buf_list_get_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
	spin_lock_init(&phba->scsi_buf_list_put_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	/* Initialize OAS configuration list */
	spin_lock_init(&phba->devicelock);
	INIT_LIST_HEAD(&phba->luns);

	/* Pure initialization; cannot fail */
	return 0;
}
5424 
5425 /**
5426  * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
5427  * @phba: pointer to lpfc hba data structure.
5428  *
5429  * This routine is invoked to set up the driver internal resources after the
5430  * device specific resource setup to support the HBA device it attached to.
5431  *
5432  * Return codes
5433  *      0 - successful
5434  *      other values - error
5435  **/
5436 static int
5437 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
5438 {
5439         int error;
5440 
5441         /* Startup the kernel thread for this host adapter. */
5442         phba->worker_thread = kthread_run(lpfc_do_work, phba,
5443                                           "lpfc_worker_%d", phba->brd_no);
5444         if (IS_ERR(phba->worker_thread)) {
5445                 error = PTR_ERR(phba->worker_thread);
5446                 return error;
5447         }
5448 
5449         return 0;
5450 }
5451 
5452 /**
5453  * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
5454  * @phba: pointer to lpfc hba data structure.
5455  *
5456  * This routine is invoked to unset the driver internal resources set up after
5457  * the device specific resource setup for supporting the HBA device it
5458  * attached to.
5459  **/
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	/* Stop kernel worker thread; kthread_stop() blocks until the
	 * thread function (lpfc_do_work) has returned.
	 */
	kthread_stop(phba->worker_thread);
}
5466 
5467 /**
5468  * lpfc_free_iocb_list - Free iocb list.
5469  * @phba: pointer to lpfc hba data structure.
5470  *
5471  * This routine is invoked to free the driver's IOCB list and memory.
5472  **/
5473 static void
5474 lpfc_free_iocb_list(struct lpfc_hba *phba)
5475 {
5476         struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
5477 
5478         spin_lock_irq(&phba->hbalock);
5479         list_for_each_entry_safe(iocbq_entry, iocbq_next,
5480                                  &phba->lpfc_iocb_list, list) {
5481                 list_del(&iocbq_entry->list);
5482                 kfree(iocbq_entry);
5483                 phba->total_iocbq_bufs--;
5484         }
5485         spin_unlock_irq(&phba->hbalock);
5486 
5487         return;
5488 }
5489 
5490 /**
5491  * lpfc_init_iocb_list - Allocate and initialize iocb list.
5492  * @phba: pointer to lpfc hba data structure.
5493  *
 * This routine is invoked to allocate and initialize the driver's IOCB
5495  * list and set up the IOCB tag array accordingly.
5496  *
5497  * Return codes
5498  *      0 - successful
5499  *      other values - error
5500  **/
5501 static int
5502 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
5503 {
5504         struct lpfc_iocbq *iocbq_entry = NULL;
5505         uint16_t iotag;
5506         int i;
5507 
5508         /* Initialize and populate the iocb list per host.  */
5509         INIT_LIST_HEAD(&phba->lpfc_iocb_list);
5510         for (i = 0; i < iocb_count; i++) {
5511                 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
5512                 if (iocbq_entry == NULL) {
5513                         printk(KERN_ERR "%s: only allocated %d iocbs of "
5514                                 "expected %d count. Unloading driver.\n",
5515                                 __func__, i, LPFC_IOCB_LIST_CNT);
5516                         goto out_free_iocbq;
5517                 }
5518 
5519                 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
5520                 if (iotag == 0) {
5521                         kfree(iocbq_entry);
5522                         printk(KERN_ERR "%s: failed to allocate IOTAG. "
5523                                 "Unloading driver.\n", __func__);
5524                         goto out_free_iocbq;
5525                 }
5526                 iocbq_entry->sli4_lxritag = NO_XRI;
5527                 iocbq_entry->sli4_xritag = NO_XRI;
5528 
5529                 spin_lock_irq(&phba->hbalock);
5530                 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
5531                 phba->total_iocbq_bufs++;
5532                 spin_unlock_irq(&phba->hbalock);
5533         }
5534 
5535         return 0;
5536 
5537 out_free_iocbq:
5538         lpfc_free_iocb_list(phba);
5539 
5540         return -ENOMEM;
5541 }
5542 
5543 /**
5544  * lpfc_free_sgl_list - Free a given sgl list.
5545  * @phba: pointer to lpfc hba data structure.
5546  * @sglq_list: pointer to the head of sgl list.
5547  *
 * This routine is invoked to free a given sgl list and memory.
5549  **/
5550 void
5551 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
5552 {
5553         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
5554 
5555         list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
5556                 list_del(&sglq_entry->list);
5557                 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
5558                 kfree(sglq_entry);
5559         }
5560 }
5561 
5562 /**
5563  * lpfc_free_els_sgl_list - Free els sgl list.
5564  * @phba: pointer to lpfc hba data structure.
5565  *
5566  * This routine is invoked to free the driver's els sgl list and memory.
5567  **/
5568 static void
5569 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
5570 {
5571         LIST_HEAD(sglq_list);
5572 
5573         /* Retrieve all els sgls from driver list */
5574         spin_lock_irq(&phba->hbalock);
5575         list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
5576         spin_unlock_irq(&phba->hbalock);
5577 
5578         /* Now free the sgl list */
5579         lpfc_free_sgl_list(phba, &sglq_list);
5580 }
5581 
5582 /**
5583  * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
5584  * @phba: pointer to lpfc hba data structure.
5585  *
5586  * This routine is invoked to allocate the driver's active sgl memory.
5587  * This array will hold the sglq_entry's for active IOs.
5588  **/
5589 static int
5590 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
5591 {
5592         int size;
5593         size = sizeof(struct lpfc_sglq *);
5594         size *= phba->sli4_hba.max_cfg_param.max_xri;
5595 
5596         phba->sli4_hba.lpfc_sglq_active_list =
5597                 kzalloc(size, GFP_KERNEL);
5598         if (!phba->sli4_hba.lpfc_sglq_active_list)
5599                 return -ENOMEM;
5600         return 0;
5601 }
5602 
5603 /**
5604  * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
5605  * @phba: pointer to lpfc hba data structure.
5606  *
5607  * This routine is invoked to walk through the array of active sglq entries
5608  * and free all of the resources.
5609  * This is just a place holder for now.
5610  **/
5611 static void
5612 lpfc_free_active_sgl(struct lpfc_hba *phba)
5613 {
5614         kfree(phba->sli4_hba.lpfc_sglq_active_list);
5615 }
5616 
5617 /**
5618  * lpfc_init_sgl_list - Allocate and initialize sgl list.
5619  * @phba: pointer to lpfc hba data structure.
5620  *
 * This routine is invoked to allocate and initialize the driver's sgl
5622  * list and set up the sgl xritag tag array accordingly.
5623  *
5624  **/
static void
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	/* els xri-sgl book keeping */
	phba->sli4_hba.els_xri_cnt = 0;

	/* scsi xri-buffer book keeping */
	phba->sli4_hba.scsi_xri_cnt = 0;
}
5638 
5639 /**
5640  * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
5641  * @phba: pointer to lpfc hba data structure.
5642  *
5643  * This routine is invoked to post rpi header templates to the
5644  * port for those SLI4 ports that do not support extents.  This routine
5645  * posts a PAGE_SIZE memory region to the port to hold up to
5646  * PAGE_SIZE modulo 64 rpi context headers.  This is an initialization routine
5647  * and should be called only when interrupts are disabled.
5648  *
5649  * Return codes
5650  *      0 - successful
5651  *      -ERROR - otherwise.
5652  **/
5653 int
5654 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
5655 {
5656         int rc = 0;
5657         struct lpfc_rpi_hdr *rpi_hdr;
5658 
5659         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
5660         if (!phba->sli4_hba.rpi_hdrs_in_use)
5661                 return rc;
5662         if (phba->sli4_hba.extents_in_use)
5663                 return -EIO;
5664 
5665         rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
5666         if (!rpi_hdr) {
5667                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5668                                 "0391 Error during rpi post operation\n");
5669                 lpfc_sli4_remove_rpis(phba);
5670                 rc = -ENODEV;
5671         }
5672 
5673         return rc;
5674 }
5675 
5676 /**
5677  * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
5678  * @phba: pointer to lpfc hba data structure.
5679  *
5680  * This routine is invoked to allocate a single 4KB memory region to
5681  * support rpis and stores them in the phba.  This single region
5682  * provides support for up to 64 rpis.  The region is used globally
5683  * by the device.
5684  *
5685  * Returns:
5686  *   A valid rpi hdr on success.
5687  *   A NULL pointer on any failure.
5688  **/
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;
	uint32_t rpi_count;

	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required.  Set the expected maximum count and let the actual value
	 * get set when extents are fully allocated.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;

	/* The limit on the logical index is just the max_rpi count. */
	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
	phba->sli4_hba.max_cfg_param.max_rpi - 1;

	spin_lock_irq(&phba->hbalock);
	/*
	 * Establish the starting RPI in this header block.  The starting
	 * rpi is normalized to a zero base because the physical rpi is
	 * port based.
	 */
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * The port has a limited number of rpis. The increment here
	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
	 * and to allow the full max_rpi range per port.
	 */
	/* NOTE(review): rpi_count is derived from the next_rpi value
	 * sampled above, but next_rpi is only advanced (under hbalock)
	 * near the end of this function.  Two concurrent callers could
	 * compute overlapping ranges -- confirm callers serialize this
	 * path (comments elsewhere say interrupts are disabled here).
	 */
	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
		rpi_count = rpi_limit - curr_rpi_range;
	else
		rpi_count = LPFC_RPI_HDR_COUNT;

	/* No rpis left to describe: nothing to post. */
	if (!rpi_count)
		return NULL;
	/*
	 * First allocate the protocol header region for the port.  The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  LPFC_HDR_TEMPLATE_SIZE,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
	/* Bail out if the DMA region is not naturally aligned to its size;
	 * the port requires a 4K-aligned template region.
	 */
	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;
	spin_lock_irq(&phba->hbalock);

	/* The rpi_hdr stores the logical index only. */
	rpi_hdr->start_rpi = curr_rpi_range;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	/*
	 * The next_rpi stores the next logical module-64 rpi value used
	 * to post physical rpis in subsequent rpi postings.
	 */
	phba->sli4_hba.next_rpi += rpi_count;
	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

	/* Error unwind: release resources in reverse acquisition order */
 err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
 err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}
5784 
5785 /**
5786  * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
5787  * @phba: pointer to lpfc hba data structure.
5788  *
5789  * This routine is invoked to remove all memory resources allocated
5790  * to support rpis for SLI4 ports not supporting extents. This routine
5791  * presumes the caller has released all rpis consumed by fabric or port
5792  * logins and is prepared to have the header pages removed.
5793  **/
5794 void
5795 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
5796 {
5797         struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
5798 
5799         if (!phba->sli4_hba.rpi_hdrs_in_use)
5800                 goto exit;
5801 
5802         list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
5803                                  &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
5804                 list_del(&rpi_hdr->list);
5805                 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
5806                                   rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
5807                 kfree(rpi_hdr->dmabuf);
5808                 kfree(rpi_hdr);
5809         }
5810  exit:
5811         /* There are no rpis available to the port now. */
5812         phba->sli4_hba.next_rpi = 0;
5813 }
5814 
5815 /**
5816  * lpfc_hba_alloc - Allocate driver hba data structure for a device.
5817  * @pdev: pointer to pci device data structure.
5818  *
5819  * This routine is invoked to allocate the driver hba data structure for an
5820  * HBA device. If the allocation is successful, the phba reference to the
5821  * PCI device data structure is set.
5822  *
5823  * Return codes
5824  *      pointer to @phba - successful
5825  *      NULL - error
5826  **/
5827 static struct lpfc_hba *
5828 lpfc_hba_alloc(struct pci_dev *pdev)
5829 {
5830         struct lpfc_hba *phba;
5831 
5832         /* Allocate memory for HBA structure */
5833         phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
5834         if (!phba) {
5835                 dev_err(&pdev->dev, "failed to allocate hba struct\n");
5836                 return NULL;
5837         }
5838 
5839         /* Set reference to PCI device in HBA structure */
5840         phba->pcidev = pdev;
5841 
5842         /* Assign an unused board number */
5843         phba->brd_no = lpfc_get_instance();
5844         if (phba->brd_no < 0) {
5845                 kfree(phba);
5846                 return NULL;
5847         }
5848 
5849         spin_lock_init(&phba->ct_ev_lock);
5850         INIT_LIST_HEAD(&phba->ct_ev_waiters);
5851 
5852         return phba;
5853 }
5854 
5855 /**
5856  * lpfc_hba_free - Free driver hba data structure with a device.
5857  * @phba: pointer to lpfc hba data structure.
5858  *
5859  * This routine is invoked to free the driver hba data structure with an
5860  * HBA device.
5861  **/
5862 static void
5863 lpfc_hba_free(struct lpfc_hba *phba)
5864 {
5865         /* Release the driver assigned board number */
5866         idr_remove(&lpfc_hba_index, phba->brd_no);
5867 
5868         /* Free memory allocated with sli rings */
5869         kfree(phba->sli.ring);
5870         phba->sli.ring = NULL;
5871 
5872         kfree(phba);
5873         return;
5874 }
5875 
5876 /**
5877  * lpfc_create_shost - Create hba physical port with associated scsi host.
5878  * @phba: pointer to lpfc hba data structure.
5879  *
5880  * This routine is invoked to create HBA physical port and associate a SCSI
5881  * host with it.
5882  *
5883  * Return codes
5884  *      0 - successful
5885  *      other values - error
5886  **/
5887 static int
5888 lpfc_create_shost(struct lpfc_hba *phba)
5889 {
5890         struct lpfc_vport *vport;
5891         struct Scsi_Host  *shost;
5892 
5893         /* Initialize HBA FC structure */
5894         phba->fc_edtov = FF_DEF_EDTOV;
5895         phba->fc_ratov = FF_DEF_RATOV;
5896         phba->fc_altov = FF_DEF_ALTOV;
5897         phba->fc_arbtov = FF_DEF_ARBTOV;
5898 
5899         atomic_set(&phba->sdev_cnt, 0);
5900         vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
5901         if (!vport)
5902                 return -ENODEV;
5903 
5904         shost = lpfc_shost_from_vport(vport);
5905         phba->pport = vport;
5906         lpfc_debugfs_initialize(vport);
5907         /* Put reference to SCSI host to driver's device private data */
5908         pci_set_drvdata(phba->pcidev, shost);
5909 
5910         return 0;
5911 }
5912 
5913 /**
5914  * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
5915  * @phba: pointer to lpfc hba data structure.
5916  *
5917  * This routine is invoked to destroy HBA physical port and the associated
5918  * SCSI host.
5919  **/
5920 static void
5921 lpfc_destroy_shost(struct lpfc_hba *phba)
5922 {
5923         struct lpfc_vport *vport = phba->pport;
5924 
5925         /* Destroy physical port that associated with the SCSI host */
5926         destroy_port(vport);
5927 
5928         return;
5929 }
5930 
5931 /**
5932  * lpfc_setup_bg - Setup Block guard structures and debug areas.
5933  * @phba: pointer to lpfc hba data structure.
5934  * @shost: the shost to be used to detect Block guard settings.
5935  *
5936  * This routine sets up the local Block guard protocol settings for @shost.
5937  * This routine also allocates memory for debugging bg buffers.
5938  **/
5939 static void
5940 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
5941 {
5942         uint32_t old_mask;
5943         uint32_t old_guard;
5944 
5945         int pagecnt = 10;
5946         if (lpfc_prot_mask && lpfc_prot_guard) {
5947                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5948                                 "1478 Registering BlockGuard with the "
5949                                 "SCSI layer\n");
5950 
5951                 old_mask = lpfc_prot_mask;
5952                 old_guard = lpfc_prot_guard;
5953 
5954                 /* Only allow supported values */
5955                 lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
5956                         SHOST_DIX_TYPE0_PROTECTION |
5957                         SHOST_DIX_TYPE1_PROTECTION);
5958                 lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);
5959 
5960                 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
5961                 if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
5962                         lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
5963 
5964                 if (lpfc_prot_mask && lpfc_prot_guard) {
5965                         if ((old_mask != lpfc_prot_mask) ||
5966                                 (old_guard != lpfc_prot_guard))
5967                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5968                                         "1475 Registering BlockGuard with the "
5969                                         "SCSI layer: mask %d  guard %d\n",
5970                                         lpfc_prot_mask, lpfc_prot_guard);
5971 
5972                         scsi_host_set_prot(shost, lpfc_prot_mask);
5973                         scsi_host_set_guard(shost, lpfc_prot_guard);
5974                 } else
5975                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5976                                 "1479 Not Registering BlockGuard with the SCSI "
5977                                 "layer, Bad protection parameters: %d %d\n",
5978                                 old_mask, old_guard);
5979         }
5980 
5981         if (!_dump_buf_data) {
5982                 while (pagecnt) {
5983                         spin_lock_init(&_dump_buf_lock);
5984                         _dump_buf_data =
5985                                 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5986                         if (_dump_buf_data) {
5987                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5988                                         "9043 BLKGRD: allocated %d pages for "
5989                                        "_dump_buf_data at 0x%p\n",
5990                                        (1 << pagecnt), _dump_buf_data);
5991                                 _dump_buf_data_order = pagecnt;
5992                                 memset(_dump_buf_data, 0,
5993                                        ((1 << PAGE_SHIFT) << pagecnt));
5994                                 break;
5995                         } else
5996                                 --pagecnt;
5997                 }
5998                 if (!_dump_buf_data_order)
5999                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6000                                 "9044 BLKGRD: ERROR unable to allocate "
6001                                "memory for hexdump\n");
6002         } else
6003                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6004                         "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
6005                        "\n", _dump_buf_data);
6006         if (!_dump_buf_dif) {
6007                 while (pagecnt) {
6008                         _dump_buf_dif =
6009                                 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
6010                         if (_dump_buf_dif) {
6011                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6012                                         "9046 BLKGRD: allocated %d pages for "
6013                                        "_dump_buf_dif at 0x%p\n",
6014                                        (1 << pagecnt), _dump_buf_dif);
6015                                 _dump_buf_dif_order = pagecnt;
6016                                 memset(_dump_buf_dif, 0,
6017                                        ((1 << PAGE_SHIFT) << pagecnt));
6018                                 break;
6019                         } else
6020                                 --pagecnt;
6021                 }
6022                 if (!_dump_buf_dif_order)
6023                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6024                         "9047 BLKGRD: ERROR unable to allocate "
6025                                "memory for hexdump\n");
6026         } else
6027                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6028                         "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
6029                        _dump_buf_dif);
6030 }
6031 
/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to perform all the necessary post initialization
 * setup for the device: it refreshes the model name/description, adjusts
 * the SCSI host queue depth to the configured HBA depth, sets up
 * BlockGuard when enabled, initializes host attributes, optionally starts
 * the FCP-ring poll timer, and posts an adapter-arrival event to the
 * FC transport.
 **/
static void
lpfc_post_init_setup(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.
	 */
	shost = pci_get_drvdata(phba->pcidev);
	/* Reserve 10 command slots for internal driver use. */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
		lpfc_setup_bg(phba, shost);

	lpfc_host_attrib_init(shost);

	/* If FCP-ring interrupts are disabled, drive completions by timer. */
	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0428 Perform SCSI scan\n");
	/* Send board arrival event to upper layer */
	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
	return;
}
6076 
/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-3 interface spec: it programs the DMA mask, maps BAR0 (SLIM)
 * and BAR2 (control registers), allocates the coherent SLI-2 slim and HBQ
 * regions, and computes the register/structure pointers derived from those
 * mappings.  Resources are released in reverse order via the goto-unwind
 * labels on any failure.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error = -ENODEV;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size: prefer 64-bit, fall back to 32-bit */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
					       SLI2_SLIM_SIZE,
					       &phba->slim2p.phys,
					       GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
	/* Carve the slim2 region into its mailbox/PCB/IOCB sub-areas. */
	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->mbox_ext = (phba->slim2p.virt +
		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	/* Partition the HBQ slim region among the defined HBQs. */
	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	INIT_LIST_HEAD(&phba->rb_pend_list);

	/* Derive the host-visible register addresses from the mappings. */
	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

	/* Error unwind: release resources in reverse order of acquisition. */
out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}
6193 
6194 /**
6195  * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
6196  * @phba: pointer to lpfc hba data structure.
6197  *
6198  * This routine is invoked to unset the PCI device memory space for device
6199  * with SLI-3 interface spec.
6200  **/
6201 static void
6202 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
6203 {
6204         struct pci_dev *pdev;
6205 
6206         /* Obtain PCI device reference */
6207         if (!phba->pcidev)
6208                 return;
6209         else
6210                 pdev = phba->pcidev;
6211 
6212         /* Free coherent DMA memory allocated */
6213         dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
6214                           phba->hbqslimp.virt, phba->hbqslimp.phys);
6215         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6216                           phba->slim2p.virt, phba->slim2p.phys);
6217 
6218         /* I/O memory unmap */
6219         iounmap(phba->ctrl_regs_memmap_p);
6220         iounmap(phba->slim_memmap_p);
6221 
6222         return;
6223 }
6224 
/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.  It polls the port semaphore register for up to
 * 30 seconds, then performs interface-type-specific error-register checks
 * to confirm the port came up cleanly.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	/* Cannot poll without a mapped port semaphore register. */
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
	/* 3000 iterations x 10ms sleep = 30 second timeout. */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr_reg.word0) ||
			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}

	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid.  Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process.  Fail the load if the port did not come up
		 * correctly.
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			/* IF_TYPE_0: check masked unrecoverable-error regs. */
			phba->sli4_hba.ue_mask_lo =
			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
				readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			/* An unmasked error bit set means the port is dead. */
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
			/* Final checks.  The port status should be clean. */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				&reg_data.word0) ||
				(bf_get(lpfc_sliport_status_err, &reg_data) &&
				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2888 Unrecoverable port error "
					"following POST: port status reg "
					"0x%x, port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			/* No additional POST checks for other types. */
			break;
		}
	}
	return port_error;
}
6361 
/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type:  The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.  Each supported interface type exposes a different register
 * layout at fixed offsets from the mapped BAR0 config region.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		/* IF_TYPE_0 exposes the unrecoverable-error status/mask
		 * registers directly in the config space window.
		 */
		phba->sli4_hba.u.if_type0.UERRLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
		phba->sli4_hba.u.if_type0.UERRHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		/* IF_TYPE_2 exposes port control/status/error registers
		 * plus the doorbells and bootstrap mailbox in BAR0.
		 */
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.RQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_WQ_DOORBELL;
		phba->sli4_hba.EQCQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}
6425 
6426 /**
6427  * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
6428  * @phba: pointer to lpfc hba data structure.
6429  *
6430  * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
6431  * memory map.
6432  **/
6433 static void
6434 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
6435 {
6436         phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6437                 LPFC_SLIPORT_IF0_SMPHR;
6438         phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6439                 LPFC_HST_ISR0;
6440         phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6441                 LPFC_HST_IMR0;
6442         phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6443                 LPFC_HST_ISCR0;
6444 }
6445 
6446 /**
6447  * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
6448  * @phba: pointer to lpfc hba data structure.
6449  * @vf: virtual function number
6450  *
6451  * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
6452  * based on the given viftual function number, @vf.
6453  *
6454  * Return 0 if successful, otherwise -ENODEV.
6455  **/
6456 static int
6457 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
6458 {
6459         if (vf > LPFC_VIR_FUNC_MAX)
6460                 return -ENODEV;
6461 
6462         phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6463                                 vf * LPFC_VFR_PAGE_SIZE +
6464                                         LPFC_ULP0_RQ_DOORBELL);
6465         phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6466                                 vf * LPFC_VFR_PAGE_SIZE +
6467                                         LPFC_ULP0_WQ_DOORBELL);
6468         phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6469                                 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
6470         phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6471                                 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
6472         phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6473                                 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
6474         return 0;
6475 }
6476 
/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec.  This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs.  No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - could not allocated memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.  Over-allocate
	 * by 15 bytes so the start can be rounded up to alignment.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  bmbx_size,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}
	memset(dmabuf->virt, 0, bmbx_size);

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later.  The mailbox dma address is required
	 * to be 16-byte aligned.  Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	/* Round both the virtual and physical addresses up to 16 bytes. */
	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					      LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now.  The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses.  The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}
6554 
6555 /**
6556  * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
6557  * @phba: pointer to lpfc hba data structure.
6558  *
6559  * This routine is invoked to teardown the bootstrap mailbox
6560  * region and release all host resources. This routine requires
6561  * the caller to ensure all mailbox commands recovered, no
6562  * additional mailbox comands are sent, and interrupts are disabled
6563  * before calling this routine.
6564  *
6565  **/
6566 static void
6567 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
6568 {
6569         dma_free_coherent(&phba->pcidev->dev,
6570                           phba->sli4_hba.bmbx.bmbx_size,
6571                           phba->sli4_hba.bmbx.dmabuf->virt,
6572                           phba->sli4_hba.bmbx.dmabuf->phys);
6573 
6574         kfree(phba->sli4_hba.bmbx.dmabuf);
6575         memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
6576 }
6577 
/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPI's, XRI's, VPI's, VFI's and FCFIs. These values also affect the
 * resource allocation for the port.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	union  lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_mbx_get_func_cfg *get_func_cfg;
	struct lpfc_rsrc_desc_fcfcoe *desc;
	char *pdesc_0;
	uint32_t desc_count;
	int length, i, rc = 0, rc2;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	/* Issue READ_CONFIG in polled mode and decode the reply on success */
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2012 Mailbox failed , mbxCmd x%x "
			"READ_CONFIG, mbxStatus x%x\n",
			bf_get(lpfc_mqe_command, &pmb->u.mqe),
			bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		rd_config = &pmb->u.mqe.un.rd_config;
		/* Link data valid (ldv): trust the reported link type/number */
		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
			phba->sli4_hba.lnk_info.lnk_tp =
				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
			phba->sli4_hba.lnk_info.lnk_no =
				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"3081 lnk_type:%d, lnk_numb:%d\n",
					phba->sli4_hba.lnk_info.lnk_tp,
					phba->sli4_hba.lnk_info.lnk_no);
		} else
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3082 Mailbox (x%x) returned ldv:x0\n",
					bf_get(lpfc_mqe_command, &pmb->u.mqe));
		/* Cache per-resource base/count limits reported by the port */
		phba->sli4_hba.extents_in_use =
			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		/* max_vpi is reported as a count; store as highest index */
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI(Count:%d)\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi);
	}

	if (rc)
		goto read_cfg_out;

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	length = phba->sli4_hba.max_cfg_param.max_xri -
			lpfc_sli4_get_els_iocb_cnt(phba);
	if (phba->cfg_hba_queue_depth > length) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3361 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, length);
		phba->cfg_hba_queue_depth = length;
	}

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* get the pf# and vf# for SLI4 if_type 2 port */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* NOTE: a GET_FUNCTION_CONFIG failure is logged but intentionally
	 * does not fail this routine; rc still carries the READ_CONFIG
	 * result (0 at this point).
	 */
	if (rc2 || shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3026 Mailbox failed , mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		goto read_cfg_out;
	}

	/* search for fc_fcoe resource descriptor */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
	desc_count = get_func_cfg->func_cfg.rsrc_desc_count;

	/* Descriptor stride: V0 descriptors report a reserved length value
	 * and use the fixed V0 size; anything other than the V1 length is
	 * unrecognized and the scan is abandoned.
	 */
	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
		goto read_cfg_out;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resrouce Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
6770 
6771 /**
6772  * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
6773  * @phba: pointer to lpfc hba data structure.
6774  *
6775  * This routine is invoked to setup the port-side endian order when
6776  * the port if_type is 0.  This routine has no function for other
6777  * if_types.
6778  *
6779  * Return codes
6780  *      0 - successful
6781  *      -ENOMEM - No available memory
6782  *      -EIO - The mailbox failed to complete successfully.
6783  **/
6784 static int
6785 lpfc_setup_endian_order(struct lpfc_hba *phba)
6786 {
6787         LPFC_MBOXQ_t *mboxq;
6788         uint32_t if_type, rc = 0;
6789         uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
6790                                       HOST_ENDIAN_HIGH_WORD1};
6791 
6792         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6793         switch (if_type) {
6794         case LPFC_SLI_INTF_IF_TYPE_0:
6795                 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6796                                                        GFP_KERNEL);
6797                 if (!mboxq) {
6798                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6799                                         "0492 Unable to allocate memory for "
6800                                         "issuing SLI_CONFIG_SPECIAL mailbox "
6801                                         "command\n");
6802                         return -ENOMEM;
6803                 }
6804 
6805                 /*
6806                  * The SLI4_CONFIG_SPECIAL mailbox command requires the first
6807                  * two words to contain special data values and no other data.
6808                  */
6809                 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
6810                 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
6811                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6812                 if (rc != MBX_SUCCESS) {
6813                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6814                                         "0493 SLI_CONFIG_SPECIAL mailbox "
6815                                         "failed with status x%x\n",
6816                                         rc);
6817                         rc = -EIO;
6818                 }
6819                 mempool_free(mboxq, phba->mbox_mem_pool);
6820                 break;
6821         case LPFC_SLI_INTF_IF_TYPE_2:
6822         case LPFC_SLI_INTF_IF_TYPE_1:
6823         default:
6824                 break;
6825         }
6826         return rc;
6827 }
6828 
6829 /**
6830  * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
6831  * @phba: pointer to lpfc hba data structure.
6832  *
6833  * This routine is invoked to check the user settable queue counts for EQs and
6834  * CQs. after this routine is called the counts will be set to valid values that
6835  * adhere to the constraints of the system's interrupt vectors and the port's
6836  * queue resources.
6837  *
6838  * Return codes
6839  *      0 - successful
6840  *      -ENOMEM - No available memory
6841  **/
6842 static int
6843 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6844 {
6845         int cfg_fcp_io_channel;
6846         uint32_t cpu;
6847         uint32_t i = 0;
6848         int fof_vectors = phba->cfg_fof ? 1 : 0;
6849 
6850         /*
6851          * Sanity check for configured queue parameters against the run-time
6852          * device parameters
6853          */
6854 
6855         /* Sanity check on HBA EQ parameters */
6856         cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
6857 
6858         /* It doesn't make sense to have more io channels then online CPUs */
6859         for_each_present_cpu(cpu) {
6860                 if (cpu_online(cpu))
6861                         i++;
6862         }
6863         phba->sli4_hba.num_online_cpu = i;
6864         phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
6865         phba->sli4_hba.curr_disp_cpu = 0;
6866 
6867         if (i < cfg_fcp_io_channel) {
6868                 lpfc_printf_log(phba,
6869                                 KERN_ERR, LOG_INIT,
6870                                 "3188 Reducing IO channels to match number of "
6871                                 "online CPUs: from %d to %d\n",
6872                                 cfg_fcp_io_channel, i);
6873                 cfg_fcp_io_channel = i;
6874         }
6875 
6876         if (cfg_fcp_io_channel + fof_vectors >
6877             phba->sli4_hba.max_cfg_param.max_eq) {
6878                 if (phba->sli4_hba.max_cfg_param.max_eq <
6879                     LPFC_FCP_IO_CHAN_MIN) {
6880                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6881                                         "2574 Not enough EQs (%d) from the "
6882                                         "pci function for supporting FCP "
6883                                         "EQs (%d)\n",
6884                                         phba->sli4_hba.max_cfg_param.max_eq,
6885                                         phba->cfg_fcp_io_channel);
6886                         goto out_error;
6887                 }
6888                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6889                                 "2575 Reducing IO channels to match number of "
6890                                 "available EQs: from %d to %d\n",
6891                                 cfg_fcp_io_channel,
6892                                 phba->sli4_hba.max_cfg_param.max_eq);
6893                 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
6894                         fof_vectors;
6895         }
6896 
6897         /* The actual number of FCP event queues adopted */
6898         phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
6899 
6900         /* Get EQ depth from module parameter, fake the default for now */
6901         phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
6902         phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
6903 
6904         /* Get CQ depth from module parameter, fake the default for now */
6905         phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
6906         phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
6907 
6908         return 0;
6909 out_error:
6910         return -ENOMEM;
6911 }
6912 
6913 /**
6914  * lpfc_sli4_queue_create - Create all the SLI4 queues
6915  * @phba: pointer to lpfc hba data structure.
6916  *
6917  * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
6918  * operation. For each SLI4 queue type, the parameters such as queue entry
6919  * count (queue depth) shall be taken from the module parameter. For now,
6920  * we just use some constant number as place holder.
6921  *
6922  * Return codes
6923  *      0 - successful
6924  *      -ENOMEM - No availble memory
6925  *      -EIO - The mailbox failed to complete successfully.
6926  **/
6927 int
6928 lpfc_sli4_queue_create(struct lpfc_hba *phba)
6929 {
6930         struct lpfc_queue *qdesc;
6931         int idx;
6932 
6933         /*
6934          * Create HBA Record arrays.
6935          */
6936         if (!phba->cfg_fcp_io_channel)
6937                 return -ERANGE;
6938 
6939         phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
6940         phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
6941         phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
6942         phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
6943         phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6944         phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6945 
6946         phba->sli4_hba.hba_eq =  kzalloc((sizeof(struct lpfc_queue *) *
6947                                 phba->cfg_fcp_io_channel), GFP_KERNEL);
6948         if (!phba->sli4_hba.hba_eq) {
6949                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6950                         "2576 Failed allocate memory for "
6951                         "fast-path EQ record array\n");
6952                 goto out_error;
6953         }
6954 
6955         phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
6956                                 phba->cfg_fcp_io_channel), GFP_KERNEL);
6957         if (!phba->sli4_hba.fcp_cq) {
6958                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6959                                 "2577 Failed allocate memory for fast-path "
6960                                 "CQ record array\n");
6961                 goto out_error;
6962         }
6963 
6964         phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
6965                                 phba->cfg_fcp_io_channel), GFP_KERNEL);
6966         if (!phba->sli4_hba.fcp_wq) {
6967                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6968                                 "2578 Failed allocate memory for fast-path "
6969                                 "WQ record array\n");
6970                 goto out_error;
6971         }
6972 
6973         /*
6974          * Since the first EQ can have multiple CQs associated with it,
6975          * this array is used to quickly see if we have a FCP fast-path
6976          * CQ match.
6977          */
6978         phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
6979                                          phba->cfg_fcp_io_channel), GFP_KERNEL);
6980         if (!phba->sli4_hba.fcp_cq_map) {
6981                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6982                                 "2545 Failed allocate memory for fast-path "
6983                                 "CQ map\n");
6984                 goto out_error;
6985         }
6986 
6987         /*
6988          * Create HBA Event Queues (EQs).  The cfg_fcp_io_channel specifies
6989          * how many EQs to create.
6990          */
6991         for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6992 
6993                 /* Create EQs */
6994                 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6995                                               phba->sli4_hba.eq_ecount);
6996                 if (!qdesc) {
6997                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6998                                         "0497 Failed allocate EQ (%d)\n", idx);
6999                         goto out_error;
7000                 }
7001                 phba->sli4_hba.hba_eq[idx] = qdesc;
7002 
7003                 /* Create Fast Path FCP CQs */
7004                 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
7005                                               phba->sli4_hba.cq_ecount);
7006                 if (!qdesc) {
7007                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7008                                         "0499 Failed allocate fast-path FCP "
7009                                         "CQ (%d)\n", idx);
7010                         goto out_error;
7011                 }
7012                 phba->sli4_hba.fcp_cq[idx] = qdesc;
7013 
7014                 /* Create Fast Path FCP WQs */
7015                 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
7016                                               phba->sli4_hba.wq_ecount);
7017                 if (!qdesc) {
7018                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7019                                         "0503 Failed allocate fast-path FCP "
7020                                         "WQ (%d)\n", idx);
7021                         goto out_error;
7022                 }
7023                 phba->sli4_hba.fcp_wq[idx] = qdesc;
7024         }
7025 
7026 
7027         /*
7028          * Create Slow Path Completion Queues (CQs)
7029          */
7030 
7031         /* Create slow-path Mailbox Command Complete Queue */
7032         qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
7033                                       phba->sli4_hba.cq_ecount);
7034         if (!qdesc) {
7035                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7036                                 "0500 Failed allocate slow-path mailbox CQ\n");
7037                 goto out_error;
7038         }
7039         phba->sli4_hba.mbx_cq = qdesc;
7040 
7041         /* Create slow-path ELS Complete Queue */
7042         qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
7043                                       phba->sli4_hba.cq_ecount);
7044         if (!qdesc) {
7045                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7046                                 "0501 Failed allocate slow-path ELS CQ\n");
7047                 goto out_error;
7048         }
7049         phba->sli4_hba.els_cq = qdesc;
7050 
7051 
7052         /*
7053          * Create Slow Path Work Queues (WQs)
7054          */
7055 
7056         /* Create Mailbox Command Queue */
7057 
7058         qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
7059                                       phba->sli4_hba.mq_ecount);
7060         if (!qdesc) {
7061                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7062                                 "0505 Failed allocate slow-path MQ\n");
7063                 goto out_error;
7064         }
7065         phba->sli4_hba.mbx_wq = qdesc;
7066 
7067         /*
7068          * Create ELS Work Queues
7069          */
7070 
7071         /* Create slow-path ELS Work Queue */
7072         qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
7073                                       phba->sli4_hba.wq_ecount);
7074         if (!qdesc) {
7075                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7076                                 "0504 Failed allocate slow-path ELS WQ\n");
7077                 goto out_error;
7078         }
7079         phba->sli4_hba.els_wq = qdesc;
7080 
7081         /*
7082          * Create Receive Queue (RQ)
7083          */
7084 
7085         /* Create Receive Queue for header */
7086         qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
7087                                       phba->sli4_hba.rq_ecount);
7088         if (!qdesc) {
7089                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7090                                 "0506 Failed allocate receive HRQ\n");
7091                 goto out_error;
7092         }
7093         phba->sli4_hba.hdr_rq = qdesc;
7094 
7095         /* Create Receive Queue for data */
7096         qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
7097                                       phba->sli4_hba.rq_ecount);
7098         if (!qdesc) {
7099                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7100                                 "0507 Failed allocate receive DRQ\n");
7101                 goto out_error;
7102         }
7103         phba->sli4_hba.dat_rq = qdesc;
7104 
7105         /* Create the Queues needed for Flash Optimized Fabric operations */
7106         if (phba->cfg_fof)
7107                 lpfc_fof_queue_create(phba);
7108         return 0;
7109 
7110 out_error:
7111         lpfc_sli4_queue_destroy(phba);
7112         return -ENOMEM;
7113 }
7114 
7115 /**
7116  * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
7117  * @phba: pointer to lpfc hba data structure.
7118  *
7119  * This routine is invoked to release all the SLI4 queues with the FCoE HBA
7120  * operation.
7121  *
7122  * Return codes
7123  *      0 - successful
7124  *      -ENOMEM - No available memory
7125  *      -EIO - The mailbox failed to complete successfully.
7126  **/
7127 void
7128 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
7129 {
7130         int idx;
7131 
7132         if (phba->cfg_fof)
7133                 lpfc_fof_queue_destroy(phba);
7134 
7135         if (phba->sli4_hba.hba_eq != NULL) {
7136                 /* Release HBA event queue */
7137                 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7138                         if (phba->sli4_hba.hba_eq[idx] != NULL) {
7139                                 lpfc_sli4_queue_free(
7140                                         phba->sli4_hba.hba_eq[idx]);
7141                                 phba->sli4_hba.hba_eq[idx] = NULL;
7142                         }
7143                 }
7144                 kfree(phba->sli4_hba.hba_eq);
7145                 phba->sli4_hba.hba_eq = NULL;
7146         }
7147 
7148         if (phba->sli4_hba.fcp_cq != NULL) {
7149                 /* Release FCP completion queue */
7150                 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7151                         if (phba->sli4_hba.fcp_cq[idx] != NULL) {
7152                                 lpfc_sli4_queue_free(
7153                                         phba->sli4_hba.fcp_cq[idx]);
7154                                 phba->sli4_hba.fcp_cq[idx] = NULL;
7155                         }
7156                 }
7157                 kfree(phba->sli4_hba.fcp_cq);
7158                 phba->sli4_hba.fcp_cq = NULL;
7159         }
7160 
7161         if (phba->sli4_hba.fcp_wq != NULL) {
7162                 /* Release FCP work queue */
7163                 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7164                         if (phba->sli4_hba.fcp_wq[idx] != NULL) {
7165                                 lpfc_sli4_queue_free(
7166                                         phba->sli4_hba.fcp_wq[idx]);
7167                                 phba->sli4_hba.fcp_wq[idx] = NULL;
7168                         }
7169                 }
7170                 kfree(phba->sli4_hba.fcp_wq);
7171                 phba->sli4_hba.fcp_wq = NULL;
7172         }
7173 
7174         /* Release FCP CQ mapping array */
7175         if (phba->sli4_hba.fcp_cq_map != NULL) {
7176                 kfree(phba->sli4_hba.fcp_cq_map);
7177                 phba->sli4_hba.fcp_cq_map = NULL;
7178         }
7179 
7180         /* Release mailbox command work queue */
7181         if (phba->sli4_hba.mbx_wq != NULL) {
7182                 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
7183                 phba->sli4_hba.mbx_wq = NULL;
7184         }
7185 
7186         /* Release ELS work queue */
7187         if (phba->sli4_hba.els_wq != NULL) {
7188                 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
7189                 phba->sli4_hba.els_wq = NULL;
7190         }
7191 
7192         /* Release unsolicited receive queue */
7193         if (phba->sli4_hba.hdr_rq != NULL) {
7194                 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
7195                 phba->sli4_hba.hdr_rq = NULL;
7196         }
7197         if (phba->sli4_hba.dat_rq != NULL) {
7198                 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
7199                 phba->sli4_hba.dat_rq = NULL;
7200         }
7201 
7202         /* Release ELS complete queue */
7203         if (phba->sli4_hba.els_cq != NULL) {
7204                 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
7205                 phba->sli4_hba.els_cq = NULL;
7206         }
7207 
7208         /* Release mailbox command complete queue */
7209         if (phba->sli4_hba.mbx_cq != NULL) {
7210                 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
7211                 phba->sli4_hba.mbx_cq = NULL;
7212         }
7213 
7214         return;
7215 }
7216 
7217 /**
7218  * lpfc_sli4_queue_setup - Set up all the SLI4 queues
7219  * @phba: pointer to lpfc hba data structure.
7220  *
7221  * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
7222  * operation.
7223  *
7224  * Return codes
7225  *      0 - successful
7226  *      -ENOMEM - No available memory
7227  *      -EIO, -ENXIO - A mailbox command failed to complete successfully.
7228  **/
7229 int
7230 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7231 {
7232         struct lpfc_sli *psli = &phba->sli;
7233         struct lpfc_sli_ring *pring;
7234         int rc = -ENOMEM;
7235         int fcp_eqidx, fcp_cqidx, fcp_wqidx;
7236         int fcp_cq_index = 0;
7237         uint32_t shdr_status, shdr_add_status;
7238         union lpfc_sli4_cfg_shdr *shdr;
7239         LPFC_MBOXQ_t *mboxq;
7240         uint32_t length;
7241 
7242         /* Check for dual-ULP support */
7243         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7244         if (!mboxq) {
7245                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7246                                 "3249 Unable to allocate memory for "
7247                                 "QUERY_FW_CFG mailbox command\n");
7248                 return -ENOMEM;
7249         }
7250         length = (sizeof(struct lpfc_mbx_query_fw_config) -
7251                   sizeof(struct lpfc_sli4_cfg_mhdr));
7252         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7253                          LPFC_MBOX_OPCODE_QUERY_FW_CFG,
7254                          length, LPFC_SLI4_MBX_EMBED);
7255 
7256         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7257 
7258         shdr = (union lpfc_sli4_cfg_shdr *)
7259                         &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7260         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7261         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7262         if (shdr_status || shdr_add_status || rc) {
7263                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7264                                 "3250 QUERY_FW_CFG mailbox failed with status "
7265                                 "x%x add_status x%x, mbx status x%x\n",
7266                                 shdr_status, shdr_add_status, rc);
7267                 if (rc != MBX_TIMEOUT)
7268                         mempool_free(mboxq, phba->mbox_mem_pool);
7269                 rc = -ENXIO;
7270                 goto out_error;
7271         }
7272 
7273         phba->sli4_hba.fw_func_mode =
7274                         mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
7275         phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
7276         phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
7277         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7278                         "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
7279                         "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
7280                         phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
7281 
7282         if (rc != MBX_TIMEOUT)
7283                 mempool_free(mboxq, phba->mbox_mem_pool);
7284 
7285         /*
7286          * Set up HBA Event Queues (EQs)
7287          */
7288 
7289         /* Set up HBA event queue */
7290         if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
7291                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7292                                 "3147 Fast-path EQs not allocated\n");
7293                 rc = -ENOMEM;
7294                 goto out_error;
7295         }
7296         for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
7297                 if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
7298                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7299                                         "0522 Fast-path EQ (%d) not "
7300                                         "allocated\n", fcp_eqidx);
7301                         rc = -ENOMEM;
7302                         goto out_destroy_hba_eq;
7303                 }
7304                 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
7305                          (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));
7306                 if (rc) {
7307                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7308                                         "0523 Failed setup of fast-path EQ "
7309                                         "(%d), rc = 0x%x\n", fcp_eqidx, rc);
7310                         goto out_destroy_hba_eq;
7311                 }
7312                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7313                                 "2584 HBA EQ setup: "
7314                                 "queue[%d]-id=%d\n", fcp_eqidx,
7315                                 phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
7316         }
7317 
7318         /* Set up fast-path FCP Response Complete Queue */
7319         if (!phba->sli4_hba.fcp_cq) {
7320                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7321                                 "3148 Fast-path FCP CQ array not "
7322                                 "allocated\n");
7323                 rc = -ENOMEM;
7324                 goto out_destroy_hba_eq;
7325         }
7326 
7327         for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
7328                 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
7329                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7330                                         "0526 Fast-path FCP CQ (%d) not "
7331                                         "allocated\n", fcp_cqidx);
7332                         rc = -ENOMEM;
7333                         goto out_destroy_fcp_cq;
7334                 }
7335                 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
7336                         phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
7337                 if (rc) {
7338                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7339                                         "0527 Failed setup of fast-path FCP "
7340                                         "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
7341                         goto out_destroy_fcp_cq;
7342                 }
7343 
7344                 /* Setup fcp_cq_map for fast lookup */
7345                 phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
7346                                 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;
7347 
7348                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7349                                 "2588 FCP CQ setup: cq[%d]-id=%d, "
7350                                 "parent seq[%d]-id=%d\n",
7351                                 fcp_cqidx,
7352                                 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
7353                                 fcp_cqidx,
7354                                 phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
7355         }
7356 
7357         /* Set up fast-path FCP Work Queue */
7358         if (!phba->sli4_hba.fcp_wq) {
7359                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7360                                 "3149 Fast-path FCP WQ array not "
7361                                 "allocated\n");
7362                 rc = -ENOMEM;
7363                 goto out_destroy_fcp_cq;
7364         }
7365 
7366         for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
7367                 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
7368                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7369                                         "0534 Fast-path FCP WQ (%d) not "
7370                                         "allocated\n", fcp_wqidx);
7371                         rc = -ENOMEM;
7372                         goto out_destroy_fcp_wq;
7373                 }
7374                 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
7375                                     phba->sli4_hba.fcp_cq[fcp_wqidx],
7376                                     LPFC_FCP);
7377                 if (rc) {
7378                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7379                                         "0535 Failed setup of fast-path FCP "
7380                                         "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
7381                         goto out_destroy_fcp_wq;
7382                 }
7383 
7384                 /* Bind this WQ to the next FCP ring */
7385                 pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
7386                 pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
7387                 phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;
7388 
7389                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7390                                 "2591 FCP WQ setup: wq[%d]-id=%d, "
7391                                 "parent cq[%d]-id=%d\n",
7392                                 fcp_wqidx,
7393                                 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
7394                                 fcp_cq_index,
7395                                 phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
7396         }
7397         /*
7398          * Set up Complete Queues (CQs)
7399          */
7400 
7401         /* Set up slow-path MBOX Complete Queue as the first CQ */
7402         if (!phba->sli4_hba.mbx_cq) {
7403                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7404                                 "0528 Mailbox CQ not allocated\n");
7405                 rc = -ENOMEM;
7406                 goto out_destroy_fcp_wq;
7407         }
7408         rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
7409                         phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
7410         if (rc) {
7411                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7412                                 "0529 Failed setup of slow-path mailbox CQ: "
7413                                 "rc = 0x%x\n", rc);
7414                 goto out_destroy_fcp_wq;
7415         }
7416         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7417                         "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
7418                         phba->sli4_hba.mbx_cq->queue_id,
7419                         phba->sli4_hba.hba_eq[0]->queue_id);
7420 
7421         /* Set up slow-path ELS Complete Queue */
7422         if (!phba->sli4_hba.els_cq) {
7423                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7424                                 "0530 ELS CQ not allocated\n");
7425                 rc = -ENOMEM;
7426                 goto out_destroy_mbx_cq;
7427         }
7428         rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
7429                         phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
7430         if (rc) {
7431                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7432                                 "0531 Failed setup of slow-path ELS CQ: "
7433                                 "rc = 0x%x\n", rc);
7434                 goto out_destroy_mbx_cq;
7435         }
7436         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7437                         "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
7438                         phba->sli4_hba.els_cq->queue_id,
7439                         phba->sli4_hba.hba_eq[0]->queue_id);
7440 
7441         /*
7442          * Set up all the Work Queues (WQs)
7443          */
7444 
7445         /* Set up Mailbox Command Queue */
7446         if (!phba->sli4_hba.mbx_wq) {
7447                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,