| /******************************************************************* |
| * This file is part of the Emulex Linux Device Driver for * |
| * Fibre Channel Host Bus Adapters. * |
| * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * |
| * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * |
| * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
| * EMULEX and SLI are trademarks of Emulex. * |
| * www.broadcom.com * |
| * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
| * * |
| * This program is free software; you can redistribute it and/or * |
| * modify it under the terms of version 2 of the GNU General * |
| * Public License as published by the Free Software Foundation. * |
| * This program is distributed in the hope that it will be useful. * |
| * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * |
| * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * |
| * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * |
| * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * |
| * TO BE LEGALLY INVALID. See the GNU General Public License for * |
| * more details, a copy of which can be found in the file COPYING * |
| * included with this package. * |
| *******************************************************************/ |
| /* See Fibre Channel protocol T11 FC-LS for details */ |
| #include <linux/blkdev.h> |
| #include <linux/pci.h> |
| #include <linux/slab.h> |
| #include <linux/interrupt.h> |
| |
| #include <scsi/scsi.h> |
| #include <scsi/scsi_device.h> |
| #include <scsi/scsi_host.h> |
| #include <scsi/scsi_transport_fc.h> |
| |
| #include "lpfc_hw4.h" |
| #include "lpfc_hw.h" |
| #include "lpfc_sli.h" |
| #include "lpfc_sli4.h" |
| #include "lpfc_nl.h" |
| #include "lpfc_disc.h" |
| #include "lpfc_scsi.h" |
| #include "lpfc.h" |
| #include "lpfc_logmsg.h" |
| #include "lpfc_crtn.h" |
| #include "lpfc_vport.h" |
| #include "lpfc_debugfs.h" |
| |
| static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *, |
| struct lpfc_iocbq *); |
| static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *, |
| struct lpfc_iocbq *); |
| static void lpfc_fabric_abort_vport(struct lpfc_vport *vport); |
| static int lpfc_issue_els_fdisc(struct lpfc_vport *vport, |
| struct lpfc_nodelist *ndlp, uint8_t retry); |
| static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, |
| struct lpfc_iocbq *iocb); |
| |
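| /* Upper bound on ELS command retry attempts */ |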
| static int lpfc_max_els_tries = 3; |
| |
| /** |
| * lpfc_els_chk_latt - Check host link attention event for a vport |
| * @vport: pointer to a host virtual N_Port data structure. |
| * |
| * This routine checks whether there is an outstanding host link |
| * attention event during the discovery process with the @vport. It is done |
| * by reading the HBA's Host Attention (HA) register. If there are any host |
| * link attention events during this @vport's discovery process, the @vport |
| * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall |
| * be issued if the link state is not already in host link cleared state, |
| * and a return code shall indicate whether the host link attention event |
| * had happened. |
| * |
| * Note that, if the host link is in state LPFC_LINK_DOWN, the @vport state |
| * is already LPFC_VPORT_READY, or the port is an SLI4 port (which does not |
| * use the HA register), the check is skipped and a return code shall |
| * indicate no host link attention event had happened. |
| * |
| * Return codes |
| * 0 - no host link attention event happened |
| * 1 - host link attention event happened |
| **/ |
| int |
| lpfc_els_chk_latt(struct lpfc_vport *vport) |
| { |
| struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
| struct lpfc_hba *phba = vport->phba; |
| uint32_t ha_copy; |
| |
| if (vport->port_state >= LPFC_VPORT_READY || |
| phba->link_state == LPFC_LINK_DOWN || |
| phba->sli_rev > LPFC_SLI_REV3) |
| return 0; |
| |
| /* Read the HBA Host Attention Register */ |
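| /* A failed register read is treated as a pending link attention */ |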
| if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
| return 1; |
| |
| if (!(ha_copy & HA_LATT)) |
| return 0; |
| |
| /* Pending Link Event during Discovery */ |
| lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, |
| "0237 Pending Link Event during " |
| "Discovery: State x%x\n", |
| phba->pport->port_state); |
| |
| /* CLEAR_LA should re-enable link attention events and |
| * we should then immediately take a LATT event. The |
| * LATT processing should call lpfc_linkdown() which |
| * will cleanup any left over in-progress discovery |
| * events. |
| */ |
| spin_lock_irq(shost->host_lock); |
| vport->fc_flag |= FC_ABORT_DISCOVERY; |
| spin_unlock_irq(shost->host_lock); |
| |
| if (phba->link_state != LPFC_CLEAR_LA) |
| lpfc_issue_clear_la(phba, vport); |
| |
| return 1; |
| } |
| |
| /** |
| * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure |
| * @vport: pointer to a host virtual N_Port data structure. |
| * @expectRsp: flag indicating whether response is expected. |
| * @cmdSize: size of the ELS command. |
| * @retry: number of retries to the command IOCB when it fails. |
| * @ndlp: pointer to a node-list data structure. |
| * @did: destination identifier. |
| * @elscmd: the ELS command code. |
| * |
| * This routine allocates an lpfc IOCB data structure from the driver |
| * IOCB free-list and prepares the IOCB with the parameters passed in, so |
| * that the discovery state machine can issue an Extended Link Service |
| * (ELS) command. It is a generic IOCB allocation and preparation routine |
| * used by all the discovery state machine routines; the ELS |
| * command-specific fields are set up later by the individual discovery |
| * routines after this routine returns the generic IOCB. It fills in the |
| * Buffer Descriptor Entries (BDEs) and allocates buffers for the command |
| * payload and, if a response is expected, the response payload. The |
| * reference count on the ndlp is incremented by 1 and the reference is |
| * stored in context1 of the IOCB so that the command's completion |
| * callback can access the ndlp later. |
| * |
| * Return code |
| * Pointer to the newly allocated/prepared els iocb data structure |
| * NULL - when els iocb data structure allocation/preparation failed |
| **/ |
| struct lpfc_iocbq * |
| lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, |
| uint16_t cmdSize, uint8_t retry, |
| struct lpfc_nodelist *ndlp, uint32_t did, |
| uint32_t elscmd) |
| { |
| struct lpfc_hba *phba = vport->phba; |
| struct lpfc_iocbq *elsiocb; |
| struct lpfc_dmabuf *pcmd, *prsp, *pbuflist; |
| struct ulp_bde64 *bpl; |
| IOCB_t *icmd; |
| |
| if (!lpfc_is_link_up(phba)) |
| return NULL; |
| |
| /* Allocate buffer for command iocb */ |
| elsiocb = lpfc_sli_get_iocbq(phba); |
| |
| if (!elsiocb) |
| return NULL; |
| |
| /* |
| * If this command is for fabric controller and HBA running |
| * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. |
| */ |
| if ((did == Fabric_DID) && |
| (phba->hba_flag & HBA_FIP_SUPPORT) && |
| ((elscmd == ELS_CMD_FLOGI) || |
| (elscmd == ELS_CMD_FDISC) || |
| (elscmd == ELS_CMD_LOGO))) |
| switch (elscmd) { |
| case ELS_CMD_FLOGI: |
| elsiocb->iocb_flag |= |
| ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT) |
| & LPFC_FIP_ELS_ID_MASK); |
| break; |
| case ELS_CMD_FDISC: |
| elsiocb->iocb_flag |= |
| ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT) |
| & LPFC_FIP_ELS_ID_MASK); |
| break; |
| case ELS_CMD_LOGO: |
| elsiocb->iocb_flag |= |
| ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT) |
| & LPFC_FIP_ELS_ID_MASK); |
| break; |
| } |
| else |
| elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; |
| |
| icmd = &elsiocb->iocb; |
| |
| /* fill in BDEs for command */ |
| /* Allocate buffer for command payload */ |
| pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); |
| if (pcmd) |
| pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); |
| if (!pcmd || !pcmd->virt) |
| goto els_iocb_free_pcmb_exit; |
| |
| INIT_LIST_HEAD(&pcmd->list); |
| |
| /* Allocate buffer for response payload */ |
| if (expectRsp) { |
| prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); |
| if (prsp) |
| prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, |
| &prsp->phys); |
| if (!prsp || !prsp->virt) |
| goto els_iocb_free_prsp_exit; |
| INIT_LIST_HEAD(&prsp->list); |
| } else |
| prsp = NULL; |
| |
| /* Allocate buffer for Buffer ptr list */ |
| pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); |
| if (pbuflist) |
| pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, |
| &pbuflist->phys); |
| if (!pbuflist || !pbuflist->virt) |
| goto els_iocb_free_pbuf_exit; |
| |
| INIT_LIST_HEAD(&pbuflist->list); |
| |
| if (expectRsp) { |
| icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); |
| icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys); |
| icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; |
| icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); |
| |
| icmd->un.elsreq64.remoteID = did; /* DID */ |
| icmd->ulpCommand = CMD_ELS_REQUEST64_CR; |
| if (elscmd == ELS_CMD_FLOGI) |
| icmd->ulpTimeout = FF_DEF_RATOV * 2; |
| else |
| icmd->ulpTimeout = phba->fc_ratov * 2; |
| } else { |
| icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); |
| icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys); |
| icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; |
| icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64); |
| icmd->un.xseq64.xmit_els_remoteID = did; /* DID */ |
| icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX; |
| } |
| icmd->ulpBdeCount = 1; |
| icmd->ulpLe = 1; |
| icmd->ulpClass = CLASS3; |
| |
| /* |
| * If we have NPIV enabled, we want to send ELS traffic by VPI. |
| * For SLI4, since the driver controls VPIs we also want to include |
| * all ELS pt2pt protocol traffic as well. |
| */ |
| if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) || |
| ((phba->sli_rev == LPFC_SLI_REV4) && |
| (vport->fc_flag & FC_PT2PT))) { |
| |
| if (expectRsp) { |
| icmd->un.elsreq64.myID = vport->fc_myDID; |
| |
| /* For ELS_REQUEST64_CR, use the VPI by default */ |
| icmd->ulpContext = phba->vpi_ids[vport->vpi]; |
| } |
| |
| icmd->ulpCt_h = 0; |
| /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ |
| if (elscmd == ELS_CMD_ECHO) |
| icmd->ulpCt_l = 0; /* context = invalid RPI */ |
| else |
| icmd->ulpCt_l = 1; /* context = VPI */ |
| } |
| |
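| /* Fill in the BPL: the first entry maps the command payload; when a |
| * response is expected, the second entry maps the response buffer. |
| */ |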
| bpl = (struct ulp_bde64 *) pbuflist->virt; |
| bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys)); |
| bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys)); |
| bpl->tus.f.bdeSize = cmdSize; |
| bpl->tus.f.bdeFlags = 0; |
| bpl->tus.w = le32_to_cpu(bpl->tus.w); |
| |
| if (expectRsp) { |
| bpl++; |
| bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys)); |
| bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys)); |
| bpl->tus.f.bdeSize = FCELSSIZE; |
| bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
| bpl->tus.w = le32_to_cpu(bpl->tus.w); |
| } |
| |
| /* prevent preparing iocb with NULL ndlp reference */ |
| elsiocb->context1 = lpfc_nlp_get(ndlp); |
| if (!elsiocb->context1) |
| goto els_iocb_free_pbuf_exit; |
| elsiocb->context2 = pcmd; |
| elsiocb->context3 = pbuflist; |
| elsiocb->retry = retry; |
| elsiocb->vport = vport; |
| elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT; |
| |
| if (prsp) { |
| list_add(&prsp->list, &pcmd->list); |
| } |
| if (expectRsp) { |
| /* Xmit ELS command <elsCmd> to remote NPORT <did> */ |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
| "0116 Xmit ELS command x%x to remote " |
| "NPORT x%x I/O tag: x%x, port state:x%x" |
| " fc_flag:x%x\n", |
| elscmd, did, elsiocb->iotag, |
| vport->port_state, |
| vport->fc_flag); |
| } else { |
| /* Xmit ELS response <elsCmd> to remote NPORT <did> */ |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
| "0117 Xmit ELS response x%x to remote " |
| "NPORT x%x I/O tag: x%x, size: x%x " |
| "port_state x%x fc_flag x%x\n", |
| elscmd, ndlp->nlp_DID, elsiocb->iotag, |
| cmdSize, vport->port_state, |
| vport->fc_flag); |
| } |
| return elsiocb; |
| |
| els_iocb_free_pbuf_exit: |
| if (expectRsp) |
| lpfc_mbuf_free(phba, prsp->virt, prsp->phys); |
| kfree(pbuflist); |
| |
| els_iocb_free_prsp_exit: |
| lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); |
| kfree(prsp); |
| |
| els_iocb_free_pcmb_exit: |
| kfree(pcmd); |
| lpfc_sli_release_iocbq(phba, elsiocb); |
| return NULL; |
| } |
| |
| /** |
| * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport |
| * @vport: pointer to a host virtual N_Port data structure. |
| * |
| * This routine issues a fabric registration login for a @vport. An |
| * active ndlp node with Fabric_DID must already exist for this @vport. |
| * The routine invokes two mailbox commands to carry out fabric registration |
| * login through the HBA firmware: the first mailbox command requests the |
| * HBA to perform link configuration for the @vport; the second mailbox |
| * command requests the HBA to perform the actual fabric registration login |
| * with the @vport. |
| * |
| * Return code |
| * 0 - successfully issued fabric registration login for @vport |
| * -ENXIO - failed to issue fabric registration login for @vport |
| **/ |
| int |
| lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) |
| { |
| struct lpfc_hba *phba = vport->phba; |
| LPFC_MBOXQ_t *mbox; |
| struct lpfc_dmabuf *mp; |
| struct lpfc_nodelist *ndlp; |
| struct serv_parm *sp; |
| int rc; |
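| /* err records which step failed for the 0249 log message below */ |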
| int err = 0; |
| |
| sp = &phba->fc_fabparam; |
| ndlp = lpfc_findnode_did(vport, Fabric_DID); |
| if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { |
| err = 1; |
| goto fail; |
| } |
| |
| mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| if (!mbox) { |
| err = 2; |
| goto fail; |
| } |
| |
| vport->port_state = LPFC_FABRIC_CFG_LINK; |
| lpfc_config_link(phba, mbox); |
| mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
| mbox->vport = vport; |
| |
| rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); |
| if (rc == MBX_NOT_FINISHED) { |
| err = 3; |
| goto fail_free_mbox; |
| } |
| |
| mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| if (!mbox) { |
| err = 4; |
| goto fail; |
| } |
| rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, |
| ndlp->nlp_rpi); |
| if (rc) { |
| err = 5; |
| goto fail_free_mbox; |
| } |
| |
| mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; |
| mbox->vport = vport; |
| /* increment the reference count on ndlp to hold reference |
| * for the callback routine. |
| */ |
| mbox->context2 = lpfc_nlp_get(ndlp); |
| |
| rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); |
| if (rc == MBX_NOT_FINISHED) { |
| err = 6; |
| goto fail_issue_reg_login; |
| } |
| |
| return 0; |
| |
| fail_issue_reg_login: |
| /* decrement the reference count on ndlp just incremented |
| * for the failed mbox command. |
| */ |
| lpfc_nlp_put(ndlp); |
| mp = (struct lpfc_dmabuf *) mbox->context1; |
| lpfc_mbuf_free(phba, mp->virt, mp->phys); |
| kfree(mp); |
| fail_free_mbox: |
| mempool_free(mbox, phba->mbox_mem_pool); |
| |
| fail: |
| lpfc_vport_set_state(vport, FC_VPORT_FAILED); |
| lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
| "0249 Cannot issue Register Fabric login: Err %d\n", err); |
| return -ENXIO; |
| } |
| |
| /** |
| * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login |
| * @vport: pointer to a host virtual N_Port data structure. |
| * |
| * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for |
| * the @vport. This mailbox command is necessary for SLI4 ports only. |
| * |
| * Return code |
| * 0 - successfully issued REG_VFI for @vport |
| * A failure code otherwise. |
| **/ |
| int |
| lpfc_issue_reg_vfi(struct lpfc_vport *vport) |
| { |
| struct lpfc_hba *phba = vport->phba; |
| LPFC_MBOXQ_t *mboxq = NULL; |
| struct lpfc_nodelist *ndlp; |
| struct lpfc_dmabuf *dmabuf = NULL; |
| int rc = 0; |
| |
| /* Skip the fabric ndlp check for SLI4 FC loopback test and pt2pt mode */ |
| if ((phba->sli_rev == LPFC_SLI_REV4) && |
| !(phba->link_flag & LS_LOOPBACK_MODE) && |
| !(vport->fc_flag & FC_PT2PT)) { |
| ndlp = lpfc_findnode_did(vport, Fabric_DID); |
| if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { |
| rc = -ENODEV; |
| goto fail; |
| } |
| } |
| |
| mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| if (!mboxq) { |
| rc = -ENOMEM; |
| goto fail; |
| } |
| |
| /* Supply CSPs only if we are fabric connected or pt-to-pt connected */ |
| if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) { |
| dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); |
| if (!dmabuf) { |
| rc = -ENOMEM; |
| goto fail; |
| } |
| dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys); |
| if (!dmabuf->virt) { |
| rc = -ENOMEM; |
| goto fail; |
| } |
| memcpy(dmabuf->virt, &phba->fc_fabparam, |
| sizeof(struct serv_parm)); |
| } |
| |
| vport->port_state = LPFC_FABRIC_CFG_LINK; |
| if (dmabuf) |
| lpfc_reg_vfi(mboxq, vport, dmabuf->phys); |
| else |
| lpfc_reg_vfi(mboxq, vport, 0); |
| |
| mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; |
| mboxq->vport = vport; |
| mboxq->context1 = dmabuf; |
| rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); |
| if (rc == MBX_NOT_FINISHED) { |
| rc = -ENXIO; |
| goto fail; |
| } |
| return 0; |
| |
| fail: |
| if (mboxq) |
| mempool_free(mboxq, phba->mbox_mem_pool); |
| if (dmabuf) { |
| if (dmabuf->virt) |
| lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); |
| kfree(dmabuf); |
| } |
| |
| lpfc_vport_set_state(vport, FC_VPORT_FAILED); |
| lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
| "0289 Issue Register VFI failed: Err %d\n", rc); |
| return rc; |
| } |
| |
| /** |
| * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login |
| * @vport: pointer to a host virtual N_Port data structure. |
| * |
| * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for |
| * the @vport. This mailbox command is necessary for SLI4 ports only. |
| * |
| * Return code |
| * 0 - successfully issued UNREG_VFI for @vport |
| * A failure code otherwise. |
| **/ |
| int |
| lpfc_issue_unreg_vfi(struct lpfc_vport *vport) |
| { |
| struct lpfc_hba *phba = vport->phba; |
| struct Scsi_Host *shost; |
| LPFC_MBOXQ_t *mboxq; |
| int rc; |
| |
| mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| if (!mboxq) { |
| lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, |
| "2556 UNREG_VFI mbox allocation failed" |
| "HBA state x%x\n", phba->pport->port_state); |
| return -ENOMEM; |
| } |
| |
| lpfc_unreg_vfi(mboxq, vport); |
| mboxq->vport = vport; |
| mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl; |
| |
| rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); |
| if (rc == MBX_NOT_FINISHED) { |
| lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, |
| "2557 UNREG_VFI issue mbox failed rc x%x " |
| "HBA state x%x\n", |
| rc, phba->pport->port_state); |
| mempool_free(mboxq, phba->mbox_mem_pool); |
| return -EIO; |
| } |
| |
| shost = lpfc_shost_from_vport(vport); |
| spin_lock_irq(shost->host_lock); |
| vport->fc_flag &= ~FC_VFI_REGISTERED; |
| spin_unlock_irq(shost->host_lock); |
| return 0; |
| } |
| |
| /** |
| * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean. |
| * @vport: pointer to a host virtual N_Port data structure. |
| * @sp: pointer to service parameter data structure. |
| * |
| * This routine is called from the FLOGI/FDISC completion handlers. It |
| * returns 1 when the FCID, Fabric portname, or Fabric nodename has changed |
| * in the completion service parameters; otherwise it returns 0. It also |
| * sets a flag in the vport data structure to delay N_Port discovery after |
| * the FLOGI/FDISC completion if the Clean Address bit in the FLOGI/FDISC |
| * response is cleared and the FCID, Fabric portname, or Fabric nodename |
| * has changed in the completion service parameters. |
| * |
| * Return code |
| * 0 - FCID, Fabric nodename, and Fabric portname are unchanged. |
| * 1 - FCID, Fabric nodename, or Fabric portname has changed. |
| * |
| **/ |
| static uint8_t |
| lpfc_check_clean_addr_bit(struct lpfc_vport *vport, |
| struct serv_parm *sp) |
| { |
| struct lpfc_hba *phba = vport->phba; |
| uint8_t fabric_param_changed = 0; |
| struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
| |
| if ((vport->fc_prevDID != vport->fc_myDID) || |
| memcmp(&vport->fabric_portname, &sp->portName, |
| sizeof(struct lpfc_name)) || |
| memcmp(&vport->fabric_nodename, &sp->nodeName, |
| sizeof(struct lpfc_name)) || |
| (vport->vport_flag & FAWWPN_PARAM_CHG)) { |
| fabric_param_changed = 1; |
| vport->vport_flag &= ~FAWWPN_PARAM_CHG; |
| } |
| /* |
| * Word 1 Bit 31 in the common service parameters is overloaded: |
| * Word 1 Bit 31 in a FLOGI request is the multiple N_Port request bit; |
| * Word 1 Bit 31 in a FLOGI response is the Clean Address bit. |
| * |
| * If a fabric parameter has changed and the Clean Address bit is |
| * cleared, delay nport discovery if |
| * - vport->fc_prevDID != 0 (not initial discovery) OR |
| * - the lpfc_delay_discovery module parameter is set. |
| */ |
| if (fabric_param_changed && !sp->cmn.clean_address_bit && |
| (vport->fc_prevDID || phba->cfg_delay_discovery)) { |
| spin_lock_irq(shost->host_lock); |
| vport->fc_flag |= FC_DISC_DELAYED; |
| spin_unlock_irq(shost->host_lock); |
| } |
| |
| return fabric_param_changed; |
| } |
| |
| |
| /** |
| * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port |
| * @vport: pointer to a host virtual N_Port data structure. |
| * @ndlp: pointer to a node-list data structure. |
| * @sp: pointer to service parameter data structure. |
| * @irsp: pointer to the IOCB within the lpfc response IOCB. |
| * |
| * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback |
| * function to handle the completion of a Fabric Login (FLOGI) into a fabric |
| * port in a fabric topology. It properly sets up the parameters to the @ndlp |
| * from the IOCB response. It also checks the N_Port ID newly assigned to |
| * the @vport against the previously assigned Destination ID (DID). If the |
| * DID has changed, the lpfc_unreg_rpi() routine is invoked on all the |
| * remaining nodes with the @vport to unregister their Remote Port |
| * Indicators (RPIs). Finally, lpfc_issue_fabric_reglogin() is invoked to |
| * register login to the fabric. |
| * |
| * Return code |
| * 0 - Success (currently, always return 0) |
| **/ |
| static int |
| lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
| struct serv_parm *sp, IOCB_t *irsp) |
| { |
| struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
| struct lpfc_hba *phba = vport->phba; |
| struct lpfc_nodelist *np; |
| struct lpfc_nodelist *next_np; |
| uint8_t fabric_param_changed; |
| |
| spin_lock_irq(shost->host_lock); |
| vport->fc_flag |= FC_FABRIC; |
| spin_unlock_irq(shost->host_lock); |
| |
| phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov); |
| if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ |
| phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000; |
| |
| phba->fc_edtovResol = sp->cmn.edtovResolution; |
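| /* R_A_TOV is in milliseconds; convert to seconds, rounding up */ |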
| phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; |
| |
| if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { |
| spin_lock_irq(shost->host_lock); |
| vport->fc_flag |= FC_PUBLIC_LOOP; |
| spin_unlock_irq(shost->host_lock); |
| } |
| |
| vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; |
| memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name)); |
| memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name)); |
| ndlp->nlp_class_sup = 0; |
| if (sp->cls1.classValid) |
| ndlp->nlp_class_sup |= FC_COS_CLASS1; |
| if (sp->cls2.classValid) |
| ndlp->nlp_class_sup |= FC_COS_CLASS2; |
| if (sp->cls3.classValid) |
| ndlp->nlp_class_sup |= FC_COS_CLASS3; |
| if (sp->cls4.classValid) |
| ndlp->nlp_class_sup |= FC_COS_CLASS4; |
| ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | |
| sp->cmn.bbRcvSizeLsb; |
| |
| fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); |
| if (fabric_param_changed) { |
| /* Reset FDMI attribute masks based on config parameter */ |
| if (phba->cfg_enable_SmartSAN || |
| (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { |
| /* Setup appropriate attribute masks */ |
| vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; |
| if (phba->cfg_enable_SmartSAN) |
| vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; |
| else |
| vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; |
| } else { |
| vport->fdmi_hba_mask = 0; |
| vport->fdmi_port_mask = 0; |
| } |
| |
| } |
| memcpy(&vport->fabric_portname, &sp->portName, |
| sizeof(struct lpfc_name)); |
| memcpy(&vport->fabric_nodename, &sp->nodeName, |
| sizeof(struct lpfc_name)); |
| memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); |
| |
| if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { |
| if (sp->cmn.response_multiple_NPort) { |
| lpfc_printf_vlog(vport, KERN_WARNING, |
| LOG_ELS | LOG_VPORT, |
| "1816 FLOGI NPIV supported, " |
| "response data 0x%x\n", |
| sp->cmn.response_multiple_NPort); |
| spin_lock_irq(&phba->hbalock); |
| phba->link_flag |= LS_NPIV_FAB_SUPPORTED; |
| spin_unlock_irq(&phba->hbalock); |
| } else { |
| /* Because we asked f/w for NPIV it still expects us |
| * to call reg_vnpid at least for the physical host |
| */ |
| lpfc_printf_vlog(vport, KERN_WARNING, |
| LOG_ELS | LOG_VPORT, |
| "1817 Fabric does not support NPIV " |
| "- configuring single port mode.\n"); |
| spin_lock_irq(&phba->hbalock); |
| phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; |
| spin_unlock_irq(&phba->hbalock); |
| } |
| } |
| |
| /* |
| * For FC we need to do some special processing because of the SLI |
| * Port's default settings of the Common Service Parameters. |
| */ |
| if ((phba->sli_rev == LPFC_SLI_REV4) && |
| (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) { |
| /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ |
| if (fabric_param_changed) |
| lpfc_unregister_fcf_prep(phba); |
| |
| /* This should just update the VFI CSPs */ |
| if (vport->fc_flag & FC_VFI_REGISTERED) |
| lpfc_issue_reg_vfi(vport); |
| } |
| |
| if (fabric_param_changed && |
| !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { |
| |
| /* If our NportID changed, we need to ensure all |
| * remaining NPORTs get unreg_login'ed. |
| */ |
| list_for_each_entry_safe(np, next_np, |
| &vport->fc_nodes, nlp_listp) { |
| if (!NLP_CHK_NODE_ACT(np)) |
| continue; |
| if ((np->nlp_state != NLP_STE_NPR_NODE) || |
| !(np->nlp_flag & NLP_NPR_ADISC)) |
| continue; |
| spin_lock_irq(shost->host_lock); |
| np->nlp_flag &= ~NLP_NPR_ADISC; |
| spin_unlock_irq(shost->host_lock); |
| lpfc_unreg_rpi(vport, np); |
| } |
| lpfc_cleanup_pending_mbox(vport); |
| |
| if (phba->sli_rev == LPFC_SLI_REV4) { |
| lpfc_sli4_unreg_all_rpis(vport); |
| lpfc_mbx_unreg_vpi(vport); |
| spin_lock_irq(shost->host_lock); |
| vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; |
| spin_unlock_irq(shost->host_lock); |
| } |
| |
| /* |
| * For SLI3 and SLI4, the VPI needs to be reregistered in |
| * response to this fabric parameter change event. |
| */ |
| spin_lock_irq(shost->host_lock); |
| vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; |
| spin_unlock_irq(shost->host_lock); |
| } else if ((phba->sli_rev == LPFC_SLI_REV4) && |
| !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { |
| /* |
| * Driver needs to re-reg VPI in order for f/w |
| * to update the MAC address. |
| */ |
| lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
| lpfc_register_new_vport(phba, vport, ndlp); |
| return 0; |
| } |
| |
| if (phba->sli_rev < LPFC_SLI_REV4) { |
| lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); |
| if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && |
| vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) |
| lpfc_register_new_vport(phba, vport, ndlp); |
| else |
| lpfc_issue_fabric_reglogin(vport); |
| } else { |
| ndlp->nlp_type |= NLP_FABRIC; |
| lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
| if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) && |
| (vport->vpi_state & LPFC_VPI_REGISTERED)) { |
| lpfc_start_fdiscs(phba); |
| lpfc_do_scr_ns_plogi(phba, vport); |
| } else if (vport->fc_flag & FC_VFI_REGISTERED) |
| lpfc_issue_init_vpi(vport); |
| else { |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
| "3135 Need register VFI: (x%x/%x)\n", |
| vport->fc_prevDID, vport->fc_myDID); |
| lpfc_issue_reg_vfi(vport); |
| } |
| } |
| return 0; |
| } |
| |
| /** |
| * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port |
| * @vport: pointer to a host virtual N_Port data structure. |
| * @ndlp: pointer to a node-list data structure. |
| * @sp: pointer to service parameter data structure. |
| * |
| * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback |
| * function to handle the completion of a Fabric Login (FLOGI) into an N_Port |
| * in a point-to-point topology. First, the @vport's N_Port Name is compared |
| * with the received N_Port Name: if the @vport's N_Port Name is greater than |
| * the received N_Port Name lexicographically, this node shall assign local |
| * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and |
| * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise, |
| * this node shall just wait for the remote node to issue PLOGI and assign |
| * N_Port IDs. |
| * |
| * Return code |
| * 0 - Success |
| * -ENXIO - Fail |
| **/ |
| static int |
| lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
| struct serv_parm *sp) |
| { |
| struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
| struct lpfc_hba *phba = vport->phba; |
| LPFC_MBOXQ_t *mbox; |
| int rc; |
| |
| spin_lock_irq(shost->host_lock); |
| vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); |
| vport->fc_flag |= FC_PT2PT; |
| spin_unlock_irq(shost->host_lock); |
| |
| /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ |
| if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { |
| lpfc_unregister_fcf_prep(phba); |
| |
| spin_lock_irq(shost->host_lock); |
| vport->fc_flag &= ~FC_VFI_REGISTERED; |
| spin_unlock_irq(shost->host_lock); |
| phba->fc_topology_changed = 0; |
| } |
| |
| rc = memcmp(&vport->fc_portname, &sp->portName, |
| sizeof(vport->fc_portname)); |
| |
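| /* The port with the higher (or equal) WWPN assigns the pt2pt |
| * N_Port IDs. |
| */ |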
| if (rc >= 0) { |
| /* This side will initiate the PLOGI */ |
| spin_lock_irq(shost->host_lock); |
| vport->fc_flag |= FC_PT2PT_PLOGI; |
| spin_unlock_irq(shost->host_lock); |
| |
| /* |
| * N_Port ID cannot be 0, set our Id to LocalID |
| * the other side will be RemoteID. |
| */ |
| |
| /* not equal */ |
| if (rc) |
| vport->fc_myDID = PT2PT_LocalID; |
| |
| /* Decrement ndlp reference count indicating that ndlp can be |
| * safely released when other references to it are done. |
| */ |
| lpfc_nlp_put(ndlp); |
| |
| ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); |
| if (!ndlp) { |
| /* |
| * Cannot find existing Fabric ndlp, so allocate a |
| * new one |
| */ |
| ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID); |
| if (!ndlp) |
| goto fail; |
| } else if (!NLP_CHK_NODE_ACT(ndlp)) { |
| ndlp = lpfc_enable_node(vport, ndlp, |
| NLP_STE_UNUSED_NODE); |
| if (!ndlp) |
| goto fail; |
| } |
| |
| memcpy(&ndlp->nlp_portname, &sp->portName, |
| sizeof(struct lpfc_name)); |
| memcpy(&ndlp->nlp_nodename, &sp->nodeName, |
| sizeof(struct lpfc_name)); |
| /* Set state will put ndlp onto node list if not already done */ |
| lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
| spin_lock_irq(shost->host_lock); |
| ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
| spin_unlock_irq(shost->host_lock); |
| } else |
| /* This side will wait for the PLOGI, decrement ndlp reference |
| * count indicating that ndlp can be released when other |
| * references to it are done. |
| */ |
| lpfc_nlp_put(ndlp); |
| |
| /* If we are pt2pt with another NPort, force NPIV off! */ |
| phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; |
| |
| mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| if (!mbox) |
| goto fail; |
| |
| lpfc_config_link(phba, mbox); |
| |
| mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; |
| mbox->vport = vport; |
| rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); |
| if (rc == MBX_NOT_FINISHED) { |
| mempool_free(mbox, phba->mbox_mem_pool); |
| goto fail; |
| } |
| |
| return 0; |
| fail: |
| return -ENXIO; |
| } |
| |
| /** |
| * lpfc_cmpl_els_flogi - Completion callback function for flogi |
| * @phba: pointer to lpfc hba data structure. |
| * @cmdiocb: pointer to lpfc command iocb data structure. |
| * @rspiocb: pointer to lpfc response iocb data structure. |
| * |
| * This routine is the top-level completion callback function for an issued |
| * Fabric Login (FLOGI) command. If the response IOCB reported an error, |
| * the lpfc_els_retry() routine is invoked to retry the FLOGI. If a retry |
| * has been made (either immediately or delayed, with lpfc_els_retry() |
| * returning 1), the command IOCB is released and the function returns. |
| * If the retry attempt has been given up (for example, the maximum number |
| * of retries has been reached), one additional decrement of the ndlp |
| * reference is made before releasing the command IOCB and returning. This |
| * actually releases the remote node (note that lpfc_els_free_iocb() also |
| * decrements the ndlp reference count once). If no error is reported in |
| * the IOCB status, the F_Port bit in the common service parameters of the |
| * response is used to determine the topology: if the F_Port bit is set, |
| * this is a fabric topology; otherwise, it is a point-to-point topology. |
| * The routine lpfc_cmpl_els_flogi_fabric() or lpfc_cmpl_els_flogi_nport() |
| * is invoked accordingly to handle the topology-specific completion. |
| **/ |
| static void |
| lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
| struct lpfc_iocbq *rspiocb) |
| { |
| struct lpfc_vport *vport = cmdiocb->vport; |
| struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
| IOCB_t *irsp = &rspiocb->iocb; |
| struct lpfc_nodelist *ndlp = cmdiocb->context1; |
| struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; |
| struct serv_parm *sp; |
| uint16_t fcf_index; |
| int rc; |
| |
| /* Check to see if link went down during discovery */ |
| if (lpfc_els_chk_latt(vport)) { |
| /* One additional decrement on node reference count to |
| * trigger the release of the node |
| */ |
| lpfc_nlp_put(ndlp); |
| goto out; |
| } |
| |
| lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
| "FLOGI cmpl: status:x%x/x%x state:x%x", |
| irsp->ulpStatus, irsp->un.ulpWord[4], |
| vport->port_state); |
| |
| if (irsp->ulpStatus) { |
| /* |
| * In case of FIP mode, perform roundrobin FCF failover |
| * due to new FCF discovery |
| */ |
| if ((phba->hba_flag & HBA_FIP_SUPPORT) && |
| (phba->fcf.fcf_flag & FCF_DISCOVERY)) { |
| if (phba->link_state < LPFC_LINK_UP) |
| goto stop_rr_fcf_flogi; |
| if ((phba->fcoe_cvl_eventtag_attn == |
| phba->fcoe_cvl_eventtag) && |
| (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && |
| ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == |
| IOERR_SLI_ABORTED)) |
| goto stop_rr_fcf_flogi; |
| else |
| phba->fcoe_cvl_eventtag_attn = |
| phba->fcoe_cvl_eventtag; |
| lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, |
| "2611 FLOGI failed on FCF (x%x), " |
| "status:x%x/x%x, tmo:x%x, perform " |
| "roundrobin FCF failover\n", |
| phba->fcf.current_rec.fcf_indx, |
| irsp->ulpStatus, irsp->un.ulpWord[4], |
| irsp->ulpTimeout); |
| lpfc_sli4_set_fcf_flogi_fail(phba, |
| phba->fcf.current_rec.fcf_indx); |
| fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); |
| rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); |
| if (rc) |
| goto out; |
| } |
| |
| stop_rr_fcf_flogi: |
| /* FLOGI failure */ |
| lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
| "2858 FLOGI failure Status:x%x/x%x TMO:x%x " |
| "Data x%x x%x\n", |
| irsp->ulpStatus, irsp->un.ulpWord[4], |
| irsp->ulpTimeout, phba->hba_flag, |
| phba->fcf.fcf_flag); |
| |
| /* Check for retry */ |
| if (lpfc_els_retry(phba, cmdiocb, rspiocb)) |
| goto out; |
| |
| /* FLOGI failure */ |
| lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
| "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n", |
| irsp->ulpStatus, irsp->un.ulpWord[4], |
| irsp->ulpTimeout); |
| |
| /* If this is not a loop open failure, bail out */ |
| if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT && |
| ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == |
| IOERR_LOOP_OPEN_FAILURE))) |
| goto flogifail; |
| |
| /* FLOGI failed, so there is no fabric */ |
| spin_lock_irq(shost->host_lock); |
| vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); |
| spin_unlock_irq(shost->host_lock); |
| |
| /* If private loop, then allow max outstanding els to be |
| * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no |
| * alpa map would take too long otherwise. |
| */ |
| if (phba->alpa_map[0] == 0) |
| vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; |
| if ((phba->sli_rev == LPFC_SLI_REV4) && |
| (!(vport->fc_flag & FC_VFI_REGISTERED) || |
| (vport->fc_prevDID != vport->fc_myDID) || |
| phba->fc_topology_changed)) { |
| if (vport->fc_flag & FC_VFI_REGISTERED) { |
| if (phba->fc_topology_changed) { |
| lpfc_unregister_fcf_prep(phba); |
| spin_lock_irq(shost->host_lock); |
| vport->fc_flag &= ~FC_VFI_REGISTERED; |
| spin_unlock_irq(shost->host_lock); |
| phba->fc_topology_changed = 0; |
| } else { |
| lpfc_sli4_unreg_all_rpis(vport); |
| } |
| } |
| |
| /* Do not register VFI if the driver aborted FLOGI */ |
| if (!lpfc_error_lost_link(irsp)) |
| lpfc_issue_reg_vfi(vport); |
| lpfc_nlp_put(ndlp); |
| goto out; |
| } |
| goto flogifail; |
| } |
| spin_lock_irq(shost->host_lock); |
| vport->fc_flag &= ~FC_VPORT_CVL_RCVD; |
| vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; |
| spin_unlock_irq(shost->host_lock); |
| |
| /* |
| * The FLOGI succeeded. Sync the data for the CPU before |
| * accessing it. |
| */ |
| prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); |
| if (!prsp) |
| goto out; |
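| /* Skip the 4-byte ELS command code to reach the service parameters */ |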
| sp = prsp->virt + sizeof(uint32_t); |
| |
| /* FLOGI completes successfully */ |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
| "0101 FLOGI completes successfully, I/O tag:x%x, " |
| "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag, |
| irsp->un.ulpWord[4], sp->cmn.e_d_tov, |
| sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, |
| vport->port_state, vport->fc_flag); |
| |
| if (vport->port_state == LPFC_FLOGI) { |
| /* |
| * If Common Service Parameters indicate Nport |
| * we are point to point, if Fport we are Fabric. |
| */ |
| if (sp->cmn.fPort) |
| rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp); |
| else if (!(phba->hba_flag & HBA_FCOE_MODE)) |
| rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); |
| else { |
| lpfc_printf_vlog(vport, KERN_ERR, |
| LOG_FIP | LOG_ELS, |
| "2831 FLOGI response with cleared Fabric " |
| "bit fcf_index 0x%x " |
| "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x " |
| "Fabric Name " |
| "%02x%02x%02x%02x%02x%02x%02x%02x\n", |
| phba->fcf.current_rec.fcf_indx, |
| phba->fcf.current_rec.switch_name[0], |
| phba->fcf.current_rec.switch_name[1], |
| phba->fcf.current_rec.switch_name[2], |
| phba->fcf.current_rec.switch_name[3], |
| phba->fcf.current_rec.switch_name[4], |
| phba->fcf.current_rec.switch_name[5], |
| phba->fcf.current_rec.switch_name[6], |
| phba->fcf.current_rec.switch_name[7], |
| phba->fcf.current_rec.fabric_name[0], |
| phba->fcf.current_rec.fabric_name[1], |
| phba->fcf.current_rec.fabric_name[2], |
| phba->fcf.current_rec.fabric_name[3], |
| phba->fcf.current_rec.fabric_name[4], |
| phba->fcf.current_rec.fabric_name[5], |
| phba->fcf.current_rec.fabric_name[6], |
| phba->fcf.current_rec.fabric_name[7]); |
| lpfc_nlp_put(ndlp); |
| spin_lock_irq(&phba->hbalock); |
| phba->fcf.fcf_flag &= ~FCF_DISCOVERY; |
| phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); |
| spin_unlock_irq(&phba->hbalock); |
| goto out; |
| } |
| if (!rc) { |
| /* Mark the FCF discovery process done */ |
| if (phba->hba_flag & HBA_FIP_SUPPORT) |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | |
| LOG_ELS, |
| "2769 FLOGI to FCF (x%x) " |
| "completed successfully\n", |
| phba->fcf.current_rec.fcf_indx); |
| spin_lock_irq(&phba->hbalock); |
| phba->fcf.fcf_flag &= ~FCF_DISCOVERY; |
| phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); |
| spin_unlock_irq(&phba->hbalock); |
| goto out; |
| } |
| } |
| |
| flogifail: |
| spin_lock_irq(&phba->hbalock); |
| phba->fcf.fcf_flag &= ~FCF_DISCOVERY; |
| spin_unlock_irq(&phba->hbalock); |
| |
| lpfc_nlp_put(ndlp); |
| |
| if (!lpfc_error_lost_link(irsp)) { |
| /* FLOGI failed, so just use loop map to make discovery list */ |
| lpfc_disc_list_loopmap(vport); |
| |
| /* Start discovery */ |
| lpfc_disc_start(vport); |
| } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || |
| (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != |
| IOERR_SLI_ABORTED) && |
| ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != |
| IOERR_SLI_DOWN))) && |
| (phba->link_state != LPFC_CLEAR_LA)) { |
| /* If FLOGI failed enable link interrupt. */ |
| lpfc_issue_clear_la(phba, vport); |
| } |
| out: |
| lpfc_els_free_iocb(phba, cmdiocb); |
| } |
| |
| /** |
| * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport |
| * @vport: pointer to a host virtual N_Port data structure. |
| * @ndlp: pointer to a node-list data structure. |
| * @retry: number of retries to the command IOCB. |
| * |
| * This routine issues a Fabric Login (FLOGI) Request ELS command |
| * for a @vport. The initiator service parameters are put into the payload |
| * of the FLOGI Request IOCB and the top-level callback function pointer |
| * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback |
| * function field. The lpfc_issue_fabric_iocb routine is invoked to send |
| * out FLOGI ELS command with one outstanding fabric IOCB at a time. |
| * |
| * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp |
| * will be incremented by 1 for holding the ndlp and the reference to ndlp |
| * will be stored into the context1 field of the IOCB for the completion |
| * callback function to the FLOGI ELS command. |
| * |
| * Return code |
| * 0 - successfully issued flogi iocb for @vport |
| * 1 - failed to issue flogi iocb for @vport |
| **/ |
| static int |
| lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
| uint8_t retry) |
| { |
| struct lpfc_hba *phba = vport->phba; |
| struct serv_parm *sp; |
| IOCB_t *icmd; |
| struct lpfc_iocbq *elsiocb; |
| uint8_t *pcmd; |
| uint16_t cmdsize; |
| uint32_t tmo; |
| int rc; |
| |
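| /* FLOGI payload: 4-byte ELS command code plus the service parameters */ |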
| cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); |
| elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, |
| ndlp->nlp_DID, ELS_CMD_FLOGI); |
| |
| if (!elsiocb) |
| return 1; |
| |
| icmd = &elsiocb->iocb; |
| pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); |
| |
| /* For FLOGI request, remainder of payload is service parameters */ |
| *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI; |
| pcmd += sizeof(uint32_t); |
| memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); |
| sp = (struct serv_parm *) pcmd; |
| |
| /* Setup CSPs accordingly for Fabric */ |
| sp->cmn.e_d_tov = 0; |
| sp->cmn.w2.r_a_tov = 0; |
| sp->cmn.virtual_fabric_support = 0; |
| sp->cls1.classValid = 0; |
| if (sp->cmn.fcphLow < FC_PH3) |
| sp->cmn.fcphLow = FC_PH3; |
| if (sp->cmn.fcphHigh < FC_PH3) |
| sp->cmn.fcphHigh = FC_PH3; |
| |
| if (phba->sli_rev == LPFC_SLI_REV4) { |
| if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == |
| LPFC_SLI_INTF_IF_TYPE_0) { |
| elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1); |
| elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1); |
| /* FLOGI needs to be 3 for WQE FCFI */ |
| /* Set the fcfi to the fcfi we registered with */ |
| elsiocb->iocb.ulpContext = phba->fcf.fcfi; |
| } |
| /* Can't do SLI4 class2 without sequence coalescing support */ |
| sp->cls2.classValid = 0; |
| sp->cls2.seqDelivery = 0; |
| } else { |
| /* Historical, setting sequential-delivery bit for SLI3 */ |
| sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0; |
| sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0; |
| if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { |
| sp->cmn.request_multiple_Nport = 1; |
| /* For FLOGI, let the FLOGI rsp set the NPortID for VPI 0 */ |
| icmd->ulpCt_h = 1; |
| icmd->ulpCt_l = 0; |
| } else |
| sp->cmn.request_multiple_Nport = 0; |
| } |
| |
| if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) { |
| icmd->un.elsreq64.myID = 0; |
| icmd->un.elsreq64.fl = 1; |
| } |
| |
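| /* Arm the discovery timer using the FLOGI timeout, then restore |
| * the saved R_A_TOV value. |
| */ |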
| tmo = phba->fc_ratov; |
| phba->fc_ratov = LPFC_DISC_FLOGI_TMO; |
| lpfc_set_disctmo(vport); |
| phba->fc_ratov = tmo; |
| |
| phba->fc_stat.elsXmitFLOGI++; |
| elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi; |
| |
| lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
| "Issue FLOGI: opt:x%x", |
| phba->sli3_options, 0, 0); |
| |
| rc = lpfc_issue_fabric_iocb(phba, elsiocb); |
| if (rc == IOCB_ERROR) { |
| lpfc_els_free_iocb(phba, elsiocb); |
| return 1; |
| } |
| return 0; |
| } |
| |
| /** |
| * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs |
| * @phba: pointer to lpfc hba data structure. |
| * |
| * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs |
| * on a @phba. It walks all the outstanding IOCBs on the txcmplq |
| * list and issues an abort IOCB command on each outstanding IOCB that |
| * contains an active Fabric_DID ndlp. Note that this function only issues |
| * the abort IOCB command on the outstanding IOCBs; thus when it returns, |
| * it does not guarantee that all the IOCBs have actually been aborted. |
| * |
| * Return code |
| * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0) |
| **/ |
| int |
| lpfc_els_abort_flogi(struct lpfc_hba *phba) |
| { |
| struct lpfc_sli_ring *pring; |
| struct lpfc_iocbq *iocb, *next_iocb; |
| struct lpfc_nodelist *ndlp; |
| IOCB_t *icmd; |
| |
| /* Abort outstanding I/O on NPort <nlp_DID> */ |
| lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, |
| "0201 Abort outstanding I/O on NPort x%x\n", |
| Fabric_DID); |
| |
| pring = lpfc_phba_elsring(phba); |
| |
| /* |
| * Check the txcmplq for an iocb that matches the nport the driver is |
| * searching for. |
| */ |
| spin_lock_irq(&phba->hbalock); |
| list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { |
| icmd = &iocb->iocb; |
| if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) { |
| ndlp = (struct lpfc_nodelist *)(iocb->context1); |
| if (ndlp && NLP_CHK_NODE_ACT(ndlp) && |
| (ndlp->nlp_DID == Fabric_DID)) |
| lpfc_sli_issue_abort_iotag(phba, pring, iocb); |
| } |
| } |
| spin_unlock_irq(&phba->hbalock); |
| |
| return 0; |
| } |
| |
| /** |
| * lpfc_initial_flogi - Issue an initial fabric login for a vport |
| * @vport: pointer to a host virtual N_Port data structure. |
| * |
| * This routine issues an initial Fabric Login (FLOGI) for the @vport |
| * specified. It first searches for the ndlp with the Fabric_DID (0xfffffe) on |
| * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and |
| * put it onto the @vport's ndlp list. If an inactive ndlp is found on the list, |
| * it will just be enabled and made active. The lpfc_issue_els_flogi() routine |
| * is then invoked with the @vport and the ndlp to perform the FLOGI for the |
| * @vport. |
| * |
| * Return code |
| * 0 - failed to issue initial flogi for @vport |
| * 1 - successfully issued initial flogi for @vport |
| **/ |
| int |
| lpfc_initial_flogi(struct lpfc_vport *vport) |
| { |
| struct lpfc_nodelist *ndlp; |
| |
| vport->port_state = LPFC_FLOGI; |
| lpfc_set_disctmo(vport); |
| |
| /* First look for the Fabric ndlp */ |
| ndlp = lpfc_findnode_did(vport, Fabric_DID); |
| if (!ndlp) { |
| /* Cannot find existing Fabric ndlp, so allocate a new one */ |
| ndlp = lpfc_nlp_init(vport, Fabric_DID); |
| if (!ndlp) |
| return 0; |
| /* Set the node type */ |
| ndlp->nlp_type |= NLP_FABRIC; |
| /* Put ndlp onto node list */ |
| lpfc_enqueue_node(vport, ndlp); |
| } else if (!NLP_CHK_NODE_ACT(ndlp)) { |
| /* re-setup ndlp without removing from node list */ |
| ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); |
| if (!ndlp) |
| return 0; |
| } |
| |
| if (lpfc_issue_els_flogi(vport, ndlp, 0)) { |
| /* This decrement of reference count to node shall kick off |
| * the release of the node. |
| */ |
| lpfc_nlp_put(ndlp); |
| return 0; |
| } |
| return 1; |
| } |
| |
| /** |
| * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport |
| * @vport: pointer to a host virtual N_Port data structure. |
| * |
| * This routine issues an initial Fabric Discover (FDISC) for the @vport |
| * specified. It first searches for the ndlp with the Fabric_DID (0xfffffe) on |
| * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and |
| * put it onto the @vport's ndlp list. If an inactive ndlp is found on the list, |
| * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine |
| * is then invoked with the @vport and the ndlp to perform the FDISC for the |
| * @vport. |
| * |
| * Return code |
| * 0 - failed to issue initial fdisc for @vport |
| * 1 - successfully issued initial fdisc for @vport |
| **/ |
| int |
| lpfc_initial_fdisc(struct lpfc_vport *vport) |
| { |
| struct lpfc_nodelist *ndlp; |
| |
| /* First look for the Fabric ndlp */ |
| ndlp = lpfc_findnode_did(vport, Fabric_DID); |
| if (!ndlp) { |
| /* Cannot find existing Fabric ndlp, so allocate a new one */ |
| ndlp = lpfc_nlp_init(vport, Fabric_DID); |
| if (!ndlp) |
| return 0; |
| /* Put ndlp onto node list */ |
| lpfc_enqueue_node(vport, ndlp); |
| } else if (!NLP_CHK_NODE_ACT(ndlp)) { |
| /* re-setup ndlp without removing from node list */ |
| ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); |
| if (!ndlp) |
| return 0; |
| } |
| |
| if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { |
| /* decrement node reference count to trigger the release of |
| * the node. |
| */ |
| lpfc_nlp_put(ndlp); |
| return 0; |
| } |
| return 1; |
| } |
| |
| /** |
| * lpfc_more_plogi - Check and issue remaining plogis for a vport |
| * @vport: pointer to a host virtual N_Port data structure. |
| * |
| * This routine checks whether there are more remaining Port Logins |
| * (PLOGI) to be issued for the @vport. If so, it will invoke the routine |
| * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes |
| * to issue ELS PLOGIs, up to the configured number of discovery threads |
| * for the @vport (@vport->cfg_discovery_threads). The function also |
| * decrements the @vport's num_disc_nodes by 1 if it is not already 0. |
| **/ |
| void |
| lpfc_more_plogi(struct lpfc_vport *vport) |
| { |
| if (vport->num_disc_nodes) |
| vport->num_disc_nodes--; |
| |
| /* Continue discovery with <num_disc_nodes> PLOGIs to go */ |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
| "0232 Continue discovery with %d PLOGIs to go " |
| "Data: x%x x%x x%x\n", |
| vport->num_disc_nodes, vport->fc_plogi_cnt, |
| vport->fc_flag, vport->port_state); |
| /* Check to see if there are more PLOGIs to be sent */ |
| if (vport->fc_flag & FC_NLP_MORE) |
| /* go thru NPR nodes and issue any remaining ELS PLOGIs */ |
| lpfc_els_disc_plogi(vport); |
| |
| return; |
| } |
| |
| /** |
| * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp |
| * @phba: pointer to lpfc hba data structure. |
| * @prsp: pointer to response IOCB payload. |
| * @ndlp: pointer to a node-list data structure. |
| * |
| * This routine checks and indicates whether the WWPN of an N_Port, retrieved |
| * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port. |
| * The following cases are considered N_Port confirmed: |
| * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on the vport list and |
| * matches the WWPN of the N_Port logged into; 3) The @ndlp is not on the |
| * vport list but does not have a WWPN assigned either. If the WWPN is |
| * confirmed, the pointer to the @ndlp is returned. If the WWPN is not |
| * confirmed: 1) if there is a node on the vport list other than the @ndlp |
| * with the same WWPN as the N_Port PLOGI logged into, lpfc_unreg_rpi() is |
| * invoked on that node to release the RPI associated with it; 2) if no |
| * node is found on the vport list with the same WWPN as the N_Port PLOGI |
| * logged into, a new node is allocated (or activated). In either case, the |
| * parameters of the @ndlp are copied to the new_ndlp, the @ndlp is |
| * released, and the new_ndlp is put onto the vport node list with its |
| * pointer returned as the confirmed node. |
| * |
| * Note that before the @ndlp gets "released", the keepDID from the |
| * not-matching or inactive "new_ndlp" on the vport node list is assigned |
| * to the nlp_DID of the @ndlp. This is because releasing the @ndlp really |
| * puts it into an inactive state on the vport node list, and the node list |
| * management algorithm does not allow two nodes with the same DID. |
| * |
| * Return code |
| * pointer to the PLOGI N_Port @ndlp |
| **/ |
| static struct lpfc_nodelist * |
| lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, |
| struct lpfc_nodelist *ndlp) |
| { |
| struct lpfc_vport *vport = ndlp->vport; |
| struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
| struct lpfc_nodelist *new_ndlp; |
| struct lpfc_rport_data *rdata; |
| struct fc_rport *rport; |
| struct serv_parm *sp; |
| uint8_t name[sizeof(struct lpfc_name)]; |
| uint32_t rc, keepDID = 0, keep_nlp_flag = 0; |
| uint16_t keep_nlp_state; |
| struct lpfc_nvme_rport *keep_nrport = NULL; |
| int put_node; |
| int put_rport; |
| unsigned long *active_rrqs_xri_bitmap = NULL; |
| |
| /* Fabric nodes can have the same WWPN so we don't bother searching |
| * by WWPN. Just return the ndlp that was given to us. |
| */ |
| if (ndlp->nlp_type & NLP_FABRIC) |
| return ndlp; |
| |
| sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t)); |
| memset(name, 0, sizeof(struct lpfc_name)); |
| |
| /* Now we find out if the NPort we are logging into matches the WWPN |
| * we have for that ndlp. If not, we have some work to do. |
| */ |
| new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); |
| |
| if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp)) |
| return ndlp; |
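| /* On SLI4, allocate a scratch bitmap to preserve the node's active |
| * RRQ XRIs across the DID swap below. |
| */ |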
| if (phba->sli_rev == LPFC_SLI_REV4) { |
| active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, |
| GFP_KERNEL); |
| if (active_rrqs_xri_bitmap) |
| memset(active_rrqs_xri_bitmap, 0, |
| phba->cfg_rrq_xri_bitmap_sz); |
| } |
| |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
| "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n", |
| ndlp, ndlp->nlp_DID, new_ndlp); |
| |
| if (!new_ndlp) { |
| rc = memcmp(&ndlp->nlp_portname, name, |
| sizeof(struct lpfc_name)); |
| if (!rc) { |
| if (active_rrqs_xri_bitmap) |
| mempool_free(active_rrqs_xri_bitmap, |
| phba->active_rrq_pool); |
| return ndlp; |
| } |
| new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID); |
| if (!new_ndlp) { |
| if (active_rrqs_xri_bitmap) |
| mempool_free(active_rrqs_xri_bitmap, |
| phba->active_rrq_pool); |
| return ndlp; |
| } |
| } else if (!NLP_CHK_NODE_ACT(new_ndlp)) { |
| rc = memcmp(&ndlp->nlp_portname, name, |
| sizeof(struct lpfc_name)); |
| if (!rc) { |
| if (active_rrqs_xri_bitmap) |
| mempool_free(active_rrqs_xri_bitmap, |
| phba->active_rrq_pool); |
| return ndlp; |
| } |
| new_ndlp = lpfc_enable_node(vport, new_ndlp, |
| NLP_STE_UNUSED_NODE); |
| if (!new_ndlp) { |
| if (active_rrqs_xri_bitmap) |
| mempool_free(active_rrqs_xri_bitmap, |
| phba->active_rrq_pool); |
| return ndlp; |
| } |
| keepDID = new_ndlp->nlp_DID; |
| if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap) |
| memcpy(active_rrqs_xri_bitmap, |
| new_ndlp->active_rrqs_xri_bitmap, |
| phba->cfg_rrq_xri_bitmap_sz); |
| } else { |
| keepDID = new_ndlp->nlp_DID; |
| if (phba->sli_rev == LPFC_SLI_REV4 && |
| active_rrqs_xri_bitmap) |
| memcpy(active_rrqs_xri_bitmap, |
| new_ndlp->active_rrqs_xri_bitmap, |
| phba->cfg_rrq_xri_bitmap_sz); |
| } |
| |
| lpfc_unreg_rpi(vport, new_ndlp); |
| new_ndlp->nlp_DID = ndlp->nlp_DID; |
| new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; |
| if (phba->sli_rev == LPFC_SLI_REV4) |
| memcpy(new_ndlp->active_rrqs_xri_bitmap, |
| ndlp->active_rrqs_xri_bitmap, |
| phba->cfg_rrq_xri_bitmap_sz); |
| |
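| /* Swap the nlp_flag values between the two nodes */ |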
| spin_lock_irq(shost->host_lock); |
| keep_nlp_flag = new_ndlp->nlp_flag; |
| new_ndlp->nlp_flag = ndlp->nlp_flag; |
| ndlp->nlp_flag = keep_nlp_flag; |
| spin_unlock_irq(shost->host_lock); |
| |
| /* Set nlp_states accordingly */ |
| keep_nlp_state = new_ndlp->nlp_state; |
| lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); |
| |
| /* interchange the nvme remoteport structs */ |
| keep_nrport = new_ndlp->nrport; |
| new_ndlp->nrport = ndlp->nrport; |
| |
| /* Move this back to NPR state */ |
| if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { |
| /* The new_ndlp is replacing ndlp totally, so we need |
| * to put ndlp on UNUSED list and try to free it. |
| */ |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
| "3179 PLOGI confirm NEW: %x %x\n", |
| new_ndlp->nlp_DID, keepDID); |
| |
| /* Fix up the rport accordingly */ |
| rport = ndlp->rport; |
| if (rport) { |
| rdata = rport->dd_data; |
| if (rdata->pnode == ndlp) { |
| /* break the link before dropping the ref */ |
| ndlp->rport = NULL; |
| lpfc_nlp_put(ndlp); |
| rdata->pnode = lpfc_nlp_get(new_ndlp); |
| new_ndlp->rport = rport; |
| } |
| new_ndlp->nlp_type = ndlp->nlp_type; |
| } |
| |
| /* Fix up the nvme rport */ |
| if (ndlp->nrport) { |
| ndlp->nrport = NULL; |
| lpfc_nlp_put(ndlp); |
| } |
| |
| /* Request that the ndlp be freed once both its nlp_DID and |
| * nlp_portname fields are 0, so an ndlp that can never be used |
| * again is not left on the nodelist. |
| */ |
| if (ndlp->nlp_DID == 0) { |
| spin_lock_irq(&phba->ndlp_lock); |
| NLP_SET_FREE_REQ(ndlp); |
| spin_unlock_irq(&phba->ndlp_lock); |
| } |
| |
| /* Two ndlps cannot have the same did on the nodelist */ |
| ndlp->nlp_DID = keepDID; |
| if (phba->sli_rev == LPFC_SLI_REV4 && |
| active_rrqs_xri_bitmap) |
| memcpy(ndlp->active_rrqs_xri_bitmap, |
| active_rrqs_xri_bitmap, |
| phba->cfg_rrq_xri_bitmap_sz); |
| |
| if (!NLP_CHK_NODE_ACT(ndlp)) |
| lpfc_drop_node(vport, ndlp); |
| } else { |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
| "3180 PLOGI confirm SWAP: %x %x\n", |
| new_ndlp->nlp_DID, keepDID); |
| |
| lpfc_unreg_rpi(vport, ndlp); |
| |
| /* Two ndlps cannot have the same did */ |
| ndlp->nlp_DID = keepDID; |
| if (phba->sli_rev == LPFC_SLI_REV4 && |
| active_rrqs_xri_bitmap) |
| memcpy(ndlp->active_rrqs_xri_bitmap, |
| active_rrqs_xri_bitmap, |
| phba->cfg_rrq_xri_bitmap_sz); |
| |
| /* Since we are switching over to the new_ndlp, |
| * reset the old ndlp state |
| */ |
| if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || |
| (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) |
| keep_nlp_state = NLP_STE_NPR_NODE; |
| lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); |
| |
| /* Previous ndlp no longer active with nvme host transport. |
| * Remove reference from earlier registration unless the |
| * nvme host took care of it. |
| */ |
| if (ndlp->nrport) |
| lpfc_nlp_put(ndlp); |
| ndlp->nrport = keep_nrport; |
| |
| /* Fix up the rport accordingly */ |
| rport = ndlp->rport; |
| if (rport) { |
| rdata = rport->dd_data; |
| put_node = rdata->pnode != NULL; |
| put_rport = ndlp->rport != NULL; |
| rdata->pnode = NULL; |
| ndlp->rport = NULL; |
| if (put_node) |
| lpfc_nlp_put(ndlp); |
| if (put_rport) |
| put_device(&rport->dev); |
| } |
| } |
| if (phba->sli_rev == LPFC_SLI_REV4 && |
| active_rrqs_xri_bitmap) |
| mempool_free(active_rrqs_xri_bitmap, |
| phba->active_rrq_pool); |
| return new_ndlp; |
| } |
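| |
| /* |
|  * A note on the SLI4 RRQ handling above: the displaced node's |
|  * active_rrqs_xri_bitmap is parked in a scratch buffer from |
|  * active_rrq_pool while the DIDs are swapped, then copied back. A |
|  * minimal sketch of that save/restore pattern (illustration only, |
|  * not additional driver code; "node" and "scratch" are hypothetical): |
|  * |
|  *	scratch = mempool_alloc(phba->active_rrq_pool, GFP_KERNEL); |
|  *	if (scratch) |
|  *		memcpy(scratch, node->active_rrqs_xri_bitmap, |
|  *		       phba->cfg_rrq_xri_bitmap_sz); |
|  *	... swap the nlp_DID values ... |
|  *	if (scratch) { |
|  *		memcpy(node->active_rrqs_xri_bitmap, scratch, |
|  *		       phba->cfg_rrq_xri_bitmap_sz); |
|  *		mempool_free(scratch, phba->active_rrq_pool); |
|  *	} |
|  */ |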
| |
| /** |
| * lpfc_end_rscn - Check and handle more rscn for a vport |
| * @vport: pointer to a host virtual N_Port data structure. |
| * |
| * This routine checks whether more Registration State Change |
| * Notifications (RSCNs) came in while the discovery state machine was in |
| * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be |
| * invoked to handle the additional RSCNs for the @vport. Otherwise, the |
| * FC_RSCN_MODE bit will be cleared on the @vport to mark the end of |
| * RSCN handling. |
| **/ |
| void |
| lpfc_end_rscn(struct lpfc_vport *vport) |
| { |
| struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
| |
| if (vport->fc_flag & FC_RSCN_MODE) { |
| /* |
| * Check to see if more RSCNs came in while we were |
| * processing this one. |
| */ |
| if (vport->fc_rscn_id_cnt || |
| (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) |
| lpfc_els_handle_rscn(vport); |
| else { |
| spin_lock_irq(shost->host_lock); |
| vport->fc_flag &= ~FC_RSCN_MODE; |
| spin_unlock_irq(shost->host_lock); |
| } |
| } |
| } |
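| |
| /* |
|  * Usage sketch (mirrors the completion paths later in this file): a |
|  * discovery completion handler ends RSCN processing once its last |
|  * outstanding node completes: |
|  * |
|  *	if (vport->num_disc_nodes == 0) { |
|  *		lpfc_can_disctmo(vport); |
|  *		lpfc_end_rscn(vport); |
|  *	} |
|  */ |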
| |
| /** |
| * lpfc_cmpl_els_rrq - Completion handler for els RRQs. |
| * @phba: pointer to lpfc hba data structure. |
| * @cmdiocb: pointer to lpfc command iocb data structure. |
| * @rspiocb: pointer to lpfc response iocb data structure. |
| * |
| * This routine will call the clear rrq function to free the rrq and |
| * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not |
| * exist then the clear_rrq is still called because the rrq needs to |
| * be freed. |
| **/ |
| |
| static void |
| lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
| struct lpfc_iocbq *rspiocb) |
| { |
| struct lpfc_vport *vport = cmdiocb->vport; |
| IOCB_t *irsp; |
| struct lpfc_nodelist *ndlp; |
| struct lpfc_node_rrq *rrq; |
| |
| /* we pass cmdiocb to state machine which needs rspiocb as well */ |
| rrq = cmdiocb->context_un.rrq; |
| cmdiocb->context_un.rsp_iocb = rspiocb; |
| |
| irsp = &rspiocb->iocb; |
| lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
| "RRQ cmpl: status:x%x/x%x did:x%x", |
| irsp->ulpStatus, irsp->un.ulpWord[4], |
| irsp->un.elsreq64.remoteID); |
| |
| ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); |
| if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) { |
| lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
| "2882 RRQ completes to NPort x%x " |
| "with no ndlp. Data: x%x x%x x%x\n", |
| irsp->un.elsreq64.remoteID, |
| irsp->ulpStatus, irsp->un.ulpWord[4], |
| irsp->ulpIoTag); |
| goto out; |
| } |
| |
| /* rrq completes to NPort <nlp_DID> */ |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
| "2880 RRQ completes to NPort x%x " |
| "Data: x%x x%x x%x x%x x%x\n", |
| ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], |
| irsp->ulpTimeout, rrq->xritag, rrq->rxid); |
| |
| if (irsp->ulpStatus) { |
| /* Check for retry */ |
| /* RRQ failed. Don't print the vport-to-vport LS_RJTs */ |
| if (irsp->ulpStatus != IOSTAT_LS_RJT || |
| (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && |
| ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || |
| (phba)->pport->cfg_log_verbose & LOG_ELS) |
| lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
| "2881 RRQ failure DID:%06X Status:x%x/x%x\n", |
| ndlp->nlp_DID, irsp->ulpStatus, |
| irsp->un.ulpWord[4]); |
| } |
| out: |
| if (rrq) |
| lpfc_clr_rrq_active(phba, rrq->xritag, rrq); |
| lpfc_els_free_iocb(phba, cmdiocb); |
| return; |
| } |
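| |
| /* |
|  * For reference, the LS_RJT screening above (also used by the PLOGI |
|  * completion below) takes the reject reason code from the upper bits |
|  * of ulpWord[4]. A minimal sketch of the same test ("rsn" is a |
|  * hypothetical local, shown for illustration only): |
|  * |
|  *	uint32_t rsn = irsp->un.ulpWord[4] >> 16; |
|  * |
|  *	if (irsp->ulpStatus == IOSTAT_LS_RJT && |
|  *	    (rsn == LSRJT_INVALID_CMD || rsn == LSRJT_UNABLE_TPC) && |
|  *	    !(phba->pport->cfg_log_verbose & LOG_ELS)) |
|  *		return;		nothing is logged for these rejects |
|  */ |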
| /** |
| * lpfc_cmpl_els_plogi - Completion callback function for plogi |
| * @phba: pointer to lpfc hba data structure. |
| * @cmdiocb: pointer to lpfc command iocb data structure. |
| * @rspiocb: pointer to lpfc response iocb data structure. |
| * |
| * This routine is the completion callback function for issuing the Port |
| * Login (PLOGI) command. For PLOGI completion, there must be an active |
| * ndlp on the vport node list that matches the remote node ID from the |
| * PLOGI response IOCB. If no such ndlp exists, the PLOGI is simply |
| * ignored and the command IOCB is released. The PLOGI response IOCB |
| * status is checked for error conditions. If an error status is |
| * reported, a PLOGI retry shall be attempted by invoking the |
| * lpfc_els_retry() routine. Otherwise, the lpfc_plogi_confirm_nport() |
| * routine shall be invoked on the ndlp and the NLP_EVT_CMPL_PLOGI event |
| * is sent to the Discover State Machine (DSM) for this PLOGI completion. |
| * Finally, it checks whether there are additional N_Port nodes on the |
| * vport that need to perform PLOGI. If so, the lpfc_more_plogi() routine |
| * is invoked to issue additional PLOGIs. |
| **/ |
| static void |
| lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
| struct lpfc_iocbq *rspiocb) |
| { |
| struct lpfc_vport *vport = cmdiocb->vport; |
| struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
| IOCB_t *irsp; |
| struct lpfc_nodelist *ndlp; |
| struct lpfc_dmabuf *prsp; |
| int disc, rc; |
| |
| /* we pass cmdiocb to state machine which needs rspiocb as well */ |
| cmdiocb->context_un.rsp_iocb = rspiocb; |
| |
| irsp = &rspiocb->iocb; |
| lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
| "PLOGI cmpl: status:x%x/x%x did:x%x", |
| irsp->ulpStatus, irsp->un.ulpWord[4], |
| irsp->un.elsreq64.remoteID); |
| |
| ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); |
| if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { |
| lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
| "0136 PLOGI completes to NPort x%x " |
| "with no ndlp. Data: x%x x%x x%x\n", |
| irsp->un.elsreq64.remoteID, |
| irsp->ulpStatus, irsp->un.ulpWord[4], |
| irsp->ulpIoTag); |
| goto out; |
| } |
| |
| /* Since ndlp can be freed in the disc state machine, note if this node |
| * is being used during discovery. |
| */ |
| spin_lock_irq(shost->host_lock); |
| disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); |
| ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; |
| spin_unlock_irq(shost->host_lock); |
| rc = 0; |
| |
| /* PLOGI completes to NPort <nlp_DID> */ |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
| "0102 PLOGI completes to NPort x%06x " |
| "Data: x%x x%x x%x x%x x%x\n", |
| ndlp->nlp_DID, ndlp->nlp_fc4_type, |
| irsp->ulpStatus, irsp->un.ulpWord[4], |
| disc, vport->num_disc_nodes); |
| |
| /* Check to see if link went down during discovery */ |
| if (lpfc_els_chk_latt(vport)) { |
| spin_lock_irq(shost->host_lock); |
| ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
| spin_unlock_irq(shost->host_lock); |
| goto out; |
| } |
| |
| if (irsp->ulpStatus) { |
| /* Check for retry */ |
| if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { |
| /* ELS command is being retried */ |
| if (disc) { |
| spin_lock_irq(shost->host_lock); |
| ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
| spin_unlock_irq(shost->host_lock); |
| } |
| goto out; |
| } |
| /* PLOGI failed. Don't print the vport-to-vport LS_RJTs */ |
| if (irsp->ulpStatus != IOSTAT_LS_RJT || |
| (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && |
| ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || |
| (phba)->pport->cfg_log_verbose & LOG_ELS) |
| lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
| "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", |
| ndlp->nlp_DID, irsp->ulpStatus, |
| irsp->un.ulpWord[4]); |
| /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ |
| if (lpfc_error_lost_link(irsp)) |
| rc = NLP_STE_FREED_NODE; |
| else |
| rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
| NLP_EVT_CMPL_PLOGI); |
| } else { |
| /* Good status, call state machine */ |
| prsp = list_entry(((struct lpfc_dmabuf *) |
| cmdiocb->context2)->list.next, |
| struct lpfc_dmabuf, list); |
| ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); |
| rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
| NLP_EVT_CMPL_PLOGI); |
| } |
| |
| if (disc && vport->num_disc_nodes) { |
| /* Check to see if there are more PLOGIs to be sent */ |
| lpfc_more_plogi(vport); |
| |
| if (vport->num_disc_nodes == 0) { |
| spin_lock_irq(shost->host_lock); |
| vport->fc_flag &= ~FC_NDISC_ACTIVE; |
| spin_unlock_irq(shost->host_lock); |
| |
| lpfc_can_disctmo(vport); |
| lpfc_end_rscn(vport); |
| } |
| } |
| |
| out: |
| lpfc_els_free_iocb(phba, cmdiocb); |
| return; |
| } |
| |
| /** |
| * lpfc_issue_els_plogi - Issue a plogi iocb command for a vport |
| * @vport: pointer to a host virtual N_Port data structure. |
| * @did: destination port identifier. |
| * @retry: number of retries to the command IOCB. |
| * |
| * This routine issues a Port Login (PLOGI) command to a remote N_Port |
| * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, |
| * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. |
| * This routine constructs the proper fields of the PLOGI IOCB and invokes |
| * the lpfc_sli_issue_iocb() routine to send out the PLOGI ELS command. |
| * |
| * Note that, in the lpfc_prep_els_iocb() routine, the reference count of ndlp |
| * will be incremented by 1 for holding the ndlp and the reference to ndlp |
| * will be stored into the context1 field of the IOCB for the completion |
| * callback function to the PLOGI ELS command. |
| * |
| * Return code |
| * 0 - Successfully issued a plogi for @vport |
| * 1 - failed to issue a plogi for @vport |
| **/ |
| int |
| lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) |
| { |
| struct lpfc_hba *phba = vport->phba; |
| struct Scsi_Host *shost; |
| struct serv_parm *sp; |
| struct lpfc_nodelist *ndlp; |
| struct lpfc_iocbq *elsiocb; |
| uint8_t *pcmd; |
| uint16_t cmdsize; |
| int ret; |
| |
| ndlp = lpfc_findnode_did(vport, did); |
| if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) |
| ndlp = NULL; |
| |
| /* If ndlp is not NULL, we will bump the reference count on it */ |
| cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); |
| elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, |
| ELS_CMD_PLOGI); |
| if (!elsiocb) |
| return 1; |
| |
| shost = lpfc_shost_from_vport(vport); |
| spin_lock_irq(shost->host_lock); |
| ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT; |
| spin_unlock_irq(shost->host_lock); |
| |
| pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); |
| |
| /* For PLOGI request, remainder of payload is service parameters */ |
| *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; |
| pcmd += sizeof(uint32_t); |
| memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); |
| sp = (struct serv_parm *) pcmd; |
| |
| /* |
| * If we are an N_Port connected to a Fabric, fix up the parameters so |
| * logins to devices on remote loops work. |
| */ |
| if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) |
| sp->cmn.altBbCredit = 1; |
| |
| if (sp->cmn.fcphLow < FC_PH_4_3) |
| sp->cmn.fcphLow = FC_PH_4_3; |
| |
| if (sp->cmn.fcphHigh < FC_PH3) |
| sp->cmn.fcphHigh = FC_PH3; |
| |
| sp->cmn.valid_vendor_ver_level = 0; |
| memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); |
| sp->cmn.bbRcvSizeMsb &= 0xF; |
| |
| lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
| "Issue PLOGI: did:x%x", |
| did, 0, 0); |
| |
| /* If our firmware supports this feature, convey that |
| * information to the target using the vendor specific field. |
| */ |
| if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { |
| sp->cmn.valid_vendor_ver_level = 1; |
| sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); |
| sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); |
| } |
| |
| phba->fc_stat.elsXmitPLOGI++; |
| elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; |
| ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); |
| |
| if (ret == IOCB_ERROR) { |
| lpfc_els_free_iocb(phba, elsiocb); |
| return 1; |
| } |
| return 0; |
| } |
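| |
| /* |
|  * Usage sketch (hypothetical caller, following the 0/1 return |
|  * convention documented above): |
|  * |
|  *	if (lpfc_issue_els_plogi(vport, did, 0)) |
|  *		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
|  *				 "PLOGI to x%x could not be issued\n", |
|  *				 did); |
|  */ |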
| |
| /** |
| * lpfc_cmpl_els_prli - Completion callback function for prli |
| * @phba: pointer to lpfc hba data structure. |
| * @cmdiocb: pointer to lpfc command iocb data structure. |
| * @rspiocb: pointer to lpfc response iocb data structure. |
| * |
| * This routine is the completion callback function for a Process Login |
| * (PRLI) ELS command. The PRLI response IOCB status is checked for error |
| * status. If an error status is reported, a PRLI retry shall be attempted |
| * by invoking the lpfc_els_retry() routine. Otherwise, the |
| * NLP_EVT_CMPL_PRLI event is sent to the Discover State Machine (DSM) |
| * for this ndlp to mark the PRLI completion. |
| **/ |
| static void |
| lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
| struct lpfc_iocbq *rspiocb) |
| { |
| struct lpfc_vport *vport = cmdiocb->vport; |
| struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
| IOCB_t *irsp; |
| struct lpfc_nodelist *ndlp; |
| |
| /* we pass cmdiocb to state machine which needs rspiocb as well */ |
| cmdiocb->context_un.rsp_iocb = rspiocb; |
| |
| irsp = &(rspiocb->iocb); |
| ndlp = (struct lpfc_nodelist *) cmdiocb->context1; |
| spin_lock_irq(shost->host_lock); |
| ndlp->nlp_flag &= ~NLP_PRLI_SND; |
| spin_unlock_irq(shost->host_lock); |
| |
| lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
| "PRLI cmpl: status:x%x/x%x did:x%x", |
| irsp->ulpStatus, irsp->un.ulpWord[4], |
| ndlp->nlp_DID); |
| |
| /* The driver supports multiple FC4 types. Counters matter. */ |
| vport->fc_prli_sent--; |
| |
| /* PRLI completes to NPort <nlp_DID> */ |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
| "0103 PRLI completes to NPort x%06x " |
| "Data: x%x x%x x%x x%x\n", |
| ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], |
| vport->num_disc_nodes, ndlp->fc4_prli_sent); |
| |
| /* Check to see if link went down during discovery */ |
| if (lpfc_els_chk_latt(vport)) |
| goto out; |
| |
| if (irsp->ulpStatus) { |
| /* Check for retry */ |
| ndlp->fc4_prli_sent--; |
| if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { |
| /* ELS command is being retried */ |
| goto out; |
| } |
| |
| /* PRLI failed */ |
| lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
| "2754 PRLI failure DID:%06X Status:x%x/x%x, " |
| "data: x%x\n", |
| ndlp->nlp_DID, irsp->ulpStatus, |
| irsp->un.ulpWord[4], ndlp->fc4_prli_sent); |
| |
| /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ |
| if (lpfc_error_lost_link(irsp)) |
| goto out; |
| else |
| lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
| NLP_EVT_CMPL_PRLI); |
| } else |
| /* Good status, call state machine. However, if another |
| * PRLI is outstanding, don't call the state machine |
| * because final disposition to Mapped or Unmapped is |
| * completed there. |
| */ |
| lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
| NLP_EVT_CMPL_PRLI); |
| |
| out: |
| lpfc_els_free_iocb(phba, cmdiocb); |
| return; |
| } |
| |
| /** |
| * lpfc_issue_els_prli - Issue a prli iocb command for a vport |
| * @vport: pointer to a host virtual N_Port data structure. |
| * @ndlp: pointer to a node-list data structure. |
| * @retry: number of retries to the command IOCB. |
| * |
| * This routine issues a Process Login (PRLI) ELS command for the |
| * @vport. The PRLI service parameters are set up in the payload of the |
| * PRLI Request command, and the pointer to the lpfc_cmpl_els_prli() |
| * routine is put into the IOCB completion callback function field before |
| * invoking lpfc_sli_issue_iocb() to send out the PRLI command. |
| * |
| * Note that, in the lpfc_prep_els_iocb() routine, the reference count of ndlp |
| * will be incremented by 1 for holding the ndlp and the reference to ndlp |
| * will be stored into the context1 field of the IOCB for the completion |
| * callback function to the PRLI ELS command. |
| * |
| * Return code |
| * 0 - successfully issued prli iocb command for @vport |
| * 1 - failed to issue prli iocb command for @vport |
| **/ |
| int |
| lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
| uint8_t retry) |
| { |
| struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
| struct lpfc_hba *phba = vport->phba; |
| PRLI *npr; |
| struct lpfc_nvme_prli *npr_nvme; |
| struct lpfc_iocbq *elsiocb; |
| uint8_t *pcmd; |
| uint16_t cmdsize; |
| u32 local_nlp_type, elscmd; |
| |
| /* |
| * If we are in RSCN mode, the FC4 types supported from a |
| * previous GFT_ID command may not be accurate. So, if we |
| * are an NVME Initiator, always look for the possibility of |
| * the remote NPort being an NVME Target. |
| */ |
| if (phba->sli_rev == LPFC_SLI_REV4 && |
| vport->fc_flag & FC_RSCN_MODE && |
| vport->nvmei_support) |
| ndlp->nlp_fc4_type |= NLP_FC4_NVME; |
| local_nlp_type = ndlp->nlp_fc4_type; |
| |
| send_next_prli: |
| if (local_nlp_type & NLP_FC4_FCP) { |
| /* Payload is 4 + 16 = 20 (0x14) bytes. */ |
| cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); |
| elscmd = ELS_CMD_PRLI; |
| } else if (local_nlp_type & NLP_FC4_NVME) { |
| /* Payload is 4 + 20 = 24 (0x18) bytes. */ |
| cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); |
| elscmd = ELS_CMD_NVMEPRLI; |
| } else { |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
| "3083 Unknown FC_TYPE x%x ndlp x%06x\n", |
| ndlp->nlp_fc4_type, ndlp->nlp_DID); |
| return 1; |
| } |
| |
| /* SLI3 ports don't support NVME. If this rport is a strict NVME |
| * FC4 type, implicitly LOGO. |
| */ |
| if (phba->sli_rev == LPFC_SLI_REV3 && |
| ndlp->nlp_fc4_type == NLP_FC4_NVME) { |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
| "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", |
| ndlp->nlp_fc4_type); |
| lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); |
| return 1; |
| } |
| |
| elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, |
| ndlp->nlp_DID, elscmd); |
| if (!elsiocb) |
| return 1; |
| |
| pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); |
| |
| /* For PRLI request, remainder of payload is service parameters */ |
| memset(pcmd, 0, cmdsize); |
| |
| if (local_nlp_type & NLP_FC4_FCP) { |
| /* Remainder of payload is FCP PRLI parameter page. |
| * Note: this data structure is defined as |
| * BE/LE in the structure definition so no |
| * byte swap call is made. |
| */ |
| *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; |
| pcmd += sizeof(uint32_t); |
| npr = (PRLI *)pcmd; |
| |
| /* |
| * If our firmware version is 3.20 or later, |
| * set the following bits for FC-TAPE support. |
| */ |
| if (phba->vpd.rev.feaLevelHigh >= 0x02) { |
| npr->ConfmComplAllowed = 1; |
| npr->Retry = 1; |
| npr->TaskRetryIdReq = 1; |
| } |
| npr->estabImagePair = 1; |
| npr->readXferRdyDis = 1; |
| if (vport->cfg_first_burst_size) |
| npr->writeXferRdyDis = 1; |
| |
| /* For FCP support */ |
| npr->prliType = PRLI_FCP_TYPE; |
| npr->initiatorFunc = 1; |
| elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ; |
| |
| /* Remove FCP type - processed. */ |
| local_nlp_type &= ~NLP_FC4_FCP; |
| } else if (local_nlp_type & NLP_FC4_NVME) { |
| /* Remainder of payload is NVME PRLI parameter page. |
| * This data structure is the newer definition that |
| * uses bf macros so a byte swap is required. |
| */ |
| *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; |
| pcmd += sizeof(uint32_t); |
| npr_nvme = (struct lpfc_nvme_prli *)pcmd; |
| bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); |
| bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ |
| |
| /* Only initiators request first burst. */ |
| if ((phba->cfg_nvme_enable_fb) && |
| !phba->nvmet_support) |
| bf_set(prli_fba, npr_nvme, 1); |
| |
| if (phba->nvmet_support) { |
| bf_set(prli_tgt, npr_nvme, 1); |
| bf_set(prli_disc, npr_nvme, 1); |
| |
| } else { |
| bf_set(prli_init, npr_nvme, 1); |
| } |
| npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); |
| npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); |
| elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ; |
| |
| /* Remove NVME type - processed. */ |
| local_nlp_type &= ~NLP_FC4_NVME; |
| } |
| |
| lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
| "Issue PRLI: did:x%x", |
| ndlp->nlp_DID, 0, 0); |
| |
| phba->fc_stat.elsXmitPRLI++; |
| elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; |
| spin_lock_irq(shost->host_lock); |
| ndlp->nlp_flag |= NLP_PRLI_SND; |
| spin_unlock_irq(shost->host_lock); |
| if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == |
| IOCB_ERROR) { |
| spin_lock_irq(shost->host_lock); |
| ndlp->nlp_flag &= ~NLP_PRLI_SND; |
| spin_unlock_irq(shost->host_lock); |
| lpfc_els_free_iocb(phba, elsiocb); |
| return 1; |
| } |
| |
| /* The vport counters are used for lpfc_scan_finished, but |
| * the ndlp is used to track outstanding PRLIs for different |
| * FC4 types. |
| */ |
| vport->fc_prli_sent++; |
| ndlp->fc4_prli_sent++; |
| |
| /* The driver supports 2 FC4 types. Make sure |
| * a PRLI is issued for all types before exiting. |
| */ |
| if (phba->sli_rev == LPFC_SLI_REV4 && |
| local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) |
| goto send_next_prli; |
| |
| return 0; |
| } |
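| |
| /* |
|  * Worked example: on an SLI4 port, a node whose nlp_fc4_type carries |
|  * both NLP_FC4_FCP and NLP_FC4_NVME makes the routine above run the |
|  * send_next_prli loop twice, issuing ELS_CMD_PRLI first and then |
|  * ELS_CMD_NVMEPRLI. A hypothetical caller sees only one call: |
|  * |
|  *	ndlp->nlp_fc4_type = NLP_FC4_FCP | NLP_FC4_NVME; |
|  *	if (lpfc_issue_els_prli(vport, ndlp, 0)) |
|  *		... the PRLI attempt failed ... |
|  */ |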
| |
| /** |
| * lpfc_rscn_disc - Perform rscn discovery for a vport |
| * @vport: pointer to a host virtual N_Port data structure. |
| * |
| * This routine performs Registration State Change Notification (RSCN) |
| * discovery for a @vport. If the @vport's node port recovery count is not |
| * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all |
| * the nodes that need recovery. If no PLOGIs were needed through the |
| * lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be |
| * invoked to check for and handle any additional RSCNs that came in |
| * while the current ones were being processed. |
| **/ |
| static void |
| lpfc_rscn_disc(struct lpfc_vport *vport) |
| { |
| lpfc_can_disctmo(vport); |
| |
| /* RSCN discovery */ |
| /* go thru NPR nodes and issue ELS PLOGIs */ |
| if (vport->fc_npr_cnt) |
| if (lpfc_els_disc_plogi(vport)) |
| return; |
| |
| lpfc_end_rscn(vport); |
| } |
| |
| /** |
| * lpfc_adisc_done - Complete the adisc phase of discovery |
| * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. |
| * |
| * This function is called when the final ADISC is completed during discovery. |
| * This function handles clearing link attention or issuing reg_vpi depending |
| * on whether npiv is enabled. This function also kicks off the PLOGI phase of |
| * discovery. |
| * This function is called with no locks held. |
| **/ |
| static void |
| lpfc_adisc_done(struct lpfc_vport *vport) |
| { |
| struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
| struct lpfc_hba *phba = vport->phba; |
| |
| /* |
| * For NPIV, cmpl_reg_vpi will set port_state to READY, |
| * and continue discovery. |
| */ |
| if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && |
| !(vport->fc_flag & FC_RSCN_MODE) && |
| (phba->sli_rev < LPFC_SLI_REV4)) { |
| /* The ADISCs are complete. Doesn't matter if they |
| * succeeded or failed because the ADISC completion |
| * routine guarantees to call the state machine and |
| * the RPI is either unregistered (failed ADISC response) |
| * or the RPI is still valid and the node is marked |
| * mapped for a target. The exchanges should be in the |
| * correct state. This code is specific to SLI3. |
| */ |
| lpfc_issue_clear_la(phba, vport); |
| lpfc_issue_reg_vpi(phba, vport); |
| return; |
| } |
| /* |
| * For SLI2, we need to set port_state to READY |
| * and continue discovery. |
| */ |
| if (vport->port_state < LPFC_VPORT_READY) { |
| /* If we get here, there is nothing to ADISC */ |
| lpfc_issue_clear_la(phba, vport); |
| if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { |
| vport->num_disc_nodes = 0; |
| /* go thru NPR list, issue ELS PLOGIs */ |
| if (vport->fc_npr_cnt) |
| lpfc_els_disc_plogi(vport); |
| if (!vport->num_disc_nodes) { |
| spin_lock_irq(shost->host_lock); |
| vport->fc_flag &= ~FC_NDISC_ACTIVE; |
| spin_unlock_irq(shost->host_lock); |
| lpfc_can_disctmo(vport); |
| lpfc_end_rscn(vport); |
| } |
| } |
| vport->port_state = LPFC_VPORT_READY; |
| } else |
| lpfc_rscn_disc(vport); |
| } |
| |
| /** |
| * lpfc_more_adisc - Issue more adisc as needed |
| * @vport: pointer to a host virtual N_Port data structure. |
| * |
| * This routine determines whether there are more ndlps on a @vport's |
| * node list that need to have an Address Discover (ADISC) issued. If so, |
| * it will invoke the lpfc_els_disc_adisc() routine to issue ADISC on the |
| * @vport's remaining nodes which need to have ADISC sent. |
| **/ |
| void |
| lpfc_more_adisc(struct lpfc_vport *vport) |
| { |
| if (vport->num_disc_nodes) |
| vport->num_disc_nodes--; |
| /* Continue discovery with <num_disc_nodes> ADISCs to go */ |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
| "0210 Continue discovery with %d ADISCs to go " |
| "Data: x%x x%x x%x\n", |
| vport->num_disc_nodes, vport->fc_adisc_cnt, |
| vport->fc_flag, vport->port_state); |
| /* Check to see if there are more ADISCs to be sent */ |
| if (vport->fc_flag & FC_NLP_MORE) { |
| lpfc_set_disctmo(vport); |
| /* go thru NPR nodes and issue any remaining ELS ADISCs */ |
| lpfc_els_disc_adisc(vport); |
| } |
| if (!vport->num_disc_nodes) |
| lpfc_adisc_done(vport); |
| return; |
| } |
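| |
| /* |
|  * Usage sketch: ADISC completion paths drive this routine, exactly as |
|  * the handler below does once a node finishes: |
|  * |
|  *	if (disc && vport->num_disc_nodes) |
|  *		lpfc_more_adisc(vport); |
|  */ |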
| |
| /** |
| * lpfc_cmpl_els_adisc - Completion callback function for adisc |
| * @phba: pointer to lpfc hba data structure. |
| * @cmdiocb: pointer to lpfc command iocb data structure. |
| * @rspiocb: pointer to lpfc response iocb data structure. |
| * |
| * This routine is the completion function for issuing the Address Discover |
| * (ADISC) command. It first checks to see whether the link went down |
| * during the discovery process. If so, the node will be marked for node |
| * port recovery so that the link attention handler can reissue the |
| * discovery IOCB, and the routine exits. Otherwise, the response status |
| * is checked. If an error was reported, the ADISC command shall be |
| * retried by invoking |
| * the lpfc_els_retry() routine. Otherwise, if no error was reported in |
| * the response status, the state machine is invoked to set transition |
| * with respect to the NLP_EVT_CMPL_ADISC event. |
| **/ |
| static void |
| lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
| struct lpfc_iocbq *rspiocb) |
| { |
| struct lpfc_vport *vport = cmdiocb->vport; |
| struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
| IOCB_t *irsp; |
| struct lpfc_nodelist *ndlp; |
| int disc; |
| |
| /* we pass cmdiocb to state machine which needs rspiocb as well */ |
| cmdiocb->context_un.rsp_iocb = rspiocb; |
| |
| irsp = &(rspiocb->iocb); |
| ndlp = (struct lpfc_nodelist *) cmdiocb->context1; |
| |
| lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
| "ADISC cmpl: status:x%x/x%x did:x%x", |
| irsp->ulpStatus, irsp->un.ulpWord[4], |
| ndlp->nlp_DID); |
| |
| /* Since ndlp can be freed in the disc state machine, note if this node |
| * is being used during discovery. |
| */ |
| spin_lock_irq(shost->host_lock); |
| disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); |
| ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); |
| spin_unlock_irq(shost->host_lock); |
| /* ADISC completes to NPort <nlp_DID> */ |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
| "0104 ADISC completes to NPort x%x " |
| "Data: x%x x%x x%x x%x x%x\n", |
| ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], |
| irsp->ulpTimeout, disc, vport->num_disc_nodes); |
| /* Check to see if link went down during discovery */ |
| if (lpfc_els_chk_latt(vport)) { |
| spin_lock_irq(shost->host_lock); |
| ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
| spin_unlock_irq(shost->host_lock); |
| goto out; |
| } |
| |
| if (irsp->ulpStatus) { |
| /* Check for retry */ |
| if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { |
| /* ELS command is being retried */ |
| if (disc) { |
| spin_lock_irq(shost->host_lock); |
| ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
| spin_unlock_irq(shost->host_lock); |
| lpfc_set_disctmo(vport); |
| } |
| goto out; |
| } |
| /* ADISC failed */ |
| lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
| "2755 ADISC failure DID:%06X Status:x%x/x%x\n", |
| ndlp->nlp_DID, irsp->ulpStatus, |
| irsp->un.ulpWord[4]); |
| /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ |
| if (!lpfc_error_lost_link(irsp)) |
| lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
| NLP_EVT_CMPL_ADISC); |
| } else |
| /* Good status, call state machine */ |
| lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
| NLP_EVT_CMPL_ADISC); |
| |
| /* Check to see if there are more ADISCs to be sent */ |
| if (disc && vport->num_disc_nodes) |
| lpfc_more_adisc(vport); |
| out: |
| lpfc_els_free_iocb(phba, cmdiocb); |
| return; |
| } |
| |
| /** |
| * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport |
| * @vport: pointer to a virtual N_Port data structure. |
| * @ndlp: pointer to a node-list data structure. |
| * @retry: number of retries to the command IOCB. |
| * |
| * This routine issues an Address Discover (ADISC) for an @ndlp on a |
| * @vport. It prepares the payload of the ADISC ELS command, updates the |
| * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine |
| * to issue the ADISC ELS command. |
| * |
| * Note that, in the lpfc_prep_els_iocb() routine, the reference count of ndlp |
| * will be incremented by 1 for holding the ndlp and the reference to ndlp |
| * will be stored into the context1 field of the IOCB for the completion |
| * callback function to the ADISC ELS command. |
| * |
| * Return code |
| * 0 - successfully issued adisc |
| * 1 - failed to issue adisc |
| **/ |
| int |
| lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
| uint8_t retry) |
| { |
| struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
| struct lpfc_hba *phba = vport->phba; |
| ADISC *ap; |
| struct lpfc_iocbq *elsiocb; |
| uint8_t *pcmd; |
| uint16_t cmdsize; |
| |
| cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); |
| elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, |
| ndlp->nlp_DID, ELS_CMD_ADISC); |
| if (!elsiocb) |
| return 1; |
| |
| pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); |
| |
| /* For ADISC request, remainder of payload is service parameters */ |
| *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; |
| pcmd += sizeof(uint32_t); |
| |
| /* Fill in ADISC payload */ |
| ap = (ADISC *) pcmd; |
| ap->hardAL_PA = phba->fc_pref_ALPA; |
| memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); |
| memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); |
| ap->DID = be32_to_cpu(vport->fc_myDID); |
| |
| lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
| "Issue ADISC: did:x%x", |
| ndlp->nlp_DID, 0, 0); |
| |
| phba->fc_stat.elsXmitADISC++; |
| elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc; |
| spin_lock_irq(shost->host_lock); |
| ndlp->nlp_flag |= NLP_ADISC_SND; |
| spin_unlock_irq(shost->host_lock); |
| if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == |
| IOCB_ERROR) { |
| spin_lock_irq(shost->host_lock); |
| ndlp->nlp_flag &= ~NLP_ADISC_SND; |
| spin_unlock_irq(shost->host_lock); |
| lpfc_els_free_iocb(phba, elsiocb); |
| return 1; |
| } |
| return 0; |
| } |
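| |
| /* |
|  * Usage sketch (hypothetical caller, same 0/1 convention as the other |
|  * issue routines in this file): |
|  * |
|  *	if (lpfc_issue_els_adisc(vport, ndlp, 0)) |
|  *		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
|  *				 "ADISC to x%x could not be issued\n", |
|  *				 ndlp->nlp_DID); |
|  */ |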
| |
| /** |
| * lpfc_cmpl_els_logo - Completion callback function for logo |
| * @phba: pointer to lpfc hba data structure. |
| * @cmdiocb: pointer to lpfc command iocb data structure. |
| * @rspiocb: pointer to lpfc response iocb data structure. |
| * |
| * This routine is the completion function for issuing the ELS Logout (LOGO) |
| * command. If no error status was reported from the LOGO response, the |
| * state machine of the associated ndlp shall be invoked for transition with |
| * respect to the NLP_EVT_CMPL_LOGO event. Otherwise, if an error status was |
| * the lpfc_els_retry() routine will be invoked to retry the LOGO command. |
| **/ |
| static void |
| lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
| struct lpfc_iocbq *rspiocb) |
| { |
| struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; |
| struct lpfc_vport *vport = ndlp->vport; |
| struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
| IOCB_t *irsp; |
| struct lpfcMboxq *mbox; |
| unsigned long flags; |
| uint32_t skip_recovery = 0; |
| |
| /* we pass cmdiocb to state machine which needs rspiocb as well */ |
| cmdiocb->context_un.rsp_iocb = rspiocb; |
| |
| irsp = &(rspiocb->iocb); |
| spin_lock_irq(shost->host_lock); |
| ndlp->nlp_flag &= ~NLP_LOGO_SND; |
| spin_unlock_irq(shost->host_lock); |
| |
| lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
| "LOGO cmpl: status:x%x/x%x did:x%x", |
| irsp->ulpStatus, irsp->un.ulpWord[4], |
| ndlp->nlp_DID); |
| |
| /* LOGO completes to NPort <nlp_DID> */ |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
| "0105 LOGO completes to NPort x%x " |
| "Data: x%x x%x x%x x%x\n", |
| ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], |
| irsp->ulpTimeout, vport->num_disc_nodes); |
| |
| if (lpfc_els_chk_latt(vport)) { |
| skip_recovery = 1; |
| goto out; |
| } |
| |
| /* Check to see if link went down during discovery */ |
| if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { |
| /* NLP_EVT_DEVICE_RM should unregister the RPI |
| * which should abort all outstanding IOs. |
| */ |
| lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
| NLP_EVT_DEVICE_RM); |
| skip_recovery = 1; |
| goto out; |
| } |
| |
| if (irsp->ulpStatus) { |
| /* Check for retry */ |
| if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { |
| /* ELS command is being retried */ |
| skip_recovery = 1; |
| goto out; |
| } |
| /* LOGO failed */ |
| lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
| "2756 LOGO failure DID:%06X Status:x%x/x%x\n", |
| ndlp->nlp_DID, irsp->ulpStatus, |
| irsp->un.ulpWord[4]); |
| /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ |
| if (lpfc_error_lost_link(irsp)) { |
| skip_recovery = 1; |
| goto out; |
| } |
| } |
| |
| /* Call state machine. This will unregister the rpi if needed. */ |
| lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); |
| |
| out: |
| lpfc_els_free_iocb(phba, cmdiocb); |
| /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */ |
| if ((vport->fc_flag & FC_PT2PT) && |
| !(vport->fc_flag & FC_PT2PT_PLOGI)) { |
| phba->pport->fc_myDID = 0; |
| |
| if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || |
| (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { |
| if (phba->nvmet_support) |
| lpfc_nvmet_update_targetport(phba); |
| else |
| lpfc_nvme_update_localport(phba->pport); |
| } |
| |
| mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| if (mbox) { |
| lpfc_config_link(phba, mbox); |
| mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
| mbox->vport = vport; |
| if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == |
| MBX_NOT_FINISHED) { |
| mempool_free(mbox, phba->mbox_mem_pool); |
| skip_recovery = 1; |
| } |
| } |
| } |
| |
| /* |
| * If the node is a target, the handling attempts to recover the port. |
| * For any other port type, the rpi is unregistered as an implicit |
| * LOGO. |
| */ |
| if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) { |
| lpfc_cancel_retry_delay_tmo(vport, ndlp); |
| spin_lock_irqsave(shost->host_lock, flags); |
| ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
| spin_unlock_irqrestore(shost->host_lock, flags); |
| |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
| "3187 LOGO completes to NPort x%x: Start " |
| "Recovery Data: x%x x%x x%x x%x\n", |
| ndlp->nlp_DID, irsp->ulpStatus, |
| irsp->un.ulpWord[4], irsp->ulpTimeout, |
| vport->num_disc_nodes); |
| lpfc_disc_start(vport); |
| } |
| return; |
| } |
| |
| /** |
| * lpfc_issue_els_logo - Issue a logo to a node on a vport |
| * @vport: pointer to a virtual N_Port data structure. |
| * @ndlp: pointer to a node-list data structure. |
| * @retry: number of retries to the command IOCB. |
| * |
| * This routine constructs and issues an ELS Logout (LOGO) iocb command |
| * to a remote node, referred by an @ndlp on a @vport. It constructs the |
| * payload of the IOCB, properly sets up the @ndlp state, and invokes the |
| * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command. |
| * |
| * Note that, in the lpfc_prep_els_iocb() routine, the reference count of ndlp |
| * will be incremented by 1 for holding the ndlp and the reference to ndlp |
| * will be stored into the context1 field of the IOCB for the completion |
| * callback function to the LOGO ELS command. |
| * |
| * Return code |
| * 0 - successfully issued logo |
| * 1 - failed to issue logo |
| **/ |
| int |
| lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
| uint8_t retry) |
| { |
| struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
| struct lpfc_hba *phba = vport->phba; |
| struct lpfc_iocbq *elsiocb; |
| uint8_t *pcmd; |
| uint16_t cmdsize; |
| int rc; |
| |
| spin_lock_irq(shost->host_lock); |
| if (ndlp->nlp_flag & NLP_LOGO_SND) { |
| spin_unlock_irq(shost->host_lock); |
| return 0; |
| } |
| spin_unlock_irq(shost->host_lock); |
| |
| cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); |
| elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, |
| ndlp->nlp_DID, ELS_CMD_LOGO); |
| if (!elsiocb) |
| return 1; |
| |
| pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); |
| *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; |
| pcmd += sizeof(uint32_t); |
| |
| /* Fill in LOGO payload */ |
| *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); |
| pcmd += sizeof(uint32_t); |
| memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); |
| |
| lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
| "Issue LOGO: did:x%x", |
| ndlp->nlp_DID, 0, 0); |
| |
| /* |
| * If we are issuing a LOGO, we may try to recover the remote NPort |
| * by issuing a PLOGI later. Even though we issue ELS cmds by the |
| * VPI, if we have a valid RPI, and that RPI gets unreg'ed while |
| * that ELS command is in-flight, the HBA returns an IOERR_INVALID_RPI |
| * for that ELS cmd. To avoid this situation, let's get rid of the |
| * RPI right now, before any ELS cmds are sent. |
| */ |
| spin_lock_irq(shost->host_lock); |
| ndlp->nlp_flag |= NLP_ISSUE_LOGO; |
| spin_unlock_irq(shost->host_lock); |
| if (lpfc_unreg_rpi(vport, ndlp)) { |
| lpfc_els_free_iocb(phba, elsiocb); |
| return 0; |
| } |
| |
| phba->fc_stat.elsXmitLOGO++; |
| elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; |
| spin_lock_irq(shost->host_lock); |
| ndlp->nlp_flag |= NLP_LOGO_SND; |
| ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; |
| spin_unlock_irq(shost->host_lock); |
| rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); |
| |
| if (rc == IOCB_ERROR) { |
| spin_lock_irq(shost->host_lock); |
| ndlp->nlp_flag &= ~NLP_LOGO_SND; |
| spin_unlock_irq(shost->host_lock); |
| lpfc_els_free_iocb(phba, elsiocb); |
| return 1; |
| } |
| return 0; |
| } |
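| |
| /* |
|  * Usage sketch (hypothetical caller). Note the routine above also |
|  * returns 0 when a LOGO is already pending (NLP_LOGO_SND) or when |
|  * lpfc_unreg_rpi() consumes the request, so only a true transmit |
|  * failure returns 1: |
|  * |
|  *	if (lpfc_issue_els_logo(vport, ndlp, 0)) |
|  *		... LOGO could not be transmitted ... |
|  */ |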
| |
| /** |
| * lpfc_cmpl_els_cmd - Completion callback function for generic els command |
| * @phba: pointer to lpfc hba data structure. |
| * @cmdiocb: pointer to lpfc command iocb data structure. |
| * @rspiocb: pointer to lpfc response iocb data structure. |
| * |
| * This routine is a generic completion callback function for ELS commands. |
| * Specifically, it is the callback function which does not need to perform |
| * any command specific operations. It is currently used by the ELS command |
| * issuing routines for the ELS State Change Request (SCR), |
| * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution |
| * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than |
| * certain debug loggings, this callback function simply invokes the |
| * lpfc_els_chk_latt() routine to check whether link went down during the |
| * discovery process. |
| **/ |
| static void |
| lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
| struct lpfc_iocbq *rspiocb) |
| { |
| struct lpfc_vport *vport = cmdiocb->vport; |
| IOCB_t *irsp; |
| |
| irsp = &rspiocb->iocb; |
| |
| lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
| "ELS cmd cmpl: status:x%x/x%x did:x%x", |
| irsp->ulpStatus, irsp->un.ulpWord[4], |
| irsp->un.elsreq64.remoteID); |
| /* ELS cmd tag <ulpIoTag> completes */ |
| lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
| "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", |
| irsp->ulpIoTag, irsp->ulpStatus, |
| irsp->un.ulpWord[4], irsp->ulpTimeout); |
| /* Check to see if link went down during discovery */ |
| lpfc_els_chk_latt(vport); |
| lpfc_els_free_iocb(phba, cmdiocb); |
| return; |
| } |
| |
| /** |
| * lpfc_issue_els_scr - Issue a scr to a node on a vport |
| * @vport: pointer to a host virtual N_Port data structure. |
| * @nportid: N_Port identifier to the remote node. |
| * @retry: number of retries to the command IOCB. |
| * |
| * This routine issues a State Change Request (SCR) to a fabric node |
| * on a @vport. The remote node @nportid is passed into the function. It |
| * first searches the @vport node list to find the matching ndlp. If no such |
| * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An |
| * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() |
| * routine is invoked to send the SCR IOCB. |
| * |
| * Note that, in the lpfc_prep_els_iocb() routine, the reference count of ndlp |
| * will be incremented by 1 for holding the ndlp and the reference to ndlp |
| * will be stored into the context1 field of the IOCB for the completion |
| * callback function to the SCR ELS command. |
| * |
| * Return code |
| * 0 - Successfully issued scr command |
| * 1 - Failed to issue scr command |
| **/ |
| int |
| lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) |
| { |
| struct lpfc_hba *phba = vport->phba; |
| struct lpfc_iocbq *elsiocb; |
| uint8_t *pcmd; |
| uint16_t cmdsize; |
| struct lpfc_nodelist *ndlp; |
| |
| cmdsize = (sizeof(uint32_t) + sizeof(SCR)); |
| |
| ndlp = lpfc_findnode_did(vport, nportid); |
| if (!ndlp) { |
| ndlp = lpfc_nlp_init(vport, nportid); |
| if (!ndlp) |
| return 1; |
| lpfc_enqueue_node(vport, ndlp); |
| } else if (!NLP_CHK_NODE_ACT(ndlp)) { |
| ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); |
| if (!ndlp) |
| return 1; |
| } |
| |
| elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, |
| ndlp->nlp_DID, ELS_CMD_SCR); |
| |
| if (!elsiocb) { |
| /* This will trigger the release of the node just |
| * allocated |
| */ |
| lpfc_nlp_put(ndlp); |
| return 1; |
| } |
| |
| pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); |
| |
| *((uint32_t *) (pcmd)) = ELS_CMD_SCR; |
| pcmd += sizeof(uint32_t); |
| |
| /* For SCR, remainder of payload is SCR parameter page */ |
| memset(pcmd, 0, sizeof(SCR)); |
| ((SCR *) pcmd)->Function = SCR_FUNC_FULL; |
| |
| lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
| "Issue SCR: did:x%x", |
| ndlp->nlp_DID, 0, 0); |
| |
| phba->fc_stat.elsXmitSCR++; |
| elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; |
| if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == |
| IOCB_ERROR) { |
| /* The additional lpfc_nlp_put will cause the following |
| * lpfc_els_free_iocb routine to trigger the release of |
| * the node. |
| */ |
| lpfc_nlp_put(ndlp); |
| lpfc_els_free_iocb(phba, elsiocb); |
| return 1; |
| } |
| /* This will cause the callback-function lpfc_cmpl_els_cmd to |
| * trigger the release of the node. |
| */ |
| |
| lpfc_nlp_put(ndlp); |
| return 0; |
| } |
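| |
| /* |
|  * Usage sketch (hypothetical caller, assuming the SCR_DID well-known |
|  * Fabric Controller address define used elsewhere in this driver): |
|  * |
|  *	if (lpfc_issue_els_scr(vport, SCR_DID, 0)) |
|  *		... SCR could not be issued ... |
|  */ |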
| |
| /** |
| * lpfc_issue_els_farpr - Issue a farpr to a node on a vport |
| * @vport: pointer to a host virtual N_Port data structure. |
| * @nportid: N_Port identifier to the remote node. |
| * @retry: number of retries to the command IOCB. |
| * |
| * This routine issues a Fibre Channel Address Resolution Response |
| * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) |
| * is passed into the function. It first searches the @vport node list to find |
| * the matching ndlp. If no such ndlp is found, a new ndlp shall be created |
| * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the |
| * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. |
| * |
| * Note that, in the lpfc_prep_els_iocb() routine, the reference count of ndlp |
| * will be incremented by 1 for holding the ndlp and the reference to ndlp |
| * will be stored into the context1 field of the IOCB for the completion |
| * callback function to the FARPR ELS command. |
| * |
| * Return code |
| * 0 - Successfully issued farpr command |
| * 1 - Failed to issue farpr command |
| **/ |
| static int |
| lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) |
| { |
| struct lpfc_hba *phba = vport->phba; |
| struct lpfc_iocbq *elsiocb; |
| FARP *fp; |
| uint8_t *pcmd; |
| uint32_t *lp; |
| uint16_t cmdsize; |
| struct lpfc_nodelist *ondlp; |
| struct lpfc_nodelist *ndlp; |
| |
| cmdsize = (sizeof(uint32_t) + sizeof(FARP)); |
| |
| ndlp = lpfc_findnode_did(vport, nportid); |
| if (!ndlp) { |
| ndlp = lpfc_nlp_init(vport, nportid); |
| if (!ndlp) |
| return 1; |
| lpfc_enqueue_node(vport, ndlp); |
| } else if (!NLP_CHK_NODE_ACT(ndlp)) { |
| ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); |
| if (!ndlp) |
| return 1; |
| } |
| |
| elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, |
| ndlp->nlp_DID, ELS_CMD_RNID); |
| if (!elsiocb) { |
| /* This will trigger the release of the node just |
| * allocated |
| */ |
| lpfc_nlp_put(ndlp); |
| return 1; |
| } |
| |
| pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); |
| |
| *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; |
| pcmd += sizeof(uint32_t); |
| |
| /* Fill in FARPR payload */ |
| fp = (FARP *) (pcmd); |
| memset(fp, 0, sizeof(FARP)); |
| lp = (uint32_t *) pcmd; |
| *lp++ = be32_to_cpu(nportid); |
| *lp++ = be32_to_cpu(vport->fc_myDID); |
| fp->Rflags = 0; |
| fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); |
| |
| memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); |
| memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); |
| ondlp = lpfc_findnode_did(vport, nportid); |
| if (ondlp && NLP_CHK_NODE_ACT(ondlp)) { |
| memcpy(&fp->OportName, &ondlp->nlp_portname, |
| sizeof(struct lpfc_name)); |
| memcpy(&fp->OnodeName, &ondlp->nlp_nodename, |
| sizeof(struct lpfc_name)); |
| } |
| |
| lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
| "Issue FARPR: did:x%x", |
| ndlp->nlp_DID, 0, 0); |
| |
| phba->fc_stat.elsXmitFARPR++; |
| elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; |
| if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == |
| IOCB_ERROR) { |
| /* The additional lpfc_nlp_put will cause the following |
| * lpfc_els_free_iocb routine to trigger the release of |
| * the node. |
| */ |
| lpfc_nlp_put(ndlp); |
| lpfc_els_free_iocb(phba, elsiocb); |
| return 1; |
| } |
| /* This will cause the callback-function lpfc_cmpl_els_cmd to |
| * trigger the release of the node. |
| */ |
| lpfc_nlp_put(ndlp); |
| return 0; |
| } |
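| |
| /* |
|  * Usage sketch (hypothetical caller): a FARPR would typically be sent |
|  * back to the N_Port ID that originated a FARP request: |
|  * |
|  *	if (lpfc_issue_els_farpr(vport, did, 0)) |
|  *		... FARPR could not be issued ... |
|  */ |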
| |
| /** |
| * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry |
| * @vport: pointer to a host virtual N_Port data structure. |
| * @nlp: pointer to a node-list data structure. |
| * |
| * This routine cancels the timer with a delayed IOCB-command retry for |
| * a @vport's @ndlp. It stops the timer for the delayed function retry and |
| * removes the ELS retry event if one is present. In addition, if the |
| * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB |
| * commands are sent for the @vport's nodes that require issuing discovery |
| * ADISC. |
| **/ |
| void |
| lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) |
| { |
| struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
| struct lpfc_work_evt *evtp; |
| |
| if (!(nlp->nlp_flag & NLP_DELAY_TMO)) |
| return; |
| spin_lock_irq(shost->host_lock); |
| nlp->nlp_flag &= ~NLP_DELAY_TMO; |
| spin_unlock_irq(shost->host_lock); |
| del_timer_sync(&nlp->nlp_delayfunc); |
| nlp->nlp_last_elscmd = 0; |
| if (!list_empty(&nlp->els_retry_evt.evt_listp)) { |
| list_del_init(&nlp->els_retry_evt.evt_listp); |
| /* Decrement nlp reference count held for the delayed retry */ |
| evtp = &nlp->els_retry_evt; |
| lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); |
| } |
| if (nlp->nlp_flag & NLP_NPR_2B_DISC) { |
| spin_lock_irq(shost->host_lock); |
| nlp->nlp_flag &= ~NLP_NPR_2B_DISC; |
| spin_unlock_irq(shost->host_lock); |
| if (vport->num_disc_nodes) { |
| if (vport->port_state < LPFC_VPORT_READY) { |
| /* Check if there are more ADISCs to be sent */ |
| lpfc_more_adisc(vport); |
| } else { |
| /* Check if there are more PLOGIs to be sent */ |
| lpfc_more_plogi(vport); |
| if (vport->num_disc_nodes == 0) { |
| spin_lock_irq(shost->host_lock); |
| vport->fc_flag &= ~FC_NDISC_ACTIVE; |
| spin_unlock_irq(shost->host_lock); |
| lpfc_can_disctmo(vport); |
| lpfc_end_rscn(vport); |
| } |
| } |
| } |
| } |
| return; |
| } |
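| |
| /* |
|  * Usage sketch (mirrors the LOGO completion above): cancel any pending |
|  * delayed retry before marking a node for recovery: |
|  * |
|  *	lpfc_cancel_retry_delay_tmo(vport, ndlp); |
|  *	spin_lock_irq(shost->host_lock); |
|  *	ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
|  *	spin_unlock_irq(shost->host_lock); |
|  */ |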
| |
| /** |
| * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer |
| * @ptr: holder for the pointer to the timer function associated data (ndlp). |
| * |
| * This routine is invoked by the ndlp delayed-function timer to check |
| * whether there is any pending ELS retry event(s) with the node. If not, it |
| * simply returns. Otherwise, if there is at least one ELS delayed event, it |
| * adds the delayed events to the HBA work list and invokes the |
| |