/*
* linux/drivers/message/fusion/mptbase.c
* This is the Fusion MPT base driver which supports multiple
* (SCSI + LAN) specialized protocol drivers.
* For use with LSI PCI chip/adapter(s)
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
NO WARRANTY
THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
solely responsible for determining the appropriateness of using and
distributing the Program and assumes all risks associated with its
exercise of rights under this Agreement, including but not limited to
the risks and costs of program errors, damage to or loss of data,
programs or equipment, and unavailability or interruption of operations.
DISCLAIMER OF LIABILITY
NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h> /* needed for in_interrupt() proto */
#include <linux/dma-mapping.h>
#include <asm/io.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
#include <linux/kthread.h>
#include <scsi/scsi_host.h>
#include "mptbase.h"
#include "lsi/mpi_log_fc.h"
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#define my_NAME "Fusion MPT base driver"
#define my_VERSION MPT_LINUX_VERSION_COMMON
#define MYNAM "mptbase"
MODULE_AUTHOR(MODULEAUTHOR);
MODULE_DESCRIPTION(my_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);
/*
* cmd line parameters
*/
static int mpt_msi_enable_spi;
module_param(mpt_msi_enable_spi, int, 0);
MODULE_PARM_DESC(mpt_msi_enable_spi,
" Enable MSI Support for SPI controllers (default=0)");
static int mpt_msi_enable_fc;
module_param(mpt_msi_enable_fc, int, 0);
MODULE_PARM_DESC(mpt_msi_enable_fc,
" Enable MSI Support for FC controllers (default=0)");
static int mpt_msi_enable_sas;
module_param(mpt_msi_enable_sas, int, 0);
MODULE_PARM_DESC(mpt_msi_enable_sas,
" Enable MSI Support for SAS controllers (default=0)");
static int mpt_channel_mapping;
module_param(mpt_channel_mapping, int, 0);
MODULE_PARM_DESC(mpt_channel_mapping, " Mapping IDs to channels (default=0)");
static int mpt_debug_level;
static int mpt_set_debug_level(const char *val, struct kernel_param *kp);
module_param_call(mpt_debug_level, mpt_set_debug_level, param_get_int,
&mpt_debug_level, 0600);
MODULE_PARM_DESC(mpt_debug_level,
" debug level - refer to mptdebug.h - (default=0)");
int mpt_fwfault_debug;
EXPORT_SYMBOL(mpt_fwfault_debug);
module_param(mpt_fwfault_debug, int, 0600);
MODULE_PARM_DESC(mpt_fwfault_debug,
"Enable detection of Firmware fault and halt Firmware on fault - (default=0)");
static char MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS]
[MPT_MAX_CALLBACKNAME_LEN+1];
#ifdef MFCNT
static int mfcounter = 0;
#define PRINT_MF_COUNT 20000
#endif
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Public data...
*/
#define WHOINIT_UNKNOWN 0xAA
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Private data...
*/
/* Adapter link list */
LIST_HEAD(ioc_list);
/* Callback lookup table */
static MPT_CALLBACK MptCallbacks[MPT_MAX_PROTOCOL_DRIVERS];
/* Protocol driver class lookup table */
static int MptDriverClass[MPT_MAX_PROTOCOL_DRIVERS];
/* Event handler lookup table */
static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
/* Reset handler lookup table */
static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *mpt_proc_root_dir;
#endif
/*
 *  Driver Callback Indexes
*/
static u8 mpt_base_index = MPT_MAX_PROTOCOL_DRIVERS;
static u8 last_drv_idx;
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Forward protos...
*/
static irqreturn_t mpt_interrupt(int irq, void *bus_id);
static int mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
MPT_FRAME_HDR *reply);
static int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes,
u32 *req, int replyBytes, u16 *u16reply, int maxwait,
int sleepFlag);
static int mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag);
static void mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev);
static void mpt_adapter_disable(MPT_ADAPTER *ioc);
static void mpt_adapter_dispose(MPT_ADAPTER *ioc);
static void MptDisplayIocCapabilities(MPT_ADAPTER *ioc);
static int MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag);
static int GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason);
static int GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
static int SendIocInit(MPT_ADAPTER *ioc, int sleepFlag);
static int SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
static int mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag);
static int mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag);
static int mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
static int KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag);
static int PrimeIocFifos(MPT_ADAPTER *ioc);
static int WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
static int WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
static int WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
static int GetLanConfigPages(MPT_ADAPTER *ioc);
static int GetIoUnitPage2(MPT_ADAPTER *ioc);
int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc);
static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch,
int sleepFlag);
static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);
#ifdef CONFIG_PROC_FS
static const struct file_operations mpt_summary_proc_fops;
static const struct file_operations mpt_version_proc_fops;
static const struct file_operations mpt_iocinfo_proc_fops;
#endif
static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc);
static int ProcessEventNotification(MPT_ADAPTER *ioc,
EventNotificationReply_t *evReply, int *evHandlers);
static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
static void mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info);
static void mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info , u8 cb_idx);
static int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc);
static void mpt_inactive_raid_list_free(MPT_ADAPTER *ioc);
/* module entry point */
static int __init fusion_init (void);
static void __exit fusion_exit (void);
#define CHIPREG_READ32(addr) readl_relaxed(addr)
#define CHIPREG_READ32_dmasync(addr) readl(addr)
#define CHIPREG_WRITE32(addr,val) writel(val, addr)
#define CHIPREG_PIO_WRITE32(addr,val) outl(val, (unsigned long)addr)
#define CHIPREG_PIO_READ32(addr) inl((unsigned long)addr)
static void
pci_disable_io_access(struct pci_dev *pdev)
{
u16 command_reg;
pci_read_config_word(pdev, PCI_COMMAND, &command_reg);
command_reg &= ~1;
pci_write_config_word(pdev, PCI_COMMAND, command_reg);
}
static void
pci_enable_io_access(struct pci_dev *pdev)
{
u16 command_reg;
pci_read_config_word(pdev, PCI_COMMAND, &command_reg);
command_reg |= 1;
pci_write_config_word(pdev, PCI_COMMAND, command_reg);
}
static int mpt_set_debug_level(const char *val, struct kernel_param *kp)
{
int ret = param_set_int(val, kp);
MPT_ADAPTER *ioc;
if (ret)
return ret;
list_for_each_entry(ioc, &ioc_list, list)
ioc->debug_level = mpt_debug_level;
return 0;
}
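/*
 * Usage note (illustrative, not part of the driver logic): because
 * mpt_debug_level is exposed through module_param_call() with mode 0600,
 * it can typically be set at load time or adjusted at runtime, e.g.:
 *
 *	modprobe mptbase mpt_debug_level=0x10
 *	echo 0x10 > /sys/module/mptbase/parameters/mpt_debug_level
 *
 * mpt_set_debug_level() then propagates the new value to every adapter
 * on ioc_list.  The individual debug bits are defined in mptdebug.h.
 */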
/**
* mpt_get_cb_idx - obtain cb_idx for registered driver
* @dclass: class driver enum
*
 * Returns cb_idx, or zero if it wasn't found
**/
static u8
mpt_get_cb_idx(MPT_DRIVER_CLASS dclass)
{
u8 cb_idx;
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--)
if (MptDriverClass[cb_idx] == dclass)
return cb_idx;
return 0;
}
/**
* mpt_is_discovery_complete - determine if discovery has completed
 * @ioc: per adapter instance
*
* Returns 1 when discovery completed, else zero.
*/
static int
mpt_is_discovery_complete(MPT_ADAPTER *ioc)
{
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasIOUnitPage0_t *buffer;
dma_addr_t dma_handle;
int rc = 0;
memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
memset(&cfg, 0, sizeof(CONFIGPARMS));
hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
cfg.cfghdr.ehdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
if ((mpt_config(ioc, &cfg)))
goto out;
if (!hdr.ExtPageLength)
goto out;
buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
&dma_handle);
if (!buffer)
goto out;
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
if ((mpt_config(ioc, &cfg)))
goto out_free_consistent;
if (!(buffer->PhyData[0].PortFlags &
MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS))
rc = 1;
out_free_consistent:
pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
buffer, dma_handle);
out:
return rc;
}
/**
* mpt_remove_dead_ioc_func - kthread context to remove dead ioc
* @arg: input argument, used to derive ioc
*
 * Returns 0 if the controller was removed from the PCI subsystem,
 * -1 otherwise.
*/
static int mpt_remove_dead_ioc_func(void *arg)
{
MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
struct pci_dev *pdev;
	if (!ioc)
		return -1;
	pdev = ioc->pcidev;
	if (!pdev)
		return -1;
pci_stop_and_remove_bus_device_locked(pdev);
return 0;
}
/**
* mpt_fault_reset_work - work performed on workq after ioc fault
* @work: input argument, used to derive ioc
*
**/
static void
mpt_fault_reset_work(struct work_struct *work)
{
MPT_ADAPTER *ioc =
container_of(work, MPT_ADAPTER, fault_reset_work.work);
u32 ioc_raw_state;
int rc;
unsigned long flags;
MPT_SCSI_HOST *hd;
struct task_struct *p;
if (ioc->ioc_reset_in_progress || !ioc->active)
goto out;
ioc_raw_state = mpt_GetIocState(ioc, 0);
if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_MASK) {
printk(MYIOC_s_INFO_FMT "%s: IOC is non-operational !!!!\n",
ioc->name, __func__);
		/*
		 * Call the mptscsih_flush_pending_cmds callback so that we
		 * flush all pending commands back to the OS.
		 * This call is required to avoid a deadlock at the block
		 * layer.  A dead IOC will fail the diag reset, and this call
		 * is safe since a dead IOC will never return any command
		 * back from the hardware.
		 */
hd = shost_priv(ioc->sh);
ioc->schedule_dead_ioc_flush_running_cmds(hd);
/*Remove the Dead Host */
p = kthread_run(mpt_remove_dead_ioc_func, ioc,
"mpt_dead_ioc_%d", ioc->id);
if (IS_ERR(p)) {
printk(MYIOC_s_ERR_FMT
"%s: Running mpt_dead_ioc thread failed !\n",
ioc->name, __func__);
} else {
printk(MYIOC_s_WARN_FMT
"%s: Running mpt_dead_ioc thread success !\n",
ioc->name, __func__);
}
return; /* don't rearm timer */
}
if ((ioc_raw_state & MPI_IOC_STATE_MASK)
== MPI_IOC_STATE_FAULT) {
printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
ioc->name, __func__);
rc = mpt_HardResetHandler(ioc, CAN_SLEEP);
printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name,
__func__, (rc == 0) ? "success" : "failed");
ioc_raw_state = mpt_GetIocState(ioc, 0);
if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
"reset (%04xh)\n", ioc->name, ioc_raw_state &
MPI_DOORBELL_DATA_MASK);
} else if (ioc->bus_type == SAS && ioc->sas_discovery_quiesce_io) {
if ((mpt_is_discovery_complete(ioc))) {
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "clearing "
"discovery_quiesce_io flag\n", ioc->name));
ioc->sas_discovery_quiesce_io = 0;
}
}
out:
/*
* Take turns polling alternate controller
*/
if (ioc->alt_ioc)
ioc = ioc->alt_ioc;
/* rearm the timer */
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->reset_work_q)
queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
msecs_to_jiffies(MPT_POLLING_INTERVAL));
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
}
/*
* Process turbo (context) reply...
*/
static void
mpt_turbo_reply(MPT_ADAPTER *ioc, u32 pa)
{
MPT_FRAME_HDR *mf = NULL;
MPT_FRAME_HDR *mr = NULL;
u16 req_idx = 0;
u8 cb_idx;
dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got TURBO reply req_idx=%08x\n",
ioc->name, pa));
switch (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT) {
case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
req_idx = pa & 0x0000FFFF;
cb_idx = (pa & 0x00FF0000) >> 16;
mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
break;
case MPI_CONTEXT_REPLY_TYPE_LAN:
cb_idx = mpt_get_cb_idx(MPTLAN_DRIVER);
		/*
		 * Blindly setting mf to NULL here used to be fatal once
		 * lan_reply said "freeme".  The fix (combined with an
		 * optimization) adds an explicit check for the case where
		 * lan_reply would simply return 1 and do nothing else;
		 * for that case skip the callback, but set up the proper
		 * mf value first.
		 */
if ((pa & 0x58000000) == 0x58000000) {
req_idx = pa & 0x0000FFFF;
mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
mpt_free_msg_frame(ioc, mf);
mb();
return;
}
mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
break;
case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
cb_idx = mpt_get_cb_idx(MPTSTM_DRIVER);
mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
break;
default:
cb_idx = 0;
BUG();
}
/* Check for (valid) IO callback! */
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
MptCallbacks[cb_idx] == NULL) {
		printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
			ioc->name, __func__, cb_idx);
goto out;
}
if (MptCallbacks[cb_idx](ioc, mf, mr))
mpt_free_msg_frame(ioc, mf);
out:
mb();
}
static void
mpt_reply(MPT_ADAPTER *ioc, u32 pa)
{
MPT_FRAME_HDR *mf;
MPT_FRAME_HDR *mr;
u16 req_idx;
u8 cb_idx;
int freeme;
u32 reply_dma_low;
u16 ioc_stat;
	/* Non-TURBO reply!  Something unusual may be going on...
	 * The newer turbo reply mechanism delivers the address via a
	 * left shift by 1 (dropping MPI_ADDRESS_REPLY_A_BIT)!
	 */
	/* Map the DMA address of the reply header to a cpu address.
	 * pa is 32 bits - but the dma address may be 32 or 64 bits;
	 * get the offset based only on the low addresses.
	 */
reply_dma_low = (pa <<= 1);
mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
(reply_dma_low - ioc->reply_frames_low_dma));
req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
DBG_DUMP_REPLY_FRAME(ioc, (u32 *)mr);
/* Check/log IOC log info
*/
ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
if (ioc->bus_type == FC)
mpt_fc_log_info(ioc, log_info);
else if (ioc->bus_type == SPI)
mpt_spi_log_info(ioc, log_info);
else if (ioc->bus_type == SAS)
mpt_sas_log_info(ioc, log_info, cb_idx);
}
if (ioc_stat & MPI_IOCSTATUS_MASK)
mpt_iocstatus_info(ioc, (u32)ioc_stat, mf);
/* Check for (valid) IO callback! */
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
MptCallbacks[cb_idx] == NULL) {
		printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
			ioc->name, __func__, cb_idx);
freeme = 0;
goto out;
}
freeme = MptCallbacks[cb_idx](ioc, mf, mr);
out:
/* Flush (non-TURBO) reply with a WRITE! */
CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);
if (freeme)
mpt_free_msg_frame(ioc, mf);
mb();
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_interrupt - MPT adapter (IOC) specific interrupt handler.
* @irq: irq number (not used)
* @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
*
* This routine is registered via the request_irq() kernel API call,
* and handles all interrupts generated from a specific MPT adapter
 * (also referred to as an IO Controller or IOC).
* This routine must clear the interrupt from the adapter and does
* so by reading the reply FIFO. Multiple replies may be processed
* per single call to this routine.
*
* This routine handles register-level access of the adapter but
* dispatches (calls) a protocol-specific callback routine to handle
* the protocol-specific details of the MPT request completion.
*/
static irqreturn_t
mpt_interrupt(int irq, void *bus_id)
{
MPT_ADAPTER *ioc = bus_id;
u32 pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
if (pa == 0xFFFFFFFF)
return IRQ_NONE;
/*
* Drain the reply FIFO!
*/
do {
if (pa & MPI_ADDRESS_REPLY_A_BIT)
mpt_reply(ioc, pa);
else
mpt_turbo_reply(ioc, pa);
pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
} while (pa != 0xFFFFFFFF);
return IRQ_HANDLED;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptbase_reply - MPT base driver's callback routine
* @ioc: Pointer to MPT_ADAPTER structure
* @req: Pointer to original MPT request frame
* @reply: Pointer to MPT reply frame (NULL if TurboReply)
*
* MPT base driver's callback routine; all base driver
* "internal" request/reply processing is routed here.
* Currently used for EventNotification and EventAck handling.
*
* Returns 1 indicating original alloc'd request frame ptr
* should be freed, or 0 if it shouldn't.
*/
static int
mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
{
EventNotificationReply_t *pEventReply;
u8 event;
int evHandlers;
int freereq = 1;
switch (reply->u.hdr.Function) {
case MPI_FUNCTION_EVENT_NOTIFICATION:
pEventReply = (EventNotificationReply_t *)reply;
evHandlers = 0;
ProcessEventNotification(ioc, pEventReply, &evHandlers);
event = le32_to_cpu(pEventReply->Event) & 0xFF;
if (pEventReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)
freereq = 0;
		if (event != MPI_EVENT_EVENT_CHANGE)
			break;
		/* fall through */
case MPI_FUNCTION_CONFIG:
case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
if (reply) {
ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
memcpy(ioc->mptbase_cmds.reply, reply,
min(MPT_DEFAULT_FRAME_SIZE,
4 * reply->u.reply.MsgLength));
}
if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
ioc->mptbase_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
complete(&ioc->mptbase_cmds.done);
} else
freereq = 0;
if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_FREE_MF)
freereq = 1;
break;
case MPI_FUNCTION_EVENT_ACK:
devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"EventAck reply received\n", ioc->name));
break;
default:
printk(MYIOC_s_ERR_FMT
"Unexpected msg function (=%02Xh) reply received!\n",
ioc->name, reply->u.hdr.Function);
break;
}
/*
* Conditionally tell caller to free the original
* EventNotification/EventAck/unexpected request frame!
*/
return freereq;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_register - Register protocol-specific main callback handler.
* @cbfunc: callback function pointer
* @dclass: Protocol driver's class (%MPT_DRIVER_CLASS enum value)
* @func_name: call function's name
*
* This routine is called by a protocol-specific driver (SCSI host,
* LAN, SCSI target) to register its reply callback routine. Each
* protocol-specific driver must do this before it will be able to
* use any IOC resources, such as obtaining request frames.
*
* NOTES: The SCSI protocol driver currently calls this routine thrice
* in order to register separate callbacks; one for "normal" SCSI IO;
* one for MptScsiTaskMgmt requests; one for Scan/DV requests.
*
* Returns u8 valued "handle" in the range (and S.O.D. order)
* {N,...,7,6,5,...,1} if successful.
* A return value of MPT_MAX_PROTOCOL_DRIVERS (including zero!) should be
* considered an error by the caller.
*/
u8
mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass, char *func_name)
{
u8 cb_idx;
last_drv_idx = MPT_MAX_PROTOCOL_DRIVERS;
/*
* Search for empty callback slot in this order: {N,...,7,6,5,...,1}
* (slot/handle 0 is reserved!)
*/
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
if (MptCallbacks[cb_idx] == NULL) {
MptCallbacks[cb_idx] = cbfunc;
MptDriverClass[cb_idx] = dclass;
MptEvHandlers[cb_idx] = NULL;
last_drv_idx = cb_idx;
strlcpy(MptCallbacksName[cb_idx], func_name,
MPT_MAX_CALLBACKNAME_LEN+1);
break;
}
}
return last_drv_idx;
}
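/*
 * Illustrative sketch only (hypothetical names, not compiled here):
 * a protocol driver is expected to register its reply callback and keep
 * the returned handle for later calls, roughly as follows.
 *
 *	static u8 my_cb_idx = MPT_MAX_PROTOCOL_DRIVERS;
 *
 *	static int
 *	my_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
 *	{
 *		// handle the reply; return 1 to free the request frame
 *		return 1;
 *	}
 *
 *	my_cb_idx = mpt_register(my_reply, MPTSAS_DRIVER, "my_reply");
 *	if (!my_cb_idx || my_cb_idx == MPT_MAX_PROTOCOL_DRIVERS)
 *		return -EBUSY;		// treat as an error, per the note above
 *	...
 *	mpt_deregister(my_cb_idx);	// at module unload
 */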
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_deregister - Deregister a protocol driver's resources.
* @cb_idx: previously registered callback handle
*
* Each protocol-specific driver should call this routine when its
* module is unloaded.
*/
void
mpt_deregister(u8 cb_idx)
{
if (cb_idx && (cb_idx < MPT_MAX_PROTOCOL_DRIVERS)) {
MptCallbacks[cb_idx] = NULL;
MptDriverClass[cb_idx] = MPTUNKNOWN_DRIVER;
MptEvHandlers[cb_idx] = NULL;
last_drv_idx++;
}
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_event_register - Register protocol-specific event callback handler.
* @cb_idx: previously registered (via mpt_register) callback handle
* @ev_cbfunc: callback function
*
* This routine can be called by one or more protocol-specific drivers
* if/when they choose to be notified of MPT events.
*
* Returns 0 for success.
*/
int
mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc)
{
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
return -1;
MptEvHandlers[cb_idx] = ev_cbfunc;
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_event_deregister - Deregister protocol-specific event callback handler
* @cb_idx: previously registered callback handle
*
* Each protocol-specific driver should call this routine
* when it does not (or can no longer) handle events,
* or when its module is unloaded.
*/
void
mpt_event_deregister(u8 cb_idx)
{
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
return;
MptEvHandlers[cb_idx] = NULL;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_reset_register - Register protocol-specific IOC reset handler.
* @cb_idx: previously registered (via mpt_register) callback handle
* @reset_func: reset function
*
* This routine can be called by one or more protocol-specific drivers
* if/when they choose to be notified of IOC resets.
*
* Returns 0 for success.
*/
int
mpt_reset_register(u8 cb_idx, MPT_RESETHANDLER reset_func)
{
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
return -1;
MptResetHandlers[cb_idx] = reset_func;
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_reset_deregister - Deregister protocol-specific IOC reset handler.
* @cb_idx: previously registered callback handle
*
* Each protocol-specific driver should call this routine
* when it does not (or can no longer) handle IOC reset handling,
* or when its module is unloaded.
*/
void
mpt_reset_deregister(u8 cb_idx)
{
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
return;
MptResetHandlers[cb_idx] = NULL;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_device_driver_register - Register device driver hooks
* @dd_cbfunc: driver callbacks struct
* @cb_idx: MPT protocol driver index
*/
int
mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, u8 cb_idx)
{
MPT_ADAPTER *ioc;
const struct pci_device_id *id;
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
return -EINVAL;
MptDeviceDriverHandlers[cb_idx] = dd_cbfunc;
/* call per pci device probe entry point */
list_for_each_entry(ioc, &ioc_list, list) {
id = ioc->pcidev->driver ?
ioc->pcidev->driver->id_table : NULL;
if (dd_cbfunc->probe)
dd_cbfunc->probe(ioc->pcidev, id);
}
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_device_driver_deregister - Deregister device driver hooks
* @cb_idx: MPT protocol driver index
*/
void
mpt_device_driver_deregister(u8 cb_idx)
{
struct mpt_pci_driver *dd_cbfunc;
MPT_ADAPTER *ioc;
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
return;
dd_cbfunc = MptDeviceDriverHandlers[cb_idx];
list_for_each_entry(ioc, &ioc_list, list) {
if (dd_cbfunc->remove)
dd_cbfunc->remove(ioc->pcidev);
}
MptDeviceDriverHandlers[cb_idx] = NULL;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_get_msg_frame - Obtain an MPT request frame from the pool
* @cb_idx: Handle of registered MPT protocol driver
* @ioc: Pointer to MPT adapter structure
*
 * Obtain an MPT request frame from the pool (of 1024) that is
 * allocated per MPT adapter.
 *
 * Returns pointer to an MPT request frame or %NULL if none are available
 * or the IOC is not active.
*/
MPT_FRAME_HDR*
mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc)
{
MPT_FRAME_HDR *mf;
unsigned long flags;
u16 req_idx; /* Request index */
/* validate handle and ioc identifier */
#ifdef MFCNT
if (!ioc->active)
printk(MYIOC_s_WARN_FMT "IOC Not Active! mpt_get_msg_frame "
"returning NULL!\n", ioc->name);
#endif
/* If interrupts are not attached, do not return a request frame */
if (!ioc->active)
return NULL;
spin_lock_irqsave(&ioc->FreeQlock, flags);
if (!list_empty(&ioc->FreeQ)) {
int req_offset;
mf = list_entry(ioc->FreeQ.next, MPT_FRAME_HDR,
u.frame.linkage.list);
list_del(&mf->u.frame.linkage.list);
mf->u.frame.linkage.arg1 = 0;
mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx; /* byte */
req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
/* u16! */
req_idx = req_offset / ioc->req_sz;
mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
/* Default, will be changed if necessary in SG generation */
ioc->RequestNB[req_idx] = ioc->NB_for_64_byte_frame;
#ifdef MFCNT
ioc->mfcnt++;
#endif
}
else
mf = NULL;
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
#ifdef MFCNT
if (mf == NULL)
printk(MYIOC_s_WARN_FMT "IOC Active. No free Msg Frames! "
"Count 0x%x Max 0x%x\n", ioc->name, ioc->mfcnt,
ioc->req_depth);
mfcounter++;
if (mfcounter == PRINT_MF_COUNT)
printk(MYIOC_s_INFO_FMT "MF Count 0x%x Max 0x%x \n", ioc->name,
ioc->mfcnt, ioc->req_depth);
#endif
dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_get_msg_frame(%d,%d), got mf=%p\n",
ioc->name, cb_idx, ioc->id, mf));
return mf;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_put_msg_frame - Send a protocol-specific MPT request frame to an IOC
* @cb_idx: Handle of registered MPT protocol driver
* @ioc: Pointer to MPT adapter structure
* @mf: Pointer to MPT request frame
*
* This routine posts an MPT request frame to the request post FIFO of a
* specific MPT adapter.
*/
void
mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
u32 mf_dma_addr;
int req_offset;
u16 req_idx; /* Request index */
/* ensure values are reset properly! */
mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx; /* byte */
req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
/* u16! */
req_idx = req_offset / ioc->req_sz;
mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
DBG_DUMP_PUT_MSG_FRAME(ioc, (u32 *)mf);
mf_dma_addr = (ioc->req_frames_low_dma + req_offset) | ioc->RequestNB[req_idx];
dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mf_dma_addr=%x req_idx=%d "
"RequestNB=%x\n", ioc->name, mf_dma_addr, req_idx,
ioc->RequestNB[req_idx]));
CHIPREG_WRITE32(&ioc->chip->RequestFifo, mf_dma_addr);
}
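/*
 * Illustrative request flow (sketch only; my_cb_idx is a hypothetical
 * handle previously returned by mpt_register()):
 *
 *	MPT_FRAME_HDR *mf = mpt_get_msg_frame(my_cb_idx, ioc);
 *	if (!mf)
 *		return -EAGAIN;	// pool exhausted or IOC not active
 *	// ... build the MPI request in mf, byte-swapping as needed ...
 *	mpt_put_msg_frame(my_cb_idx, ioc, mf);
 *	// the reply arrives via the registered callback; returning
 *	// non-zero from it puts the frame back on the FreeQ.
 */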
/**
* mpt_put_msg_frame_hi_pri - Send a hi-pri protocol-specific MPT request frame
* @cb_idx: Handle of registered MPT protocol driver
* @ioc: Pointer to MPT adapter structure
* @mf: Pointer to MPT request frame
*
* Send a protocol-specific MPT request frame to an IOC using
* hi-priority request queue.
*
* This routine posts an MPT request frame to the request post FIFO of a
* specific MPT adapter.
**/
void
mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
u32 mf_dma_addr;
int req_offset;
u16 req_idx; /* Request index */
/* ensure values are reset properly! */
mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
req_idx = req_offset / ioc->req_sz;
mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
DBG_DUMP_PUT_MSG_FRAME(ioc, (u32 *)mf);
mf_dma_addr = (ioc->req_frames_low_dma + req_offset);
dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mf_dma_addr=%x req_idx=%d\n",
ioc->name, mf_dma_addr, req_idx));
CHIPREG_WRITE32(&ioc->chip->RequestHiPriFifo, mf_dma_addr);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_free_msg_frame - Place MPT request frame back on FreeQ.
* @ioc: Pointer to MPT adapter structure
* @mf: Pointer to MPT request frame
*
* This routine places a MPT request frame back on the MPT adapter's
* FreeQ.
*/
void
mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
unsigned long flags;
/* Put Request back on FreeQ! */
spin_lock_irqsave(&ioc->FreeQlock, flags);
if (cpu_to_le32(mf->u.frame.linkage.arg1) == 0xdeadbeaf)
goto out;
/* signature to know if this mf is freed */
mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf);
list_add(&mf->u.frame.linkage.list, &ioc->FreeQ);
#ifdef MFCNT
ioc->mfcnt--;
#endif
out:
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_add_sge - Place a simple 32 bit SGE at address pAddr.
* @pAddr: virtual address for SGE
* @flagslength: SGE flags and data transfer length
* @dma_addr: Physical address
*
 * This routine places a simple 32 bit SGE, built from the supplied
 * flags/length and DMA address, at the location pointed to by pAddr.
*/
static void
mpt_add_sge(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
{
SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
pSge->FlagsLength = cpu_to_le32(flagslength);
pSge->Address = cpu_to_le32(dma_addr);
}
/**
* mpt_add_sge_64bit - Place a simple 64 bit SGE at address pAddr.
* @pAddr: virtual address for SGE
* @flagslength: SGE flags and data transfer length
* @dma_addr: Physical address
*
 * This routine places a simple 64 bit SGE, built from the supplied
 * flags/length and DMA address, at the location pointed to by pAddr.
**/
static void
mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
{
SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
pSge->Address.Low = cpu_to_le32
(lower_32_bits(dma_addr));
pSge->Address.High = cpu_to_le32
(upper_32_bits(dma_addr));
pSge->FlagsLength = cpu_to_le32
((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
}
/**
* mpt_add_sge_64bit_1078 - Place a simple 64 bit SGE at address pAddr (1078 workaround).
* @pAddr: virtual address for SGE
* @flagslength: SGE flags and data transfer length
* @dma_addr: Physical address
*
 * This routine places a simple 64 bit SGE at the location pointed to
 * by pAddr, applying the LSI 1078 36GB addressing errata workaround.
**/
static void
mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
{
SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
u32 tmp;
pSge->Address.Low = cpu_to_le32
(lower_32_bits(dma_addr));
tmp = (u32)(upper_32_bits(dma_addr));
/*
* 1078 errata workaround for the 36GB limitation
*/
if ((((u64)dma_addr + MPI_SGE_LENGTH(flagslength)) >> 32) == 9) {
flagslength |=
MPI_SGE_SET_FLAGS(MPI_SGE_FLAGS_LOCAL_ADDRESS);
tmp |= (1<<31);
if (mpt_debug_level & MPT_DEBUG_36GB_MEM)
printk(KERN_DEBUG "1078 P0M2 addressing for "
"addr = 0x%llx len = %d\n",
(unsigned long long)dma_addr,
MPI_SGE_LENGTH(flagslength));
}
pSge->Address.High = cpu_to_le32(tmp);
pSge->FlagsLength = cpu_to_le32(
(flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_add_chain - Place a 32 bit chain SGE at address pAddr.
* @pAddr: virtual address for SGE
* @next: nextChainOffset value (u32's)
* @length: length of next SGL segment
* @dma_addr: Physical address
*
*/
static void
mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
{
SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
pChain->Length = cpu_to_le16(length);
pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
pChain->NextChainOffset = next;
pChain->Address = cpu_to_le32(dma_addr);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_add_chain_64bit - Place a 64 bit chain SGE at address pAddr.
* @pAddr: virtual address for SGE
* @next: nextChainOffset value (u32's)
* @length: length of next SGL segment
* @dma_addr: Physical address
*
*/
static void
mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
{
SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
u32 tmp = dma_addr & 0xFFFFFFFF;
pChain->Length = cpu_to_le16(length);
pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
MPI_SGE_FLAGS_64_BIT_ADDRESSING);
pChain->NextChainOffset = next;
pChain->Address.Low = cpu_to_le32(tmp);
tmp = (u32)(upper_32_bits(dma_addr));
pChain->Address.High = cpu_to_le32(tmp);
}
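/*
 * Note: callers normally do not invoke the SGE/chain helpers above
 * directly.  mpt_attach()/mpt_resume() pick the 32 bit, 64 bit or
 * 1078-workaround variants and publish them through ioc->add_sge and
 * ioc->add_chain.  Illustrative use (sketch only):
 *
 *	// flagslength built from MPI_SGE_FLAGS_* plus the transfer length,
 *	// as mpt_host_page_alloc() does below for the host page buffer
 *	ioc->add_sge(psge, flagslength, buf_dma);
 *	psge += ioc->SGE_size;
 */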
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_send_handshake_request - Send MPT request via doorbell handshake method.
* @cb_idx: Handle of registered MPT protocol driver
* @ioc: Pointer to MPT adapter structure
* @reqBytes: Size of the request in bytes
* @req: Pointer to MPT request frame
* @sleepFlag: Use schedule if CAN_SLEEP else use udelay.
*
* This routine is used exclusively to send MptScsiTaskMgmt
* requests since they are required to be sent via doorbell handshake.
*
 * NOTE: It is the caller's responsibility to byte-swap fields in the
* request which are greater than 1 byte in size.
*
* Returns 0 for success, non-zero for failure.
*/
int
mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag)
{
int r = 0;
u8 *req_as_bytes;
int ii;
/* State is known to be good upon entering
* this function so issue the bus reset
* request.
*/
/*
* Emulate what mpt_put_msg_frame() does /wrt to sanity
* setting cb_idx/req_idx. But ONLY if this request
* is in proper (pre-alloc'd) request buffer range...
*/
ii = MFPTR_2_MPT_INDEX(ioc,(MPT_FRAME_HDR*)req);
if (reqBytes >= 12 && ii >= 0 && ii < ioc->req_depth) {
MPT_FRAME_HDR *mf = (MPT_FRAME_HDR*)req;
mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(ii);
mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
}
/* Make sure there are no doorbells */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
CHIPREG_WRITE32(&ioc->chip->Doorbell,
((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) |
((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT)));
/* Wait for IOC doorbell int */
if ((ii = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0) {
return ii;
}
/* Read doorbell and check for active bit */
if (!(CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE))
return -5;
dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_send_handshake_request start, WaitCnt=%d\n",
ioc->name, ii));
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
return -2;
}
/* Send request via doorbell handshake */
req_as_bytes = (u8 *) req;
for (ii = 0; ii < reqBytes/4; ii++) {
u32 word;
word = ((req_as_bytes[(ii*4) + 0] << 0) |
(req_as_bytes[(ii*4) + 1] << 8) |
(req_as_bytes[(ii*4) + 2] << 16) |
(req_as_bytes[(ii*4) + 3] << 24));
CHIPREG_WRITE32(&ioc->chip->Doorbell, word);
if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
r = -3;
break;
}
}
if (r >= 0 && WaitForDoorbellInt(ioc, 10, sleepFlag) >= 0)
r = 0;
else
r = -4;
/* Make sure there are no doorbells */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
return r;
}
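/*
 * Handshake note (informational sketch): the request is pushed one 32 bit
 * word at a time through the Doorbell register, with WaitForDoorbellAck()
 * confirming each word, so this path is much slower than the request FIFO
 * and is reserved for requests (e.g. task management) that must use the
 * doorbell.  A hypothetical caller:
 *
 *	SCSITaskMgmt_t tm;
 *	memset(&tm, 0, sizeof(tm));
 *	// ... fill in TaskType, TargetID, etc., byte-swapped as required ...
 *	if (mpt_send_handshake_request(my_cb_idx, ioc, sizeof(tm),
 *				       (u32 *)&tm, CAN_SLEEP) != 0)
 *		return -EIO;	// callers typically escalate to a reset
 */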
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_host_page_access_control - control the IOC's Host Page Buffer access
* @ioc: Pointer to MPT adapter structure
* @access_control_value: define bits below
* @sleepFlag: Specifies whether the process can sleep
*
* Provides mechanism for the host driver to control the IOC's
* Host Page Buffer access.
*
* Access Control Value - bits[15:12]
* 0h Reserved
* 1h Enable Access { MPI_DB_HPBAC_ENABLE_ACCESS }
* 2h Disable Access { MPI_DB_HPBAC_DISABLE_ACCESS }
* 3h Free Buffer { MPI_DB_HPBAC_FREE_BUFFER }
*
* Returns 0 for success, non-zero for failure.
*/
static int
mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag)
{
int r = 0;
/* return if in use */
if (CHIPREG_READ32(&ioc->chip->Doorbell)
& MPI_DOORBELL_ACTIVE)
return -1;
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
CHIPREG_WRITE32(&ioc->chip->Doorbell,
((MPI_FUNCTION_HOST_PAGEBUF_ACCESS_CONTROL
<<MPI_DOORBELL_FUNCTION_SHIFT) |
(access_control_value<<12)));
/* Wait for IOC to clear Doorbell Status bit */
	if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
		return -2;
	else
		return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_host_page_alloc - allocate system memory for the fw
 * @ioc: Pointer to MPT adapter structure
* @ioc_init: Pointer to ioc init config page
*
 * If memory was already allocated in the past, resend the same pointer.
* Returns 0 for success, non-zero for failure.
*/
static int
mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
{
char *psge;
int flags_length;
u32 host_page_buffer_sz=0;
if(!ioc->HostPageBuffer) {
host_page_buffer_sz =
le32_to_cpu(ioc->facts.HostPageBufferSGE.FlagsLength) & 0xFFFFFF;
if(!host_page_buffer_sz)
return 0; /* fw doesn't need any host buffers */
/* spin till we get enough memory */
while(host_page_buffer_sz > 0) {
if((ioc->HostPageBuffer = pci_alloc_consistent(
ioc->pcidev,
host_page_buffer_sz,
&ioc->HostPageBuffer_dma)) != NULL) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
ioc->name, ioc->HostPageBuffer,
(u32)ioc->HostPageBuffer_dma,
host_page_buffer_sz));
ioc->alloc_total += host_page_buffer_sz;
ioc->HostPageBuffer_sz = host_page_buffer_sz;
break;
}
host_page_buffer_sz -= (4*1024);
}
}
if(!ioc->HostPageBuffer) {
printk(MYIOC_s_ERR_FMT
"Failed to alloc memory for host_page_buffer!\n",
ioc->name);
return -999;
}
psge = (char *)&ioc_init->HostPageBufferSGE;
flags_length = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_SYSTEM_ADDRESS |
MPI_SGE_FLAGS_HOST_TO_IOC |
MPI_SGE_FLAGS_END_OF_BUFFER;
flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
flags_length |= ioc->HostPageBuffer_sz;
ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_verify_adapter - Given IOC identifier, set pointer to its adapter structure.
* @iocid: IOC unique identifier (integer)
* @iocpp: Pointer to pointer to IOC adapter
*
* Given a unique IOC identifier, set pointer to the associated MPT
* adapter structure.
*
* Returns iocid and sets iocpp if iocid is found.
* Returns -1 if iocid is not found.
*/
int
mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
{
MPT_ADAPTER *ioc;
list_for_each_entry(ioc,&ioc_list,list) {
if (ioc->id == iocid) {
*iocpp =ioc;
return iocid;
}
}
*iocpp = NULL;
return -1;
}
/**
* mpt_get_product_name - returns product string
* @vendor: pci vendor id
* @device: pci device id
* @revision: pci revision id
* @prod_name: string returned
*
* Returns product string displayed when driver loads,
 * in /proc/mpt/summary and /sys/class/scsi_host/host<X>/version_product
*
**/
static void
mpt_get_product_name(u16 vendor, u16 device, u8 revision, char *prod_name)
{
char *product_str = NULL;
if (vendor == PCI_VENDOR_ID_BROCADE) {
switch (device)
{
case MPI_MANUFACTPAGE_DEVICEID_FC949E:
switch (revision)
{
case 0x00:
product_str = "BRE040 A0";
break;
case 0x01:
product_str = "BRE040 A1";
break;
default:
product_str = "BRE040";
break;
}
break;
}
goto out;
}
switch (device)
{
case MPI_MANUFACTPAGE_DEVICEID_FC909:
product_str = "LSIFC909 B1";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC919:
product_str = "LSIFC919 B0";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC929:
product_str = "LSIFC929 B0";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC919X:
if (revision < 0x80)
product_str = "LSIFC919X A0";
else
product_str = "LSIFC919XL A1";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC929X:
if (revision < 0x80)
product_str = "LSIFC929X A0";
else
product_str = "LSIFC929XL A1";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC939X:
product_str = "LSIFC939X A1";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC949X:
product_str = "LSIFC949X A1";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC949E:
switch (revision)
{
case 0x00:
product_str = "LSIFC949E A0";
break;
case 0x01:
product_str = "LSIFC949E A1";
break;
default:
product_str = "LSIFC949E";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_53C1030:
switch (revision)
{
case 0x00:
product_str = "LSI53C1030 A0";
break;
case 0x01:
product_str = "LSI53C1030 B0";
break;
case 0x03:
product_str = "LSI53C1030 B1";
break;
case 0x07:
product_str = "LSI53C1030 B2";
break;
case 0x08:
product_str = "LSI53C1030 C0";
break;
case 0x80:
product_str = "LSI53C1030T A0";
break;
case 0x83:
product_str = "LSI53C1030T A2";
break;
case 0x87:
product_str = "LSI53C1030T A3";
break;
case 0xc1:
product_str = "LSI53C1020A A1";
break;
default:
product_str = "LSI53C1030";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
switch (revision)
{
case 0x03:
product_str = "LSI53C1035 A2";
break;
case 0x04:
product_str = "LSI53C1035 B0";
break;
default:
product_str = "LSI53C1035";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_SAS1064:
switch (revision)
{
case 0x00:
product_str = "LSISAS1064 A1";
break;
case 0x01:
product_str = "LSISAS1064 A2";
break;
case 0x02:
product_str = "LSISAS1064 A3";
break;
case 0x03:
product_str = "LSISAS1064 A4";
break;
default:
product_str = "LSISAS1064";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_SAS1064E:
switch (revision)
{
case 0x00:
product_str = "LSISAS1064E A0";
break;
case 0x01:
product_str = "LSISAS1064E B0";
break;
case 0x02:
product_str = "LSISAS1064E B1";
break;
case 0x04:
product_str = "LSISAS1064E B2";
break;
case 0x08:
product_str = "LSISAS1064E B3";
break;
default:
product_str = "LSISAS1064E";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_SAS1068:
switch (revision)
{
case 0x00:
product_str = "LSISAS1068 A0";
break;
case 0x01:
product_str = "LSISAS1068 B0";
break;
case 0x02:
product_str = "LSISAS1068 B1";
break;
default:
product_str = "LSISAS1068";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_SAS1068E:
switch (revision)
{
case 0x00:
product_str = "LSISAS1068E A0";
break;
case 0x01:
product_str = "LSISAS1068E B0";
break;
case 0x02:
product_str = "LSISAS1068E B1";
break;
case 0x04:
product_str = "LSISAS1068E B2";
break;
case 0x08:
product_str = "LSISAS1068E B3";
break;
default:
product_str = "LSISAS1068E";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_SAS1078:
switch (revision)
{
case 0x00:
product_str = "LSISAS1078 A0";
break;
case 0x01:
product_str = "LSISAS1078 B0";
break;
case 0x02:
product_str = "LSISAS1078 C0";
break;
case 0x03:
product_str = "LSISAS1078 C1";
break;
case 0x04:
product_str = "LSISAS1078 C2";
break;
default:
product_str = "LSISAS1078";
break;
}
break;
}
out:
if (product_str)
sprintf(prod_name, "%s", product_str);
}
/**
* mpt_mapresources - map in memory mapped io
 * @ioc: Pointer to MPT adapter structure
*
**/
static int
mpt_mapresources(MPT_ADAPTER *ioc)
{
u8 __iomem *mem;
int ii;
resource_size_t mem_phys;
unsigned long port;
u32 msize;
u32 psize;
int r = -ENODEV;
struct pci_dev *pdev;
pdev = ioc->pcidev;
ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
if (pci_enable_device_mem(pdev)) {
printk(MYIOC_s_ERR_FMT "pci_enable_device_mem() "
"failed\n", ioc->name);
return r;
}
if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) {
printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with "
"MEM failed\n", ioc->name);
goto out_pci_disable_device;
}
if (sizeof(dma_addr_t) > 4) {
const uint64_t required_mask = dma_get_required_mask
(&pdev->dev);
if (required_mask > DMA_BIT_MASK(32)
&& !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
&& !pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(64))) {
ioc->dma_mask = DMA_BIT_MASK(64);
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
ioc->name));
} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
&& !pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32))) {
ioc->dma_mask = DMA_BIT_MASK(32);
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
ioc->name));
} else {
printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
ioc->name, pci_name(pdev));
goto out_pci_release_region;
}
} else {
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
&& !pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32))) {
ioc->dma_mask = DMA_BIT_MASK(32);
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
ioc->name));
} else {
printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
ioc->name, pci_name(pdev));
goto out_pci_release_region;
}
}
mem_phys = msize = 0;
port = psize = 0;
for (ii = 0; ii < DEVICE_COUNT_RESOURCE; ii++) {
if (pci_resource_flags(pdev, ii) & PCI_BASE_ADDRESS_SPACE_IO) {
if (psize)
continue;
/* Get I/O space! */
port = pci_resource_start(pdev, ii);
psize = pci_resource_len(pdev, ii);
} else {
if (msize)
continue;
/* Get memmap */
mem_phys = pci_resource_start(pdev, ii);
msize = pci_resource_len(pdev, ii);
}
}
ioc->mem_size = msize;
	/* Get logical ptr for PciMem0 space */
	mem = ioremap(mem_phys, msize);
if (mem == NULL) {
printk(MYIOC_s_ERR_FMT ": ERROR - Unable to map adapter"
" memory!\n", ioc->name);
r = -EINVAL;
goto out_pci_release_region;
}
ioc->memmap = mem;
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
ioc->name, mem, (unsigned long long)mem_phys));
ioc->mem_phys = mem_phys;
ioc->chip = (SYSIF_REGS __iomem *)mem;
/* Save Port IO values in case we need to do downloadboot */
ioc->pio_mem_phys = port;
ioc->pio_chip = (SYSIF_REGS __iomem *)port;
return 0;
out_pci_release_region:
pci_release_selected_regions(pdev, ioc->bars);
out_pci_disable_device:
pci_disable_device(pdev);
return r;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_attach - Install a PCI intelligent MPT adapter.
* @pdev: Pointer to pci_dev structure
* @id: PCI device ID information
*
 * This routine performs all the steps necessary to bring the IOC of
 * an MPT adapter to an OPERATIONAL state. This includes registering
* memory regions, registering the interrupt, and allocating request
* and reply memory pools.
*
* This routine also pre-fetches the LAN MAC address of a Fibre Channel
* MPT adapter.
*
* Returns 0 for success, non-zero for failure.
*
* TODO: Add support for polled controllers
*/
int
mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
{
MPT_ADAPTER *ioc;
u8 cb_idx;
int r = -ENODEV;
u8 pcixcmd;
static int mpt_ids = 0;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *dent;
#endif
ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC);
if (ioc == NULL) {
printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n");
return -ENOMEM;
}
ioc->id = mpt_ids++;
sprintf(ioc->name, "ioc%d", ioc->id);
dinitprintk(ioc, printk(KERN_WARNING MYNAM ": mpt_adapter_install\n"));
/*
* set initial debug level
* (refer to mptdebug.h)
*
*/
ioc->debug_level = mpt_debug_level;
if (mpt_debug_level)
printk(KERN_INFO "mpt_debug_level=%xh\n", mpt_debug_level);
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": mpt_adapter_install\n", ioc->name));
ioc->pcidev = pdev;
if (mpt_mapresources(ioc)) {
kfree(ioc);
return r;
}
/*
* Setting up proper handlers for scatter gather handling
*/
if (ioc->dma_mask == DMA_BIT_MASK(64)) {
if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
ioc->add_sge = &mpt_add_sge_64bit_1078;
else
ioc->add_sge = &mpt_add_sge_64bit;
ioc->add_chain = &mpt_add_chain_64bit;
ioc->sg_addr_size = 8;
} else {
ioc->add_sge = &mpt_add_sge;
ioc->add_chain = &mpt_add_chain;
ioc->sg_addr_size = 4;
}
ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
ioc->alloc_total = sizeof(MPT_ADAPTER);
ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */
ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
spin_lock_init(&ioc->taskmgmt_lock);
mutex_init(&ioc->internal_cmds.mutex);
init_completion(&ioc->internal_cmds.done);
mutex_init(&ioc->mptbase_cmds.mutex);
init_completion(&ioc->mptbase_cmds.done);
mutex_init(&ioc->taskmgmt_cmds.mutex);
init_completion(&ioc->taskmgmt_cmds.done);
/* Initialize the event logging.
*/
ioc->eventTypes = 0; /* None */
ioc->eventContext = 0;
ioc->eventLogSize = 0;
ioc->events = NULL;
#ifdef MFCNT
ioc->mfcnt = 0;
#endif
ioc->sh = NULL;
ioc->cached_fw = NULL;
/* Initialize SCSI Config Data structure
*/
memset(&ioc->spi_data, 0, sizeof(SpiCfgData));
/* Initialize the fc rport list head.
*/
INIT_LIST_HEAD(&ioc->fc_rports);
/* Find lookup slot. */
INIT_LIST_HEAD(&ioc->list);
/* Initialize workqueue */
INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
"mpt_poll_%d", ioc->id);
ioc->reset_work_q =
create_singlethread_workqueue(ioc->reset_work_q_name);
if (!ioc->reset_work_q) {
printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
ioc->name);
pci_release_selected_regions(pdev, ioc->bars);
kfree(ioc);
return -ENOMEM;
}
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
ioc->name, &ioc->facts, &ioc->pfacts[0]));
mpt_get_product_name(pdev->vendor, pdev->device, pdev->revision,
ioc->prod_name);
switch (pdev->device)
{
case MPI_MANUFACTPAGE_DEVICEID_FC939X:
case MPI_MANUFACTPAGE_DEVICEID_FC949X:
		ioc->errata_flag_1064 = 1;
		/* fall through */
case MPI_MANUFACTPAGE_DEVICEID_FC909:
case MPI_MANUFACTPAGE_DEVICEID_FC929:
case MPI_MANUFACTPAGE_DEVICEID_FC919:
case MPI_MANUFACTPAGE_DEVICEID_FC949E:
ioc->bus_type = FC;
break;
case MPI_MANUFACTPAGE_DEVICEID_FC929X:
if (pdev->revision < XL_929) {
/* 929X Chip Fix. Set Split transactions level
* for PCIX. Set MOST bits to zero.
*/
pci_read_config_byte(pdev, 0x6a, &pcixcmd);
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
} else {
/* 929XL Chip Fix. Set MMRBC to 0x08.
*/
pci_read_config_byte(pdev, 0x6a, &pcixcmd);
pcixcmd |= 0x08;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
}
ioc->bus_type = FC;
break;
case MPI_MANUFACTPAGE_DEVICEID_FC919X:
/* 919X Chip Fix. Set Split transactions level
* for PCIX. Set MOST bits to zero.
*/
pci_read_config_byte(pdev, 0x6a, &pcixcmd);
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
ioc->bus_type = FC;
break;
case MPI_MANUFACTPAGE_DEVID_53C1030:
/* 1030 Chip Fix. Disable Split transactions
* for PCIX. Set MOST bits to zero if Rev < C0( = 8).
*/
if (pdev->revision < C0_1030) {
pci_read_config_byte(pdev, 0x6a, &pcixcmd);
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
		}
		/* fall through */
case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
ioc->bus_type = SPI;
break;
case MPI_MANUFACTPAGE_DEVID_SAS1064:
case MPI_MANUFACTPAGE_DEVID_SAS1068:
ioc->errata_flag_1064 = 1;
ioc->bus_type = SAS;
break;
case MPI_MANUFACTPAGE_DEVID_SAS1064E:
case MPI_MANUFACTPAGE_DEVID_SAS1068E:
case MPI_MANUFACTPAGE_DEVID_SAS1078:
ioc->bus_type = SAS;
break;
}
switch (ioc->bus_type) {
case SAS:
ioc->msi_enable = mpt_msi_enable_sas;
break;
case SPI:
ioc->msi_enable = mpt_msi_enable_spi;
break;
case FC:
ioc->msi_enable = mpt_msi_enable_fc;
break;
default:
ioc->msi_enable = 0;
break;
}
ioc->fw_events_off = 1;
if (ioc->errata_flag_1064)
pci_disable_io_access(pdev);
spin_lock_init(&ioc->FreeQlock);
/* Disable all! */
CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
ioc->active = 0;
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
/* Set IOC ptr in the pcidev's driver data. */
pci_set_drvdata(ioc->pcidev, ioc);
/* Set lookup ptr. */
list_add_tail(&ioc->list, &ioc_list);
/* Check for "bound ports" (929, 929X, 1030, 1035) to reduce redundant resets.
*/
mpt_detect_bound_ports(ioc, pdev);
INIT_LIST_HEAD(&ioc->fw_event_list);
spin_lock_init(&ioc->fw_event_lock);
snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name);
if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
CAN_SLEEP)) != 0){
printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n",
ioc->name, r);
list_del(&ioc->list);
if (ioc->alt_ioc)
ioc->alt_ioc->alt_ioc = NULL;
iounmap(ioc->memmap);
if (r != -5)
pci_release_selected_regions(pdev, ioc->bars);
destroy_workqueue(ioc->reset_work_q);
ioc->reset_work_q = NULL;
kfree(ioc);
pci_set_drvdata(pdev, NULL);
return r;
}
/* call per device driver probe entry point */
for(cb_idx = 0; cb_idx < MPT_MAX_PROTOCOL_DRIVERS; cb_idx++) {
if(MptDeviceDriverHandlers[cb_idx] &&
MptDeviceDriverHandlers[cb_idx]->probe) {
MptDeviceDriverHandlers[cb_idx]->probe(pdev,id);
}
}
#ifdef CONFIG_PROC_FS
/*
* Create "/proc/mpt/iocN" subdirectory entry for each MPT adapter.
*/
dent = proc_mkdir(ioc->name, mpt_proc_root_dir);
if (dent) {
proc_create_data("info", S_IRUGO, dent, &mpt_iocinfo_proc_fops, ioc);
proc_create_data("summary", S_IRUGO, dent, &mpt_summary_proc_fops, ioc);
}
#endif
if (!ioc->alt_ioc)
queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
msecs_to_jiffies(MPT_POLLING_INTERVAL));
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_detach - Remove a PCI intelligent MPT adapter.
* @pdev: Pointer to pci_dev structure
*/
void
mpt_detach(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
char pname[32];
u8 cb_idx;
unsigned long flags;
struct workqueue_struct *wq;
/*
* Stop polling ioc for fault condition
*/
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
wq = ioc->reset_work_q;
ioc->reset_work_q = NULL;
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
cancel_delayed_work(&ioc->fault_reset_work);
destroy_workqueue(wq);
spin_lock_irqsave(&ioc->fw_event_lock, flags);
wq = ioc->fw_event_q;
ioc->fw_event_q = NULL;
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
destroy_workqueue(wq);
sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
remove_proc_entry(pname, NULL);
sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/info", ioc->name);
remove_proc_entry(pname, NULL);
sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name);
remove_proc_entry(pname, NULL);
/* call per device driver remove entry point */
for(cb_idx = 0; cb_idx < MPT_MAX_PROTOCOL_DRIVERS; cb_idx++) {
if(MptDeviceDriverHandlers[cb_idx] &&
MptDeviceDriverHandlers[cb_idx]->remove) {
MptDeviceDriverHandlers[cb_idx]->remove(pdev);
}
}
/* Disable interrupts! */
CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
ioc->active = 0;
synchronize_irq(pdev->irq);
/* Clear any lingering interrupt */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
CHIPREG_READ32(&ioc->chip->IntStatus);
mpt_adapter_dispose(ioc);
}
/**************************************************************************
* Power Management
*/
#ifdef CONFIG_PM
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_suspend - Fusion MPT base driver suspend routine.
* @pdev: Pointer to pci_dev structure
* @state: new state to enter
*/
int
mpt_suspend(struct pci_dev *pdev, pm_message_t state)
{
u32 device_state;
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
device_state = pci_choose_state(pdev, state);
printk(MYIOC_s_INFO_FMT "pci-suspend: pdev=0x%p, slot=%s, Entering "
"operating state [D%d]\n", ioc->name, pdev, pci_name(pdev),
device_state);
/* put ioc into READY_STATE */
if(SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
printk(MYIOC_s_ERR_FMT
"pci-suspend: IOC msg unit reset failed!\n", ioc->name);
}
/* disable interrupts */
CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
ioc->active = 0;
/* Clear any lingering interrupt */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
free_irq(ioc->pci_irq, ioc);
if (ioc->msi_enable)
pci_disable_msi(ioc->pcidev);
ioc->pci_irq = -1;
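/* Save PCI config space, release resources and enter the requested low-power state */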
pci_save_state(pdev);
pci_disable_device(pdev);
pci_release_selected_regions(pdev, ioc->bars);
pci_set_power_state(pdev, device_state);
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_resume - Fusion MPT base driver resume routine.
* @pdev: Pointer to pci_dev structure
*/
int
mpt_resume(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
u32 device_state = pdev->current_state;
int recovery_state;
int err;
printk(MYIOC_s_INFO_FMT "pci-resume: pdev=0x%p, slot=%s, Previous "
"operating state [D%d]\n", ioc->name, pdev, pci_name(pdev),
device_state);
pci_set_power_state(pdev, PCI_D0);
pci_enable_wake(pdev, PCI_D0, 0);
pci_restore_state(pdev);
ioc->pcidev = pdev;
err = mpt_mapresources(ioc);
if (err)
return err;
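/* Re-select the SG element and chain handlers to match the negotiated DMA mask */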
if (ioc->dma_mask == DMA_BIT_MASK(64)) {
if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
ioc->add_sge = &mpt_add_sge_64bit_1078;
else
ioc->add_sge = &mpt_add_sge_64bit;
ioc->add_chain = &mpt_add_chain_64bit;
ioc->sg_addr_size = 8;
} else {
ioc->add_sge = &mpt_add_sge;
ioc->add_chain = &mpt_add_chain;
ioc->sg_addr_size = 4;
}
ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
printk(MYIOC_s_INFO_FMT "pci-resume: ioc-state=0x%x,doorbell=0x%x\n",
ioc->name, (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT),
CHIPREG_READ32(&ioc->chip->Doorbell));
/*
* Errata workaround for SAS pci express:
* Upon returning to the D0 state, the contents of the doorbell will be
* stale data, and this will incorrectly signal to the host driver that
* the firmware is ready to process mpt commands. The workaround is
* to issue a diagnostic reset.
*/
if (ioc->bus_type == SAS && (pdev->device ==
MPI_MANUFACTPAGE_DEVID_SAS1068E || pdev->device ==
MPI_MANUFACTPAGE_DEVID_SAS1064E)) {
if (KickStart(ioc, 1, CAN_SLEEP) < 0) {
printk(MYIOC_s_WARN_FMT "pci-resume: Cannot recover\n",
ioc->name);
goto out;
}
}
/* bring ioc to operational state */
printk(MYIOC_s_INFO_FMT "Sending mpt_do_ioc_recovery\n", ioc->name);
recovery_state = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
CAN_SLEEP);
if (recovery_state != 0)
printk(MYIOC_s_WARN_FMT "pci-resume: Cannot recover, "
"error:[%x]\n", ioc->name, recovery_state);
else
printk(MYIOC_s_INFO_FMT
"pci-resume: success\n", ioc->name);
out:
return 0;
}
#endif
static int
mpt_signal_reset(u8 index, MPT_ADAPTER *ioc, int reset_phase)
{
if ((MptDriverClass[index] == MPTSPI_DRIVER &&
ioc->bus_type != SPI) ||
(MptDriverClass[index] == MPTFC_DRIVER &&
ioc->bus_type != FC) ||
(MptDriverClass[index] == MPTSAS_DRIVER &&
ioc->bus_type != SAS))
/* make sure we only call the relevant reset handler
* for the bus */
return 0;
return (MptResetHandlers[index])(ioc, reset_phase);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_do_ioc_recovery - Initialize or recover MPT adapter.
* @ioc: Pointer to MPT adapter structure
* @reason: Event word / reason
* @sleepFlag: Use schedule if CAN_SLEEP else use udelay.
*
* This routine performs all the steps necessary to bring the IOC
* to an OPERATIONAL state.
*
* This routine also pre-fetches the LAN MAC address of a Fibre Channel
* MPT adapter.
*
* Returns:
* 0 for success
* -1 if failed to get board READY
* -2 if READY but IOCFacts Failed
* -3 if READY but PrimeIOCFifos Failed
* -4 if READY but IOCInit Failed
* -5 if failed to enable_device and/or request_selected_regions
* -6 if failed to upload firmware
*/
static int
mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
{
int hard_reset_done = 0;
int alt_ioc_ready = 0;
int hard;
int rc=0;
int ii;
int ret = 0;
int reset_alt_ioc_active = 0;
int irq_allocated = 0;
u8 *a;
printk(MYIOC_s_INFO_FMT "Initiating %s\n", ioc->name,
reason == MPT_HOSTEVENT_IOC_BRINGUP ? "bringup" : "recovery");
/* Disable reply interrupts (also blocks FreeQ) */
CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
ioc->active = 0;
if (ioc->alt_ioc) {
if (ioc->alt_ioc->active ||
reason == MPT_HOSTEVENT_IOC_RECOVER) {
reset_alt_ioc_active = 1;
/* Disable alt-IOC's reply interrupts
* (and FreeQ) for a bit
*/
CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
0xFFFFFFFF);
ioc->alt_ioc->active = 0;
}
}
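/* Force a hard reset only when recovering; on bringup MakeIocReady resets only if the IOC is not already READY */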
hard = 1;
if (reason == MPT_HOSTEVENT_IOC_BRINGUP)
hard = 0;
if ((hard_reset_done = MakeIocReady(ioc, hard, sleepFlag)) < 0) {
if (hard_reset_done == -4) {
printk(MYIOC_s_WARN_FMT "Owned by PEER..skipping!\n",
ioc->name);
if (reset_alt_ioc_active && ioc->alt_ioc) {
/* (re)Enable alt-IOC! (reply interrupt, FreeQ) */
dprintk(ioc, printk(MYIOC_s_INFO_FMT
"alt_ioc reply irq re-enabled\n", ioc->alt_ioc->name));
CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, MPI_HIM_DIM);
ioc->alt_ioc->active = 1;
}
} else {
printk(MYIOC_s_WARN_FMT
"NOT READY WARNING!\n", ioc->name);
}
ret = -1;
goto out;
}
/* hard_reset_done = 0 if a soft reset was performed
* and 1 if a hard reset was performed.
*/
if (hard_reset_done && reset_alt_ioc_active && ioc->alt_ioc) {
if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0)
alt_ioc_ready = 1;
else
printk(MYIOC_s_WARN_FMT
": alt-ioc Not ready WARNING!\n",
ioc->alt_ioc->name);
}
for (ii=0; ii<5; ii++) {
/* Get IOC facts! Allow 5 retries */
if ((rc = GetIocFacts(ioc, sleepFlag, reason)) == 0)
break;
}
if (ii == 5) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Retry IocFacts failed rc=%x\n", ioc->name, rc));
ret = -2;
} else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
MptDisplayIocCapabilities(ioc);
}
if (alt_ioc_ready) {
if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Initial Alt IocFacts failed rc=%x\n",
ioc->name, rc));
/* Retry - alt IOC was initialized once
*/
rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason);
}
if (rc) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Retry Alt IocFacts failed rc=%x\n", ioc->name, rc));
alt_ioc_ready = 0;
reset_alt_ioc_active = 0;
} else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
MptDisplayIocCapabilities(ioc->alt_ioc);
}
}
if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP) &&
(ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)) {
pci_release_selected_regions(ioc->pcidev, ioc->bars);
ioc->bars = pci_select_bars(ioc->pcidev, IORESOURCE_MEM |
IORESOURCE_IO);
if (pci_enable_device(ioc->pcidev))
return -5;
if (pci_request_selected_regions(ioc->pcidev, ioc->bars,
"mpt"))
return -5;
}
/*
* Device is reset now. It must have de-asserted the interrupt line
* (if it was asserted) and it should be safe to register for the
* interrupt now.
*/
if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
ioc->pci_irq = -1;
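/* Prefer MSI when enabled for this controller; fall back to legacy INTx if MSI setup fails */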
if (ioc->pcidev->irq) {
if (ioc->msi_enable && !pci_enable_msi(ioc->pcidev))
printk(MYIOC_s_INFO_FMT "PCI-MSI enabled\n",
ioc->name);
else
ioc->msi_enable = 0;
rc = request_irq(ioc->pcidev->irq, mpt_interrupt,
IRQF_SHARED, ioc->name, ioc);
if (rc < 0) {
printk(MYIOC_s_ERR_FMT "Unable to allocate "
"interrupt %d!\n",
ioc->name, ioc->pcidev->irq);
if (ioc->msi_enable)
pci_disable_msi(ioc->pcidev);
ret = -EBUSY;
goto out;
}
irq_allocated = 1;
ioc->pci_irq = ioc->pcidev->irq;
pci_set_master(ioc->pcidev); /* enable PCI bus mastering for DMA */
pci_set_drvdata(ioc->pcidev, ioc);
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
"installed at interrupt %d\n", ioc->name,
ioc->pcidev->irq));
}
}
/* Prime reply & request queues!
* (mucho alloc's) Must be done prior to
* init as upper addresses are needed for init.
* If fails, continue with alt-ioc processing
*/
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "PrimeIocFifos\n",
ioc->name));
if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0))
ret = -3;
/* May need to check/upload firmware & data here!
* If fails, continue with alt-ioc processing
*/
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "SendIocInit\n",
ioc->name));
if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0))
ret = -4;
/* Prime the alt-IOC's reply and request FIFOs as well */
if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) {
printk(MYIOC_s_WARN_FMT
": alt-ioc (%d) FIFO mgmt alloc WARNING!\n",
ioc->alt_ioc->name, rc);
alt_ioc_ready = 0;
reset_alt_ioc_active = 0;
}
if (alt_ioc_ready) {
if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) {
alt_ioc_ready = 0;
reset_alt_ioc_active = 0;
printk(MYIOC_s_WARN_FMT
": alt-ioc: (%d) init failure WARNING!\n",
ioc->alt_ioc->name, rc);
}
}
if (reason == MPT_HOSTEVENT_IOC_BRINGUP){
if (ioc->upload_fw) {
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"firmware upload required!\n", ioc->name));
/* Controller is not operational, cannot do upload
*/
if (ret == 0) {
rc = mpt_do_upload(ioc, sleepFlag);
if (rc == 0) {
if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
/*
* Maintain only one pointer to FW memory
* so there will not be two attempts to
* downloadboot onboard dual function
* chips (mpt_adapter_disable,
* mpt_diag_reset)
*/
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"mpt_upload: alt_%s has cached_fw=%p \n",
ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw));
ioc->cached_fw = NULL;
}
} else {
printk(MYIOC_s_WARN_FMT
"firmware upload failure!\n", ioc->name);
ret = -6;
}
}
}
}
/* Enable MPT base driver management of EventNotification
* and EventAck handling.
*/
if ((ret == 0) && (!ioc->facts.EventState)) {
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
"SendEventNotification\n",
ioc->name));
ret = SendEventNotification(ioc, 1, sleepFlag); /* 1=Enable */
}
if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
rc = SendEventNotification(ioc->alt_ioc, 1, sleepFlag);
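/* Initialization succeeded: unmask reply interrupts and mark the IOC active */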
if (ret == 0) {
/* Enable! (reply interrupt) */
CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
ioc->active = 1;
}
if (rc == 0) { /* alt ioc */
if (reset_alt_ioc_active && ioc->alt_ioc) {
/* (re)Enable alt-IOC! (reply interrupt) */
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "alt-ioc"
"reply irq re-enabled\n",
ioc->alt_ioc->name));
CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
MPI_HIM_DIM);
ioc->alt_ioc->active = 1;
}
}
/* Add additional "reason" check before call to GetLanConfigPages
* (combined with GetIoUnitPage2 call). This prevents a somewhat
* recursive scenario; GetLanConfigPages times out, timer expired
* routine calls HardResetHandler, which calls into here again,
* and we try GetLanConfigPages again...
*/
if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
/*
* Initialize the linked list of inactive RAID volumes.
*/
mutex_init(&ioc->raid_data.inactive_list_mutex);
INIT_LIST_HEAD(&ioc->raid_data.inactive_list);
switch (ioc->bus_type) {
case SAS:
/* clear persistency table */
if(ioc->facts.IOCExceptions &
MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) {
ret = mptbase_sas_persist_operation(ioc,
MPI_SAS_OP_CLEAR_NOT_PRESENT);
if(ret != 0)
goto out;
}
/* Find IM volumes
*/
mpt_findImVolumes(ioc);
/* Check, and possibly reset, the coalescing value
*/
mpt_read_ioc_pg_1(ioc);
break;
case FC:
if ((ioc->pfacts[0].ProtocolFlags &
MPI_PORTFACTS_PROTOCOL_LAN) &&
(ioc->lan_cnfg_page0.Header.PageLength == 0)) {
/*
* Pre-fetch the port's LAN MAC address!
* (LANPage1_t stuff)
*/
(void) GetLanConfigPages(ioc);
a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"LanAddr = %02X:%02X:%02X"
":%02X:%02X:%02X\n",
ioc->name, a[5], a[4],
a[3], a[2], a[1], a[0]));
}
break;
case SPI:
/* Get NVRAM and adapter maximums from SPP 0 and 2
*/
mpt_GetScsiPortSettings(ioc, 0);
/* Get version and length of SDP 1
*/
mpt_readScsiDevicePageHeaders(ioc, 0);
/* Find IM volumes
*/
if (ioc->facts.MsgVersion >= MPI_VERSION_01_02)
mpt_findImVolumes(ioc);
/* Check, and possibly reset, the coalescing value
*/
mpt_read_ioc_pg_1(ioc);
mpt_read_ioc_pg_4(ioc);
break;
}
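/* Common to all bus types: cache IO Unit Page 2 and Manufacturing Page 0 */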
GetIoUnitPage2(ioc);
mpt_get_manufacturing_pg_0(ioc);
}
out:
if ((ret != 0) && irq_allocated) {
free_irq(ioc->pci_irq, ioc);
if (ioc->msi_enable)
pci_disable_msi(ioc->pcidev);
}
return ret;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_detect_bound_ports - Search for matching PCI bus/dev_function
* @ioc: Pointer to MPT adapter structure
* @pdev: Pointer to (struct pci_dev) structure
*
* Search for PCI bus/dev_function which matches
* PCI bus/dev_function (+/-1) for newly discovered 929,
* 929X, 1030 or 1035.
*
* If match on PCI dev_function +/-1 is found, bind the two MPT adapters
* using alt_ioc pointer fields in their %MPT_ADAPTER structures.
*/
static void
mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev)
{
struct pci_dev *peer=NULL;
unsigned int slot = PCI_SLOT(pdev->devfn);
unsigned int func = PCI_FUNC(pdev->devfn);
MPT_ADAPTER *ioc_srch;
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PCI device %s devfn=%x/%x,"
" searching for devfn match on %x or %x\n",
ioc->name, pci_name(pdev), pdev->bus->number,
pdev->devfn, func-1, func+1));
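/* Look for the partner function at devfn-1 first, then devfn+1, within the same slot */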
peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func-1));
if (!peer) {
peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func+1));
if (!peer)
return;
}
list_for_each_entry(ioc_srch, &ioc_list, list) {
struct pci_dev *_pcidev = ioc_srch->pcidev;
if (_pcidev == peer) {
/* Paranoia checks */
if (ioc->alt_ioc != NULL) {
printk(MYIOC_s_WARN_FMT
"Oops, already bound (%s <==> %s)!\n",
ioc->name, ioc->name, ioc->alt_ioc->name);
break;
} else if (ioc_srch->alt_ioc != NULL) {
printk(MYIOC_s_WARN_FMT
"Oops, already bound (%s <==> %s)!\n",
ioc_srch->name, ioc_srch->name,
ioc_srch->alt_ioc->name);
break;
}
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"FOUND! binding %s <==> %s\n",
ioc->name, ioc->name, ioc_srch->name));
ioc_srch->alt_ioc = ioc;
ioc->alt_ioc = ioc_srch;
}
}
pci_dev_put(peer);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_adapter_disable - Disable misbehaving MPT adapter.
* @ioc: Pointer to MPT adapter structure
*/
static void
mpt_adapter_disable(MPT_ADAPTER *ioc)
{
int sz;
int ret;
if (ioc->cached_fw != NULL) {
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: Pushing FW onto adapter\n", __func__, ioc->name));
if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
ioc->cached_fw, CAN_SLEEP)) < 0) {
printk(MYIOC_s_WARN_FMT
": firmware downloadboot failure (%d)!\n",
ioc->name, ret);
}
}
/*
* Put the controller into ready state (if it's not already)
*/
if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) {
if (!SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET,
CAN_SLEEP)) {
if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY)
printk(MYIOC_s_ERR_FMT "%s: IOC msg unit "
"reset failed to put ioc in ready state!\n",
ioc->name, __func__);
} else
printk(MYIOC_s_ERR_FMT "%s: IOC msg unit reset "
"failed!\n", ioc->name, __func__);
}
/* Disable adapter interrupts! */
synchronize_irq(ioc->pcidev->irq);
CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
ioc->active = 0;
/* Clear any lingering interrupt */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
CHIPREG_READ32(&ioc->chip->IntStatus);
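/* Free the coherent request/reply frame pool set up by PrimeIocFifos */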
if (ioc->alloc != NULL) {
sz = ioc->alloc_sz;
dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "free @ %p, sz=%d bytes\n",
ioc->name, ioc->alloc, ioc->alloc_sz));
pci_free_consistent(ioc->pcidev, sz,
ioc->alloc, ioc->alloc_dma);
ioc->reply_frames = NULL;
ioc->req_frames = NULL;
ioc->alloc = NULL;
ioc->alloc_total -= sz;
}
if (ioc->sense_buf_pool != NULL) {
sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
pci_free_consistent(ioc->pcidev, sz,
ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
ioc->sense_buf_pool = NULL;
ioc->alloc_total -= sz;
}
if (ioc->events != NULL){
sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS);
kfree(ioc->events);
ioc->events = NULL;
ioc->alloc_total -= sz;
}
mpt_free_fw_memory(ioc);
kfree(ioc->spi_data.nvram);
mpt_inactive_raid_list_free(ioc);
kfree(ioc->raid_data.pIocPg2);
kfree(ioc->raid_data.pIocPg3);
ioc->spi_data.nvram = NULL;
ioc->raid_data.pIocPg3 = NULL;
if (ioc->spi_data.pIocPg4 != NULL) {
sz = ioc->spi_data.IocPg4Sz;
pci_free_consistent(ioc->pcidev, sz,
ioc->spi_data.pIocPg4,
ioc->spi_data.IocPg4_dma);
ioc->spi_data.pIocPg4 = NULL;
ioc->alloc_total -= sz;
}
if (ioc->ReqToChain != NULL) {
kfree(ioc->ReqToChain);
kfree(ioc->RequestNB);
ioc->ReqToChain = NULL;
}
kfree(ioc->ChainToChain);
ioc->ChainToChain = NULL;
if (ioc->HostPageBuffer != NULL) {
if((ret = mpt_host_page_access_control(ioc,
MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) {
printk(MYIOC_s_ERR_FMT
": %s: host page buffers free failed (%d)!\n",
ioc->name, __func__, ret);
}
dexitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"HostPageBuffer free @ %p, sz=%d bytes\n",
ioc->name, ioc->HostPageBuffer,
ioc->HostPageBuffer_sz));
pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
ioc->HostPageBuffer = NULL;
ioc->alloc_total -= ioc->HostPageBuffer_sz;
ioc->HostPageBuffer_sz = 0;
}
pci_set_drvdata(ioc->pcidev, NULL);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_adapter_dispose - Free all resources associated with an MPT adapter
* @ioc: Pointer to MPT adapter structure
*
* This routine unregisters h/w resources and frees all alloc'd memory
* associated with an MPT adapter structure.
*/
static void
mpt_adapter_dispose(MPT_ADAPTER *ioc)
{
int sz_first, sz_last;
if (ioc == NULL)
return;
sz_first = ioc->alloc_total;
mpt_adapter_disable(ioc);
if (ioc->pci_irq != -1) {
free_irq(ioc->pci_irq, ioc);
if (ioc->msi_enable)
pci_disable_msi(ioc->pcidev);
ioc->pci_irq = -1;
}
if (ioc->memmap != NULL) {
iounmap(ioc->memmap);
ioc->memmap = NULL;
}
pci_disable_device(ioc->pcidev);
pci_release_selected_regions(ioc->pcidev, ioc->bars);
#if defined(CONFIG_MTRR) && 0
if (ioc->mtrr_reg > 0) {
mtrr_del(ioc->mtrr_reg, 0, 0);
dprintk(ioc, printk(MYIOC_s_INFO_FMT "MTRR region de-registered\n", ioc->name));
}
#endif
/* Zap the adapter lookup ptr! */
list_del(&ioc->list);
sz_last = ioc->alloc_total;
dprintk(ioc, printk(MYIOC_s_INFO_FMT "free'd %d of %d bytes\n",
ioc->name, sz_first-sz_last+(int)sizeof(*ioc), sz_first));
if (ioc->alt_ioc)
ioc->alt_ioc->alt_ioc = NULL;
kfree(ioc);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* MptDisplayIocCapabilities - Display IOC's capabilities.
* @ioc: Pointer to MPT adapter structure
*/
static void
MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
{
int i = 0;
printk(KERN_INFO "%s: ", ioc->name);
if (ioc->prod_name)
printk("%s: ", ioc->prod_name);
printk("Capabilities={");
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
printk("Initiator");
i++;
}
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
printk("%sTarget", i ? "," : "");
i++;
}
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
printk("%sLAN", i ? "," : "");
i++;
}
#if 0
/*
* This would probably evoke more questions than it's worth
*/
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
printk("%sLogBusAddr", i ? "," : "");
i++;
}
#endif
printk("}\n");
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* MakeIocReady - Get IOC to a READY state, using KickStart if needed.
* @ioc: Pointer to MPT_ADAPTER structure
* @force: Force hard KickStart of IOC
* @sleepFlag: Specifies whether the process can sleep
*
* Returns:
* 1 - DIAG reset and READY
* 0 - READY initially OR soft reset and READY
* -1 - Any failure on KickStart
* -2 - Msg Unit Reset Failed
* -3 - IO Unit Reset Failed
* -4 - IOC owned by a PEER
*/
static int
MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
{
u32 ioc_state;
int statefault = 0;
int cntdn;
int hard_reset_done = 0;
int r;
int ii;
int whoinit;
/* Get current [raw] IOC state */
ioc_state = mpt_GetIocState(ioc, 0);
dhsprintk(ioc, printk(MYIOC_s_INFO_FMT "MakeIocReady [raw] state=%08x\n", ioc->name, ioc_state));
/*
* Check to see if IOC got left/stuck in doorbell handshake
* grip of death. If so, hard reset the IOC.
*/
if (ioc_state & MPI_DOORBELL_ACTIVE) {
statefault = 1;
printk(MYIOC_s_WARN_FMT "Unexpected doorbell active!\n",
ioc->name);
}
/* Is it already READY? */
if (!statefault &&
((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)) {
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
"IOC is in READY state\n", ioc->name));
return 0;
}
/*
* Check to see if IOC is in FAULT state.
*/
if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
statefault = 2;
printk(MYIOC_s_WARN_FMT "IOC is in FAULT state!!!\n",
ioc->name);
printk(MYIOC_s_WARN_FMT " FAULT code = %04xh\n",
ioc->name, ioc_state & MPI_DOORBELL_DATA_MASK);
}
/*
* Hmmm... Did it get left operational?
*/
if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOC operational unexpected\n",
ioc->name));
/* Check WhoInit.
* If PCI Peer, exit.
* Else, if no fault conditions are present, issue a MessageUnitReset
* Else, fall through to KickStart case
*/
whoinit = (ioc_state & MPI_DOORBELL_WHO_INIT_MASK) >> MPI_DOORBELL_WHO_INIT_SHIFT;
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
"whoinit 0x%x statefault %d force %d\n",
ioc->name, whoinit, statefault, force));
if (whoinit == MPI_WHOINIT_PCI_PEER)
return -4;
else {
if ((statefault == 0 ) && (force == 0)) {
if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) == 0)
return 0;
}
statefault = 3;
}
}
hard_reset_done = KickStart(ioc, statefault||force, sleepFlag);
if (hard_reset_done < 0)
return -1;
/*
* Loop here waiting for IOC to come READY.
*/
ii = 0;
cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 5; /* 5 seconds */
while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) {
if (ioc_state == MPI_IOC_STATE_OPERATIONAL) {
/*
* BIOS or previous driver load left IOC in OP state.
* Reset messaging FIFOs.
*/
if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) != 0) {
printk(MYIOC_s_ERR_FMT "IOC msg unit reset failed!\n", ioc->name);
return -2;
}
} else if (ioc_state == MPI_IOC_STATE_RESET) {
/*
* Something is wrong. Try to get IOC back
* to a known state.
*/
if ((r = SendIocReset(ioc, MPI_FUNCTION_IO_UNIT_RESET, sleepFlag)) != 0) {
printk(MYIOC_s_ERR_FMT "IO unit reset failed!\n", ioc->name);
return -3;
}
}
ii++; cntdn--;
if (!cntdn) {
printk(MYIOC_s_ERR_FMT
"Wait IOC_READY state (0x%x) timeout(%d)!\n",
ioc->name, ioc_state, (int)((ii+5)/HZ));
return -ETIME;
}
if (sleepFlag == CAN_SLEEP) {
msleep(1);
} else {
mdelay (1); /* 1 msec delay */
}
}
if (statefault < 3) {
printk(MYIOC_s_INFO_FMT "Recovered from %s\n", ioc->name,
statefault == 1 ? "stuck handshake" : "IOC FAULT");
}
return hard_reset_done;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_GetIocState - Get the current state of an MPT adapter.
* @ioc: Pointer to MPT_ADAPTER structure
* @cooked: Request raw or cooked IOC state
*
* Returns all IOC Doorbell register bits if cooked==0, else just the
* Doorbell bits in MPI_IOC_STATE_MASK.
*/
u32
mpt_GetIocState(MPT_ADAPTER *ioc, int cooked)
{
u32 s, sc;
/* Get! */
s = CHIPREG_READ32(&ioc->chip->Doorbell);
sc = s & MPI_IOC_STATE_MASK;
/* Save! */
ioc->last_state = sc;
return cooked ? sc : s;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* GetIocFacts - Send IOCFacts request to MPT adapter.
* @ioc: Pointer to MPT_ADAPTER structure
* @sleepFlag: Specifies whether the process can sleep
* @reason: If recovery, only update facts.
*
* Returns 0 for success, non-zero for failure.
*/
static int
GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
{
IOCFacts_t get_facts;
IOCFactsReply_t *facts;
int r;
int req_sz;
int reply_sz;
int sz;
u32 status, vv;
u8 shiftFactor=1;
/* IOC *must* NOT be in RESET state! */
if (ioc->last_state == MPI_IOC_STATE_RESET) {
printk(KERN_ERR MYNAM
": ERROR - Can't get IOCFacts, %s NOT READY! (%08x)\n",
ioc->name, ioc->last_state);
return -44;
}
facts = &ioc->facts;
/* Destination (reply area)... */
reply_sz = sizeof(*facts);
memset(facts, 0, reply_sz);
/* Request area (get_facts on the stack right now!) */
req_sz = sizeof(get_facts);
memset(&get_facts, 0, req_sz);
get_facts.Function = MPI_FUNCTION_IOC_FACTS;
/* Assert: All other get_facts fields are zero! */
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Sending get IocFacts request req_sz=%d reply_sz=%d\n",
ioc->name, req_sz, reply_sz));
/* No non-zero fields in the get_facts request are greater than
* 1 byte in size, so we can just fire it off as is.
*/
r = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_facts,
reply_sz, (u16*)facts, 5 /*seconds*/, sleepFlag);
if (r != 0)
return r;
/*
* Now byte swap (GRRR) the necessary fields before any further
* inspection of reply contents.
*
* But need to do some sanity checks on MsgLength (byte) field
* to make sure we don't zero IOC's req_sz!
*/
/* Did we get a valid reply? */
if (facts->MsgLength > offsetof(IOCFactsReply_t, RequestFrameSize)/sizeof(u32)) {
if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
/*
* If we have not been here before, save off the first WhoInit value
*/
if (ioc->FirstWhoInit == WHOINIT_UNKNOWN)
ioc->FirstWhoInit = facts->WhoInit;
}
facts->MsgVersion = le16_to_cpu(facts->MsgVersion);
facts->MsgContext = le32_to_cpu(facts->MsgContext);
facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions);
facts->IOCStatus = le16_to_cpu(facts->IOCStatus);
facts->IOCLogInfo = le32_to_cpu(facts->IOCLogInfo);
status = facts->IOCStatus & MPI_IOCSTATUS_MASK; /* already byte-swapped above */
/* CHECKME! IOCStatus, IOCLogInfo */
facts->ReplyQueueDepth = le16_to_cpu(facts->ReplyQueueDepth);
facts->RequestFrameSize = le16_to_cpu(facts->RequestFrameSize);
/*
* FC f/w version changed between 1.1 and 1.2
* Old: u16{Major(4),Minor(4),SubMinor(8)}
* New: u32{Major(8),Minor(8),Unit(8),Dev(8)}
*/
if (facts->MsgVersion < MPI_VERSION_01_02) {
/*
* Handle old FC f/w style, convert to new...
*/
u16 oldv = le16_to_cpu(facts->Reserved_0101_FWVersion);
facts->FWVersion.Word =
((oldv<<12) & 0xFF000000) |
((oldv<<8) & 0x000FFF00);
} else
facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word);
facts->ProductID = le16_to_cpu(facts->ProductID);
if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
> MPI_FW_HEADER_PID_PROD_TARGET_SCSI)
ioc->ir_firmware = 1;
facts->CurrentHostMfaHighAddr =
le32_to_cpu(facts->CurrentHostMfaHighAddr);
facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits);
facts->CurrentSenseBufferHighAddr =
le32_to_cpu(facts->CurrentSenseBufferHighAddr);
facts->CurReplyFrameSize =
le16_to_cpu(facts->CurReplyFrameSize);
facts->IOCCapabilities = le32_to_cpu(facts->IOCCapabilities);
/*
* Handle NEW (!) IOCFactsReply fields in MPI-1.01.xx
* Older MPI-1.00.xx struct had 13 dwords, and enlarged
* to 14 in MPI-1.01.0x.
*/
if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 &&
facts->MsgVersion > MPI_VERSION_01_00) {
facts->FWImageSize = le32_to_cpu(facts->FWImageSize);
}
sz = facts->FWImageSize;
if ( sz & 0x01 )