| /* |
| * Disk Array driver for HP Smart Array SAS controllers |
| * Copyright 2016 Microsemi Corporation |
| * Copyright 2014-2015 PMC-Sierra, Inc. |
| * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P. |
| * |
| * This program is free software; you can redistribute it and/or modify |
| * it under the terms of the GNU General Public License as published by |
| * the Free Software Foundation; version 2 of the License. |
| * |
| * This program is distributed in the hope that it will be useful, |
| * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or |
| * NON INFRINGEMENT. See the GNU General Public License for more details. |
| * |
| * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com |
| * |
| */ |
| |
| #include <linux/module.h> |
| #include <linux/interrupt.h> |
| #include <linux/types.h> |
| #include <linux/pci.h> |
| #include <linux/pci-aspm.h> |
| #include <linux/kernel.h> |
| #include <linux/slab.h> |
| #include <linux/delay.h> |
| #include <linux/fs.h> |
| #include <linux/timer.h> |
| #include <linux/init.h> |
| #include <linux/spinlock.h> |
| #include <linux/compat.h> |
| #include <linux/blktrace_api.h> |
| #include <linux/uaccess.h> |
| #include <linux/io.h> |
| #include <linux/dma-mapping.h> |
| #include <linux/completion.h> |
| #include <linux/moduleparam.h> |
| #include <scsi/scsi.h> |
| #include <scsi/scsi_cmnd.h> |
| #include <scsi/scsi_device.h> |
| #include <scsi/scsi_host.h> |
| #include <scsi/scsi_tcq.h> |
| #include <scsi/scsi_eh.h> |
| #include <scsi/scsi_transport_sas.h> |
| #include <scsi/scsi_dbg.h> |
| #include <linux/cciss_ioctl.h> |
| #include <linux/string.h> |
| #include <linux/bitmap.h> |
| #include <linux/atomic.h> |
| #include <linux/jiffies.h> |
| #include <linux/percpu-defs.h> |
| #include <linux/percpu.h> |
| #include <asm/unaligned.h> |
| #include <asm/div64.h> |
| #include "hpsa_cmd.h" |
| #include "hpsa.h" |
| |
| /* |
| * HPSA_DRIVER_VERSION must be three byte values (each 0-255) separated by '.', |
| * with an optional trailing '-' followed by a byte value (0-255). |
| */ |
| #define HPSA_DRIVER_VERSION "3.4.20-0" |
| #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" |
| #define HPSA "hpsa" |
| |
| /* How long to wait for CISS doorbell communication */ |
| #define CLEAR_EVENT_WAIT_INTERVAL 20 /* ms for each msleep() call */ |
| #define MODE_CHANGE_WAIT_INTERVAL 10 /* ms for each msleep() call */ |
| #define MAX_CLEAR_EVENT_WAIT 30000 /* times 20 ms = 600 s */ |
| #define MAX_MODE_CHANGE_WAIT 2000 /* times 10 ms = 20 s */ |
| #define MAX_IOCTL_CONFIG_WAIT 1000 |
| |
| /* define how many times we will try a command because of bus resets */ |
| #define MAX_CMD_RETRIES 3 |
| |
| /* Embedded module documentation macros - see modules.h */ |
| MODULE_AUTHOR("Hewlett-Packard Company"); |
| MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \ |
| HPSA_DRIVER_VERSION); |
| MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); |
| MODULE_VERSION(HPSA_DRIVER_VERSION); |
| MODULE_LICENSE("GPL"); |
| MODULE_ALIAS("cciss"); |
| |
| static int hpsa_simple_mode; |
| module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR); |
| MODULE_PARM_DESC(hpsa_simple_mode, |
| "Use 'simple mode' rather than 'performant mode'"); |
| |
| /* define the PCI info for the cards we can control */ |
| static const struct pci_device_id hpsa_pci_device_id[] = { |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD}, |
| {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE}, |
| {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580}, |
| {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581}, |
| {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582}, |
| {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583}, |
| {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584}, |
| {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585}, |
| {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076}, |
| {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087}, |
| {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D}, |
| {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088}, |
| {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f}, |
| {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
| PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, |
| {PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
| PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, |
| {0,} |
| }; |
| |
| MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id); |
| |
| /* board_id = Subsystem Device ID & Vendor ID |
| * product = Marketing Name for the board |
| * access = Address of the struct of function pointers |
| */ |
| static struct board_type products[] = { |
| {0x40700E11, "Smart Array 5300", &SA5A_access}, |
| {0x40800E11, "Smart Array 5i", &SA5B_access}, |
| {0x40820E11, "Smart Array 532", &SA5B_access}, |
| {0x40830E11, "Smart Array 5312", &SA5B_access}, |
| {0x409A0E11, "Smart Array 641", &SA5A_access}, |
| {0x409B0E11, "Smart Array 642", &SA5A_access}, |
| {0x409C0E11, "Smart Array 6400", &SA5A_access}, |
| {0x409D0E11, "Smart Array 6400 EM", &SA5A_access}, |
| {0x40910E11, "Smart Array 6i", &SA5A_access}, |
| {0x3225103C, "Smart Array P600", &SA5A_access}, |
| {0x3223103C, "Smart Array P800", &SA5A_access}, |
| {0x3234103C, "Smart Array P400", &SA5A_access}, |
| {0x3235103C, "Smart Array P400i", &SA5A_access}, |
| {0x3211103C, "Smart Array E200i", &SA5A_access}, |
| {0x3212103C, "Smart Array E200", &SA5A_access}, |
| {0x3213103C, "Smart Array E200i", &SA5A_access}, |
| {0x3214103C, "Smart Array E200i", &SA5A_access}, |
| {0x3215103C, "Smart Array E200i", &SA5A_access}, |
| {0x3237103C, "Smart Array E500", &SA5A_access}, |
| {0x323D103C, "Smart Array P700m", &SA5A_access}, |
| {0x3241103C, "Smart Array P212", &SA5_access}, |
| {0x3243103C, "Smart Array P410", &SA5_access}, |
| {0x3245103C, "Smart Array P410i", &SA5_access}, |
| {0x3247103C, "Smart Array P411", &SA5_access}, |
| {0x3249103C, "Smart Array P812", &SA5_access}, |
| {0x324A103C, "Smart Array P712m", &SA5_access}, |
| {0x324B103C, "Smart Array P711m", &SA5_access}, |
| {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */ |
| {0x3350103C, "Smart Array P222", &SA5_access}, |
| {0x3351103C, "Smart Array P420", &SA5_access}, |
| {0x3352103C, "Smart Array P421", &SA5_access}, |
| {0x3353103C, "Smart Array P822", &SA5_access}, |
| {0x3354103C, "Smart Array P420i", &SA5_access}, |
| {0x3355103C, "Smart Array P220i", &SA5_access}, |
| {0x3356103C, "Smart Array P721m", &SA5_access}, |
| {0x1920103C, "Smart Array P430i", &SA5_access}, |
| {0x1921103C, "Smart Array P830i", &SA5_access}, |
| {0x1922103C, "Smart Array P430", &SA5_access}, |
| {0x1923103C, "Smart Array P431", &SA5_access}, |
| {0x1924103C, "Smart Array P830", &SA5_access}, |
| {0x1925103C, "Smart Array P831", &SA5_access}, |
| {0x1926103C, "Smart Array P731m", &SA5_access}, |
| {0x1928103C, "Smart Array P230i", &SA5_access}, |
| {0x1929103C, "Smart Array P530", &SA5_access}, |
| {0x21BD103C, "Smart Array P244br", &SA5_access}, |
| {0x21BE103C, "Smart Array P741m", &SA5_access}, |
| {0x21BF103C, "Smart HBA H240ar", &SA5_access}, |
| {0x21C0103C, "Smart Array P440ar", &SA5_access}, |
| {0x21C1103C, "Smart Array P840ar", &SA5_access}, |
| {0x21C2103C, "Smart Array P440", &SA5_access}, |
| {0x21C3103C, "Smart Array P441", &SA5_access}, |
| {0x21C4103C, "Smart Array", &SA5_access}, |
| {0x21C5103C, "Smart Array P841", &SA5_access}, |
| {0x21C6103C, "Smart HBA H244br", &SA5_access}, |
| {0x21C7103C, "Smart HBA H240", &SA5_access}, |
| {0x21C8103C, "Smart HBA H241", &SA5_access}, |
| {0x21C9103C, "Smart Array", &SA5_access}, |
| {0x21CA103C, "Smart Array P246br", &SA5_access}, |
| {0x21CB103C, "Smart Array P840", &SA5_access}, |
| {0x21CC103C, "Smart Array", &SA5_access}, |
| {0x21CD103C, "Smart Array", &SA5_access}, |
| {0x21CE103C, "Smart HBA", &SA5_access}, |
| {0x05809005, "SmartHBA-SA", &SA5_access}, |
| {0x05819005, "SmartHBA-SA 8i", &SA5_access}, |
| {0x05829005, "SmartHBA-SA 8i8e", &SA5_access}, |
| {0x05839005, "SmartHBA-SA 8e", &SA5_access}, |
| {0x05849005, "SmartHBA-SA 16i", &SA5_access}, |
| {0x05859005, "SmartHBA-SA 4i4e", &SA5_access}, |
| {0x00761590, "HP Storage P1224 Array Controller", &SA5_access}, |
| {0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, |
| {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, |
| {0x00881590, "HP Storage P1228e Array Controller", &SA5_access}, |
| {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access}, |
| {0xFFFF103C, "Unknown Smart Array", &SA5_access}, |
| }; |
| |
| static struct scsi_transport_template *hpsa_sas_transport_template; |
| static int hpsa_add_sas_host(struct ctlr_info *h); |
| static void hpsa_delete_sas_host(struct ctlr_info *h); |
| static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node, |
| struct hpsa_scsi_dev_t *device); |
| static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device); |
| static struct hpsa_scsi_dev_t |
| *hpsa_find_device_by_sas_rphy(struct ctlr_info *h, |
| struct sas_rphy *rphy); |
| |
| #define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy) |
| static const struct scsi_cmnd hpsa_cmd_busy; |
| #define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle) |
| static const struct scsi_cmnd hpsa_cmd_idle; |
| static int number_of_controllers; |
| |
| static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); |
| static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); |
| static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg); |
| |
| #ifdef CONFIG_COMPAT |
| static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, |
| void __user *arg); |
| #endif |
| |
| static void cmd_free(struct ctlr_info *h, struct CommandList *c); |
| static struct CommandList *cmd_alloc(struct ctlr_info *h); |
| static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c); |
| static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, |
| struct scsi_cmnd *scmd); |
| static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, |
| void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, |
| int cmd_type); |
| static void hpsa_free_cmd_pool(struct ctlr_info *h); |
| #define VPD_PAGE (1 << 8) |
| #define HPSA_SIMPLE_ERROR_BITS 0x03 |
| |
| static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); |
| static void hpsa_scan_start(struct Scsi_Host *); |
| static int hpsa_scan_finished(struct Scsi_Host *sh, |
| unsigned long elapsed_time); |
| static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth); |
| |
| static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); |
| static int hpsa_slave_alloc(struct scsi_device *sdev); |
| static int hpsa_slave_configure(struct scsi_device *sdev); |
| static void hpsa_slave_destroy(struct scsi_device *sdev); |
| |
| static void hpsa_update_scsi_devices(struct ctlr_info *h); |
| static int check_for_unit_attention(struct ctlr_info *h, |
| struct CommandList *c); |
| static void check_ioctl_unit_attention(struct ctlr_info *h, |
| struct CommandList *c); |
| /* performant mode helper functions */ |
| static void calc_bucket_map(int *bucket, int num_buckets, |
| int nsgs, int min_blocks, u32 *bucket_map); |
| static void hpsa_free_performant_mode(struct ctlr_info *h); |
| static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); |
| static inline u32 next_command(struct ctlr_info *h, u8 q); |
| static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, |
| u32 *cfg_base_addr, u64 *cfg_base_addr_index, |
| u64 *cfg_offset); |
| static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, |
| unsigned long *memory_bar); |
| static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id, |
| bool *legacy_board); |
| static int wait_for_device_to_become_ready(struct ctlr_info *h, |
| unsigned char lunaddr[], |
| int reply_queue); |
| static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, |
| int wait_for_ready); |
| static inline void finish_cmd(struct CommandList *c); |
| static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h); |
| #define BOARD_NOT_READY 0 |
| #define BOARD_READY 1 |
| static void hpsa_drain_accel_commands(struct ctlr_info *h); |
| static void hpsa_flush_cache(struct ctlr_info *h); |
| static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, |
| struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, |
| u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk); |
| static void hpsa_command_resubmit_worker(struct work_struct *work); |
| static u32 lockup_detected(struct ctlr_info *h); |
| static int detect_controller_lockup(struct ctlr_info *h); |
| static void hpsa_disable_rld_caching(struct ctlr_info *h); |
| static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, |
| struct ReportExtendedLUNdata *buf, int bufsize); |
| static bool hpsa_vpd_page_supported(struct ctlr_info *h, |
| unsigned char scsi3addr[], u8 page); |
| static int hpsa_luns_changed(struct ctlr_info *h); |
| static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, |
| struct hpsa_scsi_dev_t *dev, |
| unsigned char *scsi3addr); |
| |
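| /* The Scsi_Host private data holds a single pointer back to our ctlr_info. */ |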
| static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) |
| { |
| unsigned long *priv = shost_priv(sdev->host); |
| return (struct ctlr_info *) *priv; |
| } |
| |
| static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh) |
| { |
| unsigned long *priv = shost_priv(sh); |
| return (struct ctlr_info *) *priv; |
| } |
| |
| static inline bool hpsa_is_cmd_idle(struct CommandList *c) |
| { |
| return c->scsi_cmd == SCSI_CMD_IDLE; |
| } |
| |
| static inline bool hpsa_is_pending_event(struct CommandList *c) |
| { |
| return c->reset_pending; |
| } |
| |
| /* extract sense key, asc, and ascq from sense data. -1 means invalid. */ |
| static void decode_sense_data(const u8 *sense_data, int sense_data_len, |
| u8 *sense_key, u8 *asc, u8 *ascq) |
| { |
| struct scsi_sense_hdr sshdr; |
| bool rc; |
| |
| *sense_key = -1; |
| *asc = -1; |
| *ascq = -1; |
| |
| if (sense_data_len < 1) |
| return; |
| |
| rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr); |
| if (rc) { |
| *sense_key = sshdr.sense_key; |
| *asc = sshdr.asc; |
| *ascq = sshdr.ascq; |
| } |
| } |
| |
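| /* |
| * Decode the sense data of a completed command; if it indicates a unit |
| * attention, log the reason and return 1.  Otherwise return 0. |
| */ |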
| static int check_for_unit_attention(struct ctlr_info *h, |
| struct CommandList *c) |
| { |
| u8 sense_key, asc, ascq; |
| int sense_len; |
| |
| if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) |
| sense_len = sizeof(c->err_info->SenseInfo); |
| else |
| sense_len = c->err_info->SenseLen; |
| |
| decode_sense_data(c->err_info->SenseInfo, sense_len, |
| &sense_key, &asc, &ascq); |
| if (sense_key != UNIT_ATTENTION || asc == 0xff) |
| return 0; |
| |
| switch (asc) { |
| case STATE_CHANGED: |
| dev_warn(&h->pdev->dev, |
| "%s: a state change detected, command retried\n", |
| h->devname); |
| break; |
| case LUN_FAILED: |
| dev_warn(&h->pdev->dev, |
| "%s: LUN failure detected\n", h->devname); |
| break; |
| case REPORT_LUNS_CHANGED: |
| dev_warn(&h->pdev->dev, |
| "%s: report LUN data changed\n", h->devname); |
| /* |
| * Note: this REPORT_LUNS_CHANGED condition only occurs on the external |
| * target (array) devices. |
| */ |
| break; |
| case POWER_OR_RESET: |
| dev_warn(&h->pdev->dev, |
| "%s: a power on or device reset detected\n", |
| h->devname); |
| break; |
| case UNIT_ATTENTION_CLEARED: |
| dev_warn(&h->pdev->dev, |
| "%s: unit attention cleared by another initiator\n", |
| h->devname); |
| break; |
| default: |
| dev_warn(&h->pdev->dev, |
| "%s: unknown unit attention detected\n", |
| h->devname); |
| break; |
| } |
| return 1; |
| } |
| |
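| /* Return 1 on SCSI BUSY or TASK SET FULL status, 0 otherwise. */ |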
| static int check_for_busy(struct ctlr_info *h, struct CommandList *c) |
| { |
| if (c->err_info->CommandStatus != CMD_TARGET_STATUS || |
| (c->err_info->ScsiStatus != SAM_STAT_BUSY && |
| c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL)) |
| return 0; |
| dev_warn(&h->pdev->dev, HPSA "device busy"); |
| return 1; |
| } |
| |
| static u32 lockup_detected(struct ctlr_info *h); |
| static ssize_t host_show_lockup_detected(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| int ld; |
| struct ctlr_info *h; |
| struct Scsi_Host *shost = class_to_shost(dev); |
| |
| h = shost_to_hba(shost); |
| ld = lockup_detected(h); |
| |
| return sprintf(buf, "ld=%d\n", ld); |
| } |
| |
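| /* |
| * sysfs 'hp_ssd_smart_path_status': write nonzero to enable HP SSD |
| * Smart Path (ioaccel) on this controller, 0 to disable it. |
| */ |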
| static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev, |
| struct device_attribute *attr, |
| const char *buf, size_t count) |
| { |
| int status, len; |
| struct ctlr_info *h; |
| struct Scsi_Host *shost = class_to_shost(dev); |
| char tmpbuf[10]; |
| |
| if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) |
| return -EACCES; |
| len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; |
| strncpy(tmpbuf, buf, len); |
| tmpbuf[len] = '\0'; |
| if (sscanf(tmpbuf, "%d", &status) != 1) |
| return -EINVAL; |
| h = shost_to_hba(shost); |
| h->acciopath_status = !!status; |
| dev_warn(&h->pdev->dev, |
| "hpsa: HP SSD Smart Path %s via sysfs update.\n", |
| h->acciopath_status ? "enabled" : "disabled"); |
| return count; |
| } |
| |
| static ssize_t host_store_raid_offload_debug(struct device *dev, |
| struct device_attribute *attr, |
| const char *buf, size_t count) |
| { |
| int debug_level, len; |
| struct ctlr_info *h; |
| struct Scsi_Host *shost = class_to_shost(dev); |
| char tmpbuf[10]; |
| |
| if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) |
| return -EACCES; |
| len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; |
| strncpy(tmpbuf, buf, len); |
| tmpbuf[len] = '\0'; |
| if (sscanf(tmpbuf, "%d", &debug_level) != 1) |
| return -EINVAL; |
| if (debug_level < 0) |
| debug_level = 0; |
| h = shost_to_hba(shost); |
| h->raid_offload_debug = debug_level; |
| dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n", |
| h->raid_offload_debug); |
| return count; |
| } |
| |
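| /* sysfs 'rescan': trigger a scan for device changes on this controller. */ |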
| static ssize_t host_store_rescan(struct device *dev, |
| struct device_attribute *attr, |
| const char *buf, size_t count) |
| { |
| struct ctlr_info *h; |
| struct Scsi_Host *shost = class_to_shost(dev); |
| h = shost_to_hba(shost); |
| hpsa_scan_start(h->scsi_host); |
| return count; |
| } |
| |
| static ssize_t host_show_firmware_revision(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| struct ctlr_info *h; |
| struct Scsi_Host *shost = class_to_shost(dev); |
| unsigned char *fwrev; |
| |
| h = shost_to_hba(shost); |
| if (!h->hba_inquiry_data) |
| return 0; |
| fwrev = &h->hba_inquiry_data[32]; |
| return snprintf(buf, 20, "%c%c%c%c\n", |
| fwrev[0], fwrev[1], fwrev[2], fwrev[3]); |
| } |
| |
| static ssize_t host_show_commands_outstanding(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| struct Scsi_Host *shost = class_to_shost(dev); |
| struct ctlr_info *h = shost_to_hba(shost); |
| |
| return snprintf(buf, 20, "%d\n", |
| atomic_read(&h->commands_outstanding)); |
| } |
| |
| static ssize_t host_show_transport_mode(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| struct ctlr_info *h; |
| struct Scsi_Host *shost = class_to_shost(dev); |
| |
| h = shost_to_hba(shost); |
| return snprintf(buf, 20, "%s\n", |
| h->transMethod & CFGTBL_Trans_Performant ? |
| "performant" : "simple"); |
| } |
| |
| static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| struct ctlr_info *h; |
| struct Scsi_Host *shost = class_to_shost(dev); |
| |
| h = shost_to_hba(shost); |
| return snprintf(buf, 30, "HP SSD Smart Path %s\n", |
| (h->acciopath_status == 1) ? "enabled" : "disabled"); |
| } |
| |
| /* List of controllers which cannot be hard reset on kexec with reset_devices */ |
| static u32 unresettable_controller[] = { |
| 0x324a103C, /* Smart Array P712m */ |
| 0x324b103C, /* Smart Array P711m */ |
| 0x3223103C, /* Smart Array P800 */ |
| 0x3234103C, /* Smart Array P400 */ |
| 0x3235103C, /* Smart Array P400i */ |
| 0x3211103C, /* Smart Array E200i */ |
| 0x3212103C, /* Smart Array E200 */ |
| 0x3213103C, /* Smart Array E200i */ |
| 0x3214103C, /* Smart Array E200i */ |
| 0x3215103C, /* Smart Array E200i */ |
| 0x3237103C, /* Smart Array E500 */ |
| 0x323D103C, /* Smart Array P700m */ |
| 0x40800E11, /* Smart Array 5i */ |
| 0x409C0E11, /* Smart Array 6400 */ |
| 0x409D0E11, /* Smart Array 6400 EM */ |
| 0x40700E11, /* Smart Array 5300 */ |
| 0x40820E11, /* Smart Array 532 */ |
| 0x40830E11, /* Smart Array 5312 */ |
| 0x409A0E11, /* Smart Array 641 */ |
| 0x409B0E11, /* Smart Array 642 */ |
| 0x40910E11, /* Smart Array 6i */ |
| }; |
| |
| /* List of controllers which cannot even be soft reset */ |
| static u32 soft_unresettable_controller[] = { |
| 0x40800E11, /* Smart Array 5i */ |
| 0x40700E11, /* Smart Array 5300 */ |
| 0x40820E11, /* Smart Array 532 */ |
| 0x40830E11, /* Smart Array 5312 */ |
| 0x409A0E11, /* Smart Array 641 */ |
| 0x409B0E11, /* Smart Array 642 */ |
| 0x40910E11, /* Smart Array 6i */ |
| /* Exclude 640x boards. These are two pci devices in one slot |
| * which share a battery backed cache module. One controls the |
| * cache, the other accesses the cache through the one that controls |
| * it. If we reset the one controlling the cache, the other will |
| * likely not be happy. Just forbid resetting this conjoined mess. |
| * The 640x isn't really supported by hpsa anyway. |
| */ |
| 0x409C0E11, /* Smart Array 6400 */ |
| 0x409D0E11, /* Smart Array 6400 EM */ |
| }; |
| |
| static int board_id_in_array(u32 a[], int nelems, u32 board_id) |
| { |
| int i; |
| |
| for (i = 0; i < nelems; i++) |
| if (a[i] == board_id) |
| return 1; |
| return 0; |
| } |
| |
| static int ctlr_is_hard_resettable(u32 board_id) |
| { |
| return !board_id_in_array(unresettable_controller, |
| ARRAY_SIZE(unresettable_controller), board_id); |
| } |
| |
| static int ctlr_is_soft_resettable(u32 board_id) |
| { |
| return !board_id_in_array(soft_unresettable_controller, |
| ARRAY_SIZE(soft_unresettable_controller), board_id); |
| } |
| |
| static int ctlr_is_resettable(u32 board_id) |
| { |
| return ctlr_is_hard_resettable(board_id) || |
| ctlr_is_soft_resettable(board_id); |
| } |
| |
| static ssize_t host_show_resettable(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| struct ctlr_info *h; |
| struct Scsi_Host *shost = class_to_shost(dev); |
| |
| h = shost_to_hba(shost); |
| return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id)); |
| } |
| |
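| /* CISS addressing: bits 7:6 of byte 3 == 01b is logical addressing mode. */ |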
| static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) |
| { |
| return (scsi3addr[3] & 0xC0) == 0x40; |
| } |
| |
| static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6", |
| "1(+0)ADM", "UNKNOWN", "PHYS DRV" |
| }; |
| #define HPSA_RAID_0 0 |
| #define HPSA_RAID_4 1 |
| #define HPSA_RAID_1 2 /* also used for RAID 10 */ |
| #define HPSA_RAID_5 3 /* also used for RAID 50 */ |
| #define HPSA_RAID_51 4 |
| #define HPSA_RAID_6 5 /* also used for RAID 60 */ |
| #define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ |
| #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2) |
| #define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1) |
| |
| static inline bool is_logical_device(struct hpsa_scsi_dev_t *device) |
| { |
| return !device->physical_device; |
| } |
| |
| static ssize_t raid_level_show(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| ssize_t l = 0; |
| unsigned char rlevel; |
| struct ctlr_info *h; |
| struct scsi_device *sdev; |
| struct hpsa_scsi_dev_t *hdev; |
| unsigned long flags; |
| |
| sdev = to_scsi_device(dev); |
| h = sdev_to_hba(sdev); |
| spin_lock_irqsave(&h->lock, flags); |
| hdev = sdev->hostdata; |
| if (!hdev) { |
| spin_unlock_irqrestore(&h->lock, flags); |
| return -ENODEV; |
| } |
| |
| /* Is this even a logical drive? */ |
| if (!is_logical_device(hdev)) { |
| spin_unlock_irqrestore(&h->lock, flags); |
| l = snprintf(buf, PAGE_SIZE, "N/A\n"); |
| return l; |
| } |
| |
| rlevel = hdev->raid_level; |
| spin_unlock_irqrestore(&h->lock, flags); |
| if (rlevel > RAID_UNKNOWN) |
| rlevel = RAID_UNKNOWN; |
| l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]); |
| return l; |
| } |
| |
| static ssize_t lunid_show(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| struct ctlr_info *h; |
| struct scsi_device *sdev; |
| struct hpsa_scsi_dev_t *hdev; |
| unsigned long flags; |
| unsigned char lunid[8]; |
| |
| sdev = to_scsi_device(dev); |
| h = sdev_to_hba(sdev); |
| spin_lock_irqsave(&h->lock, flags); |
| hdev = sdev->hostdata; |
| if (!hdev) { |
| spin_unlock_irqrestore(&h->lock, flags); |
| return -ENODEV; |
| } |
| memcpy(lunid, hdev->scsi3addr, sizeof(lunid)); |
| spin_unlock_irqrestore(&h->lock, flags); |
| return snprintf(buf, 20, "0x%8phN\n", lunid); |
| } |
| |
| static ssize_t unique_id_show(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| struct ctlr_info *h; |
| struct scsi_device *sdev; |
| struct hpsa_scsi_dev_t *hdev; |
| unsigned long flags; |
| unsigned char sn[16]; |
| |
| sdev = to_scsi_device(dev); |
| h = sdev_to_hba(sdev); |
| spin_lock_irqsave(&h->lock, flags); |
| hdev = sdev->hostdata; |
| if (!hdev) { |
| spin_unlock_irqrestore(&h->lock, flags); |
| return -ENODEV; |
| } |
| memcpy(sn, hdev->device_id, sizeof(sn)); |
| spin_unlock_irqrestore(&h->lock, flags); |
| return snprintf(buf, 16 * 2 + 2, |
| "%02X%02X%02X%02X%02X%02X%02X%02X" |
| "%02X%02X%02X%02X%02X%02X%02X%02X\n", |
| sn[0], sn[1], sn[2], sn[3], |
| sn[4], sn[5], sn[6], sn[7], |
| sn[8], sn[9], sn[10], sn[11], |
| sn[12], sn[13], sn[14], sn[15]); |
| } |
| |
| static ssize_t sas_address_show(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| struct ctlr_info *h; |
| struct scsi_device *sdev; |
| struct hpsa_scsi_dev_t *hdev; |
| unsigned long flags; |
| u64 sas_address; |
| |
| sdev = to_scsi_device(dev); |
| h = sdev_to_hba(sdev); |
| spin_lock_irqsave(&h->lock, flags); |
| hdev = sdev->hostdata; |
| if (!hdev || is_logical_device(hdev) || !hdev->expose_device) { |
| spin_unlock_irqrestore(&h->lock, flags); |
| return -ENODEV; |
| } |
| sas_address = hdev->sas_address; |
| spin_unlock_irqrestore(&h->lock, flags); |
| |
| return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address); |
| } |
| |
| static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| struct ctlr_info *h; |
| struct scsi_device *sdev; |
| struct hpsa_scsi_dev_t *hdev; |
| unsigned long flags; |
| int offload_enabled; |
| |
| sdev = to_scsi_device(dev); |
| h = sdev_to_hba(sdev); |
| spin_lock_irqsave(&h->lock, flags); |
| hdev = sdev->hostdata; |
| if (!hdev) { |
| spin_unlock_irqrestore(&h->lock, flags); |
| return -ENODEV; |
| } |
| offload_enabled = hdev->offload_enabled; |
| spin_unlock_irqrestore(&h->lock, flags); |
| return snprintf(buf, 20, "%d\n", offload_enabled); |
| } |
| |
| #define MAX_PATHS 8 |
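| /* |
| * sysfs 'path_info': list each known path to the device (port, box, bay) |
| * and whether it is the active path. |
| */ |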
| static ssize_t path_info_show(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| struct ctlr_info *h; |
| struct scsi_device *sdev; |
| struct hpsa_scsi_dev_t *hdev; |
| unsigned long flags; |
| int i; |
| int output_len = 0; |
| u8 box; |
| u8 bay; |
| u8 path_map_index = 0; |
| char *active; |
| unsigned char phys_connector[2]; |
| |
| sdev = to_scsi_device(dev); |
| h = sdev_to_hba(sdev); |
| spin_lock_irqsave(&h->devlock, flags); |
| hdev = sdev->hostdata; |
| if (!hdev) { |
| spin_unlock_irqrestore(&h->devlock, flags); |
| return -ENODEV; |
| } |
| |
| bay = hdev->bay; |
| for (i = 0; i < MAX_PATHS; i++) { |
| path_map_index = 1<<i; |
| if (i == hdev->active_path_index) |
| active = "Active"; |
| else if (hdev->path_map & path_map_index) |
| active = "Inactive"; |
| else |
| continue; |
| |
| output_len += scnprintf(buf + output_len, |
| PAGE_SIZE - output_len, |
| "[%d:%d:%d:%d] %20.20s ", |
| h->scsi_host->host_no, |
| hdev->bus, hdev->target, hdev->lun, |
| scsi_device_type(hdev->devtype)); |
| |
| if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) { |
| output_len += scnprintf(buf + output_len, |
| PAGE_SIZE - output_len, |
| "%s\n", active); |
| continue; |
| } |
| |
| box = hdev->box[i]; |
| memcpy(&phys_connector, &hdev->phys_connector[i], |
| sizeof(phys_connector)); |
| if (phys_connector[0] < '0') |
| phys_connector[0] = '0'; |
| if (phys_connector[1] < '0') |
| phys_connector[1] = '0'; |
| output_len += scnprintf(buf + output_len, |
| PAGE_SIZE - output_len, |
| "PORT: %.2s ", |
| phys_connector); |
| if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) && |
| hdev->expose_device) { |
| if (box == 0 || box == 0xFF) { |
| output_len += scnprintf(buf + output_len, |
| PAGE_SIZE - output_len, |
| "BAY: %hhu %s\n", |
| bay, active); |
| } else { |
| output_len += scnprintf(buf + output_len, |
| PAGE_SIZE - output_len, |
| "BOX: %hhu BAY: %hhu %s\n", |
| box, bay, active); |
| } |
| } else if (box != 0 && box != 0xFF) { |
| output_len += scnprintf(buf + output_len, |
| PAGE_SIZE - output_len, "BOX: %hhu %s\n", |
| box, active); |
| } else |
| output_len += scnprintf(buf + output_len, |
| PAGE_SIZE - output_len, "%s\n", active); |
| } |
| |
| spin_unlock_irqrestore(&h->devlock, flags); |
| return output_len; |
| } |
| |
| static ssize_t host_show_ctlr_num(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| struct ctlr_info *h; |
| struct Scsi_Host *shost = class_to_shost(dev); |
| |
| h = shost_to_hba(shost); |
| return snprintf(buf, 20, "%d\n", h->ctlr); |
| } |
| |
| static ssize_t host_show_legacy_board(struct device *dev, |
| struct device_attribute *attr, char *buf) |
| { |
| struct ctlr_info *h; |
| struct Scsi_Host *shost = class_to_shost(dev); |
| |
| h = shost_to_hba(shost); |
| return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0); |
| } |
| |
| static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); |
| static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); |
| static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); |
| static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); |
| static DEVICE_ATTR(sas_address, S_IRUGO, sas_address_show, NULL); |
| static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO, |
| host_show_hp_ssd_smart_path_enabled, NULL); |
| static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL); |
| static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH, |
| host_show_hp_ssd_smart_path_status, |
| host_store_hp_ssd_smart_path_status); |
| static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL, |
| host_store_raid_offload_debug); |
| static DEVICE_ATTR(firmware_revision, S_IRUGO, |
| host_show_firmware_revision, NULL); |
| static DEVICE_ATTR(commands_outstanding, S_IRUGO, |
| host_show_commands_outstanding, NULL); |
| static DEVICE_ATTR(transport_mode, S_IRUGO, |
| host_show_transport_mode, NULL); |
| static DEVICE_ATTR(resettable, S_IRUGO, |
| host_show_resettable, NULL); |
| static DEVICE_ATTR(lockup_detected, S_IRUGO, |
| host_show_lockup_detected, NULL); |
| static DEVICE_ATTR(ctlr_num, S_IRUGO, |
| host_show_ctlr_num, NULL); |
| static DEVICE_ATTR(legacy_board, S_IRUGO, |
| host_show_legacy_board, NULL); |
| |
| static struct device_attribute *hpsa_sdev_attrs[] = { |
| &dev_attr_raid_level, |
| &dev_attr_lunid, |
| &dev_attr_unique_id, |
| &dev_attr_hp_ssd_smart_path_enabled, |
| &dev_attr_path_info, |
| &dev_attr_sas_address, |
| NULL, |
| }; |
| |
| static struct device_attribute *hpsa_shost_attrs[] = { |
| &dev_attr_rescan, |
| &dev_attr_firmware_revision, |
| &dev_attr_commands_outstanding, |
| &dev_attr_transport_mode, |
| &dev_attr_resettable, |
| &dev_attr_hp_ssd_smart_path_status, |
| &dev_attr_raid_offload_debug, |
| &dev_attr_lockup_detected, |
| &dev_attr_ctlr_num, |
| &dev_attr_legacy_board, |
| NULL, |
| }; |
| |
| #define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\ |
| HPSA_MAX_CONCURRENT_PASSTHRUS) |
| |
| static struct scsi_host_template hpsa_driver_template = { |
| .module = THIS_MODULE, |
| .name = HPSA, |
| .proc_name = HPSA, |
| .queuecommand = hpsa_scsi_queue_command, |
| .scan_start = hpsa_scan_start, |
| .scan_finished = hpsa_scan_finished, |
| .change_queue_depth = hpsa_change_queue_depth, |
| .this_id = -1, |
| .use_clustering = ENABLE_CLUSTERING, |
| .eh_device_reset_handler = hpsa_eh_device_reset_handler, |
| .ioctl = hpsa_ioctl, |
| .slave_alloc = hpsa_slave_alloc, |
| .slave_configure = hpsa_slave_configure, |
| .slave_destroy = hpsa_slave_destroy, |
| #ifdef CONFIG_COMPAT |
| .compat_ioctl = hpsa_compat_ioctl, |
| #endif |
| .sdev_attrs = hpsa_sdev_attrs, |
| .shost_attrs = hpsa_shost_attrs, |
| .max_sectors = 1024, |
| .no_write_same = 1, |
| }; |
| |
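| /* |
| * Return the tag of the next completed command from reply queue q, or |
| * FIFO_EMPTY if none is pending.  In performant mode a reply-ring entry |
| * is valid when its low bit matches the queue's wraparound toggle. |
| */ |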
| static inline u32 next_command(struct ctlr_info *h, u8 q) |
| { |
| u32 a; |
| struct reply_queue_buffer *rq = &h->reply_queue[q]; |
| |
| if (h->transMethod & CFGTBL_Trans_io_accel1) |
| return h->access.command_completed(h, q); |
| |
| if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) |
| return h->access.command_completed(h, q); |
| |
| if ((rq->head[rq->current_entry] & 1) == rq->wraparound) { |
| a = rq->head[rq->current_entry]; |
| rq->current_entry++; |
| atomic_dec(&h->commands_outstanding); |
| } else { |
| a = FIFO_EMPTY; |
| } |
| /* Check for wraparound */ |
| if (rq->current_entry == h->max_commands) { |
| rq->current_entry = 0; |
| rq->wraparound ^= 1; |
| } |
| return a; |
| } |
| |
| /* |
| * There are some special bits in the bus address of the |
| * command that we have to set for the controller to know |
| * how to process the command: |
| * |
| * Normal performant mode: |
| * bit 0: 1 means performant mode, 0 means simple mode. |
| * bits 1-3 = block fetch table entry |
| * bits 4-6 = command type (== 0) |
| * |
| * ioaccel1 mode: |
| * bit 0 = "performant mode" bit. |
| * bits 1-3 = block fetch table entry |
| * bits 4-6 = command type (== 110) |
| * (command type is needed because ioaccel1 mode |
| * commands are submitted through the same register as normal |
| * mode commands, so this is how the controller knows whether |
| * the command is normal mode or ioaccel1 mode.) |
| * |
| * ioaccel2 mode: |
| * bit 0 = "performant mode" bit. |
| * bits 1-4 = block fetch table entry (note extra bit) |
| * bits 4-6 = not needed, because ioaccel2 mode has |
| * a separate special register for submitting commands. |
| */ |
| |
| /* |
| * set_performant_mode: Modify the tag for cciss performant mode; |
| * set bit 0 for pull model, bits 3-1 for block fetch |
| * register number. |
| */ |
| #define DEFAULT_REPLY_QUEUE (-1) |
| static void set_performant_mode(struct ctlr_info *h, struct CommandList *c, |
| int reply_queue) |
| { |
| if (likely(h->transMethod & CFGTBL_Trans_Performant)) { |
| c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); |
| if (unlikely(!h->msix_vectors)) |
| return; |
| if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) |
| c->Header.ReplyQueue = |
| raw_smp_processor_id() % h->nreply_queues; |
| else |
| c->Header.ReplyQueue = reply_queue % h->nreply_queues; |
| } |
| } |
| |
| static void set_ioaccel1_performant_mode(struct ctlr_info *h, |
| struct CommandList *c, |
| int reply_queue) |
| { |
| struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; |
| |
| /* |
| * Tell the controller to post the reply to the queue for this |
| * processor. This seems to give the best I/O throughput. |
| */ |
| if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) |
| cp->ReplyQueue = smp_processor_id() % h->nreply_queues; |
| else |
| cp->ReplyQueue = reply_queue % h->nreply_queues; |
| /* |
| * Set the bits in the address sent down to include: |
| * - performant mode bit (bit 0) |
| * - pull count (bits 1-3) |
| * - command type (bits 4-6) |
| */ |
| c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) | |
| IOACCEL1_BUSADDR_CMDTYPE; |
| } |
| |
| static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h, |
| struct CommandList *c, |
| int reply_queue) |
| { |
| struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *) |
| &h->ioaccel2_cmd_pool[c->cmdindex]; |
| |
| /* Tell the controller to post the reply to the queue for this |
| * processor. This seems to give the best I/O throughput. |
| */ |
| if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) |
| cp->reply_queue = smp_processor_id() % h->nreply_queues; |
| else |
| cp->reply_queue = reply_queue % h->nreply_queues; |
| /* Set the bits in the address sent down to include: |
| * - performant mode bit not used in ioaccel mode 2 |
| * - pull count (bits 0-3) |
| * - command type isn't needed for ioaccel2 |
| */ |
| c->busaddr |= h->ioaccel2_blockFetchTable[0]; |
| } |
| |
| static void set_ioaccel2_performant_mode(struct ctlr_info *h, |
| struct CommandList *c, |
| int reply_queue) |
| { |
| struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; |
| |
| /* |
| * Tell the controller to post the reply to the queue for this |
| * processor. This seems to give the best I/O throughput. |
| */ |
| if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) |
| cp->reply_queue = smp_processor_id() % h->nreply_queues; |
| else |
| cp->reply_queue = reply_queue % h->nreply_queues; |
| /* |
| * Set the bits in the address sent down to include: |
| * - performant mode bit not used in ioaccel mode 2 |
| * - pull count (bits 0-3) |
| * - command type isn't needed for ioaccel2 |
| */ |
| c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]); |
| } |
| |
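| /* Return nonzero if the CDB is a BMIC firmware flash write command. */ |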
| static int is_firmware_flash_cmd(u8 *cdb) |
| { |
| return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE; |
| } |
| |
| /* |
| * During firmware flash, the heartbeat register may not update as frequently |
| * as it should, so we dial down lockup detection during firmware flash and |
| * dial it back up when the flash completes. |
| */ |
| #define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ) |
| #define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ) |
| #define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ) |
| static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h, |
| struct CommandList *c) |
| { |
| if (!is_firmware_flash_cmd(c->Request.CDB)) |
| return; |
| atomic_inc(&h->firmware_flash_in_progress); |
| h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH; |
| } |
| |
| static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h, |
| struct CommandList *c) |
| { |
| if (is_firmware_flash_cmd(c->Request.CDB) && |
| atomic_dec_and_test(&h->firmware_flash_in_progress)) |
| h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; |
| } |
| |
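| /* |
| * Set the submission-method bits in the command's bus address (ioaccel1, |
| * ioaccel2, ioaccel2 TMF, or standard performant/simple mode) and submit |
| * it to the controller. |
| */ |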
| static void __enqueue_cmd_and_start_io(struct ctlr_info *h, |
| struct CommandList *c, int reply_queue) |
| { |
| dial_down_lockup_detection_during_fw_flash(h, c); |
| atomic_inc(&h->commands_outstanding); |
| switch (c->cmd_type) { |
| case CMD_IOACCEL1: |
| set_ioaccel1_performant_mode(h, c, reply_queue); |
| writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); |
| break; |
| case CMD_IOACCEL2: |
| set_ioaccel2_performant_mode(h, c, reply_queue); |
| writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); |
| break; |
| case IOACCEL2_TMF: |
| set_ioaccel2_tmf_performant_mode(h, c, reply_queue); |
| writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); |
| break; |
| default: |
| set_performant_mode(h, c, reply_queue); |
| h->access.submit_command(h, c); |
| } |
| } |
| |
| static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c) |
| { |
| if (unlikely(hpsa_is_pending_event(c))) |
| return finish_cmd(c); |
| |
| __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE); |
| } |
| |
| static inline int is_hba_lunid(unsigned char scsi3addr[]) |
| { |
| return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0; |
| } |
| |
| static inline int is_scsi_rev_5(struct ctlr_info *h) |
| { |
| if (!h->hba_inquiry_data) |
| return 0; |
| if ((h->hba_inquiry_data[2] & 0x07) == 5) |
| return 1; |
| return 0; |
| } |
| |
| static int hpsa_find_target_lun(struct ctlr_info *h, |
| unsigned char scsi3addr[], int bus, int *target, int *lun) |
| { |
| /* finds an unused bus, target, lun for a new physical device |
| * assumes h->devlock is held |
| */ |
| int i, found = 0; |
| DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES); |
| |
| bitmap_zero(lun_taken, HPSA_MAX_DEVICES); |
| |
| for (i = 0; i < h->ndevices; i++) { |
| if (h->dev[i]->bus == bus && h->dev[i]->target != -1) |
| __set_bit(h->dev[i]->target, lun_taken); |
| } |
| |
| i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES); |
| if (i < HPSA_MAX_DEVICES) { |
| /* *bus = 1; */ |
| *target = i; |
| *lun = 0; |
| found = 1; |
| } |
| return !found; |
| } |
| |
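| /* |
| * Log a one-line description of a device: SCSI address, type, vendor, |
| * model, RAID level or role, and SSD Smart Path (ioaccel) state. |
| */ |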
| static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h, |
| struct hpsa_scsi_dev_t *dev, char *description) |
| { |
| #define LABEL_SIZE 25 |
| char label[LABEL_SIZE]; |
| |
| if (h == NULL || h->pdev == NULL || h->scsi_host == NULL) |
| return; |
| |
| switch (dev->devtype) { |
| case TYPE_RAID: |
| snprintf(label, LABEL_SIZE, "controller"); |
| break; |
| case TYPE_ENCLOSURE: |
| snprintf(label, LABEL_SIZE, "enclosure"); |
| break; |
| case TYPE_DISK: |
| case TYPE_ZBC: |
| if (dev->external) |
| snprintf(label, LABEL_SIZE, "external"); |
| else if (!is_logical_dev_addr_mode(dev->scsi3addr)) |
| snprintf(label, LABEL_SIZE, "%s", |
| raid_label[PHYSICAL_DRIVE]); |
| else |
| snprintf(label, LABEL_SIZE, "RAID-%s", |
| dev->raid_level > RAID_UNKNOWN ? "?" : |
| raid_label[dev->raid_level]); |
| break; |
| case TYPE_ROM: |
| snprintf(label, LABEL_SIZE, "rom"); |
| break; |
| case TYPE_TAPE: |
| snprintf(label, LABEL_SIZE, "tape"); |
| break; |
| case TYPE_MEDIUM_CHANGER: |
| snprintf(label, LABEL_SIZE, "changer"); |
| break; |
| default: |
| snprintf(label, LABEL_SIZE, "UNKNOWN"); |
| break; |
| } |
| |
| dev_printk(level, &h->pdev->dev, |
| "scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n", |
| h->scsi_host->host_no, dev->bus, dev->target, dev->lun, |
| description, |
| scsi_device_type(dev->devtype), |
| dev->vendor, |
| dev->model, |
| label, |
| dev->offload_config ? '+' : '-', |
| dev->offload_enabled ? '+' : '-', |
| dev->expose_device); |
| } |
| |
| /* Add an entry into h->dev[] array. */ |
| static int hpsa_scsi_add_entry(struct ctlr_info *h, |
| struct hpsa_scsi_dev_t *device, |
| struct hpsa_scsi_dev_t *added[], int *nadded) |
| { |
| /* assumes h->devlock is held */ |
| int n = h->ndevices; |
| int i; |
| unsigned char addr1[8], addr2[8]; |
| struct hpsa_scsi_dev_t *sd; |
| |
| if (n >= HPSA_MAX_DEVICES) { |
| dev_err(&h->pdev->dev, "too many devices, some will be " |
| "inaccessible.\n"); |
| return -1; |
| } |
| |
| /* physical devices do not have lun or target assigned until now. */ |
| if (device->lun != -1) |
| /* Logical device, lun is already assigned. */ |
| goto lun_assigned; |
| |
| /* If this device is a non-zero lun of a multi-lun device, |
| * byte 4 of the 8-byte LUN addr will contain the logical |
| * unit number, zero otherwise. |
| */ |
| if (device->scsi3addr[4] == 0) { |
| /* This is not a non-zero lun of a multi-lun device */ |
| if (hpsa_find_target_lun(h, device->scsi3addr, |
| device->bus, &device->target, &device->lun) != 0) |
| return -1; |
| goto lun_assigned; |
| } |
| |
| /* This is a non-zero lun of a multi-lun device. |
| * Search through our list and find the device which |
| * has the same 8 byte LUN address, excepting bytes 4 and 5. |
| * Assign the same bus and target for this new LUN. |
| * Use the logical unit number from the firmware. |
| */ |
| memcpy(addr1, device->scsi3addr, 8); |
| addr1[4] = 0; |
| addr1[5] = 0; |
| for (i = 0; i < n; i++) { |
| sd = h->dev[i]; |
| memcpy(addr2, sd->scsi3addr, 8); |
| addr2[4] = 0; |
| addr2[5] = 0; |
| /* differ only in bytes 4 and 5? */ |
| if (memcmp(addr1, addr2, 8) == 0) { |
| device->bus = sd->bus; |
| device->target = sd->target; |
| device->lun = device->scsi3addr[4]; |
| break; |
| } |
| } |
| if (device->lun == -1) { |
| dev_warn(&h->pdev->dev, "physical device with no LUN=0," |
| " suspect firmware bug or unsupported hardware " |
| "configuration.\n"); |
| return -1; |
| } |
| |
| lun_assigned: |
| |
| h->dev[n] = device; |
| h->ndevices++; |
| added[*nadded] = device; |
| (*nadded)++; |
| hpsa_show_dev_msg(KERN_INFO, h, device, |
| device->expose_device ? "added" : "masked"); |
| device->offload_to_be_enabled = device->offload_enabled; |
| device->offload_enabled = 0; |
| return 0; |
| } |
| |
| /* Update an entry in h->dev[] array. */ |
| static void hpsa_scsi_update_entry(struct ctlr_info *h, |
| int entry, struct hpsa_scsi_dev_t *new_entry) |
| { |
| int offload_enabled; |
| /* assumes h->devlock is held */ |
| BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); |
| |
| /* Raid level changed. */ |
| h->dev[entry]->raid_level = new_entry->raid_level; |
| |
| /* Raid offload parameters changed. Careful about the ordering. */ |
| if (new_entry->offload_config && new_entry->offload_enabled) { |
| /* |
| * If the drive is newly offload_enabled, we want to copy the |
| * raid map data first.  If previously offload_enabled and |
| * offload_config were set, the raid map data had better be |
| * the same as it was before.  If the raid map data is changed |
| * then it had better be the case that |
| * h->dev[entry]->offload_enabled is currently 0. |
| */ |
| h->dev[entry]->raid_map = new_entry->raid_map; |
| h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; |
| } |
| if (new_entry->hba_ioaccel_enabled) { |
| h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; |
| wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */ |
| } |
| h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled; |
| h->dev[entry]->offload_config = new_entry->offload_config; |
| h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; |
| h->dev[entry]->queue_depth = new_entry->queue_depth; |
| |
| /* |
| * We can turn off ioaccel offload now, but need to delay turning |
| * it on until we can update h->dev[entry]->phys_disk[], but we |
| * can't do that until all the devices are updated. |
| */ |
| h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled; |
| if (!new_entry->offload_enabled) |
| h->dev[entry]->offload_enabled = 0; |
| |
| offload_enabled = h->dev[entry]->offload_enabled; |
| h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled; |
| hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated"); |
| h->dev[entry]->offload_enabled = offload_enabled; |
| } |
| |
| /* Replace an entry from h->dev[] array. */ |
| static void hpsa_scsi_replace_entry(struct ctlr_info *h, |
| int entry, struct hpsa_scsi_dev_t *new_entry, |
| struct hpsa_scsi_dev_t *added[], int *nadded, |
| struct hpsa_scsi_dev_t *removed[], int *nremoved) |
| { |
| /* assumes h->devlock is held */ |
| BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); |
| removed[*nremoved] = h->dev[entry]; |
| (*nremoved)++; |
| |
| /* |
| * New physical devices won't have target/lun assigned yet |
| * so we need to preserve the values in the slot we are replacing. |
| */ |
| if (new_entry->target == -1) { |
| new_entry->target = h->dev[entry]->target; |
| new_entry->lun = h->dev[entry]->lun; |
| } |
| |
| h->dev[entry] = new_entry; |
| added[*nadded] = new_entry; |
| (*nadded)++; |
| hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced"); |
| new_entry->offload_to_be_enabled = new_entry->offload_enabled; |
| new_entry->offload_enabled = 0; |
| } |
| |
| /* Remove an entry from h->dev[] array. */ |
| static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry, |
| struct hpsa_scsi_dev_t *removed[], int *nremoved) |
| { |
| /* assumes h->devlock is held */ |
| int i; |
| struct hpsa_scsi_dev_t *sd; |
| |
| BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); |
| |
| sd = h->dev[entry]; |
| removed[*nremoved] = h->dev[entry]; |
| (*nremoved)++; |
| |
| for (i = entry; i < h->ndevices-1; i++) |
| h->dev[i] = h->dev[i+1]; |
| h->ndevices--; |
| hpsa_show_dev_msg(KERN_INFO, h, sd, "removed"); |
| } |
| |
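| /* True if two 8-byte CISS LUN addresses are identical. */ |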
| #define SCSI3ADDR_EQ(a, b) ( \ |
| (a)[7] == (b)[7] && \ |
| (a)[6] == (b)[6] && \ |
| (a)[5] == (b)[5] && \ |
| (a)[4] == (b)[4] && \ |
| (a)[3] == (b)[3] && \ |
| (a)[2] == (b)[2] && \ |
| (a)[1] == (b)[1] && \ |
| (a)[0] == (b)[0]) |
| |
| static void fixup_botched_add(struct ctlr_info *h, |
| struct hpsa_scsi_dev_t *added) |
| { |
| /* called when scsi_add_device fails in order to re-adjust |
| * h->dev[] to match the mid layer's view. |
| */ |
| unsigned long flags; |
| int i, j; |
| |
| spin_lock_irqsave(&h->lock, flags); |
| for (i = 0; i < h->ndevices; i++) { |
| if (h->dev[i] == added) { |
| for (j = i; j < h->ndevices-1; j++) |
| h->dev[j] = h->dev[j+1]; |
| h->ndevices--; |
| break; |
| } |
| } |
| spin_unlock_irqrestore(&h->lock, flags); |
| kfree(added); |
| } |
| |
| static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1, |
| struct hpsa_scsi_dev_t *dev2) |
| { |
| /* We compare everything except lun and target, as these |
| * are not yet assigned.  Compare the parts likely |
| * to differ first. |
| */ |
| if (memcmp(dev1->scsi3addr, dev2->scsi3addr, |
| sizeof(dev1->scsi3addr)) != 0) |
| return 0; |
| if (memcmp(dev1->device_id, dev2->device_id, |
| sizeof(dev1->device_id)) != 0) |
| return 0; |
| if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0) |
| return 0; |
| if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0) |
| return 0; |
| if (dev1->devtype != dev2->devtype) |
| return 0; |
| if (dev1->bus != dev2->bus) |
| return 0; |
| return 1; |
| } |
| |
| static inline int device_updated(struct hpsa_scsi_dev_t *dev1, |
| struct hpsa_scsi_dev_t *dev2) |
| { |
| /* Device attributes that can change, but don't mean |
| * that the device is a different device, nor that the OS |
| * needs to be told anything about the change. |
| */ |
| if (dev1->raid_level != dev2->raid_level) |
| return 1; |
| if (dev1->offload_config != dev2->offload_config) |
| return 1; |
| if (dev1->offload_enabled != dev2->offload_enabled) |
| return 1; |
| if (!is_logical_dev_addr_mode(dev1->scsi3addr)) |
| if (dev1->queue_depth != dev2->queue_depth) |
| return 1; |
| return 0; |
| } |
| |
| /* Find needle in haystack. If exact match found, return DEVICE_SAME, |
| * and return needle location in *index. If scsi3addr matches, but not |
| * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle |
| * location in *index. |
| * In the case of a minor device attribute change, such as RAID level, just |
| * return DEVICE_UPDATED, along with the updated device's location in index. |
| * If needle not found, return DEVICE_NOT_FOUND. |
| */ |
| static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle, |
| struct hpsa_scsi_dev_t *haystack[], int haystack_size, |
| int *index) |
| { |
| int i; |
| #define DEVICE_NOT_FOUND 0 |
| #define DEVICE_CHANGED 1 |
| #define DEVICE_SAME 2 |
| #define DEVICE_UPDATED 3 |
| if (needle == NULL) |
| return DEVICE_NOT_FOUND; |
| |
| for (i = 0; i < haystack_size; i++) { |
| if (haystack[i] == NULL) /* previously removed. */ |
| continue; |
| if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) { |
| *index = i; |
| if (device_is_the_same(needle, haystack[i])) { |
| if (device_updated(needle, haystack[i])) |
| return DEVICE_UPDATED; |
| return DEVICE_SAME; |
| } else { |
| /* Keep offline devices offline */ |
| if (needle->volume_offline) |
| return DEVICE_NOT_FOUND; |
| return DEVICE_CHANGED; |
| } |
| } |
| } |
| *index = -1; |
| return DEVICE_NOT_FOUND; |
| } |
| |
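| /* |
| * Remember an offline volume's address on h->offline_device_list |
| * (avoiding duplicates) so that it can be monitored. |
| */ |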
| static void hpsa_monitor_offline_device(struct ctlr_info *h, |
| unsigned char scsi3addr[]) |
| { |
| struct offline_device_entry *device; |
| unsigned long flags; |
| |
| /* Check to see if device is already on the list */ |
| spin_lock_irqsave(&h->offline_device_lock, flags); |
| list_for_each_entry(device, &h->offline_device_list, offline_list) { |
| if (memcmp(device->scsi3addr, scsi3addr, |
| sizeof(device->scsi3addr)) == 0) { |
| spin_unlock_irqrestore(&h->offline_device_lock, flags); |
| return; |
| } |
| } |
| spin_unlock_irqrestore(&h->offline_device_lock, flags); |
| |
| /* Device is not on the list, add it. */ |
| device = kmalloc(sizeof(*device), GFP_KERNEL); |
| if (!device) |
| return; |
| |
| memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); |
| spin_lock_irqsave(&h->offline_device_lock, flags); |
| list_add_tail(&device->offline_list, &h->offline_device_list); |
| spin_unlock_irqrestore(&h->offline_device_lock, flags); |
| } |
| |
| /* Print a message explaining various offline volume states */ |
| static void hpsa_show_volume_status(struct ctlr_info *h, |
| struct hpsa_scsi_dev_t *sd) |
| { |
| if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED) |
| dev_info(&h->pdev->dev, |
| "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n", |
| h->scsi_host->host_no, |
| sd->bus, sd->target, sd->lun); |
| switch (sd->volume_offline) { |
| case HPSA_LV_OK: |
| break; |
| case HPSA_LV_UNDERGOING_ERASE: |
| dev_info(&h->pdev->dev, |
| "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n", |
| h->scsi_host->host_no, |
| sd->bus, sd->target, sd->lun); |
| break; |
| case HPSA_LV_NOT_AVAILABLE: |
| dev_info(&h->pdev->dev, |
| "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n", |
| h->scsi_host->host_no, |
| sd->bus, sd->target, sd->lun); |
| break; |
| case HPSA_LV_UNDERGOING_RPI: |
| dev_info(&h->pdev->dev, |
| "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n", |
| h->scsi_host->host_no, |
| sd->bus, sd->target, sd->lun); |
| break; |
| case HPSA_LV_PENDING_RPI: |
| dev_info(&h->pdev->dev, |
| "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n", |
| h->scsi_host->host_no, |
| sd->bus, sd->target, sd->lun); |
| break; |
| case HPSA_LV_ENCRYPTED_NO_KEY: |
| dev_info(&h->pdev->dev, |
| "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n", |
| h->scsi_host->host_no, |
| sd->bus, sd->target, sd->lun); |
| break; |
| case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: |
| dev_info(&h->pdev->dev, |
| "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n", |
| h->scsi_host->host_no, |
| sd->bus, sd->target, sd->lun); |
| break; |
| case HPSA_LV_UNDERGOING_ENCRYPTION: |
| dev_info(&h->pdev->dev, |
| "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n", |
| h->scsi_host->host_no, |
| sd->bus, sd->target, sd->lun); |
| break; |
| case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: |
| dev_info(&h->pdev->dev, |
| "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n", |
| h->scsi_host->host_no, |
| sd->bus, sd->target, sd->lun); |
| break; |
| case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: |
| dev_info(&h->pdev->dev, |
| "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n", |
| h->scsi_host->host_no, |
| sd->bus, sd->target, sd->lun); |
| break; |
| case HPSA_LV_PENDING_ENCRYPTION: |
| dev_info(&h->pdev->dev, |
| "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n", |
| h->scsi_host->host_no, |
| sd->bus, sd->target, sd->lun); |
| break; |
| case HPSA_LV_PENDING_ENCRYPTION_REKEYING: |
| dev_info(&h->pdev->dev, |
| "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n", |
| h->scsi_host->host_no, |
| sd->bus, sd->target, sd->lun); |
| break; |
| } |
| } |
| |
| /* |
| * Figure out the list of physical drive pointers for a logical drive |
| * with RAID offload configured. |
| */ |
| static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, |
| struct hpsa_scsi_dev_t *dev[], int ndevices, |
| struct hpsa_scsi_dev_t *logical_drive) |
| { |
| struct raid_map_data *map = &logical_drive->raid_map; |
| struct raid_map_disk_data *dd = &map->data[0]; |
| int i, j; |
| int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + |
| le16_to_cpu(map->metadata_disks_per_row); |
| int nraid_map_entries = le16_to_cpu(map->row_cnt) * |
| le16_to_cpu(map->layout_map_count) * |
| total_disks_per_row; |
| int nphys_disk = le16_to_cpu(map->layout_map_count) * |
| total_disks_per_row; |
| int qdepth; |
| |
| if (nraid_map_entries > RAID_MAP_MAX_ENTRIES) |
| nraid_map_entries = RAID_MAP_MAX_ENTRIES; |
| |
| logical_drive->nphysical_disks = nraid_map_entries; |
| |
| qdepth = 0; |
| for (i = 0; i < nraid_map_entries; i++) { |
| logical_drive->phys_disk[i] = NULL; |
| if (!logical_drive->offload_config) |
| continue; |
| for (j = 0; j < ndevices; j++) { |
| if (dev[j] == NULL) |
| continue; |
| if (dev[j]->devtype != TYPE_DISK && |
| dev[j]->devtype != TYPE_ZBC) |
| continue; |
| if (is_logical_device(dev[j])) |
| continue; |
| if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle) |
| continue; |
| |
| logical_drive->phys_disk[i] = dev[j]; |
| if (i < nphys_disk) |
| qdepth = min(h->nr_cmds, qdepth + |
| logical_drive->phys_disk[i]->queue_depth); |
| break; |
| } |
| |
| /* |
| * This can happen if a physical drive is removed and |
| * the logical drive is degraded. In that case, the RAID |
| * map data will refer to a physical disk which isn't actually |
| * present. In that case, offload_enabled should already |
| * be 0, but we turn it off here just in case. |
| */ |
| if (!logical_drive->phys_disk[i]) { |
| logical_drive->offload_enabled = 0; |
| logical_drive->offload_to_be_enabled = 0; |
| logical_drive->queue_depth = 8; |
| } |
| } |
| if (nraid_map_entries) |
| /* |
| * This is correct for reads, too high for full stripe writes, |
| * way too high for partial stripe writes |
| */ |
| logical_drive->queue_depth = qdepth; |
| else |
| logical_drive->queue_depth = h->nr_cmds; |
| } |
| |
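| /* |
| * Walk the device list and recompute the physical disk pointers for |
| * every logical drive that does not currently have ioaccel offload |
| * enabled; drives with offload enabled are expected to have a stable |
| * RAID map and are skipped. |
| */ |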
| static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, |
| struct hpsa_scsi_dev_t *dev[], int ndevices) |
| { |
| int i; |
| |
| for (i = 0; i < ndevices; i++) { |
| if (dev[i] == NULL) |
| continue; |
| if (dev[i]->devtype != TYPE_DISK && |
| dev[i]->devtype != TYPE_ZBC) |
| continue; |
| if (!is_logical_device(dev[i])) |
| continue; |
| |
| /* |
| * If offload is currently enabled, the RAID map and |
| * phys_disk[] assignment *better* not be changing |
| * and since it isn't changing, we do not need to |
| * update it. |
| */ |
| if (dev[i]->offload_enabled) |
| continue; |
| |
| hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]); |
| } |
| } |
| |
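| /* |
| * Expose a newly discovered device to the SCSI mid layer: logical |
| * (RAID) devices go through scsi_add_device(), physical (HBA) devices |
| * are registered on the SAS transport via hpsa_add_sas_device(). |
| */ |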
| static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) |
| { |
| int rc = 0; |
| |
| if (!h->scsi_host) |
| return 1; |
| |
| if (is_logical_device(device)) /* RAID */ |
| rc = scsi_add_device(h->scsi_host, device->bus, |
| device->target, device->lun); |
| else /* HBA */ |
| rc = hpsa_add_sas_device(h->sas_host, device); |
| |
| return rc; |
| } |
| |
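| /* |
| * Count the commands in the command pool that are still outstanding |
| * against the given device. Each slot is pinned with a reference |
| * while it is examined, and only non-idle commands that match the |
| * device are counted. |
| */ |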
| static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h, |
| struct hpsa_scsi_dev_t *dev) |
| { |
| int i; |
| int count = 0; |
| |
| for (i = 0; i < h->nr_cmds; i++) { |
| struct CommandList *c = h->cmd_pool + i; |
| int refcount = atomic_inc_return(&c->refcount); |
| |
| if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, |
| dev->scsi3addr)) { |
| unsigned long flags; |
| |
| spin_lock_irqsave(&h->lock, flags); /* Implied MB */ |
| if (!hpsa_is_cmd_idle(c)) |
| ++count; |
| spin_unlock_irqrestore(&h->lock, flags); |
| } |
| |
| cmd_free(h, c); |
| } |
| |
| return count; |
| } |
| |
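| /* |
| * Poll roughly once per second, for up to about 20 seconds, for all |
| * outstanding commands to the device to drain before it is removed. |
| */ |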
| static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h, |
| struct hpsa_scsi_dev_t *device) |
| { |
| int cmds = 0; |
| int waits = 0; |
| |
| while (1) { |
| cmds = hpsa_find_outstanding_commands_for_dev(h, device); |
| if (cmds == 0) |
| break; |
| if (++waits > 20) |
| break; |
| dev_warn(&h->pdev->dev, |
| "%s: removing device with %d outstanding commands!\n", |
| __func__, cmds); |
| msleep(1000); |
| } |
| } |
| |
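| /* |
| * Remove a device from the SCSI mid layer. Logical (RAID) devices are |
| * torn down with scsi_remove_device(); physical (HBA) devices are |
| * marked removed, drained of outstanding commands, and then removed |
| * from the SAS transport. |
| */ |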
| static void hpsa_remove_device(struct ctlr_info *h, |
| struct hpsa_scsi_dev_t *device) |
| { |
| struct scsi_device *sdev = NULL; |
| |
| if (!h->scsi_host) |
| return; |
| |
| if (is_logical_device(device)) { /* RAID */ |
| sdev = scsi_device_lookup(h->scsi_host, device->bus, |
| device->target, device->lun); |
| if (sdev) { |
| scsi_remove_device(sdev); |
| scsi_device_put(sdev); |
| } else { |
| /* |
| * We don't expect to get here. Future commands |
| * to this device will get a selection timeout as |
| * if the device were gone. |
| */ |
| hpsa_show_dev_msg(KERN_WARNING, h, device, |
| "didn't find device for removal."); |
| } |
| } else { /* HBA */ |
| |
| device->removed = 1; |
| hpsa_wait_for_outstanding_commands_for_dev(h, device); |
| |
| hpsa_remove_sas_device(device); |
| } |
| } |
| |
| static void adjust_hpsa_scsi_table(struct ctlr_info *h, |
| struct hpsa_scsi_dev_t *sd[], int nsds) |
| { |
| /* sd contains scsi3 addresses and devtypes, and inquiry |
| * data. This function takes what's in sd to be the current |
| * reality and updates h->dev[] to reflect that reality. |
| */ |
| int i, entry, device_change, changes = 0; |
| struct hpsa_scsi_dev_t *csd; |
| unsigned long flags; |
| struct hpsa_scsi_dev_t **added, **removed; |
| int nadded, nremoved; |
| |
| /* |
| * A reset can cause a device status to change; |
| * re-schedule the scan to see what happened. |
| */ |
| spin_lock_irqsave(&h->reset_lock, flags); |
| if (h->reset_in_progress) { |
| h->drv_req_rescan = 1; |
| spin_unlock_irqrestore(&h->reset_lock, flags); |
| return; |
| } |
| spin_unlock_irqrestore(&h->reset_lock, flags); |
| |
| added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL); |
| removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL); |
| |
| if (!added || !removed) { |
| dev_warn(&h->pdev->dev, |
| "out of memory in adjust_hpsa_scsi_table\n"); |
| goto free_and_out; |
| } |
| |
| spin_lock_irqsave(&h->devlock, flags); |
| |
| /* find any devices in h->dev[] that are not in |
| * sd[] and remove them from h->dev[], and for any |
| * devices which have changed, remove the old device |
| * info and add the new device info. |
| * If minor device attributes change, just update |
| * the existing device structure. |
| */ |
| i = 0; |
| nremoved = 0; |
| nadded = 0; |
| while (i < h->ndevices) { |
| csd = h->dev[i]; |
| device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry); |
| if (device_change == DEVICE_NOT_FOUND) { |
| changes++; |
| hpsa_scsi_remove_entry(h, i, removed, &nremoved); |
| continue; /* remove ^^^, hence i not incremented */ |
| } else if (device_change == DEVICE_CHANGED) { |
| changes++; |
| hpsa_scsi_replace_entry(h, i, sd[entry], |
| added, &nadded, removed, &nremoved); |
| /* Set it to NULL to prevent it from being freed |
| * at the bottom of hpsa_update_scsi_devices() |
| */ |
| sd[entry] = NULL; |
| } else if (device_change == DEVICE_UPDATED) { |
| hpsa_scsi_update_entry(h, i, sd[entry]); |
| } |
| i++; |
| } |
| |
| /* Now, make sure every device listed in sd[] is also |
| * listed in h->dev[], adding them if they aren't found |
| */ |
| |
| for (i = 0; i < nsds; i++) { |
| if (!sd[i]) /* if already added above. */ |
| continue; |
| |
| /* Don't add devices which are NOT READY, FORMAT IN PROGRESS |
| * as the SCSI mid-layer does not handle such devices well. |
| * It relentlessly loops sending TUR at 3Hz, then READ(10) |
| * at 160Hz, and prevents the system from coming up. |
| */ |
| if (sd[i]->volume_offline) { |
| hpsa_show_volume_status(h, sd[i]); |
| hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline"); |
| continue; |
| } |
| |
| device_change = hpsa_scsi_find_entry(sd[i], h->dev, |
| h->ndevices, &entry); |
| if (device_change == DEVICE_NOT_FOUND) { |
| changes++; |
| if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0) |
| break; |
| sd[i] = NULL; /* prevent from being freed later. */ |
| } else if (device_change == DEVICE_CHANGED) { |
| /* should never happen... */ |
| changes++; |
| dev_warn(&h->pdev->dev, |
| "device unexpectedly changed.\n"); |
| /* but if it does happen, we just ignore that device */ |
| } |
| } |
| hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices); |
| |
| /* Now that h->dev[]->phys_disk[] is coherent, we can enable |
| * any logical drives that need it enabled. |
| */ |
| for (i = 0; i < h->ndevices; i++) { |
| if (h->dev[i] == NULL) |
| continue; |
| h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled; |
| } |
| |
| spin_unlock_irqrestore(&h->devlock, flags); |
| |
| /* Monitor devices which are in one of several NOT READY states to be |
| * brought online later. This must be done without holding h->devlock, |
| * so don't touch h->dev[] |
| */ |
| for (i = 0; i < nsds; i++) { |
| if (!sd[i]) /* if already added above. */ |
| continue; |
| if (sd[i]->volume_offline) |
| hpsa_monitor_offline_device(h, sd[i]->scsi3addr); |
| } |
| |
| /* Don't notify the SCSI mid layer of any changes the first time |
| * through, or if there are no changes; scsi_scan_host will do it |
| * later on the first scan. |
| */ |
| if (!changes) |
| goto free_and_out; |
| |
| /* Notify scsi mid layer of any removed devices */ |
| for (i = 0; i < nremoved; i++) { |
| if (removed[i] == NULL) |
| continue; |
| if (removed[i]->expose_device) |
| hpsa_remove_device(h, removed[i]); |
| kfree(removed[i]); |
| removed[i] = NULL; |
| } |
| |
| /* Notify scsi mid layer of any added devices */ |
| for (i = 0; i < nadded; i++) { |
| int rc = 0; |
| |
| if (added[i] == NULL) |
| continue; |
| if (!(added[i]->expose_device)) |
| continue; |
| rc = hpsa_add_device(h, added[i]); |
| if (!rc) |
| continue; |
| dev_warn(&h->pdev->dev, |
| "addition failed %d, device not added.\n", rc); |
| /* now we have to remove it from h->dev, |
| * since it didn't get added to scsi mid layer |
| */ |
| fixup_botched_add(h, added[i]); |
| h->drv_req_rescan = 1; |
| } |
| |
| free_and_out: |
| kfree(added); |
| kfree(removed); |
| } |
| |
| /* |
| * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t * |
| * Assumes h->devlock is held. |
| */ |
| static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, |
| int bus, int target, int lun) |
| { |
| int i; |
| struct hpsa_scsi_dev_t *sd; |
| |
| for (i = 0; i < h->ndevices; i++) { |
| sd = h->dev[i]; |
| if (sd->bus == bus && sd->target == target && sd->lun == lun) |
| return sd; |
| } |
| return NULL; |
| } |
| |
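| /* |
| * Attach the driver's internal hpsa_scsi_dev_t to a newly allocated |
| * scsi_device. Physical devices are located via their SAS rphy; |
| * everything else falls back to a bus/target/lun lookup. The result |
| * is stashed in sdev->hostdata (NULL if the device is not exposed). |
| */ |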
| static int hpsa_slave_alloc(struct scsi_device *sdev) |
| { |
| struct hpsa_scsi_dev_t *sd = NULL; |
| unsigned long flags; |
| struct ctlr_info *h; |
| |
| h = sdev_to_hba(sdev); |
| spin_lock_irqsave(&h->devlock, flags); |
| if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) { |
| struct scsi_target *starget; |
| struct sas_rphy *rphy; |
| |
| starget = scsi_target(sdev); |
| rphy = target_to_rphy(starget); |
| sd = hpsa_find_device_by_sas_rphy(h, rphy); |
| if (sd) { |
| sd->target = sdev_id(sdev); |
| sd->lun = sdev->lun; |
| } |
| } |
| if (!sd) |
| sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), |
| sdev_id(sdev), sdev->lun); |
| |
| if (sd && sd->expose_device) { |
| atomic_set(&sd->ioaccel_cmds_out, 0); |
| sdev->hostdata = sd; |
| } else |
| sdev->hostdata = NULL; |
| spin_unlock_irqrestore(&h->devlock, flags); |
| return 0; |
| } |
| |
| /* configure scsi device based on internal per-device structure */ |
| static int hpsa_slave_configure(struct scsi_device *sdev) |
| { |
| struct hpsa_scsi_dev_t *sd; |
| int queue_depth; |
| |
| sd = sdev->hostdata; |
| sdev->no_uld_attach = !sd || !sd->expose_device; |
| |
| if (sd) { |
| if (sd->external) |
| queue_depth = EXTERNAL_QD; |
| else |
| queue_depth = sd->queue_depth != 0 ? |
| sd->queue_depth : sdev->host->can_queue; |
| } else |
| queue_depth = sdev->host->can_queue; |
| |
| scsi_change_queue_depth(sdev, queue_depth); |
| |
| return 0; |
| } |
| |
| static void hpsa_slave_destroy(struct scsi_device *sdev) |
| { |
| /* nothing to do. */ |
| } |
| |
| static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h) |
| { |
| int i; |
| |
| if (!h->ioaccel2_cmd_sg_list) |
| return; |
| for (i = 0; i < h->nr_cmds; i++) { |
| kfree(h->ioaccel2_cmd_sg_list[i]); |
| h->ioaccel2_cmd_sg_list[i] = NULL; |
| } |
| kfree(h->ioaccel2_cmd_sg_list); |
| h->ioaccel2_cmd_sg_list = NULL; |
| } |
| |
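| /* |
| * Allocate one ioaccel2 SG chain block per command, each large enough |
| * for h->maxsgentries entries. These hold the spill-over scatter list |
| * for ioaccel2 commands whose SG count exceeds what fits in the |
| * command itself. |
| */ |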
| static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h) |
| { |
| int i; |
| |
| if (h->chainsize <= 0) |
| return 0; |
| |
| h->ioaccel2_cmd_sg_list = |
| kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds, |
| GFP_KERNEL); |
| if (!h->ioaccel2_cmd_sg_list) |
| return -ENOMEM; |
| for (i = 0; i < h->nr_cmds; i++) { |
| h->ioaccel2_cmd_sg_list[i] = |
| kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) * |
| h->maxsgentries, GFP_KERNEL); |
| if (!h->ioaccel2_cmd_sg_list[i]) |
| goto clean; |
| } |
| return 0; |
| |
| clean: |
| hpsa_free_ioaccel2_sg_chain_blocks(h); |
| return -ENOMEM; |
| } |
| |
| static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) |
| { |
| int i; |
| |
| if (!h->cmd_sg_list) |
| return; |
| for (i = 0; i < h->nr_cmds; i++) { |
| kfree(h->cmd_sg_list[i]); |
| h->cmd_sg_list[i] = NULL; |
| } |
| kfree(h->cmd_sg_list); |
| h->cmd_sg_list = NULL; |
| } |
| |
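| /* |
| * Allocate one SG chain block per command for the standard RAID path, |
| * each holding up to h->chainsize descriptors for commands whose |
| * scatter lists do not fit in the CISS command itself. |
| */ |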
| static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h) |
| { |
| int i; |
| |
| if (h->chainsize <= 0) |
| return 0; |
| |
| h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds, |
| GFP_KERNEL); |
| if (!h->cmd_sg_list) |
| return -ENOMEM; |
| |
| for (i = 0; i < h->nr_cmds; i++) { |
| h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) * |
| h->chainsize, GFP_KERNEL); |
| if (!h->cmd_sg_list[i]) |
| goto clean; |
| |
| } |
| return 0; |
| |
| clean: |
| hpsa_free_sg_chain_blocks(h); |
| return -ENOMEM; |
| } |
| |
| static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h, |
| struct io_accel2_cmd *cp, struct CommandList *c) |
| { |
| struct ioaccel2_sg_element *chain_block; |
| u64 temp64; |
| u32 chain_size; |
| |
| chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex]; |
| chain_size = le32_to_cpu(cp->sg[0].length); |
| temp64 = pci_map_single(h->pdev, chain_block, chain_size, |
| PCI_DMA_TODEVICE); |
| if (dma_mapping_error(&h->pdev->dev, temp64)) { |
| /* prevent subsequent unmapping */ |
| cp->sg->address = 0; |
| return -1; |
| } |
| cp->sg->address = cpu_to_le64(temp64); |
| return 0; |
| } |
| |
| static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h, |
| struct io_accel2_cmd *cp) |
| { |
| struct ioaccel2_sg_element *chain_sg; |
| u64 temp64; |
| u32 chain_size; |
| |
| chain_sg = cp->sg; |
| temp64 = le64_to_cpu(chain_sg->address); |
| chain_size = le32_to_cpu(cp->sg[0].length); |
| pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE); |
| } |
| |
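| /* |
| * DMA-map a command's SG chain block and convert the last on-command |
| * SG descriptor into a chain pointer to it. Returns 0 on success, |
| * -1 if the mapping fails (the chain address is cleared so a later |
| * unmap does nothing harmful). |
| */ |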
| static int hpsa_map_sg_chain_block(struct ctlr_info *h, |
| struct CommandList *c) |
| { |
| struct SGDescriptor *chain_sg, *chain_block; |
| u64 temp64; |
| u32 chain_len; |
| |
| chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; |
| chain_block = h->cmd_sg_list[c->cmdindex]; |
| chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN); |
| chain_len = sizeof(*chain_sg) * |
| (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries); |
| chain_sg->Len = cpu_to_le32(chain_len); |
| temp64 = pci_map_single(h->pdev, chain_block, chain_len, |
| PCI_DMA_TODEVICE); |
| if (dma_mapping_error(&h->pdev->dev, temp64)) { |
| /* prevent subsequent unmapping */ |
| chain_sg->Addr = cpu_to_le64(0); |
| return -1; |
| } |
| chain_sg->Addr = cpu_to_le64(temp64); |
| return 0; |
| } |
| |
| static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, |
| struct CommandList *c) |
| { |
| struct SGDescriptor *chain_sg; |
| |
| if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries) |
| return; |
| |
| chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; |
| pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr), |
| le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE); |
| } |
| |
| |
| /* Decode the various types of errors on the ioaccel2 path. |
| * Return 1 for any error that should generate a RAID path retry. |
| * Return 0 for errors that don't require a RAID path retry. |
| */ |
| static int handle_ioaccel_mode2_error(struct ctlr_info *h, |
| struct CommandList *c, |
| struct scsi_cmnd *cmd, |
| struct io_accel2_cmd *c2, |
| struct hpsa_scsi_dev_t *dev) |
| { |
| int data_len; |
| int retry = 0; |
| u32 ioaccel2_resid = 0; |
| |
| switch (c2->error_data.serv_response) { |
| case IOACCEL2_SERV_RESPONSE_COMPLETE: |
| switch (c2->error_data.status) { |
| case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: |
| break; |
| case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: |
| cmd->result |= SAM_STAT_CHECK_CONDITION; |
| if (c2->error_data.data_present != |
| IOACCEL2_SENSE_DATA_PRESENT) { |
| memset(cmd->sense_buffer, 0, |
| SCSI_SENSE_BUFFERSIZE); |
| break; |
| } |
| /* copy the sense data */ |
| data_len = c2->error_data.sense_data_len; |
| if (data_len > SCSI_SENSE_BUFFERSIZE) |
| data_len = SCSI_SENSE_BUFFERSIZE; |
| if (data_len > sizeof(c2->error_data.sense_data_buff)) |
| data_len = |
| sizeof(c2->error_data.sense_data_buff); |
| memcpy(cmd->sense_buffer, |
| c2->error_data.sense_data_buff, data_len); |
| retry = 1; |
| break; |
| case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: |
| retry = 1; |
| break; |
| case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON: |
| retry = 1; |
| break; |
| case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL: |
| retry = 1; |
| break; |
| case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED: |
| retry = 1; |
| break; |
| default: |
| retry = 1; |
| break; |
| } |
| break; |
| case IOACCEL2_SERV_RESPONSE_FAILURE: |
| switch (c2->error_data.status) { |
| case IOACCEL2_STATUS_SR_IO_ERROR: |
| case IOACCEL2_STATUS_SR_IO_ABORTED: |
| case IOACCEL2_STATUS_SR_OVERRUN: |
| retry = 1; |
| break; |
| case IOACCEL2_STATUS_SR_UNDERRUN: |
| cmd->result = (DID_OK << 16); /* host byte */ |
| cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ |
| ioaccel2_resid = get_unaligned_le32( |
| &c2->error_data.resid_cnt[0]); |
| scsi_set_resid(cmd, ioaccel2_resid); |
| break; |
| case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE: |
| case IOACCEL2_STATUS_SR_INVALID_DEVICE: |
| case IOACCEL2_STATUS_SR_IOACCEL_DISABLED: |
| /* |
| * Did an HBA disk disappear? We will eventually |
| * get a state change event from the controller but |
| * in the meantime, we need to tell the OS that the |
| * HBA disk is no longer there and stop I/O |
| * from going down. This allows the potential re-insert |
| * of the disk to get the same device node. |
| */ |
| if (dev->physical_device && dev->expose_device) { |
| cmd->result = DID_NO_CONNECT << 16; |
| dev->removed = 1; |
| h->drv_req_rescan = 1; |
| dev_warn(&h->pdev->dev, |
| "%s: device is gone!\n", __func__); |
| } else |
| /* |
| * Retry by sending down the RAID path. |
| * We will get an event from ctlr to |
| * trigger rescan regardless. |
| */ |
| retry = 1; |
| break; |
| default: |
| retry = 1; |
| } |
| break; |
| case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: |
| break; |
| case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: |
| break; |
| case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: |
| retry = 1; |
| break; |
| case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: |
| break; |
| default: |
| retry = 1; |
| break; |
| } |
| |
| return retry; /* retry on raid path? */ |
| } |
| |
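| /* |
| * Mark a completing command idle and, if a device reset was waiting |
| * on it, drop the device's outstanding reset-command count and wake |
| * the reset handler once the last such command finishes. |
| */ |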
| static void hpsa_cmd_resolve_events(struct ctlr_info *h, |
| struct CommandList *c) |
| { |
| bool do_wake = false; |
| |
| /* |
| * Reset c->scsi_cmd here so that the reset handler will know |
| * this command has completed. Then, check to see if the handler is |
| * waiting for this command, and, if so, wake it. |
| */ |
| c->scsi_cmd = SCSI_CMD_IDLE; |
| mb(); /* Declare command idle before checking for pending events. */ |
| if (c->reset_pending) { |
| unsigned long flags; |
| struct hpsa_scsi_dev_t *dev; |
| |
| /* |
| * There appears to be a reset pending; lock the lock and |
| * reconfirm. If so, then decrement the count of outstanding |
| * commands and wake the reset command if this is the last one. |
| */ |
| spin_lock_irqsave(&h->lock, flags); |
| dev = c->reset_pending; /* Re-fetch under the lock. */ |
| if (dev && atomic_dec_and_test(&dev->reset_cmds_out)) |
| do_wake = true; |
| c->reset_pending = NULL; |
| spin_unlock_irqrestore(&h->lock, flags); |
| } |
| |
| if (do_wake) |
| wake_up_all(&h->event_sync_wait_queue); |
| } |
| |
| static void hpsa_cmd_resolve_and_free(struct ctlr_info *h, |
| struct CommandList *c) |
| { |
| hpsa_cmd_resolve_events(h, c); |
| cmd_tagged_free(h, c); |
| } |
| |
| static void hpsa_cmd_free_and_done(struct ctlr_info *h, |
| struct CommandList *c, struct scsi_cmnd *cmd) |
| { |
| hpsa_cmd_resolve_and_free(h, c); |
| if (cmd && cmd->scsi_done) |
| cmd->scsi_done(cmd); |
| } |
| |
| static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c) |
| { |
| INIT_WORK(&c->work, hpsa_command_resubmit_worker); |
| queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); |
| } |
| |
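| /* |
| * Complete a command that was issued down the ioaccel2 path. Good |
| * status completes the command directly; on a RAID offload failure |
| * for a logical drive, offload may be disabled and the command is |
| * resubmitted on the normal RAID path, as is any other error that |
| * merits a retry. |
| */ |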
| static void process_ioaccel2_completion(struct ctlr_info *h, |
| struct CommandList *c, struct scsi_cmnd *cmd, |
| struct hpsa_scsi_dev_t *dev) |
| { |
| struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; |
| |
| /* check for good status */ |
| if (likely(c2->error_data.serv_response == 0 && |
| c2->error_data.status == 0)) |
| return hpsa_cmd_free_and_done(h, c, cmd); |
| |
| /* |
| * Any RAID offload error results in a retry, which will use |
| * the normal I/O path so the controller can handle whatever's |
| * wrong. |
| */ |
| if (is_logical_device(dev) && |
| c2->error_data.serv_response == |
| IOACCEL2_SERV_RESPONSE_FAILURE) { |
| if (c2->error_data.status == |
| IOACCEL2_STATUS_SR_IOACCEL_DISABLED) { |
| dev->offload_enabled = 0; |
| dev->offload_to_be_enabled = 0; |
| } |
| |
| return hpsa_retry_cmd(h, c); |
| } |
| |
| if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev)) |
| return hpsa_retry_cmd(h, c); |
| |
| return hpsa_cmd_free_and_done(h, c, cmd); |
| } |
| |
| /* Returns 0 on success, < 0 otherwise. */ |
| static int hpsa_evaluate_tmf_status(struct ctlr_info *h, |
| struct CommandList *cp) |
| { |
| u8 tmf_status = cp->err_info->ScsiStatus; |
| |
| switch (tmf_status) { |
| case CISS_TMF_COMPLETE: |
| /* |
| * CISS_TMF_COMPLETE never happens; instead, |
| * ei->CommandStatus == 0 for this case. |
| */ |
| case CISS_TMF_SUCCESS: |
| return 0; |
| case CISS_TMF_INVALID_FRAME: |
| case CISS_TMF_NOT_SUPPORTED: |
| case CISS_TMF_FAILED: |
| case CISS_TMF_WRONG_LUN: |
| case CISS_TMF_OVERLAPPED_TAG: |
| break; |
| default: |
| dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n", |
| tmf_status); |
| break; |
| } |
| return -tmf_status; |
| } |
| |
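| /* |
| * Main completion routine for SCSI commands. It unmaps DMA, handles |
| * controller lockup and pending-reset cases, hands ioaccel2 |
| * completions to process_ioaccel2_completion(), and translates CISS |
| * error/status information into SCSI mid layer result codes. |
| */ |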
| static void complete_scsi_command(struct CommandList *cp) |
| { |
| struct scsi_cmnd *cmd; |
| struct ctlr_info *h; |
| struct ErrorInfo *ei; |
| struct hpsa_scsi_dev_t *dev; |
| struct io_accel2_cmd *c2; |
| |
| u8 sense_key; |
| u8 asc; /* additional sense code */ |
| u8 ascq; /* additional sense code qualifier */ |
| unsigned long sense_data_size; |
| |
| ei = cp->err_info; |
| cmd = cp->scsi_cmd; |
| h = cp->h; |
| |
| if (!cmd->device) { |
| cmd->result = DID_NO_CONNECT << 16; |
| return hpsa_cmd_free_and_done(h, cp, cmd); |
| } |
| |
| dev = cmd->device->hostdata; |
| if (!dev) { |
| cmd->result = DID_NO_CONNECT << 16; |
| return hpsa_cmd_free_and_done(h, cp, cmd); |
| } |
| c2 = &h->ioaccel2_cmd_pool[cp->cmdindex]; |
| |
| scsi_dma_unmap(cmd); /* undo the DMA mappings */ |
| if ((cp->cmd_type == CMD_SCSI) && |
| (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries)) |
| hpsa_unmap_sg_chain_block(h, cp); |
| |
| if ((cp->cmd_type == CMD_IOACCEL2) && |
| (c2->sg[0].chain_indicator == IOACCEL2_CHAIN)) |
| hpsa_unmap_ioaccel2_sg_chain_block(h, c2); |
| |
| cmd->result = (DID_OK << 16); /* host byte */ |
| cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ |
| |
| if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) { |
| if (dev->physical_device && dev->expose_device && |
| dev->removed) { |
| cmd->result = DID_NO_CONNECT << 16; |
| return hpsa_cmd_free_and_done(h, cp, cmd); |
| } |
| if (likely(cp->phys_disk != NULL)) |
| atomic_dec(&cp->phys_disk->ioaccel_cmds_out); |
| } |
| |
| /* |
| * We check for lockup status here as it may be set for |
| * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by |
| * fail_all_outstanding_cmds(). |
| */ |
| if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) { |
| /* DID_NO_CONNECT will prevent a retry */ |
| cmd->result = DID_NO_CONNECT << 16; |
| return hpsa_cmd_free_and_done(h, cp, cmd); |
| } |
| |
| if (unlikely(hpsa_is_pending_event(cp))) |
| if (cp->reset_pending) |
| return hpsa_cmd_free_and_done(h, cp, cmd); |
| |
| if (cp->cmd_type == CMD_IOACCEL2) |
| return process_ioaccel2_completion(h, cp, cmd, dev); |
| |
| scsi_set_resid(cmd, ei->ResidualCnt); |
| if (ei->CommandStatus == 0) |
| return hpsa_cmd_free_and_done(h, cp, cmd); |
| |
| /* For I/O accelerator commands, copy over some fields to the normal |
| * CISS header used below for error handling. |
| */ |
| if (cp->cmd_type == CMD_IOACCEL1) { |
| struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; |
| cp->Header.SGList = scsi_sg_count(cmd); |
| cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList); |
| cp->Request.CDBLen = le16_to_cpu(c->io_flags) & |
| IOACCEL1_IOFLAGS_CDBLEN_MASK; |
| cp->Header.tag = c->tag; |
| memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); |
| memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); |
| |
| /* Any RAID offload error results in a retry, which will use |
| * the normal I/O path so the controller can handle whatever's |
| * wrong. |
| */ |
| if (is_logical_device(dev)) { |
| if (ei->CommandStatus == CMD_IOACCEL_DISABLED) |
| dev->offload_enabled = 0; |
| return hpsa_retry_cmd(h, cp); |
| } |
| } |
| |
| /* an error has occurred */ |
| switch (ei->CommandStatus) { |
| |
| case CMD_TARGET_STATUS: |
| cmd->result |= ei->ScsiStatus; |
| /* copy the sense data */ |
| if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) |
| sense_data_size = SCSI_SENSE_BUFFERSIZE; |
| else |
| sense_data_size = sizeof(ei->SenseInfo); |
| if (ei->SenseLen < sense_data_size) |
| sense_data_size = ei->SenseLen; |
| memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); |
| if (ei->ScsiStatus) |
| decode_sense_data(ei->SenseInfo, sense_data_size, |
| &sense_key, &asc, &ascq); |
| if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { |
| if (sense_key == ABORTED_COMMAND) { |
| cmd->result |= DID_SOFT_ERROR << 16; |
| break; |
| } |
| break; |
| } |
| /* Problem was not a check condition; |
| * pass it up to the upper layers... |
| */ |
| if (ei->ScsiStatus) { |
| dev_warn(&h->pdev->dev, "cp %p has status 0x%x " |
| "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " |
| "Returning result: 0x%x\n", |
| cp, ei->ScsiStatus, |
| sense_key, asc, ascq, |
| cmd->result); |
| } else { /* scsi status is zero??? How??? */ |
| dev_warn(&h->pdev->dev, |
| "cp %p SCSI status was 0. Returning no connection.\n", cp); |
| |
| /* Ordinarily, this case should never happen, |
| * but there is a bug in some released firmware |
| * revisions that allows it to happen if, for |
| * example, a 4100 backplane loses power and |
| * the tape drive is in it. We assume that |
| * it's a fatal error of some kind because we |
| * can't show that it wasn't. We will make it |
| * look like selection timeout since that is |
| * the most common reason for this to occur, |
| * and it's severe enough. |
| */ |
| |
| cmd->result = DID_NO_CONNECT << 16; |
| } |
| break; |
| |
| case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ |
| break; |
| case CMD_DATA_OVERRUN: |
| dev_warn(&h->pdev->dev, |
| "CDB %16phN data overrun\n", cp->Request.CDB); |
| break; |
| case CMD_INVALID: { |
| /* print_bytes(cp, sizeof(*cp), 1, 0); |
| print_cmd(cp); */ |
| /* We get CMD_INVALID if you address a non-existent device |
| * instead of a selection timeout (no response). You will |
| * see this if you yank out a drive, then try to access it. |
| * This is kind of a shame because it means that any other |
| * CMD_INVALID (e.g. driver bug) will get interpreted as a |
| * missing target. */ |
| cmd->result = DID_NO_CONNECT << 16; |
| } |
| break; |
| case CMD_PROTOCOL_ERR: |
| cmd->result = DID_ERROR << 16; |
| dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n", |
| cp->Request.CDB); |
| break; |
| case CMD_HARDWARE_ERR: |
| cmd->result = DID_ERROR << 16; |
| dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n", |
| cp->Request.CDB); |
| break; |
| case CMD_CONNECTION_LOST: |
| cmd->result = DID_ERROR << 16; |
| dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n", |
| cp->Request.CDB); |
| break; |
| case CMD_ABORTED: |
| cmd->result = DID_ABORT << 16; |
| break; |
| case CMD_ABORT_FAILED: |
| cmd->result = DID_ERROR << 16; |
| dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n", |
| cp->Request.CDB); |
| break; |
| case CMD_UNSOLICITED_ABORT: |
| cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ |
| dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n", |
| cp->Request.CDB); |
| break; |
| case CMD_TIMEOUT: |
| cmd->result = DID_TIME_OUT << 16; |
| dev_warn(&h->pdev->dev, "CDB %16phN timed out\n", |
| cp->Request.CDB); |
| break; |
| case CMD_UNABORTABLE: |
| cmd->result = DID_ERROR << 16; |
| dev_warn(&h->pdev->dev, "Command unabortable\n"); |
| break; |
| case CMD_TMF_STATUS: |
| if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */ |
| cmd->result = DID_ERROR << 16; |
| break; |
| case CMD_IOACCEL_DISABLED: |
| /* This only handles the direct pass-through case since RAID |
| * offload is handled above. Just attempt a retry. |
| */ |
| cmd->result = DID_SOFT_ERROR << 16; |
| dev_warn(&h->pdev->dev, |
| "cp %p had HP SSD Smart Path error\n", cp); |
| break; |
| default: |
| cmd->result = DID_ERROR << 16; |
| dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", |
| cp, ei->CommandStatus); |
| } |
| |
| return hpsa_cmd_free_and_done(h, cp, cmd); |
| } |
| |
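| /* Undo the PCI DMA mappings for the first sg_used SG descriptors. */ |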
| static void hpsa_pci_unmap(struct pci_dev *pdev, |
| struct CommandList *c, int sg_used, int data_direction) |
| { |
| int i; |
| |
| for (i = 0; i < sg_used; i++) |
| pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr), |
| le32_to_cpu(c->SG[i].Len), |
| data_direction); |
| } |
| |
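| /* |
| * DMA-map a single kernel buffer into the command's first (and only) |
| * SG descriptor. A zero-length buffer or PCI_DMA_NONE direction leaves |
| * the command with no SG entries; a mapping failure returns -1 with |
| * the SG counts cleared so nothing is unmapped later. |
| */ |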
| static int hpsa_map_one(struct pci_dev *pdev, |
| struct CommandList *cp, |
| unsigned char *buf, |
| size_t buflen, |
| int data_direction) |
| { |
| u64 addr64; |
| |
| if (buflen == 0 || data_direction == PCI_DMA_NONE) { |
| cp->Header.SGList = 0; |
| cp->Header.SGTotal = cpu_to_le16(0); |
| return 0; |
| } |
| |
| addr64 = pci_map_single(pdev, buf, buflen, data_direction); |
| if (dma_mapping_error(&pdev->dev, addr64)) { |
| /* Prevent subsequent unmap of something never mapped */ |
| cp->Header.SGList = 0; |
| cp->Header.SGTotal = cpu_to_le16(0); |
| return -1; |
| } |
| cp->SG[0].Addr = cpu_to_le64(addr64); |
| cp->SG[0].Len = cpu_to_le32(buflen); |
| cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */ |
| cp->Header.SGList = 1; /* no. SGs contig in this cmd */ |
| cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */ |
| return 0; |
| } |
| |
| #define NO_TIMEOUT ((unsigned long) -1) |
| #define DEFAULT_TIMEOUT 30000 /* milliseconds */ |
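| /* |
| * Issue an already-built command and wait on an on-stack completion |
| * for it to finish, either indefinitely (NO_TIMEOUT) or for up to |
| * timeout_msecs, returning -ETIMEDOUT if the wait expires. |
| */ |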
| static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, |
| struct CommandList *c, int reply_queue, unsigned long timeout_msecs) |
| { |
| DECLARE_COMPLETION_ONSTACK(wait); |
| |
| c->waiting = &wait; |
| __enqueue_cmd_and_start_io(h, c, reply_queue); |
| if (timeout_msecs == NO_TIMEOUT) { |
| /* TODO: get rid of this no-timeout thing */ |
| wait_for_completion_io(&wait); |
| return IO_OK; |
| } |
| if (!wait_for_completion_io_timeout(&wait, |
| msecs_to_jiffies(timeout_msecs))) { |
| dev_warn(&h->pdev->dev, "Command timed out.\n"); |
| return -ETIMEDOUT; |
| } |
| return IO_OK; |
| } |
| |
| static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c, |
| int reply_queue, unsigned long timeout_msecs) |
| { |
| if (unlikely(lockup_detected(h))) { |
| c->err_info->CommandStatus = CMD_CTLR_LOCKUP; |
| return IO_OK; |
| } |
| return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs); |
| } |
| |
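| /* Read this CPU's copy of the per-cpu controller-lockup flag. */ |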
| static u32 lockup_detected(struct ctlr_info *h) |
| { |
| int cpu; |
| u32 rc, *lockup_detected; |
| |
| cpu = get_cpu(); |
| lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); |
| rc = *lockup_detected; |
| put_cpu(); |
| return rc; |
| } |
| |
| #define MAX_DRIVER_CMD_RETRIES 25 |
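| /* |
| * Issue a command and retry it (up to MAX_DRIVER_CMD_RETRIES times) |
| * while it completes with a unit attention or busy status. After the |
| * first few attempts a delay is inserted between retries, doubling |
| * from 10 ms until it exceeds one second. Illustrative caller sketch |
| * only (fill_cmd() builds the command elsewhere in this driver): |
| * |
| * if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, 0, scsi3addr, TYPE_CMD)) |
| * return -1; |
| * rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, |
| * DEFAULT_TIMEOUT); |
| */ |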
| static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, |
| struct CommandList *c, int data_direction, unsigned long timeout_msecs) |
| { |
| int backoff_time = 10, retry_count = 0; |
| int rc; |
| |
| do { |
| memset(c->err_info, 0, sizeof(*c->err_info)); |
| rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, |
| timeout_msecs); |
| if (rc) |
| break; |
| retry_count++; |
| if (retry_count > 3) { |
| msleep(backoff_time); |
| if (backoff_time < 1000) |
| backoff_time *= 2; |
| } |
| } while ((check_for_unit_attention(h, c) || |
| check_for_busy(h, c)) && |
| retry_count <= MAX_DRIVER_CMD_RETRIES); |
| hpsa_pci_unmap(h->pdev, c, 1, data_direction); |
| if (retry_count > MAX_DRIVER_CMD_RETRIES) |
| rc = -EIO; |
| return rc; |
| } |
| |
| static void hpsa_print_cmd(struct ctlr_info *h, char *txt, |
| struct CommandList *c) |
| { |
| const u8 *cdb = c->Request.CDB; |
| const u8 *lun = c->Header.LUN.LunAddrBytes; |
| |
| dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n", |
| txt, lun, cdb); |
| } |
| |
| static void hpsa_scsi_interpret_error(struct ctlr_info *h, |
| struct CommandList *cp) |
| { |
| const struct ErrorInfo *ei = cp->err_info; |
| struct device *d = &cp->h->pdev->dev; |
| u8 sense_key, asc, ascq; |
| int sense_len; |
| |
| switch (ei->CommandStatus) { |
| case CMD_TARGET_STATUS: |
|