diff --git a/sys/dev/mrsas/mrsas.c b/sys/dev/mrsas/mrsas.c
index 2f531bb44674..a865f5ab04d3 100644
--- a/sys/dev/mrsas/mrsas.c
+++ b/sys/dev/mrsas/mrsas.c
@@ -1,5052 +1,5063 @@
/*
 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
 * Support: freebsdraid@avagotech.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer. 2. Redistributions
 * in binary form must reproduce the above copyright notice, this list of
 * conditions and the following disclaimer in the documentation and/or other
 * materials provided with the distribution. 3. Neither the name of the
 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
 * promote products derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 *
 * Send feedback to: Mail to: AVAGO TECHNOLOGIES 1621
 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mrsas/mrsas.h>
#include <dev/mrsas/mrsas_ioctl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/sysent.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <sys/endian.h>

/*
 * Function prototypes
 */
static d_open_t mrsas_open;
static d_close_t mrsas_close;
static d_ioctl_t mrsas_ioctl;
static d_poll_t mrsas_poll;

static void mrsas_ich_startup(void *arg);
static struct mrsas_mgmt_info mrsas_mgmt_info;
static struct mrsas_ident *mrsas_find_ident(device_t);
static int mrsas_setup_msix(struct mrsas_softc *sc);
static int mrsas_allocate_msix(struct mrsas_softc *sc);
static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
static void mrsas_flush_cache(struct mrsas_softc *sc);
static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
static void mrsas_ocr_thread(void *arg);
static int mrsas_get_map_info(struct mrsas_softc *sc);
static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
static int mrsas_sync_map_info(struct mrsas_softc *sc);
static int mrsas_get_pd_list(struct mrsas_softc *sc);
static int mrsas_get_ld_list(struct mrsas_softc *sc);
static int mrsas_setup_irq(struct mrsas_softc *sc);
static int mrsas_alloc_mem(struct mrsas_softc *sc);
static int mrsas_init_fw(struct mrsas_softc *sc);
static int mrsas_setup_raidmap(struct mrsas_softc *sc);
static void megasas_setup_jbod_map(struct mrsas_softc *sc);
static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
static int mrsas_clear_intr(struct mrsas_softc *sc);
static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd_to_abort);
static void mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id);
static struct mrsas_softc *mrsas_get_softc_instance(struct cdev *dev,
    u_long cmd, caddr_t arg);
u_int32_t mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset);
u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *mfi_cmd);
void	mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
int	mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
int	mrsas_init_adapter(struct mrsas_softc *sc);
int	mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
int	mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
int	mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
int	mrsas_ioc_init(struct mrsas_softc *sc);
int	mrsas_bus_scan(struct mrsas_softc *sc);
int	mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
int	mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
int	mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
int	mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
int	mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
int	mrsas_reset_targets(struct mrsas_softc *sc);
int	mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd);
int	mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
    int size);
void	mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
void	mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void	mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void	mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void	mrsas_disable_intr(struct mrsas_softc *sc);
void	mrsas_enable_intr(struct mrsas_softc *sc);
void	mrsas_free_ioc_cmd(struct mrsas_softc *sc);
void	mrsas_free_mem(struct mrsas_softc *sc);
void	mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
void	mrsas_isr(void *arg);
void	mrsas_teardown_intr(struct mrsas_softc *sc);
void	mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
void	mrsas_kill_hba(struct mrsas_softc *sc);
void	mrsas_aen_handler(struct mrsas_softc *sc);
void	mrsas_write_reg(struct mrsas_softc *sc, int offset, u_int32_t value);
void	mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi);
void	mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
void	mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd, u_int8_t status);
struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd);
extern int mrsas_cam_attach(struct mrsas_softc *sc);
extern void mrsas_cam_detach(struct mrsas_softc *sc);
extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
extern void mrsas_free_frame(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd);
extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map);
extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map);
extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
extern void mrsas_xpt_release(struct mrsas_softc *sc);
extern MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(
    struct mrsas_softc *sc, u_int16_t index);
extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
void	mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
void	mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, union ccb *ccb_ptr,
    u_int8_t status, u_int8_t extStatus, u_int32_t data_length,
    u_int8_t *sense);
void	mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi);

SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "MRSAS Driver Parameters");

/*
 * PCI device struct and table
 *
 */
typedef struct mrsas_ident {
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;
	const char *desc;
}	MRSAS_CTLR_ID;

MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
	{0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
	{0x1000, MRSAS_VENTURA, 0xffff, 0xffff, "AVAGO Ventura SAS Controller"},
	{0x1000, MRSAS_CRUSADER, 0xffff, 0xffff, "AVAGO Crusader SAS Controller"},
	{0x1000, MRSAS_HARPOON, 0xffff, 0xffff, "AVAGO Harpoon SAS Controller"},
	{0x1000, MRSAS_TOMCAT, 0xffff, 0xffff, "AVAGO Tomcat SAS Controller"},
	{0x1000, MRSAS_VENTURA_4PORT, 0xffff, 0xffff, "AVAGO Ventura_4Port SAS Controller"},
	{0x1000, MRSAS_CRUSADER_4PORT, 0xffff, 0xffff, "AVAGO Crusader_4Port SAS Controller"},
	{0x1000, MRSAS_AERO_10E0, 0xffff, 0xffff, "BROADCOM AERO-10E0 SAS Controller"},
	{0x1000, MRSAS_AERO_10E1, 0xffff, 0xffff, "BROADCOM AERO-10E1 SAS Controller"},
	{0x1000, MRSAS_AERO_10E2, 0xffff, 0xffff, "BROADCOM AERO-10E2 SAS Controller"},
	{0x1000, MRSAS_AERO_10E3, 0xffff, 0xffff, "BROADCOM AERO-10E3 SAS Controller"},
	{0x1000, MRSAS_AERO_10E4, 0xffff, 0xffff, "BROADCOM AERO-10E4 SAS Controller"},
	{0x1000, MRSAS_AERO_10E5, 0xffff, 0xffff, "BROADCOM AERO-10E5 SAS Controller"},
	{0x1000, MRSAS_AERO_10E6, 0xffff, 0xffff, "BROADCOM AERO-10E6 SAS Controller"},
	{0x1000, MRSAS_AERO_10E7, 0xffff, 0xffff, "BROADCOM AERO-10E7 SAS Controller"},
	{0, 0, 0, 0, NULL}
};

/*
 * Character device entry points
 *
 */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};

MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");

int
mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{

	return (0);
}

int
mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{

	return (0);
}

u_int32_t
mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset)
{
	u_int32_t i = 0, ret_val;

	if (sc->is_aero) {
		do {
			ret_val = mrsas_read_reg(sc, offset);
			i++;
		} while (ret_val == 0 && i < 3);
	} else
		ret_val = mrsas_read_reg(sc, offset);

	return ret_val;
}

/*
 * Register Read/Write Functions
 *
 */
void
mrsas_write_reg(struct mrsas_softc *sc, int offset,
    u_int32_t value)
{
	bus_space_tag_t bus_tag = sc->bus_tag;
	bus_space_handle_t bus_handle = sc->bus_handle;

	bus_space_write_4(bus_tag, bus_handle, offset, value);
}

u_int32_t
mrsas_read_reg(struct mrsas_softc *sc, int offset)
{
	bus_space_tag_t bus_tag = sc->bus_tag;
	bus_space_handle_t bus_handle = sc->bus_handle;

	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
}

/*
 * Interrupt Disable/Enable/Clear Functions
 *
 */
void
mrsas_disable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = 0xFFFFFFFF;

	sc->mask_interrupts = 1;
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
	/* Dummy read to force pci flush */
	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}

void
mrsas_enable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;

	sc->mask_interrupts = 0;
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}

static int
mrsas_clear_intr(struct mrsas_softc *sc)
{
	u_int32_t status;

	/* Read received interrupt */
	status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/* Not our interrupt, so just return */
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return (0);

	/* We got a reply interrupt */
	return (1);
}

/*
 * PCI Support Functions
 *
 */
static struct mrsas_ident *
mrsas_find_ident(device_t dev)
{
	struct mrsas_ident *pci_device;

	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
		if ((pci_device->vendor == pci_get_vendor(dev)) &&
		    (pci_device->device == pci_get_device(dev)) &&
		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
		    (pci_device->subvendor == 0xffff)) &&
		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
		    (pci_device->subdevice == 0xffff)))
			return (pci_device);
	}
	return (NULL);
}
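/*
 * mrsas_probe:	PCI entry point
 * input:	pointer to device struct
 *
 * Matches the adapter against device_table via mrsas_find_ident() (0xffff
 * subvendor/subdevice entries act as wildcards) and, on a match, bids just
 * below BUS_PROBE_DEFAULT so that a more specific driver can still win the
 * device.
 */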
static int
mrsas_probe(device_t dev)
{
	static u_int8_t first_ctrl = 1;
	struct mrsas_ident *id;

	if ((id = mrsas_find_ident(dev)) != NULL) {
		if (first_ctrl) {
			printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
			    MRSAS_VERSION);
			first_ctrl = 0;
		}
		device_set_desc(dev, id->desc);
		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
		return (-30);
	}
	return (ENXIO);
}

/*
 * mrsas_setup_sysctl:	setup sysctl values for mrsas
 * input:		Adapter instance soft state
 *
 * Setup sysctl entries for mrsas driver.
 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of OCRs since boot");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in milliseconds");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "OCR in progress status");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "block_sync_cache", CTLFLAG_RW,
	    &sc->block_sync_cache, 0,
	    "Block SYNC CACHE at driver");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "stream detection", CTLFLAG_RW,
	    &sc->drv_stream_detection, 0,
	    "Disable/Enable Stream detection");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "prp_count", CTLFLAG_RD,
	    &sc->prp_count.val_rdonly, 0, "Number of IOs for which PRPs are built");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "SGE holes", CTLFLAG_RD,
	    &sc->sge_holes.val_rdonly, 0, "Number of IOs with holes in SGEs");
}
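/*
 * Usage note (illustrative, assuming unit 0): the OIDs added above normally
 * land under the device's sysctl tree, e.g. `sysctl dev.mrsas.0.mrsas_debug=31`
 * to raise the debug level at runtime and `sysctl dev.mrsas.0.reset_count` to
 * read back the OCR count; the hw.mrsas.<unit> node is created only as a
 * fallback when no device sysctl context exists.
 */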
/*
 * mrsas_get_tunables:	get tunable parameters.
 * input:		Adapter instance soft state
 *
 * Get tunable parameters. This will help to debug the driver at boot time.
 */
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
	char tmpstr[80];

	/* XXX default to some debugging for now */
	sc->mrsas_debug =
	    (MRSAS_FAULT | MRSAS_OCR | MRSAS_INFO | MRSAS_TRACE | MRSAS_AEN);
	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
	sc->mrsas_fw_fault_check_delay = 1;
	sc->reset_count = 0;
	sc->reset_in_progress = 0;
	sc->block_sync_cache = 0;
	sc->drv_stream_detection = 1;

	/*
	 * Grab the global variables.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);

	/* Grab the unit-instance variables */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
	    device_get_unit(sc->mrsas_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}

/*
 * mrsas_alloc_evt_log_info_cmd:	Allocates memory to get event log information.
 *					Used to get sequence number at driver load time.
 * input:				Adapter soft state
 *
 * Allocates DMAable memory for the event log info internal command.
 */
int
mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
{
	int el_info_size;

	/* Allocate get event log info command */
	el_info_size = sizeof(struct mrsas_evt_log_info);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    el_info_size,
	    1,
	    el_info_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->el_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
		return (ENOMEM);
	}
	memset(sc->el_info_mem, 0, el_info_size);
	return (0);
}

/*
 * mrsas_free_evt_log_info_cmd:	Free memory for Event log info command
 * input:			Adapter soft state
 *
 * Deallocates memory for the event log info internal command.
 */
void
mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
{
	if (sc->el_info_phys_addr)
		bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
	if (sc->el_info_mem != NULL)
		bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
	if (sc->el_info_tag != NULL)
		bus_dma_tag_destroy(sc->el_info_tag);
}
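/*
 * The tag/alloc/load triple above is the busdma pattern repeated for every
 * DMA buffer in this driver: bus_dma_tag_create() describes the constraints,
 * bus_dmamem_alloc() obtains kernel-mapped memory, and bus_dmamap_load()
 * invokes mrsas_addr_cb() to record the bus address. Teardown mirrors it in
 * reverse (unload, free, destroy), as mrsas_free_evt_log_info_cmd() shows.
 */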
/*
 * mrsas_get_seq_num:	Get latest event sequence number
 * @sc:			Adapter soft state
 * @eli:		Firmware event log sequence number information.
 *
 * Firmware maintains a log of all events in a non-volatile area.
 * The driver gets the sequence number using the DCMD
 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
 */
static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	u_int8_t do_ocr = 1, retcode = 0;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_log_info));
	dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(sc->el_info_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_log_info));

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	do_ocr = 0;
	/*
	 * Copy the data back into callers buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);

dcmd_timeout:
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return retcode;
}

/*
 * mrsas_register_aen:	Register for asynchronous event notification
 * @sc:			Adapter soft state
 * @seq_num:		Starting sequence number
 * @class_locale:	Class of the event
 *
 * This function subscribes for events beyond the @seq_num
 * and type @class_locale.
 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there is an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is a superset of both old and current, and
	 * re-issue it to the FW.
	 */
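	/*
	 * Worked example (illustrative values, not taken from the source):
	 * if the pending AEN was registered with class PROGRESS (-1) and
	 * locale 0x0001, and a new request asks for a higher class with
	 * locale 0x0010, the locale test below fails
	 * ((0x0001 & 0x0010) ^ 0x0010 != 0), so the old command is aborted
	 * and re-issued with class PROGRESS (the smaller enum value wins)
	 * and locale 0x0011 (the union of both bitmaps).
	 */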
	curr_aen.word = class_locale_word;
	if (sc->aen_cmd) {
		prev_aen.word = le32toh(sc->aen_cmd->frame->dcmd.mbox.w[1]);

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort previous AEN command\n");
				return ret_val;
			} else
				sc->aen_cmd = NULL;
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd)
		return ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_detail));
	dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	dcmd->mbox.w[0] = htole32(seq_num);
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = htole32(curr_aen.word);
	dcmd->sgl.sge32[0].phys_addr =
	    htole32((u_int32_t)sc->evt_detail_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_detail));

	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app.
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}

/*
 * mrsas_start_aen:	Subscribes to AEN during driver load time
 * @instance:		Adapter soft state
 */
static int
mrsas_start_aen(struct mrsas_softc *sc)
{
	struct mrsas_evt_log_info eli;
	union mrsas_evt_class_locale class_locale;

	/* Get the latest sequence number from FW */
	memset(&eli, 0, sizeof(eli));

	if (mrsas_get_seq_num(sc, &eli))
		return -1;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
	    class_locale.word);
}

/*
 * mrsas_setup_msix:	Allocate MSI-x vectors
 * @sc:			adapter soft state
 */
static int
mrsas_setup_msix(struct mrsas_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_vectors; i++) {
		sc->irq_context[i].sc = sc;
		sc->irq_context[i].MSIxIndex = i;
		sc->irq_id[i] = i + 1;
		sc->mrsas_irq[i] = bus_alloc_resource_any(sc->mrsas_dev,
		    SYS_RES_IRQ, &sc->irq_id[i], RF_ACTIVE);
		if (sc->mrsas_irq[i] == NULL) {
			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
			goto irq_alloc_failed;
		}
		if (bus_setup_intr(sc->mrsas_dev,
		    sc->mrsas_irq[i],
		    INTR_MPSAFE | INTR_TYPE_CAM,
		    NULL, mrsas_isr, &sc->irq_context[i],
		    &sc->intr_handle[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot set up MSI-x interrupt handler\n");
			goto irq_alloc_failed;
		}
	}
	return SUCCESS;

irq_alloc_failed:
	mrsas_teardown_intr(sc);
	return (FAIL);
}

/*
 * mrsas_allocate_msix:	Setup MSI-x vectors
 * @sc:			adapter soft state
 */
static int
mrsas_allocate_msix(struct mrsas_softc *sc)
{
	if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
		device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
		    " of vectors\n", sc->msix_vectors);
	} else {
		device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
		goto irq_alloc_failed;
	}
	return SUCCESS;

irq_alloc_failed:
	mrsas_teardown_intr(sc);
	return (FAIL);
}
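/*
 * pci_alloc_msix(9) treats the count as in/out: sc->msix_vectors carries the
 * number of vectors requested and is updated with the number actually
 * granted, which mrsas_setup_msix() then binds one at a time to mrsas_isr().
 */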
/*
 * mrsas_attach:	PCI entry point
 * input:		pointer to device struct
 *
 * Performs setup of PCI and registers, initializes mutexes and linked lists,
 * registers interrupts and CAM, and initializes the adapter/controller to
 * its proper state.
 */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, error;

	memset(sc, 0, sizeof(struct mrsas_softc));

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	switch (sc->device_id) {
	case MRSAS_INVADER:
	case MRSAS_FURY:
	case MRSAS_INTRUDER:
	case MRSAS_INTRUDER_24:
	case MRSAS_CUTLASS_52:
	case MRSAS_CUTLASS_53:
		sc->mrsas_gen3_ctrl = 1;
		break;
	case MRSAS_VENTURA:
	case MRSAS_CRUSADER:
	case MRSAS_HARPOON:
	case MRSAS_TOMCAT:
	case MRSAS_VENTURA_4PORT:
	case MRSAS_CRUSADER_4PORT:
		sc->is_ventura = true;
		break;
	case MRSAS_AERO_10E1:
	case MRSAS_AERO_10E5:
		device_printf(dev, "Adapter is in configurable secure mode\n");
	case MRSAS_AERO_10E2:
	case MRSAS_AERO_10E6:
		sc->is_aero = true;
		break;
	case MRSAS_AERO_10E0:
	case MRSAS_AERO_10E3:
	case MRSAS_AERO_10E4:
	case MRSAS_AERO_10E7:
		device_printf(dev, "Adapter is in non-secure mode\n");
		return SUCCESS;
	}

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	/* Force the busmaster enable bit on. */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/* For Ventura/Aero system registers are mapped to BAR0 */
	if (sc->is_ventura || sc->is_aero)
		sc->reg_res_id = PCIR_BAR(0);	/* BAR0 offset */
	else
		sc->reg_res_id = PCIR_BAR(1);	/* BAR1 offset */

	if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), RF_ACTIVE)) == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Initialize mutexes */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
	mtx_init(&sc->stream_lock, "mrsas_stream_lock", NULL, MTX_DEF);

	/* Initialize linked lists */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);
	mrsas_atomic_set(&sc->target_reset_outstanding, 0);
	mrsas_atomic_set(&sc->prp_count, 0);
	mrsas_atomic_set(&sc->sge_holes, 0);

	sc->io_cmds_highwater = 0;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register mrsas to CAM layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
		goto attach_fail_ocr_thread;
	}
	/*
	 * After FW initialization and OCR thread creation
	 * we will defer the cdev creation, AEN setup on ICH callback
	 */
	sc->mrsas_ich.ich_func = mrsas_ich_startup;
	sc->mrsas_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
		device_printf(sc->mrsas_dev,
		    "Config hook is already established\n");
	}
	mrsas_setup_sysctl(sc);
	return SUCCESS;

attach_fail_ocr_thread:
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);
attach_fail:
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}

/*
 * Interrupt config hook
 */
static void
mrsas_ich_startup(void *arg)
{
	int i = 0;
	struct mrsas_softc *sc = (struct mrsas_softc *)arg;

	/*
	 * Initialize a counting semaphore to cap the number of
	 * concurrent IOCTLs
	 */
	sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
	    IOCTL_SEMA_DESCRIPTION);

	/* Create a /dev entry for mrsas controller. */
	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
	    device_get_unit(sc->mrsas_dev));

	if (device_get_unit(sc->mrsas_dev) == 0) {
		make_dev_alias_p(MAKEDEV_CHECKNAME,
		    &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
		    "megaraid_sas_ioctl_node");
	}
	if (sc->mrsas_cdev)
		sc->mrsas_cdev->si_drv1 = sc;

	/*
	 * Add this controller to mrsas_mgmt_info structure so that it can be
	 * exported to management applications
	 */
	if (device_get_unit(sc->mrsas_dev) == 0)
		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));

	mrsas_mgmt_info.count++;
	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
	mrsas_mgmt_info.max_index++;

	/* Enable Interrupts */
	mrsas_enable_intr(sc);

	/* Call DCMD get_pd_info for all system PDs */
	for (i = 0; i < MRSAS_MAX_PD; i++) {
		if ((sc->target_list[i].target_id != 0xffff) &&
		    sc->pd_info_mem)
			mrsas_get_pd_info(sc, sc->target_list[i].target_id);
	}

	/* Initiate AEN (Asynchronous Event Notification) */
	if (mrsas_start_aen(sc)) {
		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
		    "Further events from the controller will not be communicated.\n"
		    "Either there is some problem in the controller"
		    "or the controller does not support AEN.\n"
		    "Please contact to the SUPPORT TEAM if the problem persists\n");
	}
	if (sc->mrsas_ich.ich_arg != NULL) {
		device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
		config_intrhook_disestablish(&sc->mrsas_ich);
		sc->mrsas_ich.ich_arg = NULL;
	}
}
/*
 * mrsas_detach:	De-allocates and teardown resources
 * input:		pointer to device struct
 *
 * This function is the entry point for device disconnect and detach.
 * It performs memory de-allocations, shutdown of the controller and various
 * teardown and destroy resource functions.
 */
static int
mrsas_detach(device_t dev)
{
	struct mrsas_softc *sc;
	int i = 0;

	sc = device_get_softc(dev);
	sc->remove_in_progress = 1;

	/* Destroy the character device so no other IOCTL will be handled */
	if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
		destroy_dev(sc->mrsas_linux_emulator_cdev);
	destroy_dev(sc->mrsas_cdev);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index; we let this be a sparse array.
	 */
	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
			mrsas_mgmt_info.count--;
			mrsas_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}

	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);

	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_shutdown", hz);
	}
	i = 0;
	while (sc->ocr_thread_active) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "mrsas_ocr thread to quit ocr %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_shutdown", hz);
	}
	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);

	if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
			free(sc->streamDetectByLD[i], M_MRSAS);
		free(sc->streamDetectByLD, M_MRSAS);
		sc->streamDetectByLD = NULL;
	}
	mrsas_cam_detach(sc);
	mrsas_teardown_intr(sc);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);

	/* Wait for all the semaphores to be released */
	while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
		pause("mr_shutdown", hz);

	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);

	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return (0);
}

static int
mrsas_shutdown(device_t dev)
{
	struct mrsas_softc *sc;
	int i;

	sc = device_get_softc(dev);
	sc->remove_in_progress = 1;
	if (!KERNEL_PANICKED()) {
		if (sc->ocr_thread_active)
			wakeup(&sc->ocr_chan);
		i = 0;
		while (sc->reset_in_progress && i < 15) {
			i++;
			if ((i % MRSAS_RESET_NOTICE_INTERVAL) == 0) {
				mrsas_dprint(sc, MRSAS_INFO,
				    "[%2d]waiting for OCR to be finished "
				    "from %s\n", i, __func__);
			}
			pause("mr_shutdown", hz);
		}
		if (sc->reset_in_progress) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "gave up waiting for OCR to be finished\n");
			return (0);
		}
	}
	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);
	return (0);
}
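/*
 * Both mrsas_detach() and mrsas_shutdown() end with the same quiesce
 * sequence: flush the controller cache, send MR_DCMD_CTRL_SHUTDOWN and mask
 * interrupts. The shutdown path additionally gives up after roughly 15
 * seconds if an OCR is still in flight, and skips all waiting on a panicked
 * kernel.
 */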
/*
 * mrsas_free_mem:	Frees allocated memory
 * input:		Adapter instance soft state
 *
 * This function is called from mrsas_detach() to free previously allocated
 * memory.
 */
void
mrsas_free_mem(struct mrsas_softc *sc)
{
	int i;
	u_int32_t max_fw_cmds;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;

	/*
	 * Free RAID map memory
	 */
	for (i = 0; i < 2; i++) {
		if (sc->raidmap_phys_addr[i])
			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_mem[i] != NULL)
			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->raidmap_tag[i]);

		if (sc->ld_drv_map[i] != NULL)
			free(sc->ld_drv_map[i], M_MRSAS);
	}
	for (i = 0; i < 2; i++) {
		if (sc->jbodmap_phys_addr[i])
			bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_mem[i] != NULL)
			bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->jbodmap_tag[i]);
	}
	/*
	 * Free version buffer memory
	 */
	if (sc->verbuf_phys_addr)
		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
	if (sc->verbuf_mem != NULL)
		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
	if (sc->verbuf_tag != NULL)
		bus_dma_tag_destroy(sc->verbuf_tag);

	/*
	 * Free sense buffer memory
	 */
	if (sc->sense_phys_addr)
		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
	if (sc->sense_mem != NULL)
		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
	if (sc->sense_tag != NULL)
		bus_dma_tag_destroy(sc->sense_tag);

	/*
	 * Free chain frame memory
	 */
	if (sc->chain_frame_phys_addr)
		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
	if (sc->chain_frame_mem != NULL)
		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
	if (sc->chain_frame_tag != NULL)
		bus_dma_tag_destroy(sc->chain_frame_tag);

	/*
	 * Free IO Request memory
	 */
	if (sc->io_request_phys_addr)
		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
	if (sc->io_request_mem != NULL)
		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
	if (sc->io_request_tag != NULL)
		bus_dma_tag_destroy(sc->io_request_tag);

	/*
	 * Free Reply Descriptor memory
	 */
	if (sc->reply_desc_phys_addr)
		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
	if (sc->reply_desc_mem != NULL)
		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
	if (sc->reply_desc_tag != NULL)
		bus_dma_tag_destroy(sc->reply_desc_tag);

	/*
	 * Free event detail memory
	 */
	if (sc->evt_detail_phys_addr)
		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
	if (sc->evt_detail_mem != NULL)
		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
	if (sc->evt_detail_tag != NULL)
		bus_dma_tag_destroy(sc->evt_detail_tag);

	/*
	 * Free PD info memory
	 */
	if (sc->pd_info_phys_addr)
		bus_dmamap_unload(sc->pd_info_tag, sc->pd_info_dmamap);
	if (sc->pd_info_mem != NULL)
		bus_dmamem_free(sc->pd_info_tag, sc->pd_info_mem, sc->pd_info_dmamap);
	if (sc->pd_info_tag != NULL)
		bus_dma_tag_destroy(sc->pd_info_tag);

	/*
	 * Free MFI frames
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			mfi_cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, mfi_cmd);
		}
	}
	if (sc->mficmd_frame_tag != NULL)
		bus_dma_tag_destroy(sc->mficmd_frame_tag);

	/*
	 * Free MPT internal command list
	 */
	max_fw_cmds = sc->max_fw_cmds;
	if (sc->mpt_cmd_list) {
		for (i = 0; i < max_fw_cmds; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
			free(sc->mpt_cmd_list[i], M_MRSAS);
		}
		free(sc->mpt_cmd_list, M_MRSAS);
		sc->mpt_cmd_list = NULL;
	}
	/*
	 * Free MFI internal command list
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			free(sc->mfi_cmd_list[i], M_MRSAS);
		}
		free(sc->mfi_cmd_list, M_MRSAS);
		sc->mfi_cmd_list = NULL;
	}
	/*
	 * Free request descriptor memory
	 */
	free(sc->req_desc, M_MRSAS);
	sc->req_desc = NULL;

	/*
	 * Destroy parent tag
	 */
	if (sc->mrsas_parent_tag != NULL)
		bus_dma_tag_destroy(sc->mrsas_parent_tag);

	/*
	 * Free ctrl_info memory
	 */
	if (sc->ctrl_info != NULL)
		free(sc->ctrl_info, M_MRSAS);
}

/*
 * mrsas_teardown_intr:	Teardown interrupt
 * input:		Adapter instance soft state
 *
 * This function is called from mrsas_detach() to tear down and release the
 * bus interrupt resources.
 */
void
mrsas_teardown_intr(struct mrsas_softc *sc)
{
	int i;

	if (!sc->msix_enable) {
		if (sc->intr_handle[0])
			bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0],
			    sc->intr_handle[0]);
		if (sc->mrsas_irq[0] != NULL)
			bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
			    sc->irq_id[0], sc->mrsas_irq[0]);
		sc->intr_handle[0] = NULL;
	} else {
		for (i = 0; i < sc->msix_vectors; i++) {
			if (sc->intr_handle[i])
				bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
				    sc->intr_handle[i]);

			if (sc->mrsas_irq[i] != NULL)
				bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
				    sc->irq_id[i], sc->mrsas_irq[i]);

			sc->intr_handle[i] = NULL;
		}
		pci_release_msi(sc->mrsas_dev);
	}
}

/*
 * mrsas_suspend:	Suspend entry point
 * input:		Device struct pointer
 *
 * This function is the entry point for system suspend from the OS.
 */
static int
mrsas_suspend(device_t dev)
{
	/* This will be filled when the driver will have hibernation support */
	return (0);
}

/*
 * mrsas_resume:	Resume entry point
 * input:		Device struct pointer
 *
 * This function is the entry point for system resume from the OS.
 */
static int
mrsas_resume(device_t dev)
{
	/* This will be filled when the driver will have hibernation support */
	return (0);
}

/**
 * mrsas_get_softc_instance:	Find softc instance based on cmd type
 *
 * This function will return the softc instance based on cmd type.
 * In some cases the application fires the ioctl on the required management
 * instance without providing a host_no; use cdev->si_drv1 to get the softc
 * instance in that case, else get the softc instance from the host_no
 * provided by the application in the user data.
 */
static struct mrsas_softc *
mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
{
	struct mrsas_softc *sc = NULL;
	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;

	if (cmd == MRSAS_IOC_GET_PCI_INFO) {
		sc = dev->si_drv1;
	} else {
		/*
		 * get the Host number & the softc from data sent by the
		 * Application
		 */
		sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
		if (sc == NULL)
			printf("There is no Controller number %d\n",
			    user_ioc->host_no);
		else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
			mrsas_dprint(sc, MRSAS_FAULT,
			    "Invalid Controller number %d\n", user_ioc->host_no);
	}

	return sc;
}
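/*
 * Note on the MFIIO_PASSTHRU case added below: the mfi(4)-compatible
 * struct mfi_ioc_passthru carries no host number field, so the softc cannot
 * be resolved through mrsas_mgmt_info; the per-device cdev's si_drv1 pointer
 * is used instead.
 */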
/*
 * mrsas_ioctl:	IOCtl commands entry point.
 *
 * This function is the entry point for IOCtls from the OS. It calls the
 * appropriate function for processing depending on the command received.
 */
static int
mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct mrsas_softc *sc;
	int ret = 0, i = 0;
	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;

-	sc = mrsas_get_softc_instance(dev, cmd, arg);
+	switch (cmd) {
+	case MFIIO_PASSTHRU:
+		sc = (struct mrsas_softc *)(dev->si_drv1);
+		break;
+	default:
+		sc = mrsas_get_softc_instance(dev, cmd, arg);
+		break;
+	}
	if (!sc)
		return ENOENT;

	if (sc->remove_in_progress ||
	    (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
		mrsas_dprint(sc, MRSAS_INFO,
		    "Either driver remove or shutdown called or "
		    "HW is in unrecoverable critical error state.\n");
		return ENOENT;
	}
	mtx_lock_spin(&sc->ioctl_lock);
	if (!sc->reset_in_progress) {
		mtx_unlock_spin(&sc->ioctl_lock);
		goto do_ioctl;
	}
	mtx_unlock_spin(&sc->ioctl_lock);
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_ioctl", hz);
	}

do_ioctl:
	switch (cmd) {
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
#ifdef COMPAT_FREEBSD32
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
#endif
		/*
		 * Decrement the Ioctl counting Semaphore before getting an
		 * mfi command
		 */
		sema_wait(&sc->ioctl_count_sema);

		ret = mrsas_passthru(sc, (void *)arg, cmd);

		/* Increment the Ioctl counting semaphore value */
		sema_post(&sc->ioctl_count_sema);

		break;
	case MRSAS_IOC_SCAN_BUS:
		ret = mrsas_bus_scan(sc);
		break;

	case MRSAS_IOC_GET_PCI_INFO:
		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
		mrsas_dprint(sc, MRSAS_INFO,
		    "pci bus no: %d, pci device no: %d, "
		    "pci function no: %d, pci domain ID: %d\n",
		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
		ret = 0;
		break;

+	case MFIIO_PASSTHRU:
+		ret = mrsas_user_command(sc, (struct mfi_ioc_passthru *)arg);
+		break;
+
	default:
		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
		ret = ENOENT;
	}

	return (ret);
}

/*
 * mrsas_poll:	poll entry point for mrsas driver fd
 *
 * This function is the entry point for poll from the OS. It waits for some
 * AEN events to be triggered from the controller and notifies back.
 */
static int
mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct mrsas_softc *sc;
	int revents = 0;

	sc = dev->si_drv1;

	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (sc->mrsas_aen_triggered) {
			revents |= poll_events & (POLLIN | POLLRDNORM);
		}
	}
	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM)) {
			mtx_lock(&sc->aen_lock);
			sc->mrsas_poll_waiting = 1;
			selrecord(td, &sc->mrsas_select);
			mtx_unlock(&sc->aen_lock);
		}
	}
	return revents;
}
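/*
 * mrsas_poll() pairs with the AEN completion path: when an event arrives,
 * the driver sets mrsas_aen_triggered and, if mrsas_poll_waiting was set
 * here, wakes the selector recorded via selrecord(), so a blocked
 * poll(2)/select(2) on the cdev returns readable.
 */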
/*
 * mrsas_setup_irq:	Set up interrupt
 * input:		Adapter instance soft state
 *
 * This function sets up interrupts as a bus resource, with flags indicating
 * resource permitting contemporaneous sharing and for resource to activate
 * atomically.
 */
static int
mrsas_setup_irq(struct mrsas_softc *sc)
{
	if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
		device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");

	else {
		device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
		sc->irq_context[0].sc = sc;
		sc->irq_context[0].MSIxIndex = 0;
		sc->irq_id[0] = 0;
		sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
		    SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
		if (sc->mrsas_irq[0] == NULL) {
			device_printf(sc->mrsas_dev, "Cannot allocate legacy "
			    "interrupt\n");
			return (FAIL);
		}
		if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
		    &sc->irq_context[0], &sc->intr_handle[0])) {
			device_printf(sc->mrsas_dev, "Cannot set up legacy "
			    "interrupt\n");
			return (FAIL);
		}
	}
	return (0);
}

/*
 * mrsas_isr:	ISR entry point
 * input:	argument pointer
 *
 * This function is the interrupt service routine entry point. There are two
 * types of interrupts, state change interrupt and response interrupt. If an
 * interrupt is not ours, we just return.
 */
void
mrsas_isr(void *arg)
{
	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
	struct mrsas_softc *sc = irq_context->sc;
	int status = 0;

	if (sc->mask_interrupts)
		return;

	if (!sc->msix_vectors) {
		status = mrsas_clear_intr(sc);
		if (!status)
			return;
	}
	/* If we are resetting, bail */
	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
		printf(" Entered into ISR when OCR is going active. \n");
		mrsas_clear_intr(sc);
		return;
	}
	/* Process for reply request and clear response interrupt */
	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
		mrsas_clear_intr(sc);

	return;
}
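/*
 * Only the legacy-INTx path consults mrsas_clear_intr() to decide whether
 * the interrupt is ours: MSI-X vectors are not shared, so the status-read
 * round trip can be skipped and completion processing entered directly.
 */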
/*
 * mrsas_complete_cmd:	Process reply request
 * input:		Adapter instance soft state
 *
 * This function is called from mrsas_isr() to process the reply request and
 * clear the response interrupt. Processing of the reply request entails
 * walking through the reply descriptor array for the command request pended
 * from Firmware. We look at the Function field to determine the command type
 * and perform the appropriate action. Before we return, we clear the
 * response interrupt.
 */
int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt, *r1_cmd = NULL;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t reply_descript_type, *sense;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id, data_length;
	int threshold_reply_count = 0;
#if TM_DEBUG
	MR_TASK_MANAGE_REQUEST *mr_tm_req;
	MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
#endif

	/* If we have a hardware error, there is no need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	desc = sc->reply_desc_mem;
	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
	    + sc->last_reply_idx[MSIxIndex];

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/* Find our reply descriptor for the command and process */
	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
		smid = le16toh(reply_desc->SMID);
		cmd_mpt = sc->mpt_cmd_list[smid - 1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.raid_context.status;
		extStatus = scsi_io_req->RaidContext.raid_context.exStatus;
		sense = cmd_mpt->sense;
		data_length = scsi_io_req->DataLength;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_TASK_MGMT:
#if TM_DEBUG
			mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
			mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
			    &mr_tm_req->TmRequest;
			device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
			    "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
#endif
			wakeup_one((void *)&sc->ocr_chan);
			break;
		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			/* R1 load balancing for READ */
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* Fall thru and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			if (cmd_mpt->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
				mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
				    extStatus, le32toh(data_length), sense);
				mrsas_cmd_done(sc, cmd_mpt);
				mrsas_atomic_dec(&sc->fw_outstanding);
			} else {
				/*
				 * If the peer Raid 1/10 fast path failed,
				 * mark IO as failed to the scsi layer.
				 * Overwrite the current status with the
				 * failed status and make sure that if any
				 * command fails, the driver returns a fail
				 * status to CAM.
				 */
				cmd_mpt->cmd_completed = 1;
				r1_cmd = cmd_mpt->peer_cmd;
				if (r1_cmd->cmd_completed) {
					if (r1_cmd->io_request->RaidContext.raid_context.status != MFI_STAT_OK) {
						status = r1_cmd->io_request->RaidContext.raid_context.status;
						extStatus = r1_cmd->io_request->RaidContext.raid_context.exStatus;
						data_length = r1_cmd->io_request->DataLength;
						sense = r1_cmd->sense;
					}
					r1_cmd->ccb_ptr = NULL;
					if (r1_cmd->callout_owner) {
						callout_stop(&r1_cmd->cm_callout);
						r1_cmd->callout_owner = false;
					}
					mrsas_release_mpt_cmd(r1_cmd);
					mrsas_atomic_dec(&sc->fw_outstanding);
					mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
					    extStatus, le32toh(data_length), sense);
					mrsas_cmd_done(sc, cmd_mpt);
					mrsas_atomic_dec(&sc->fw_outstanding);
				}
			}
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			/*
			 * Make sure NOT TO release the mfi command from the called
			 * function's context if it is fired with issue_polled call.
			 * And also make sure that the issue_polled call should only be
			 * used if INTERRUPT IS DISABLED.
			 */
			if (cmd_mfi->frame->hdr.flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
				mrsas_release_mfi_cmd(cmd_mfi);
			else
				mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			break;
		}

		sc->last_reply_idx[MSIxIndex]++;
		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
			sc->last_reply_idx[MSIxIndex] = 0;

		desc->Words = ~((uint64_t)0x00);	/* set it back to all
							 * 0xFFFFFFFFs */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor */
		if (!sc->last_reply_idx[MSIxIndex]) {
			desc = sc->reply_desc_mem;
			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
		} else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to reply post index after completing threshold reply
		 * count and still there are more replies in reply queue
		 * pending to be completed.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			if (sc->msix_enable) {
				if (sc->msix_combined)
					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
					    ((MSIxIndex & 0x7) << 24) |
					    sc->last_reply_idx[MSIxIndex]);
				else
					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
					    sc->last_reply_idx[MSIxIndex]);
			} else
				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
				    reply_post_host_index), sc->last_reply_idx[0]);

			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	if (sc->msix_enable) {
		if (sc->msix_combined) {
			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
			    ((MSIxIndex & 0x7) << 24) |
			    sc->last_reply_idx[MSIxIndex]);
		} else
			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
			    sc->last_reply_idx[MSIxIndex]);
	} else
		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
		    reply_post_host_index), sc->last_reply_idx[0]);

	return (0);
}
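/*
 * Reply-ring bookkeeping above, in brief: each MSI-X index owns a slice of
 * reply_desc_mem; consumed descriptors are reset to all-0xFF (the "unused"
 * sentinel the while loop tests for); last_reply_idx wraps at reply_q_depth;
 * and the reply post host index register is written at most once per
 * THRESHOLD_REPLY_COUNT completions to batch doorbell traffic.
 */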
/*
 * mrsas_map_mpt_cmd_status:	Map FW command status to a CAM status
 * input:			Adapter instance soft state
 *
 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath
 * IO. It checks the command status and maps the appropriate CAM status for
 * the CCB.
 */
void
mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, union ccb *ccb_ptr,
    u_int8_t status, u_int8_t extStatus, u_int32_t data_length,
    u_int8_t *sense)
{
	struct mrsas_softc *sc = cmd->sc;
	u_int8_t *sense_data;

	switch (status) {
	case MFI_STAT_OK:
		ccb_ptr->ccb_h.status = CAM_REQ_CMP;
		break;
	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		sense_data = (u_int8_t *)&ccb_ptr->csio.sense_data;
		if (sense_data) {
			/* For now just copy 18 bytes back */
			memcpy(sense_data, sense, 18);
			ccb_ptr->csio.sense_len = 18;
			ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		break;
	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		if (ccb_ptr->ccb_h.target_lun)
			ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
		else
			ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;
	case MFI_STAT_CONFIG_SEQ_MISMATCH:
		ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
		break;
	default:
		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
		ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
		ccb_ptr->csio.scsi_status = status;
	}
	return;
}

/*
 * mrsas_alloc_mem:	Allocate DMAable memory
 * input:		Adapter instance soft state
 *
 * This function creates the parent DMA tag and allocates DMAable memory. DMA
 * tag describes constraints of DMA mapping. Memory allocated is mapped into
 * Kernel virtual address. Callback argument is physical memory address.
 */
static int
mrsas_alloc_mem(struct mrsas_softc *sc)
{
	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size,
	    evt_detail_size, count, pd_info_size;

	/*
	 * Allocate parent DMA tag
	 */
	if (bus_dma_tag_create(
	    bus_get_dma_tag(sc->mrsas_dev),	/* parent */
	    1,					/* alignment */
	    0,					/* boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE,			/* maxsize */
	    BUS_SPACE_UNRESTRICTED,		/* nsegments */
	    BUS_SPACE_MAXSIZE,			/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->mrsas_parent_tag		/* tag */
	    )) {
		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for version buffer
	 */
	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    verbuf_size,
	    1,
	    verbuf_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->verbuf_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf_mem, verbuf_size);
	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
		return (ENOMEM);
	}
	/*
	 * Allocate IO Request Frames
	 */
	io_req_size = sc->io_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    io_req_size,
	    1,
	    io_req_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->io_request_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
		return (ENOMEM);
	}
	bzero(sc->io_request_mem, io_req_size);
	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Chain Frames
	 */
	chain_frame_size = sc->chain_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    4, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    chain_frame_size,
	    1,
	    chain_frame_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->chain_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frame_mem, chain_frame_size);
	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
		return (ENOMEM);
	}
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	/*
	 * Allocate Reply Descriptor Array
	 */
	reply_desc_size = sc->reply_alloc_sz * count;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    reply_desc_size,
	    1,
	    reply_desc_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->reply_desc_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
		return (ENOMEM);
	}
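	/*
	 * Note: the fixed control structures above are constrained to
	 * BUS_SPACE_MAXADDR_32BIT, consistent with the firmware interface
	 * passing their addresses in 32-bit fields (e.g. the sge32 usage in
	 * the DCMD frames); only the data buffer tag created at the end of
	 * this function allows full 64-bit addresses.
	 */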
Keep in lower 4GB */ sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN; if (bus_dma_tag_create(sc->mrsas_parent_tag, 64, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sense_size, 1, sense_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sense_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem, BUS_DMA_NOWAIT, &sc->sense_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n"); return (ENOMEM); } if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap, sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n"); return (ENOMEM); } /* * Allocate for Event detail structure */ evt_detail_size = sizeof(struct mrsas_evt_detail); if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, evt_detail_size, 1, evt_detail_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->evt_detail_tag)) { device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem, BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) { device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n"); return (ENOMEM); } bzero(sc->evt_detail_mem, evt_detail_size); if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap, sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb, &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n"); return (ENOMEM); } /* * Allocate for PD INFO structure */ pd_info_size = sizeof(struct mrsas_pd_info); if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, pd_info_size, 1, pd_info_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->pd_info_tag)) { device_printf(sc->mrsas_dev, "Cannot create PD INFO tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->pd_info_tag, (void **)&sc->pd_info_mem, BUS_DMA_NOWAIT, &sc->pd_info_dmamap)) { device_printf(sc->mrsas_dev, "Cannot alloc PD INFO buffer memory\n"); return (ENOMEM); } bzero(sc->pd_info_mem, pd_info_size); if (bus_dmamap_load(sc->pd_info_tag, sc->pd_info_dmamap, sc->pd_info_mem, pd_info_size, mrsas_addr_cb, &sc->pd_info_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load PD INFO buffer memory\n"); return (ENOMEM); } /* * Create a dma tag for data buffers; size will be the maximum * possible I/O size (280kB). */ if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, maxphys, sc->max_num_sge, /* nsegments */ maxphys, BUS_DMA_ALLOCNOW, busdma_lock_mutex, &sc->io_lock, &sc->data_tag)) { device_printf(sc->mrsas_dev, "Cannot create data dma tag\n"); return (ENOMEM); } return (0); } /* * mrsas_addr_cb: Callback function of bus_dmamap_load() * input: callback argument, machine dependent type * that describes DMA segments, number of segments, error code * * This function is for the driver to receive mapping information resultant of * the bus_dmamap_load(). The information is actually not being used, but the * address is saved anyway. */ void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *addr; addr = arg; *addr = segs[0].ds_addr; } /* * mrsas_setup_raidmap: Set up RAID map. * input: Adapter instance soft state * * Allocate DMA memory for the RAID maps and perform setup. 
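 *
 * Editor's sketch (an assumption based on how pd_seq_map_id is used in
 * megasas_sync_pd_seq_num() below; the LD map index arithmetic itself is
 * not shown in this section):
 *
 *	sc->ld_drv_map[0/1]  - driver-format maps, plain malloc()
 *	sc->raidmap_mem[0/1] - FW-format DMA maps, sc->max_map_sz each
 *
 * The two copies are expected to alternate, e.g. map_id & 1 selecting the
 * active one, so the FW can fill one copy while the driver reads the other.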
*/ static int mrsas_setup_raidmap(struct mrsas_softc *sc) { int i; for (i = 0; i < 2; i++) { sc->ld_drv_map[i] = (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT); /* Do Error handling */ if (!sc->ld_drv_map[i]) { device_printf(sc->mrsas_dev, "Could not allocate memory for local map"); if (i == 1) free(sc->ld_drv_map[0], M_MRSAS); /* ABORT driver initialization */ goto ABORT; } } for (int i = 0; i < 2; i++) { if (bus_dma_tag_create(sc->mrsas_parent_tag, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sc->max_map_sz, 1, sc->max_map_sz, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->raidmap_tag[i])) { device_printf(sc->mrsas_dev, "Cannot allocate raid map tag.\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->raidmap_tag[i], (void **)&sc->raidmap_mem[i], BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) { device_printf(sc->mrsas_dev, "Cannot allocate raidmap memory.\n"); return (ENOMEM); } bzero(sc->raidmap_mem[i], sc->max_map_sz); if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i], sc->raidmap_mem[i], sc->max_map_sz, mrsas_addr_cb, &sc->raidmap_phys_addr[i], BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n"); return (ENOMEM); } if (!sc->raidmap_mem[i]) { device_printf(sc->mrsas_dev, "Cannot allocate memory for raid map.\n"); return (ENOMEM); } } if (!mrsas_get_map_info(sc)) mrsas_sync_map_info(sc); return (0); ABORT: return (1); } /** * megasas_setup_jbod_map - setup jbod map for FP seq_number. * @sc: Adapter soft state * * Return 0 on success. */ void megasas_setup_jbod_map(struct mrsas_softc *sc) { int i; uint32_t pd_seq_map_sz; pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) { sc->use_seqnum_jbod_fp = 0; return; } if (sc->jbodmap_mem[0]) goto skip_alloc; for (i = 0; i < 2; i++) { if (bus_dma_tag_create(sc->mrsas_parent_tag, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, pd_seq_map_sz, 1, pd_seq_map_sz, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->jbodmap_tag[i])) { device_printf(sc->mrsas_dev, "Cannot allocate jbod map tag.\n"); return; } if (bus_dmamem_alloc(sc->jbodmap_tag[i], (void **)&sc->jbodmap_mem[i], BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) { device_printf(sc->mrsas_dev, "Cannot allocate jbod map memory.\n"); return; } bzero(sc->jbodmap_mem[i], pd_seq_map_sz); if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i], sc->jbodmap_mem[i], pd_seq_map_sz, mrsas_addr_cb, &sc->jbodmap_phys_addr[i], BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n"); return; } if (!sc->jbodmap_mem[i]) { device_printf(sc->mrsas_dev, "Cannot allocate memory for jbod map.\n"); sc->use_seqnum_jbod_fp = 0; return; } } skip_alloc: if (!megasas_sync_pd_seq_num(sc, false) && !megasas_sync_pd_seq_num(sc, true)) sc->use_seqnum_jbod_fp = 1; else sc->use_seqnum_jbod_fp = 0; device_printf(sc->mrsas_dev, "Jbod map is supported\n"); } /* * mrsas_init_fw: Initialize Firmware * input: Adapter soft state * * Calls transition_to_ready() to make sure Firmware is in operational state and * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. It * issues internal commands to get the controller info after the IOC_INIT * command response is received by Firmware. Note: code relating to * get_pdlist, get_ld_list and max_sectors are currently not being used, it * is left here as placeholder. 
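 *
 * Editor's sketch of the call sequence implemented below (all names from
 * this file):
 *
 *	mrsas_transition_to_ready(sc, 0);
 *	mrsas_allocate_msix(sc);	(only if MSI-X is advertised)
 *	mrsas_init_adapter(sc);		(sizing, DMA pools, IOC INIT)
 *	mrsas_alloc_mfi_cmds(sc);
 *	mrsas_get_ctrl_info(sc);
 *	mrsas_setup_raidmap(sc);
 *	megasas_setup_jbod_map(sc);
 *	mrsas_get_pd_list(sc);
 *	mrsas_get_ld_list(sc);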
*/ static int mrsas_init_fw(struct mrsas_softc *sc) { int ret, loop, ocr = 0; u_int32_t max_sectors_1; u_int32_t max_sectors_2; u_int32_t tmp_sectors; u_int32_t scratch_pad_2, scratch_pad_3, scratch_pad_4; int msix_enable = 0; int fw_msix_count = 0; int i, j; /* Make sure Firmware is ready */ ret = mrsas_transition_to_ready(sc, ocr); if (ret != SUCCESS) { return (ret); } if (sc->is_ventura || sc->is_aero) { scratch_pad_3 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_3)); #if VD_EXT_DEBUG device_printf(sc->mrsas_dev, "scratch_pad_3 0x%x\n", scratch_pad_3); #endif sc->maxRaidMapSize = ((scratch_pad_3 >> MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & MR_MAX_RAID_MAP_SIZE_MASK); } /* MSI-x index 0- reply post host index register */ sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET; /* Check if MSI-X is supported while in ready state */ msix_enable = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a; if (msix_enable) { scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_2)); /* Check max MSI-X vectors */ if (sc->device_id == MRSAS_TBOLT) { sc->msix_vectors = (scratch_pad_2 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; fw_msix_count = sc->msix_vectors; } else { /* Invader/Fury supports 96 MSI-X vectors */ sc->msix_vectors = ((scratch_pad_2 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; fw_msix_count = sc->msix_vectors; if ((sc->mrsas_gen3_ctrl && (sc->msix_vectors > 8)) || ((sc->is_ventura || sc->is_aero) && (sc->msix_vectors > 16))) sc->msix_combined = true; /* * Save 1-15 reply post index * address to local memory Index 0 * is already saved from reg offset * MPI2_REPLY_POST_HOST_INDEX_OFFSET */ for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { sc->msix_reg_offset[loop] = MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET + (loop * 0x10); } } /* Don't bother allocating more MSI-X vectors than cpus */ sc->msix_vectors = min(sc->msix_vectors, mp_ncpus); /* Allocate MSI-x vectors */ if (mrsas_allocate_msix(sc) == SUCCESS) sc->msix_enable = 1; else sc->msix_enable = 0; device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vectors, " "Online CPU %d Current MSIX <%d>\n", fw_msix_count, mp_ncpus, sc->msix_vectors); } /* * MSI-X host index 0 is common for all adapters. * It is used for all MPT based Adapters. */ if (sc->msix_combined) { sc->msix_reg_offset[0] = MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET; } if (mrsas_init_adapter(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Adapter initialization failed.\n"); return (1); } if (sc->is_ventura || sc->is_aero) { scratch_pad_4 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_4)); if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT) sc->nvme_page_size = 1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK); device_printf(sc->mrsas_dev, "NVME page size\t: (%d)\n", sc->nvme_page_size); } /* Allocate internal commands for pass-thru */ if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n"); return (1); } sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT); if (!sc->ctrl_info) { device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n"); return (1); } /* * Get the controller info from FW, so that the MAX VD support * availability can be decided.
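 * (Concretely, mrsas_update_ext_vd_details() below maps
 * adapterOperations3.supportMaxExtLDs onto sc->max256vdSupport, which in
 * turn selects MAX_LOGICAL_DRIVES versus MAX_LOGICAL_DRIVES_EXT.)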
*/ if (mrsas_get_ctrl_info(sc)) { device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n"); return (1); } sc->secure_jbod_support = (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD; if (sc->secure_jbod_support) device_printf(sc->mrsas_dev, "FW supports SED \n"); if (sc->use_seqnum_jbod_fp) device_printf(sc->mrsas_dev, "FW supports JBOD Map \n"); if (sc->support_morethan256jbod) device_printf(sc->mrsas_dev, "FW supports JBOD Map Ext \n"); if (mrsas_setup_raidmap(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! " "There seems to be some problem in the controller\n" "Please contact the SUPPORT TEAM if the problem persists\n"); } megasas_setup_jbod_map(sc); memset(sc->target_list, 0, MRSAS_MAX_TM_TARGETS * sizeof(struct mrsas_target)); for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++) sc->target_list[i].target_id = 0xffff; /* For pass-thru, get PD/LD list and controller info */ memset(sc->pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); if (mrsas_get_pd_list(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Get PD list failed.\n"); return (1); } memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS); if (mrsas_get_ld_list(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Get LD list failed.\n"); return (1); } if ((sc->is_ventura || sc->is_aero) && sc->drv_stream_detection) { sc->streamDetectByLD = malloc(sizeof(PTR_LD_STREAM_DETECT) * MAX_LOGICAL_DRIVES_EXT, M_MRSAS, M_NOWAIT); if (!sc->streamDetectByLD) { device_printf(sc->mrsas_dev, "unable to allocate stream detection for pool of LDs\n"); return (1); } for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { sc->streamDetectByLD[i] = malloc(sizeof(LD_STREAM_DETECT), M_MRSAS, M_NOWAIT); if (!sc->streamDetectByLD[i]) { device_printf(sc->mrsas_dev, "unable to allocate stream detect by LD\n"); for (j = 0; j < i; ++j) free(sc->streamDetectByLD[j], M_MRSAS); free(sc->streamDetectByLD, M_MRSAS); sc->streamDetectByLD = NULL; return (1); } memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT)); sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP; } } /* * Compute the max allowed sectors per IO: The controller info has * two limits on max sectors. Driver should use the minimum of these * two. * * 1 << stripe_sz_ops.min = max sectors per strip * * Note that older firmware ( < FW ver 30) didn't report the information * needed to calculate max_sectors_1, so the number always ended up as zero. */ tmp_sectors = 0; max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) * sc->ctrl_info->max_strips_per_io; max_sectors_2 = sc->ctrl_info->max_request_size; tmp_sectors = min(max_sectors_1, max_sectors_2); sc->max_sectors_per_req = (sc->max_num_sge - 1) * MRSAS_PAGE_SIZE / 512; if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors)) sc->max_sectors_per_req = tmp_sectors; sc->disableOnlineCtrlReset = sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; sc->UnevenSpanSupport = sc->ctrl_info->adapterOperations2.supportUnevenSpans; if (sc->UnevenSpanSupport) { device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n", sc->UnevenSpanSupport); if (MR_ValidateMapInfo(sc)) sc->fast_path_io = 1; else sc->fast_path_io = 0; } device_printf(sc->mrsas_dev, "max_fw_cmds: %u max_scsi_cmds: %u\n", sc->max_fw_cmds, sc->max_scsi_cmds); return (0); } /* * mrsas_init_adapter: Initializes the adapter/controller * input: Adapter soft state * * Prepares for the issuing of the IOC Init cmd to FW for initializing the * ROC/controller. The FW register is read to determine the number of * commands that are supported.
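 *
 * A worked example with illustrative numbers (not read from any real
 * adapter): if the scratch pad reports 1024 commands, then after the
 * decrement below max_fw_cmds = 1023 and, with the default 256-byte
 * MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE (an assumption; the macro value
 * is defined in the driver header):
 *
 *	reply_q_depth      = ((1023 + 1 + 15) / 16 * 16) * 2 = 2048
 *	io_frames_alloc_sz = 256 + 256 * (1023 + 1) = 262400 bytes
 *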
All memory allocations for IO are based on * max_cmd. Appropriate calculations are performed in this function. */ int mrsas_init_adapter(struct mrsas_softc *sc) { uint32_t status; u_int32_t scratch_pad_2; int ret; int i = 0; /* Read FW status register */ status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK; /* Decrement the max supported by 1, to correlate with FW */ sc->max_fw_cmds = sc->max_fw_cmds - 1; sc->max_scsi_cmds = sc->max_fw_cmds - MRSAS_MAX_MFI_CMDS; /* Determine allocation size of command frames */ sc->reply_q_depth = ((sc->max_fw_cmds + 1 + 15) / 16 * 16) * 2; sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * sc->max_fw_cmds; sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth); sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1)); scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_2)); mrsas_dprint(sc, MRSAS_TRACE, "%s: sc->reply_q_depth 0x%x," "sc->request_alloc_sz 0x%x, sc->reply_alloc_sz 0x%x," "sc->io_frames_alloc_sz 0x%x\n", __func__, sc->reply_q_depth, sc->request_alloc_sz, sc->reply_alloc_sz, sc->io_frames_alloc_sz); /* * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, the * Firmware supports an extended IO chain frame which is 4 times * larger than the legacy one. Legacy Firmware - frame size is (8 * 128) = 1K; 1M IO Firmware - frame size is (8 * 128 * 4) = 4K. */ if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK) sc->max_chain_frame_sz = ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5) * MEGASAS_1MB_IO; else sc->max_chain_frame_sz = ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5) * MEGASAS_256K_IO; sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * sc->max_fw_cmds; sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16; sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION); sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2; mrsas_dprint(sc, MRSAS_INFO, "max sge: 0x%x, max chain frame size: 0x%x, " "max fw cmd: 0x%x sc->chain_frames_alloc_sz: 0x%x\n", sc->max_num_sge, sc->max_chain_frame_sz, sc->max_fw_cmds, sc->chain_frames_alloc_sz); /* Used for pass thru MFI frame (DCMD) */ sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16; sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - sizeof(MPI2_SGE_IO_UNION)) / 16; int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; for (i = 0; i < count; i++) sc->last_reply_idx[i] = 0; ret = mrsas_alloc_mem(sc); if (ret != SUCCESS) return (ret); ret = mrsas_alloc_mpt_cmds(sc); if (ret != SUCCESS) return (ret); ret = mrsas_ioc_init(sc); if (ret != SUCCESS) return (ret); return (0); } /* * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command * input: Adapter soft state * * Allocates DMAable memory for the IOC Init cmd used to initialize the ROC/controller.
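 *
 * The single (1024 + sizeof(MPI2_IOC_INIT_REQUEST))-byte allocation is
 * laid out exactly as mrsas_ioc_init() below consumes it:
 *
 *	offset 0    - struct mrsas_init_frame (legacy MFI init frame)
 *	offset 1024 - Mpi2IOCInitRequest_t (pointed to by
 *	              queue_info_new_phys_addr_lo)
 *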
*/ int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc) { int ioc_init_size; /* Allocate IOC INIT command */ ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST); if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, ioc_init_size, 1, ioc_init_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->ioc_init_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem, BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n"); return (ENOMEM); } bzero(sc->ioc_init_mem, ioc_init_size); if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap, sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb, &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n"); return (ENOMEM); } return (0); } /* * mrsas_free_ioc_cmd: Allocates memory for IOC Init command * input: Adapter soft state * * Deallocates memory of the IOC Init cmd. */ void mrsas_free_ioc_cmd(struct mrsas_softc *sc) { if (sc->ioc_init_phys_mem) bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap); if (sc->ioc_init_mem != NULL) bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap); if (sc->ioc_init_tag != NULL) bus_dma_tag_destroy(sc->ioc_init_tag); } /* * mrsas_ioc_init: Sends IOC Init command to FW * input: Adapter soft state * * Issues the IOC Init cmd to FW to initialize the ROC/controller. */ int mrsas_ioc_init(struct mrsas_softc *sc) { struct mrsas_init_frame *init_frame; pMpi2IOCInitRequest_t IOCInitMsg; MRSAS_REQUEST_DESCRIPTOR_UNION req_desc; u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; bus_addr_t phys_addr; int i, retcode = 0; u_int32_t scratch_pad_2; /* Allocate memory for the IOC INIT command */ if (mrsas_alloc_ioc_cmd(sc)) { device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n"); return (1); } if (!sc->block_sync_cache) { scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_2)); sc->fw_sync_cache_support = (scratch_pad_2 & MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0; } IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024); IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT; IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER; IOCInitMsg->MsgVersion = htole16(MPI2_VERSION); IOCInitMsg->HeaderVersion = htole16(MPI2_HEADER_VERSION); IOCInitMsg->SystemRequestFrameSize = htole16(MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4); IOCInitMsg->ReplyDescriptorPostQueueDepth = htole16(sc->reply_q_depth); IOCInitMsg->ReplyDescriptorPostQueueAddress = htole64(sc->reply_desc_phys_addr); IOCInitMsg->SystemRequestFrameBaseAddress = htole64(sc->io_request_phys_addr); IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0); IOCInitMsg->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT; init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem; init_frame->cmd = MFI_CMD_INIT; init_frame->cmd_status = 0xFF; init_frame->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); /* driver support Extended MSIX */ if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) { init_frame->driver_operations. 
mfi_capabilities.support_additional_msix = 1; } if (sc->verbuf_mem) { snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n", MRSAS_VERSION); init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr; init_frame->driver_ver_hi = 0; } init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1; init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1; init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1; if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN) init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1; init_frame->driver_operations.reg = htole32(init_frame->driver_operations.reg); phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024; init_frame->queue_info_new_phys_addr_lo = htole32(phys_addr); init_frame->data_xfer_len = htole32(sizeof(Mpi2IOCInitRequest_t)); req_desc.addr.Words = htole64((bus_addr_t)sc->ioc_init_phys_mem); req_desc.MFAIo.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); mrsas_disable_intr(sc); mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n"); mrsas_write_64bit_req_desc(sc, req_desc.addr.u.low, req_desc.addr.u.high); /* * Poll response timer to wait for Firmware response. While this * timer with the DELAY call could block CPU, the time interval for * this is only 1 millisecond. */ if (init_frame->cmd_status == 0xFF) { for (i = 0; i < (max_wait * 1000); i++) { if (init_frame->cmd_status == 0xFF) DELAY(1000); else break; } } if (init_frame->cmd_status == 0) mrsas_dprint(sc, MRSAS_OCR, "IOC INIT response received from FW.\n"); else { if (init_frame->cmd_status == 0xFF) device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait); else device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status); retcode = 1; } if (sc->is_aero) { scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_2)); sc->atomic_desc_support = (scratch_pad_2 & MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0; device_printf(sc->mrsas_dev, "FW supports atomic descriptor: %s\n", sc->atomic_desc_support ? "Yes" : "No"); } mrsas_free_ioc_cmd(sc); return (retcode); } /* * mrsas_alloc_mpt_cmds: Allocates the command packets * input: Adapter instance soft state * * This function allocates the internal commands for IOs. Each command that is * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An * array is allocated with mrsas_mpt_cmd context. The free commands are * maintained in a linked list (cmd pool). SMID value range is from 1 to * max_fw_cmds. */ int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc) { int i, j; u_int32_t max_fw_cmds, count; struct mrsas_mpt_cmd *cmd; pMpi2ReplyDescriptorsUnion_t reply_desc; u_int32_t offset, chain_offset, sense_offset; bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys; u_int8_t *io_req_base, *chain_frame_base, *sense_base; max_fw_cmds = sc->max_fw_cmds; sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT); if (!sc->req_desc) { device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n"); return (ENOMEM); } memset(sc->req_desc, 0, sc->request_alloc_sz); /* * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. * Allocate the dynamic array first and then allocate individual * commands. 
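 * Each command is then pointed at a fixed slice of the DMA pools set up
 * in mrsas_alloc_mem(); the per-command arithmetic in the loop below is:
 *
 *	cmd->io_request  = io_req_base + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i
 *	cmd->chain_frame = chain_frame_base + sc->max_chain_frame_sz * i
 *	cmd->sense       = sense_base + MRSAS_SENSE_LEN * i
 *	cmd->index       = i + 1	(the SMID; 0 is reserved)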
*/ sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds, M_MRSAS, M_NOWAIT); if (!sc->mpt_cmd_list) { device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n"); return (ENOMEM); } memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds); for (i = 0; i < max_fw_cmds; i++) { sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd), M_MRSAS, M_NOWAIT); if (!sc->mpt_cmd_list[i]) { for (j = 0; j < i; j++) free(sc->mpt_cmd_list[j], M_MRSAS); free(sc->mpt_cmd_list, M_MRSAS); sc->mpt_cmd_list = NULL; return (ENOMEM); } } io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; chain_frame_base = (u_int8_t *)sc->chain_frame_mem; chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr; sense_base = (u_int8_t *)sc->sense_mem; sense_base_phys = (bus_addr_t)sc->sense_phys_addr; for (i = 0; i < max_fw_cmds; i++) { cmd = sc->mpt_cmd_list[i]; offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; chain_offset = sc->max_chain_frame_sz * i; sense_offset = MRSAS_SENSE_LEN * i; memset(cmd, 0, sizeof(struct mrsas_mpt_cmd)); cmd->index = i + 1; cmd->ccb_ptr = NULL; cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0); cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX; cmd->sc = sc; cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset); memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST)); cmd->io_request_phys_addr = io_req_base_phys + offset; cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset); cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset; cmd->sense = sense_base + sense_offset; cmd->sense_phys_addr = sense_base_phys + sense_offset; if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) { return (FAIL); } TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next); } /* Initialize reply descriptor array to 0xFFFFFFFF */ reply_desc = sc->reply_desc_mem; count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) { reply_desc->Words = MRSAS_ULONG_MAX; } return (0); } /* * mrsas_write_64bit_req_dsc: Writes 64 bit request descriptor to FW * input: Adapter softstate * request descriptor address low * request descriptor address high */ void mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo, u_int32_t req_desc_hi) { mtx_lock(&sc->pci_lock); mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port), le32toh(req_desc_lo)); mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port), le32toh(req_desc_hi)); mtx_unlock(&sc->pci_lock); } /* * mrsas_fire_cmd: Sends command to FW * input: Adapter softstate * request descriptor address low * request descriptor address high * * This functions fires the command to Firmware by writing to the * inbound_low_queue_port and inbound_high_queue_port. */ void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, u_int32_t req_desc_hi) { if (sc->atomic_desc_support) mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_single_queue_port), le32toh(req_desc_lo)); else mrsas_write_64bit_req_desc(sc, req_desc_lo, req_desc_hi); } /* * mrsas_transition_to_ready: Move FW to Ready state input: * Adapter instance soft state * * During the initialization, FW passes can potentially be in any one of several * possible states. 
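 * (FAULT, WAIT_HANDSHAKE, BOOT_MESSAGE_PENDING, OPERATIONAL and the
 * various init/scan states handled by the switch below.) For every
 * intermediate state the same polling pattern is used, in sketch form:
 *
 *	for (i = 0; i < max_wait * 1000; i++)
 *		if (state unchanged) DELAY(1000); else break;
 *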
If the FW is in an operational or waiting-for-handshake state, the * driver must take steps to bring it to the ready state. Otherwise, it has to * wait for the ready state. */ int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr) { int i; u_int8_t max_wait; u_int32_t val, fw_state; u_int32_t cur_state __unused; u_int32_t abs_state, curr_abs_state; val = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); fw_state = val & MFI_STATE_MASK; max_wait = MRSAS_RESET_WAIT_TIME; if (fw_state != MFI_STATE_READY) device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n"); while (fw_state != MFI_STATE_READY) { abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); switch (fw_state) { case MFI_STATE_FAULT: device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n"); if (ocr) { cur_state = MFI_STATE_FAULT; break; } else return -ENODEV; case MFI_STATE_WAIT_HANDSHAKE: /* Set the CLR bit in inbound doorbell */ mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG); cur_state = MFI_STATE_WAIT_HANDSHAKE; break; case MFI_STATE_BOOT_MESSAGE_PENDING: mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_INIT_HOTPLUG); cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; break; case MFI_STATE_OPERATIONAL: /* * Bring it to READY state; assuming max wait 10 * secs */ mrsas_disable_intr(sc); mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS); for (i = 0; i < max_wait * 1000; i++) { if (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, doorbell)) & 1) DELAY(1000); else break; } cur_state = MFI_STATE_OPERATIONAL; break; case MFI_STATE_UNDEFINED: /* * This state should not last for more than 2 * seconds */ cur_state = MFI_STATE_UNDEFINED; break; case MFI_STATE_BB_INIT: cur_state = MFI_STATE_BB_INIT; break; case MFI_STATE_FW_INIT: cur_state = MFI_STATE_FW_INIT; break; case MFI_STATE_FW_INIT_2: cur_state = MFI_STATE_FW_INIT_2; break; case MFI_STATE_DEVICE_SCAN: cur_state = MFI_STATE_DEVICE_SCAN; break; case MFI_STATE_FLUSH_CACHE: cur_state = MFI_STATE_FLUSH_CACHE; break; default: device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state); return -ENODEV; } /* * The cur_state should not last for more than max_wait secs */ for (i = 0; i < (max_wait * 1000); i++) { fw_state = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & MFI_STATE_MASK); curr_abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); if (abs_state == curr_abs_state) DELAY(1000); else break; } /* * Return error if fw_state hasn't changed after max_wait */ if (curr_abs_state == abs_state) { device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed " "in %d secs\n", fw_state, max_wait); return -ENODEV; } } mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n"); return 0; } /* * mrsas_get_mfi_cmd: Get a cmd from free command pool * input: Adapter soft state * * This function removes an MFI command from the command list. */ struct mrsas_mfi_cmd * mrsas_get_mfi_cmd(struct mrsas_softc *sc) { struct mrsas_mfi_cmd *cmd = NULL; mtx_lock(&sc->mfi_cmd_pool_lock); if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) { cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head); TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next); } mtx_unlock(&sc->mfi_cmd_pool_lock); return cmd; } /* * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter. * input: Adapter Context. * * This function checks the FW status register and the do_timedout_reset flag.
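 * In sketch form, the loop below is:
 *
 *	for (;;) {
 *		msleep(ocr_chan, ..., mrsas_fw_fault_check_delay * hz);
 *		if (FAULT || DCMD timeout || target resets outstanding) {
 *			mrsas_xpt_freeze(sc);
 *			... target reset and/or mrsas_reset_ctrl() ...
 *			mrsas_xpt_release(sc);
 *		}
 *	}
 *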
* It will do OCR/kill adapter if the FW is in a fault state or an IO timeout * has triggered a reset. */ static void mrsas_ocr_thread(void *arg) { struct mrsas_softc *sc; u_int32_t fw_status, fw_state; u_int8_t tm_target_reset_failed = 0; sc = (struct mrsas_softc *)arg; mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__); sc->ocr_thread_active = 1; mtx_lock(&sc->sim_lock); for (;;) { /* Sleep for 1 second and check the queue status */ msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz); if (sc->remove_in_progress || sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) { mrsas_dprint(sc, MRSAS_OCR, "Exit due to %s from %s\n", sc->remove_in_progress ? "Shutdown" : "Hardware critical error", __func__); break; } fw_status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); fw_state = fw_status & MFI_STATE_MASK; if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset || mrsas_atomic_read(&sc->target_reset_outstanding)) { /* First, freeze further IOs to come to the SIM */ mrsas_xpt_freeze(sc); /* If this is an IO timeout then go for target reset */ if (mrsas_atomic_read(&sc->target_reset_outstanding)) { device_printf(sc->mrsas_dev, "Initiating Target RESET " "because of SCSI IO timeout!\n"); /* Let the remaining IOs complete */ msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_reset_targets", 5 * hz); /* Try to reset the target device */ if (mrsas_reset_targets(sc) == FAIL) tm_target_reset_failed = 1; } /* If this is a DCMD timeout or FW fault, * then go for controller reset */ if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed || (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) { if (tm_target_reset_failed) device_printf(sc->mrsas_dev, "Initiating OCR because of " "TM FAILURE!\n"); else device_printf(sc->mrsas_dev, "Initiating OCR " "because of %s!\n", sc->do_timedout_reset ? "DCMD IO Timeout" : "FW fault"); mtx_lock_spin(&sc->ioctl_lock); sc->reset_in_progress = 1; mtx_unlock_spin(&sc->ioctl_lock); sc->reset_count++; /* * Wait for the AEN task to be completed if it is running. */ mtx_unlock(&sc->sim_lock); taskqueue_drain(sc->ev_tq, &sc->ev_task); mtx_lock(&sc->sim_lock); taskqueue_block(sc->ev_tq); /* Try to reset the controller */ mrsas_reset_ctrl(sc, sc->do_timedout_reset); sc->do_timedout_reset = 0; sc->reset_in_progress = 0; tm_target_reset_failed = 0; mrsas_atomic_set(&sc->target_reset_outstanding, 0); memset(sc->target_reset_pool, 0, sizeof(sc->target_reset_pool)); taskqueue_unblock(sc->ev_tq); } /* Now allow IOs to come to the SIM */ mrsas_xpt_release(sc); } } mtx_unlock(&sc->sim_lock); sc->ocr_thread_active = 0; mrsas_kproc_exit(0); } /* * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR. * input: Adapter Context. * * This function clears the reply descriptors so that, post OCR, the driver * and FW do not see stale history. */ void mrsas_reset_reply_desc(struct mrsas_softc *sc) { int i, count; pMpi2ReplyDescriptorsUnion_t reply_desc; count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; for (i = 0; i < count; i++) sc->last_reply_idx[i] = 0; reply_desc = sc->reply_desc_mem; for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) { reply_desc->Words = MRSAS_ULONG_MAX; } } /* * mrsas_reset_ctrl: Core function to OCR/Kill adapter. * input: Adapter Context. * * This function will run from thread context so that it can sleep. 1. Do not * handle OCR if FW is in HW critical error. 2. Wait for outstanding commands * to complete for 180 seconds. 3. 
If #2 does not find any outstanding * commands, the controller is in a working state, so skip OCR. Otherwise, do * OCR/kill adapter based on the flag disableOnlineCtrlReset. 4. At the start of the * OCR, return all SCSI commands that have a ccb_ptr back to the CAM layer. 5. Post * OCR, re-fire management commands and move the controller to the operational state. */ int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason) { int retval = SUCCESS, i, j, retry = 0; u_int32_t host_diag, abs_state, status_reg, reset_adapter; union ccb *ccb; struct mrsas_mfi_cmd *mfi_cmd; struct mrsas_mpt_cmd *mpt_cmd; union mrsas_evt_class_locale class_locale; MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) { device_printf(sc->mrsas_dev, "mrsas: Hardware critical error, returning FAIL.\n"); return FAIL; } mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT; mrsas_disable_intr(sc); msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz); /* First try waiting for commands to complete */ if (mrsas_wait_for_outstanding(sc, reset_reason)) { mrsas_dprint(sc, MRSAS_OCR, "resetting adapter from %s.\n", __func__); /* Now return commands back to the CAM layer */ mtx_unlock(&sc->sim_lock); for (i = 0; i < sc->max_fw_cmds; i++) { mpt_cmd = sc->mpt_cmd_list[i]; if (mpt_cmd->peer_cmd) { mrsas_dprint(sc, MRSAS_OCR, "R1 FP command [%d] - (mpt_cmd) %p, (peer_cmd) %p\n", i, mpt_cmd, mpt_cmd->peer_cmd); } if (mpt_cmd->ccb_ptr) { if (mpt_cmd->callout_owner) { ccb = (union ccb *)(mpt_cmd->ccb_ptr); ccb->ccb_h.status = CAM_SCSI_BUS_RESET; mrsas_cmd_done(sc, mpt_cmd); } else { mpt_cmd->ccb_ptr = NULL; mrsas_release_mpt_cmd(mpt_cmd); } } } mrsas_atomic_set(&sc->fw_outstanding, 0); mtx_lock(&sc->sim_lock); status_reg = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); abs_state = status_reg & MFI_STATE_MASK; reset_adapter = status_reg & MFI_RESET_ADAPTER; if (sc->disableOnlineCtrlReset || (abs_state == MFI_STATE_FAULT && !reset_adapter)) { /* Reset not supported, kill adapter */ mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n"); mrsas_kill_hba(sc); retval = FAIL; goto out; } /* Now try to reset the chip */ for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) { mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_FLUSH_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_1ST_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_2ND_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_3RD_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_4TH_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_5TH_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_6TH_KEY_VALUE); /* Check that the diag write enable (DRWE) bit is on */ host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, fusion_host_diag)); retry = 0; while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { DELAY(100 * 1000); host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, fusion_host_diag)); if (retry++ == 100) { mrsas_dprint(sc, MRSAS_OCR, "Host diag unlock failed!\n"); break; } } if (!(host_diag & HOST_DIAG_WRITE_ENABLE)) continue; /* Send chip reset command */ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag), host_diag | HOST_DIAG_RESET_ADAPTER); DELAY(3000 * 1000); /* 
Make sure reset adapter bit is cleared */ host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, fusion_host_diag)); retry = 0; while (host_diag & HOST_DIAG_RESET_ADAPTER) { DELAY(100 * 1000); host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, fusion_host_diag)); if (retry++ == 1000) { mrsas_dprint(sc, MRSAS_OCR, "Diag reset adapter never cleared!\n"); break; } } if (host_diag & HOST_DIAG_RESET_ADAPTER) continue; abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & MFI_STATE_MASK; retry = 0; while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) { DELAY(100 * 1000); abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & MFI_STATE_MASK; } if (abs_state <= MFI_STATE_FW_INIT) { mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT," " state = 0x%x\n", abs_state); continue; } /* Wait for FW to become ready */ if (mrsas_transition_to_ready(sc, 1)) { mrsas_dprint(sc, MRSAS_OCR, "mrsas: Failed to transition controller to ready.\n"); continue; } mrsas_reset_reply_desc(sc); if (mrsas_ioc_init(sc)) { mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n"); continue; } for (j = 0; j < sc->max_fw_cmds; j++) { mpt_cmd = sc->mpt_cmd_list[j]; if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) { mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx]; /* If not an IOCTL then release the command else re-fire */ if (!mfi_cmd->sync_cmd) { mrsas_release_mfi_cmd(mfi_cmd); } else { req_desc = mrsas_get_request_desc(sc, mfi_cmd->cmd_id.context.smid - 1); mrsas_dprint(sc, MRSAS_OCR, "Re-fire command DCMD opcode 0x%x index %d\n", mfi_cmd->frame->dcmd.opcode, j); if (!req_desc) device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n"); else mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high); } } } /* Reset load balance info */ memset(sc->load_balance_info, 0, sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT); if (mrsas_get_ctrl_info(sc)) { mrsas_kill_hba(sc); retval = FAIL; goto out; } if (!mrsas_get_map_info(sc)) mrsas_sync_map_info(sc); megasas_setup_jbod_map(sc); if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) { for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) { memset(sc->streamDetectByLD[j], 0, sizeof(LD_STREAM_DETECT)); sc->streamDetectByLD[j]->mruBitMap = MR_STREAM_BITMAP; } } mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); mrsas_enable_intr(sc); sc->adprecovery = MRSAS_HBA_OPERATIONAL; /* Register AEN with FW for last sequence number */ class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; mtx_unlock(&sc->sim_lock); if (mrsas_register_aen(sc, sc->last_seq_num, class_locale.word)) { device_printf(sc->mrsas_dev, "ERROR: AEN registration FAILED from OCR !!! " "Further events from the controller cannot be notified."
"Either there is some problem in the controller" "or the controller does not support AEN.\n" "Please contact to the SUPPORT TEAM if the problem persists\n"); } mtx_lock(&sc->sim_lock); /* Adapter reset completed successfully */ device_printf(sc->mrsas_dev, "Reset successful\n"); retval = SUCCESS; goto out; } /* Reset failed, kill the adapter */ device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n"); mrsas_kill_hba(sc); retval = FAIL; } else { mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); mrsas_enable_intr(sc); sc->adprecovery = MRSAS_HBA_OPERATIONAL; } out: mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); mrsas_dprint(sc, MRSAS_OCR, "Reset Exit with %d.\n", retval); return retval; } /* * mrsas_kill_hba: Kill HBA when OCR is not supported * input: Adapter Context. * * This function will kill HBA when OCR is not supported. */ void mrsas_kill_hba(struct mrsas_softc *sc) { sc->adprecovery = MRSAS_HW_CRITICAL_ERROR; DELAY(1000 * 1000); mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__); mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_STOP_ADP); /* Flush */ mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)); mrsas_complete_outstanding_ioctls(sc); } /** * mrsas_complete_outstanding_ioctls Complete pending IOCTLS after kill_hba * input: Controller softc * * Returns void */ void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc) { int i; struct mrsas_mpt_cmd *cmd_mpt; struct mrsas_mfi_cmd *cmd_mfi; u_int32_t count, MSIxIndex; count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; for (i = 0; i < sc->max_fw_cmds; i++) { cmd_mpt = sc->mpt_cmd_list[i]; if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) { cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx]; if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) { for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) mrsas_complete_mptmfi_passthru(sc, cmd_mfi, cmd_mpt->io_request->RaidContext.raid_context.status); } } } } /* * mrsas_wait_for_outstanding: Wait for outstanding commands * input: Adapter Context. * * This function will wait for 180 seconds for outstanding commands to be * completed. */ int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason) { int i, outstanding, retval = 0; u_int32_t fw_state, count, MSIxIndex; for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) { if (sc->remove_in_progress) { mrsas_dprint(sc, MRSAS_OCR, "Driver remove or shutdown called.\n"); retval = 1; goto out; } /* Check if firmware is in fault state */ fw_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & MFI_STATE_MASK; if (fw_state == MFI_STATE_FAULT) { mrsas_dprint(sc, MRSAS_OCR, "Found FW in FAULT state, will reset adapter.\n"); count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; mtx_unlock(&sc->sim_lock); for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) mrsas_complete_cmd(sc, MSIxIndex); mtx_lock(&sc->sim_lock); retval = 1; goto out; } if (check_reason == MFI_DCMD_TIMEOUT_OCR) { mrsas_dprint(sc, MRSAS_OCR, "DCMD IO TIMEOUT detected, will reset adapter.\n"); retval = 1; goto out; } outstanding = mrsas_atomic_read(&sc->fw_outstanding); if (!outstanding) goto out; if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d " "commands to complete\n", i, outstanding); count = sc->msix_vectors > 0 ? 
sc->msix_vectors : 1; mtx_unlock(&sc->sim_lock); for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) mrsas_complete_cmd(sc, MSIxIndex); mtx_lock(&sc->sim_lock); } DELAY(1000 * 1000); } if (mrsas_atomic_read(&sc->fw_outstanding)) { mrsas_dprint(sc, MRSAS_OCR, " pending commands remain after waiting," " will reset adapter.\n"); retval = 1; } out: return retval; } /* * mrsas_release_mfi_cmd: Return a cmd to free command pool * input: Command packet for return to free cmd pool * * This function returns the MFI & MPT command to the command list. */ void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi) { struct mrsas_softc *sc = cmd_mfi->sc; struct mrsas_mpt_cmd *cmd_mpt; mtx_lock(&sc->mfi_cmd_pool_lock); /* * Release the mpt command (if at all it is allocated * associated with the mfi command */ if (cmd_mfi->cmd_id.context.smid) { mtx_lock(&sc->mpt_cmd_pool_lock); /* Get the mpt cmd from mfi cmd frame's smid value */ cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1]; cmd_mpt->flags = 0; cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX; TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next); mtx_unlock(&sc->mpt_cmd_pool_lock); } /* Release the mfi command */ cmd_mfi->ccb_ptr = NULL; cmd_mfi->cmd_id.frame_count = 0; TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next); mtx_unlock(&sc->mfi_cmd_pool_lock); return; } /* * mrsas_get_controller_info: Returns FW's controller structure * input: Adapter soft state * Controller information structure * * Issues an internal command (DCMD) to get the FW's controller structure. This * information is mainly used to find out the maximum IO transfer per command * supported by the FW. */ static int mrsas_get_ctrl_info(struct mrsas_softc *sc) { int retcode = 0; u_int8_t do_ocr = 1; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); return -ENOMEM; } dcmd = &cmd->frame->dcmd; if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n"); mrsas_release_mfi_cmd(cmd); return -ENOMEM; } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = htole32(sizeof(struct mrsas_ctrl_info)); dcmd->opcode = htole32(MR_DCMD_CTRL_GET_INFO); dcmd->sgl.sge32[0].phys_addr = htole32(sc->ctlr_info_phys_addr & 0xFFFFFFFF); dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_ctrl_info)); if (!sc->mask_interrupts) retcode = mrsas_issue_blocked_cmd(sc, cmd); else retcode = mrsas_issue_polled(sc, cmd); if (retcode == ETIMEDOUT) goto dcmd_timeout; else { memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info)); le32_to_cpus(&sc->ctrl_info->properties.OnOffProperties); le32_to_cpus(&sc->ctrl_info->adapterOperations2); le32_to_cpus(&sc->ctrl_info->adapterOperations3); le16_to_cpus(&sc->ctrl_info->adapterOperations4); } do_ocr = 0; mrsas_update_ext_vd_details(sc); sc->use_seqnum_jbod_fp = sc->ctrl_info->adapterOperations3.useSeqNumJbodFP; sc->support_morethan256jbod = sc->ctrl_info->adapterOperations4.supportPdMapTargetId; sc->disableOnlineCtrlReset = sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; dcmd_timeout: mrsas_free_ctlr_info_cmd(sc); if (do_ocr) sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; if (!sc->mask_interrupts) mrsas_release_mfi_cmd(cmd); return (retcode); } /* * mrsas_update_ext_vd_details : Update 
details w.r.t Extended VD * input: * sc - Controller's softc */ static void mrsas_update_ext_vd_details(struct mrsas_softc *sc) { u_int32_t ventura_map_sz = 0; sc->max256vdSupport = sc->ctrl_info->adapterOperations3.supportMaxExtLDs; /* Below is additional check to address future FW enhancement */ if (sc->ctrl_info->max_lds > 64) sc->max256vdSupport = 1; sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS * MRSAS_MAX_DEV_PER_CHANNEL; sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS * MRSAS_MAX_DEV_PER_CHANNEL; if (sc->max256vdSupport) { sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; } else { sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES; sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; } if (sc->maxRaidMapSize) { ventura_map_sz = sc->maxRaidMapSize * MR_MIN_MAP_SIZE; sc->current_map_sz = ventura_map_sz; sc->max_map_sz = ventura_map_sz; } else { sc->old_map_sz = sizeof(MR_FW_RAID_MAP) + (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1)); sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT); sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz); if (sc->max256vdSupport) sc->current_map_sz = sc->new_map_sz; else sc->current_map_sz = sc->old_map_sz; } sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP_ALL); #if VD_EXT_DEBUG device_printf(sc->mrsas_dev, "sc->maxRaidMapSize 0x%x \n", sc->maxRaidMapSize); device_printf(sc->mrsas_dev, "new_map_sz = 0x%x, old_map_sz = 0x%x, " "ventura_map_sz = 0x%x, current_map_sz = 0x%x " "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx \n", sc->new_map_sz, sc->old_map_sz, ventura_map_sz, sc->current_map_sz, sc->drv_map_sz, sizeof(MR_DRV_RAID_MAP_ALL)); #endif } /* * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command * input: Adapter soft state * * Allocates DMAable memory for the controller info internal command. */ int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc) { int ctlr_info_size; /* Allocate get controller info command */ ctlr_info_size = sizeof(struct mrsas_ctrl_info); if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, ctlr_info_size, 1, ctlr_info_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->ctlr_info_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem, BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n"); return (ENOMEM); } if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap, sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb, &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n"); return (ENOMEM); } memset(sc->ctlr_info_mem, 0, ctlr_info_size); return (0); } /* * mrsas_free_ctlr_info_cmd: Free memory for controller info command * input: Adapter soft state * * Deallocates memory of the get controller info cmd. */ void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc) { if (sc->ctlr_info_phys_addr) bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap); if (sc->ctlr_info_mem != NULL) bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap); if (sc->ctlr_info_tag != NULL) bus_dma_tag_destroy(sc->ctlr_info_tag); } /* * mrsas_issue_polled: Issues a polling command * inputs: Adapter soft state * Command packet to be issued * * This function is for posting of internal commands to Firmware. MFI requires * the cmd_status to be set to 0xFF before posting. 
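 * (The 0xFF value is the sentinel the poll below relies on; the FW
 * overwrites cmd_status when it completes the frame, so the wait reduces
 * to, in sketch form:
 *
 *	while (frame_hdr->cmd_status == 0xFF && !timed out)
 *		DELAY(1000);
 *
 * with the timeout derived from max_wait.)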
The maximum wait time of * the poll response timer is 180 seconds. */ int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { struct mrsas_header *frame_hdr = &cmd->frame->hdr; u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; int i, retcode = SUCCESS; frame_hdr->cmd_status = 0xFF; frame_hdr->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); /* Issue the frame using inbound queue port */ if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n"); return (1); } /* * Poll response timer to wait for Firmware response. While this * timer with the DELAY call could block CPU, the time interval for * this is only 1 millisecond. */ if (frame_hdr->cmd_status == 0xFF) { for (i = 0; i < (max_wait * 1000); i++) { if (frame_hdr->cmd_status == 0xFF) DELAY(1000); else break; } } if (frame_hdr->cmd_status == 0xFF) { device_printf(sc->mrsas_dev, "DCMD timed out after %d " "seconds from %s\n", max_wait, __func__); device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n", cmd->frame->dcmd.opcode); retcode = ETIMEDOUT; } return (retcode); } /* * mrsas_issue_dcmd: Issues a MFI Pass thru cmd * input: Adapter soft state mfi cmd pointer * * This function is called by mrsas_issue_blocked_cmd() and * mrsas_issue_polled(), to build the MPT command and then fire the command * to Firmware. */ int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; req_desc = mrsas_build_mpt_cmd(sc, cmd); if (!req_desc) { device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n"); return (1); } mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high); return (0); } /* * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd * input: Adapter soft state mfi cmd to build * * This function is called by mrsas_issue_dcmd() to build the MPT-MFI passthru * command and prepares the MPT command to send to Firmware. */ MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; u_int16_t index; if (mrsas_build_mptmfi_passthru(sc, cmd)) { device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n"); return NULL; } index = cmd->cmd_id.context.smid; req_desc = mrsas_get_request_desc(sc, index - 1); if (!req_desc) return NULL; req_desc->addr.Words = 0; req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); req_desc->SCSIIO.SMID = htole16(index); return (req_desc); } /* * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command * input: Adapter soft state mfi cmd pointer * * The MPT command and the io_request are setup as a passthru command. The SGE * chain address is set to frame_phys_addr of the MFI command. */ u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd) { MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain; PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req; struct mrsas_mpt_cmd *mpt_cmd; struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr; mpt_cmd = mrsas_get_mpt_cmd(sc); if (!mpt_cmd) return (1); /* Save the smid. To be used for returning the cmd */ mfi_cmd->cmd_id.context.smid = mpt_cmd->index; mpt_cmd->sync_cmd_idx = mfi_cmd->index; /* * For cmds where the flag is set, store the flag and check on * completion. For cmds with this flag, don't call * mrsas_complete_cmd.
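 *
 * Editor's note - the frame layout produced below, in sketch form:
 *
 *	MPT io_request (MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST)
 *	    SGL.IeeeChain --(chain element)--> mfi_cmd->frame_phys_addr,
 *	    i.e. the legacy MFI frame, length sc->max_chain_frame_sz.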
*/ if (frame_hdr->flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)) mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; io_req = mpt_cmd->io_request; if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) { pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL; sgl_ptr_end += sc->max_sge_in_main_msg - 1; sgl_ptr_end->Flags = 0; } mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain; io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST; io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4; io_req->ChainOffset = sc->chain_offset_mfi_pthru; mpi25_ieee_chain->Address = htole64(mfi_cmd->frame_phys_addr); mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; mpi25_ieee_chain->Length = htole32(sc->max_chain_frame_sz); return (0); } /* * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds * input: Adapter soft state Command to be issued * * This function waits on an event for the command to be returned from the ISR. * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing * internal and ioctl commands. */ int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; unsigned long total_time = 0; int retcode = SUCCESS; /* Initialize cmd_status */ cmd->cmd_status = 0xFF; /* Build MPT-MFI command for issue to FW */ if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n"); return (1); } sc->chan = (void *)&cmd; while (1) { if (cmd->cmd_status == 0xFF) { tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); } else break; if (!cmd->sync_cmd) { /* cmd->sync will be set for an IOCTL * command */ total_time++; if (total_time >= max_wait) { device_printf(sc->mrsas_dev, "Internal command timed out after %d seconds.\n", max_wait); retcode = 1; break; } } } sc->chan = NULL; if (cmd->cmd_status == 0xFF) { device_printf(sc->mrsas_dev, "DCMD timed out after %d " "seconds from %s\n", max_wait, __func__); device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n", cmd->frame->dcmd.opcode); retcode = ETIMEDOUT; } return (retcode); } /* * mrsas_complete_mptmfi_passthru: Completes a command * input: @sc: Adapter soft state * @cmd: Command to be completed * @status: cmd completion status * * This function is called from mrsas_complete_cmd() after an interrupt is * received from Firmware, and io_request->Function is * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST. */ void mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd, u_int8_t status) { struct mrsas_header *hdr = &cmd->frame->hdr; u_int8_t cmd_status = cmd->frame->hdr.cmd_status; /* Reset the retry counter for future re-tries */ cmd->retry_for_fw_reset = 0; if (cmd->ccb_ptr) cmd->ccb_ptr = NULL; switch (hdr->cmd) { case MFI_CMD_INVALID: device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n"); break; case MFI_CMD_PD_SCSI_IO: case MFI_CMD_LD_SCSI_IO: /* * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been * issued either through an IO path or an IOCTL path. If it * was via IOCTL, we will send it to internal completion. 
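 * (cmd->sync_cmd is set by the blocked/IOCTL issue path; in that case
 * mrsas_wakeup() below wakes the tsleep()er in mrsas_issue_blocked_cmd().)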
*/ if (cmd->sync_cmd) { cmd->sync_cmd = 0; mrsas_wakeup(sc, cmd); break; } case MFI_CMD_SMP: case MFI_CMD_STP: case MFI_CMD_DCMD: /* Check for LD map update */ if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) && (cmd->frame->dcmd.mbox.b[1] == 1)) { sc->fast_path_io = 0; mtx_lock(&sc->raidmap_lock); sc->map_update_cmd = NULL; if (cmd_status != 0) { if (cmd_status != MFI_STAT_NOT_FOUND) device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status); else { mrsas_release_mfi_cmd(cmd); mtx_unlock(&sc->raidmap_lock); break; } } else sc->map_id++; mrsas_release_mfi_cmd(cmd); if (MR_ValidateMapInfo(sc)) sc->fast_path_io = 0; else sc->fast_path_io = 1; mrsas_sync_map_info(sc); mtx_unlock(&sc->raidmap_lock); break; } if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO || cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) { sc->mrsas_aen_triggered = 0; } /* FW has an updated PD sequence */ if ((cmd->frame->dcmd.opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && (cmd->frame->dcmd.mbox.b[0] == 1)) { mtx_lock(&sc->raidmap_lock); sc->jbod_seq_cmd = NULL; mrsas_release_mfi_cmd(cmd); if (cmd_status == MFI_STAT_OK) { sc->pd_seq_map_id++; /* Re-register a pd sync seq num cmd */ if (megasas_sync_pd_seq_num(sc, true)) sc->use_seqnum_jbod_fp = 0; } else { sc->use_seqnum_jbod_fp = 0; device_printf(sc->mrsas_dev, "Jbod map sync failed, status=%x\n", cmd_status); } mtx_unlock(&sc->raidmap_lock); break; } /* See if got an event notification */ if (le32toh(cmd->frame->dcmd.opcode) == MR_DCMD_CTRL_EVENT_WAIT) mrsas_complete_aen(sc, cmd); else mrsas_wakeup(sc, cmd); break; case MFI_CMD_ABORT: /* Command issued to abort another cmd return */ mrsas_complete_abort(sc, cmd); break; default: device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd); break; } } /* * mrsas_wakeup: Completes an internal command * input: Adapter soft state * Command to be completed * * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait * timer is started. This function is called from * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up * from the command wait. */ void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { cmd->cmd_status = cmd->frame->io.cmd_status; if (cmd->cmd_status == 0xFF) cmd->cmd_status = 0; sc->chan = (void *)&cmd; wakeup_one((void *)&sc->chan); return; } /* * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller input: * Adapter soft state Shutdown/Hibernate * * This function issues a DCMD internal command to Firmware to initiate shutdown * of the controller. 
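*
* For example, the driver's detach/shutdown paths are expected to invoke
* this as mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN), with the
* hibernate opcode as the other variant (opcode names per mrsas.h).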
*/ static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode) { struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) return; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n"); return; } if (sc->aen_cmd) mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd); if (sc->map_update_cmd) mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd); if (sc->jbod_seq_cmd) mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd); dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; dcmd->flags = MFI_FRAME_DIR_NONE; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; dcmd->opcode = opcode; device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n"); mrsas_issue_blocked_cmd(sc, cmd); mrsas_release_mfi_cmd(cmd); return; } /* * mrsas_flush_cache: Requests FW to flush all its caches input: * Adapter soft state * * This function issues a DCMD internal command to Firmware to initiate * flushing of all caches. */ static void mrsas_flush_cache(struct mrsas_softc *sc) { struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) return; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n"); return; } dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; dcmd->flags = MFI_FRAME_DIR_NONE; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; mrsas_issue_blocked_cmd(sc, cmd); mrsas_release_mfi_cmd(cmd); return; } int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend) { int retcode = 0; u_int8_t do_ocr = 1; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; uint32_t pd_seq_map_sz; struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; bus_addr_t pd_seq_h; pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for jbod map info cmd.\n"); return 1; } dcmd = &cmd->frame->dcmd; pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)]; pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)]; if (!pd_sync) { device_printf(sc->mrsas_dev, "Failed to alloc mem for jbod map info.\n"); mrsas_release_mfi_cmd(cmd); return (ENOMEM); } memset(pd_sync, 0, pd_seq_map_sz); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = htole32(pd_seq_map_sz); dcmd->opcode = htole32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO); dcmd->sgl.sge32[0].phys_addr = htole32(pd_seq_h & 0xFFFFFFFF); dcmd->sgl.sge32[0].length = htole32(pd_seq_map_sz); if (pend) { dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG; dcmd->flags = htole16(MFI_FRAME_DIR_WRITE); sc->jbod_seq_cmd = cmd; if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Fail to send sync map info command.\n"); return 1; } else return 0; } else dcmd->flags = htole16(MFI_FRAME_DIR_READ); retcode = mrsas_issue_polled(sc, cmd); if (retcode == ETIMEDOUT) goto dcmd_timeout; if (le32toh(pd_sync->count) > MAX_PHYSICAL_DEVICES) { device_printf(sc->mrsas_dev, "driver supports max %d JBOD, but FW reports %d\n", MAX_PHYSICAL_DEVICES,
pd_sync->count); retcode = -EINVAL; } if (!retcode) sc->pd_seq_map_id++; do_ocr = 0; dcmd_timeout: if (do_ocr) sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; return (retcode); } /* * mrsas_get_map_info: Load and validate RAID map input: * Adapter instance soft state * * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load * and validate RAID map. It returns 0 if successful, 1 otherwise. */ static int mrsas_get_map_info(struct mrsas_softc *sc) { uint8_t retcode = 0; sc->fast_path_io = 0; if (!mrsas_get_ld_map_info(sc)) { retcode = MR_ValidateMapInfo(sc); if (retcode == 0) { sc->fast_path_io = 1; return 0; } } return 1; } /* * mrsas_get_ld_map_info: Get FW's ld_map structure input: * Adapter instance soft state * * Issues an internal command (DCMD) to get the FW's LD map * structure. */ static int mrsas_get_ld_map_info(struct mrsas_softc *sc) { int retcode = 0; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; void *map; bus_addr_t map_phys_addr = 0; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for ld map info cmd.\n"); return 1; } dcmd = &cmd->frame->dcmd; map = (void *)sc->raidmap_mem[(sc->map_id & 1)]; map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)]; if (!map) { device_printf(sc->mrsas_dev, "Failed to alloc mem for ld map info.\n"); mrsas_release_mfi_cmd(cmd); return (ENOMEM); } memset(map, 0, sc->max_map_sz); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = htole16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = htole32(sc->current_map_sz); dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO); dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF); dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz); retcode = mrsas_issue_polled(sc, cmd); if (retcode == ETIMEDOUT) sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; return (retcode); } /* * mrsas_sync_map_info: Sync the LD map info with Firmware * input: Adapter instance soft state * * Issues an internal command (DCMD) to write the driver's LD target sync * data back to Firmware.
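*
* Conceptually, each entry written back to Firmware is a MR_LD_TARGET_SYNC
* tuple built from the current map, as the loop below does:
*
*	ld_sync[i].targetId = MR_GetLDTgtId(i, map);
*	ld_sync[i].seqNum   = MR_LdRaidGet(i, map)->seqNum;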
*/ static int mrsas_sync_map_info(struct mrsas_softc *sc) { int retcode = 0, i; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; uint32_t num_lds; MR_LD_TARGET_SYNC *target_map = NULL; MR_DRV_RAID_MAP_ALL *map; MR_LD_RAID *raid; MR_LD_TARGET_SYNC *ld_sync; bus_addr_t map_phys_addr = 0; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n"); return ENOMEM; } map = sc->ld_drv_map[sc->map_id & 1]; num_lds = map->raidMap.ldCount; dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1]; memset(target_map, 0, sc->max_map_sz); map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1]; ld_sync = (MR_LD_TARGET_SYNC *) target_map; for (i = 0; i < num_lds; i++, ld_sync++) { raid = MR_LdRaidGet(i, map); ld_sync->targetId = MR_GetLDTgtId(i, map); ld_sync->seqNum = raid->seqNum; } dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = htole16(MFI_FRAME_DIR_WRITE); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = htole32(sc->current_map_sz); dcmd->mbox.b[0] = num_lds; dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG; dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO); dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF); dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz); sc->map_update_cmd = cmd; if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Fail to send sync map info command.\n"); return (1); } return (retcode); } /* Input: dcmd.opcode - MR_DCMD_PD_GET_INFO * dcmd.mbox.s[0] - deviceId for this physical drive * dcmd.sge IN - ptr to returned MR_PD_INFO structure * Desc: Firmware returns the physical drive info structure */ static void mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id) { int retcode; u_int8_t do_ocr = 1; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for get PD info cmd\n"); return; } dcmd = &cmd->frame->dcmd; memset(sc->pd_info_mem, 0, sizeof(struct mrsas_pd_info)); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->mbox.s[0] = htole16(device_id); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = htole32(sizeof(struct mrsas_pd_info)); dcmd->opcode = htole32(MR_DCMD_PD_GET_INFO); dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->pd_info_phys_addr & 0xFFFFFFFF); dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_pd_info)); if (!sc->mask_interrupts) retcode = mrsas_issue_blocked_cmd(sc, cmd); else retcode = mrsas_issue_polled(sc, cmd); if (retcode == ETIMEDOUT) goto dcmd_timeout; sc->target_list[device_id].interface_type = le16toh(sc->pd_info_mem->state.ddf.pdType.intf); do_ocr = 0; dcmd_timeout: if (do_ocr) sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; if (!sc->mask_interrupts) mrsas_release_mfi_cmd(cmd); } /* * mrsas_add_target: Add target ID of system PD/VD to driver's data structure. * sc: Adapter's soft state * target_id: Unique target id per controller (managed by driver) * for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1) * for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS * return: void * Description: This function will be called whenever system PD or VD is created.
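*
* For example, a VD whose firmware target id is 5 is tracked at driver
* target id MRSAS_MAX_PD + 5 (256 + 5 = 261 with the current defines).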
*/ static void mrsas_add_target(struct mrsas_softc *sc, u_int16_t target_id) { sc->target_list[target_id].target_id = target_id; device_printf(sc->mrsas_dev, "%s created target ID: 0x%x\n", (target_id < MRSAS_MAX_PD ? "System PD" : "VD"), (target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD))); /* * Fire the DCMD to get pd_info only when interrupts are enabled, and * only for system PDs */ if (!sc->mask_interrupts && sc->pd_info_mem && (target_id < MRSAS_MAX_PD)) mrsas_get_pd_info(sc, target_id); } /* * mrsas_remove_target: Remove target ID of system PD/VD from driver's data structure. * sc: Adapter's soft state * target_id: Unique target id per controller (managed by driver) * for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1) * for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS * return: void * Description: This function will be called whenever system PD or VD is deleted */ static void mrsas_remove_target(struct mrsas_softc *sc, u_int16_t target_id) { sc->target_list[target_id].target_id = 0xffff; device_printf(sc->mrsas_dev, "%s deleted target ID: 0x%x\n", (target_id < MRSAS_MAX_PD ? "System PD" : "VD"), (target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD))); } /* * mrsas_get_pd_list: Returns FW's PD list structure input: * Adapter soft state * * Issues an internal command (DCMD) to get the FW's controller PD list * structure. This information is mainly used to find out about the system * PDs supported by Firmware. */ static int mrsas_get_pd_list(struct mrsas_softc *sc) { int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size; u_int8_t do_ocr = 1; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; struct MR_PD_LIST *pd_list_mem; struct MR_PD_ADDRESS *pd_addr; bus_addr_t pd_list_phys_addr = 0; struct mrsas_tmp_dcmd *tcmd; u_int16_t dev_id; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for get PD list cmd\n"); return 1; } dcmd = &cmd->frame->dcmd; tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) { device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get PD list cmd\n"); mrsas_release_mfi_cmd(cmd); mrsas_free_tmp_dcmd(tcmd); free(tcmd, M_MRSAS); return (ENOMEM); } else { pd_list_mem = tcmd->tmp_dcmd_mem; pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr; } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; dcmd->mbox.b[1] = 0; dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = htole16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST)); dcmd->opcode = htole32(MR_DCMD_PD_LIST_QUERY); dcmd->sgl.sge32[0].phys_addr = htole32(pd_list_phys_addr & 0xFFFFFFFF); dcmd->sgl.sge32[0].length = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST)); if (!sc->mask_interrupts) retcode = mrsas_issue_blocked_cmd(sc, cmd); else retcode = mrsas_issue_polled(sc, cmd); if (retcode == ETIMEDOUT) goto dcmd_timeout; /* Get the instance PD list */ pd_count = MRSAS_MAX_PD; pd_addr = pd_list_mem->addr; if (le32toh(pd_list_mem->count) < pd_count) { memset(sc->local_pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); for (pd_index = 0; pd_index < le32toh(pd_list_mem->count); pd_index++) { dev_id = le16toh(pd_addr->deviceId); sc->local_pd_list[dev_id].tid = dev_id; sc->local_pd_list[dev_id].driveType = le16toh(pd_addr->scsiDevType);
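/* Every PD returned by this exposed-to-host query is marked SYSTEM. */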
sc->local_pd_list[dev_id].driveState = MR_PD_STATE_SYSTEM; if (sc->target_list[dev_id].target_id == 0xffff) mrsas_add_target(sc, dev_id); pd_addr++; } for (pd_index = 0; pd_index < MRSAS_MAX_PD; pd_index++) { if ((sc->local_pd_list[pd_index].driveState != MR_PD_STATE_SYSTEM) && (sc->target_list[pd_index].target_id != 0xffff)) { mrsas_remove_target(sc, pd_index); } } /* * Use a mutex/spinlock if the pd_list component size increases beyond * 32 bits. */ memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list)); do_ocr = 0; } dcmd_timeout: mrsas_free_tmp_dcmd(tcmd); free(tcmd, M_MRSAS); if (do_ocr) sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; if (!sc->mask_interrupts) mrsas_release_mfi_cmd(cmd); return (retcode); } /* * mrsas_get_ld_list: Returns FW's LD list structure input: * Adapter soft state * * Issues an internal command (DCMD) to get the FW's controller LD list * structure. This information is mainly used to find out about the LDs * supported by the FW. */ static int mrsas_get_ld_list(struct mrsas_softc *sc) { int ld_list_size, retcode = 0, ld_index = 0, ids = 0, drv_tgt_id; u_int8_t do_ocr = 1; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; struct MR_LD_LIST *ld_list_mem; bus_addr_t ld_list_phys_addr = 0; struct mrsas_tmp_dcmd *tcmd; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for get LD list cmd\n"); return 1; } dcmd = &cmd->frame->dcmd; tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); ld_list_size = sizeof(struct MR_LD_LIST); if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) { device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get LD list cmd\n"); mrsas_release_mfi_cmd(cmd); mrsas_free_tmp_dcmd(tcmd); free(tcmd, M_MRSAS); return (ENOMEM); } else { ld_list_mem = tcmd->tmp_dcmd_mem; ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr; } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); if (sc->max256vdSupport) dcmd->mbox.b[0] = 1; dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->data_xfer_len = htole32(sizeof(struct MR_LD_LIST)); dcmd->opcode = htole32(MR_DCMD_LD_GET_LIST); dcmd->sgl.sge32[0].phys_addr = htole32(ld_list_phys_addr); dcmd->sgl.sge32[0].length = htole32(sizeof(struct MR_LD_LIST)); dcmd->pad_0 = 0; if (!sc->mask_interrupts) retcode = mrsas_issue_blocked_cmd(sc, cmd); else retcode = mrsas_issue_polled(sc, cmd); if (retcode == ETIMEDOUT) goto dcmd_timeout; #if VD_EXT_DEBUG printf("Number of LDs %d\n", ld_list_mem->ldCount); #endif /* Get the instance LD list */ if (le32toh(ld_list_mem->ldCount) <= sc->fw_supported_vd_count) { sc->CurLdCount = le32toh(ld_list_mem->ldCount); memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); for (ld_index = 0; ld_index < le32toh(ld_list_mem->ldCount); ld_index++) { ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; drv_tgt_id = ids + MRSAS_MAX_PD; if (ld_list_mem->ldList[ld_index].state != 0) { sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; if (sc->target_list[drv_tgt_id].target_id == 0xffff) mrsas_add_target(sc, drv_tgt_id); } else { if (sc->target_list[drv_tgt_id].target_id != 0xffff) mrsas_remove_target(sc, drv_tgt_id); } } do_ocr = 0; } dcmd_timeout: mrsas_free_tmp_dcmd(tcmd); free(tcmd, M_MRSAS); if (do_ocr) sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; if (!sc->mask_interrupts) mrsas_release_mfi_cmd(cmd); return (retcode); } /* * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input: * Adapter soft state Temp command Size of allocation * * Allocates
DMAable memory for a temporary internal command. The allocated * memory is initialized to all zeros upon successful loading of the dma * mapped memory. */ int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd, int size) { if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, BUS_DMA_ALLOCNOW, NULL, NULL, &tcmd->tmp_dcmd_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem, BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n"); return (ENOMEM); } if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap, tcmd->tmp_dcmd_mem, size, mrsas_addr_cb, &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n"); return (ENOMEM); } memset(tcmd->tmp_dcmd_mem, 0, size); return (0); } /* * mrsas_free_tmp_dcmd: Free memory for temporary command input: * temporary dcmd pointer * * Deallocates memory of the temporary command for use in the construction of * the internal DCMD. */ void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp) { if (tmp->tmp_dcmd_phys_addr) bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap); if (tmp->tmp_dcmd_mem != NULL) bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap); if (tmp->tmp_dcmd_tag != NULL) bus_dma_tag_destroy(tmp->tmp_dcmd_tag); } /* * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input: * Adapter soft state Previously issued cmd to be aborted * * This function is used to abort previously issued commands, such as AEN and * RAID map sync map commands. The abort command is sent as a DCMD internal * command and subsequently the driver will wait for a return status. The * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds. */ static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd_to_abort) { struct mrsas_mfi_cmd *cmd; struct mrsas_abort_frame *abort_fr; u_int8_t retcode = 0; unsigned long total_time = 0; u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n"); return (1); } abort_fr = &cmd->frame->abort; /* Prepare and issue the abort frame */ abort_fr->cmd = MFI_CMD_ABORT; abort_fr->cmd_status = 0xFF; abort_fr->flags = 0; abort_fr->abort_context = cmd_to_abort->index; abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; abort_fr->abort_mfi_phys_addr_hi = 0; cmd->sync_cmd = 1; cmd->cmd_status = 0xFF; if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Fail to send abort command.\n"); return (1); } /* Wait for this cmd to complete */ sc->chan = (void *)&cmd; while (1) { if (cmd->cmd_status == 0xFF) { tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); } else break; total_time++; if (total_time >= max_wait) { device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait); retcode = 1; break; } } cmd->sync_cmd = 0; mrsas_release_mfi_cmd(cmd); return (retcode); } /* * mrsas_complete_abort: Completes aborting a command input: * Adapter soft state Cmd that was issued to abort another cmd * * The mrsas_issue_blocked_abort_cmd() function waits for the command status to * change after sending the command. This function is called from * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated. 
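*
* The wait/wake pairing is the stock tsleep()/wakeup_one() idiom on
* sc->chan, roughly:
*
*	waiter: while (cmd->cmd_status == 0xFF) tsleep(&sc->chan, 0, "mrsas_sleep", hz);
*	waker:  cmd->cmd_status = 0; wakeup_one(&sc->chan);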
*/ void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { if (cmd->sync_cmd) { cmd->sync_cmd = 0; cmd->cmd_status = 0; sc->chan = (void *)&cmd; wakeup_one((void *)&sc->chan); } return; } /* * mrsas_aen_handler: AEN processing callback function from thread context * input: Adapter soft state * * Asynchronous event handler */ void mrsas_aen_handler(struct mrsas_softc *sc) { union mrsas_evt_class_locale class_locale; int doscan = 0; u_int32_t seq_num; int error, fail_aen = 0; if (sc == NULL) { printf("invalid instance!\n"); return; } if (sc->remove_in_progress || sc->reset_in_progress) { device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n", __func__, __LINE__); return; } if (sc->evt_detail_mem) { switch (sc->evt_detail_mem->code) { case MR_EVT_PD_INSERTED: fail_aen = mrsas_get_pd_list(sc); if (!fail_aen) mrsas_bus_scan_sim(sc, sc->sim_1); else goto skip_register_aen; break; case MR_EVT_PD_REMOVED: fail_aen = mrsas_get_pd_list(sc); if (!fail_aen) mrsas_bus_scan_sim(sc, sc->sim_1); else goto skip_register_aen; break; case MR_EVT_LD_OFFLINE: case MR_EVT_CFG_CLEARED: case MR_EVT_LD_DELETED: mrsas_bus_scan_sim(sc, sc->sim_0); break; case MR_EVT_LD_CREATED: fail_aen = mrsas_get_ld_list(sc); if (!fail_aen) mrsas_bus_scan_sim(sc, sc->sim_0); else goto skip_register_aen; break; case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: case MR_EVT_FOREIGN_CFG_IMPORTED: case MR_EVT_LD_STATE_CHANGE: doscan = 1; break; case MR_EVT_CTRL_PROP_CHANGED: fail_aen = mrsas_get_ctrl_info(sc); if (fail_aen) goto skip_register_aen; break; default: break; } } else { device_printf(sc->mrsas_dev, "invalid evt_detail\n"); return; } if (doscan) { fail_aen = mrsas_get_pd_list(sc); if (!fail_aen) { mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n"); mrsas_bus_scan_sim(sc, sc->sim_1); } else goto skip_register_aen; fail_aen = mrsas_get_ld_list(sc); if (!fail_aen) { mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n"); mrsas_bus_scan_sim(sc, sc->sim_0); } else goto skip_register_aen; } seq_num = sc->evt_detail_mem->seq_num + 1; /* Register AEN with FW for latest sequence number plus 1 */ class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; if (sc->aen_cmd != NULL) return; mtx_lock(&sc->aen_lock); error = mrsas_register_aen(sc, seq_num, class_locale.word); mtx_unlock(&sc->aen_lock); if (error) device_printf(sc->mrsas_dev, "register aen failed error %x\n", error); skip_register_aen: return; } /* * mrsas_complete_aen: Completes AEN command * input: Adapter soft state * Cmd that was issued to abort another cmd * * This function will be called from ISR and will continue event processing from * thread context by enqueuing task in ev_tq (callback function * "mrsas_aen_handler"). 
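*
* Deferring to the taskqueue keeps the bus rescans and further DCMD issue
* done by mrsas_aen_handler() out of interrupt context; the
* taskqueue_enqueue() of sc->ev_task is the only AEN work done at
* interrupt level.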
*/ void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { /* * Don't signal app if it is just an aborted previously registered * aen */ if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) { sc->mrsas_aen_triggered = 1; mtx_lock(&sc->aen_lock); if (sc->mrsas_poll_waiting) { sc->mrsas_poll_waiting = 0; selwakeup(&sc->mrsas_select); } mtx_unlock(&sc->aen_lock); } else cmd->abort_aen = 0; sc->aen_cmd = NULL; mrsas_release_mfi_cmd(cmd); taskqueue_enqueue(sc->ev_tq, &sc->ev_task); return; } static device_method_t mrsas_methods[] = { DEVMETHOD(device_probe, mrsas_probe), DEVMETHOD(device_attach, mrsas_attach), DEVMETHOD(device_detach, mrsas_detach), DEVMETHOD(device_shutdown, mrsas_shutdown), DEVMETHOD(device_suspend, mrsas_suspend), DEVMETHOD(device_resume, mrsas_resume), DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), {0, 0} }; static driver_t mrsas_driver = { "mrsas", mrsas_methods, sizeof(struct mrsas_softc) }; DRIVER_MODULE(mrsas, pci, mrsas_driver, 0, 0); MODULE_DEPEND(mrsas, cam, 1, 1, 1); diff --git a/sys/dev/mrsas/mrsas.h b/sys/dev/mrsas/mrsas.h index 76894bbdbc4e..fa9a5cf118e6 100644 --- a/sys/dev/mrsas/mrsas.h +++ b/sys/dev/mrsas/mrsas.h @@ -1,3651 +1,3654 @@ /* * Copyright (c) 2015, AVAGO Tech. All rights reserved. Authors: Marian Choy * Copyright (c) 2014, LSI Corp. All rights reserved. Authors: Marian Choy * Support: freebsdraid@avagotech.com * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. 2. Redistributions * in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. 3. Neither the name of the * nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing * official policies,either expressed or implied, of the FreeBSD Project. 
* * Send feedback to: Mail to: AVAGO TECHNOLOGIES, 1621 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD * */ #include __FBSDID("$FreeBSD$"); #ifndef MRSAS_H #define MRSAS_H #include /* defines used in kernel.h */ #include #include #include #include #include /* types used in module initialization */ #include /* cdevsw struct */ #include /* uio struct */ #include #include /* structs, prototypes for pci bus * stuff */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* For pci_get macros! */ #include #define IOCTL_SEMA_DESCRIPTION "mrsas semaphore for MFI pool" /* * Device IDs and PCI */ #define MRSAS_TBOLT 0x005b #define MRSAS_INVADER 0x005d #define MRSAS_FURY 0x005f #define MRSAS_INTRUDER 0x00ce #define MRSAS_INTRUDER_24 0x00cf #define MRSAS_CUTLASS_52 0x0052 #define MRSAS_CUTLASS_53 0x0053 /* Gen3.5 Controller */ #define MRSAS_VENTURA 0x0014 #define MRSAS_CRUSADER 0x0015 #define MRSAS_HARPOON 0x0016 #define MRSAS_TOMCAT 0x0017 #define MRSAS_VENTURA_4PORT 0x001B #define MRSAS_CRUSADER_4PORT 0x001C #define MRSAS_AERO_10E0 0x10E0 #define MRSAS_AERO_10E1 0x10E1 #define MRSAS_AERO_10E2 0x10E2 #define MRSAS_AERO_10E3 0x10E3 #define MRSAS_AERO_10E4 0x10E4 #define MRSAS_AERO_10E5 0x10E5 #define MRSAS_AERO_10E6 0x10E6 #define MRSAS_AERO_10E7 0x10E7 /* * Firmware State Defines */ #define MRSAS_FWSTATE_MAXCMD_MASK 0x0000FFFF #define MRSAS_FWSTATE_SGE_MASK 0x00FF0000 #define MRSAS_FW_STATE_CHNG_INTERRUPT 1 /* * Message Frame Defines */ #define MRSAS_SENSE_LEN 96 #define MRSAS_FUSION_MAX_RESET_TRIES 3 /* * Miscellaneous Defines */ #define BYTE_ALIGNMENT 1 #define MRSAS_MAX_NAME_LENGTH 32 #define MRSAS_VERSION "07.709.04.00-fbsd" #define MRSAS_ULONG_MAX 0xFFFFFFFFFFFFFFFF #define MRSAS_DEFAULT_TIMEOUT 0x14 /* Temporarily set */ #define DONE 0 #define MRSAS_PAGE_SIZE 4096 #define MRSAS_RESET_NOTICE_INTERVAL 5 #define MRSAS_IO_TIMEOUT 180000 /* 180 second timeout */ #define MRSAS_LDIO_QUEUE_DEPTH 70 /* 70 percent as default */ #define THRESHOLD_REPLY_COUNT 50 #define MAX_MSIX_COUNT 128 #define MAX_STREAMS_TRACKED 8 #define MR_STREAM_BITMAP 0x76543210 #define BITS_PER_INDEX_STREAM 4 /* number of bits per index in U32 TrackStream */ #define STREAM_MASK ((1 << BITS_PER_INDEX_STREAM) - 1) #define ZERO_LAST_STREAM 0x0fffffff /* * Boolean types */ enum err { SUCCESS, FAIL }; MALLOC_DECLARE(M_MRSAS); SYSCTL_DECL(_hw_mrsas); #define MRSAS_INFO (1 << 0) #define MRSAS_TRACE (1 << 1) #define MRSAS_FAULT (1 << 2) #define MRSAS_OCR (1 << 3) #define MRSAS_TOUT MRSAS_OCR #define MRSAS_AEN (1 << 4) #define MRSAS_PRL11 (1 << 5) #define mrsas_dprint(sc, level, msg, args...)
\ do { \ if (sc->mrsas_debug & level) \ device_printf(sc->mrsas_dev, msg, ##args); \ } while (0) #define le32_to_cpus(x) do { *((u_int32_t *)(x)) = le32toh((*(u_int32_t *)x)); } while (0) #define le16_to_cpus(x) do { *((u_int16_t *)(x)) = le16toh((*(u_int16_t *)x)); } while (0) /**************************************************************************** * Raid Context structure which describes MegaRAID specific IO Parameters * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames ****************************************************************************/ typedef struct _RAID_CONTEXT { #if _BYTE_ORDER == _LITTLE_ENDIAN u_int8_t Type:4; u_int8_t nseg:4; #else u_int8_t nseg:4; u_int8_t Type:4; #endif u_int8_t resvd0; u_int16_t timeoutValue; u_int8_t regLockFlags; u_int8_t resvd1; u_int16_t VirtualDiskTgtId; u_int64_t regLockRowLBA; u_int32_t regLockLength; u_int16_t nextLMId; u_int8_t exStatus; u_int8_t status; u_int8_t RAIDFlags; u_int8_t numSGE; u_int16_t configSeqNum; u_int8_t spanArm; u_int8_t priority; /* 0x1D MR_PRIORITY_RANGE */ u_int8_t numSGEExt; /* 0x1E 1M IO support */ u_int8_t resvd2; /* 0x1F */ } RAID_CONTEXT; /* * Raid Context structure which describes ventura MegaRAID specific IO Parameters * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames */ typedef struct _RAID_CONTEXT_G35 { #if _BYTE_ORDER == _LITTLE_ENDIAN u_int16_t Type:4; u_int16_t nseg:4; u_int16_t resvd0:8; #else u_int16_t resvd0:8; u_int16_t nseg:4; u_int16_t Type:4; #endif u_int16_t timeoutValue; union { struct { #if _BYTE_ORDER == _LITTLE_ENDIAN u_int16_t reserved:1; u_int16_t sld:1; u_int16_t c2f:1; u_int16_t fwn:1; u_int16_t sqn:1; u_int16_t sbs:1; u_int16_t rw:1; u_int16_t log:1; u_int16_t cpuSel:4; u_int16_t setDivert:4; #else u_int16_t setDivert:4; u_int16_t cpuSel:4; u_int16_t log:1; u_int16_t rw:1; u_int16_t sbs:1; u_int16_t sqn:1; u_int16_t fwn:1; u_int16_t c2f:1; u_int16_t sld:1; u_int16_t reserved:1; #endif } bits; u_int16_t s; } routingFlags; u_int16_t VirtualDiskTgtId; u_int64_t regLockRowLBA; u_int32_t regLockLength; union { u_int16_t nextLMId; u_int16_t peerSMID; } smid; u_int8_t exStatus; u_int8_t status; u_int8_t RAIDFlags; u_int8_t spanArm; u_int16_t configSeqNum; #if _BYTE_ORDER == _LITTLE_ENDIAN u_int16_t numSGE:12; u_int16_t reserved:3; u_int16_t streamDetected:1; #else u_int16_t streamDetected:1; u_int16_t reserved:3; u_int16_t numSGE:12; #endif u_int8_t resvd2[2]; } RAID_CONTEXT_G35; typedef union _RAID_CONTEXT_UNION { RAID_CONTEXT raid_context; RAID_CONTEXT_G35 raid_context_g35; } RAID_CONTEXT_UNION, *PRAID_CONTEXT_UNION; /************************************************************************* * MPI2 Defines ************************************************************************/ #define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */ #define MPI2_WHOINIT_HOST_DRIVER (0x04) #define MPI2_VERSION_MAJOR (0x02) #define MPI2_VERSION_MINOR (0x00) #define MPI2_VERSION_MAJOR_MASK (0xFF00) #define MPI2_VERSION_MAJOR_SHIFT (8) #define MPI2_VERSION_MINOR_MASK (0x00FF) #define MPI2_VERSION_MINOR_SHIFT (0) #define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ MPI2_VERSION_MINOR) #define MPI2_HEADER_VERSION_UNIT (0x10) #define MPI2_HEADER_VERSION_DEV (0x00) #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) #define MPI2_HEADER_VERSION_DEV_MASK (0x00FF) #define MPI2_HEADER_VERSION_DEV_SHIFT (0) #define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | MPI2_HEADER_VERSION_DEV) #define
MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) #define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000) #define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400) #define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003) #define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200) #define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100) #define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004) #define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */ #define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01) #define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x03) #define MPI2_REQ_DESCRIPT_FLAGS_FP_IO (0x06) #define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00) #define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02) #define MPI2_SCSIIO_CONTROL_WRITE (0x01000000) #define MPI2_SCSIIO_CONTROL_READ (0x02000000) #define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E) #define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F) #define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00) #define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F) #define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0) #define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004) #define MPI2_WRSEQ_1ST_KEY_VALUE (0xF) #define MPI2_WRSEQ_2ND_KEY_VALUE (0x4) #define MPI2_WRSEQ_3RD_KEY_VALUE (0xB) #define MPI2_WRSEQ_4TH_KEY_VALUE (0x2) #define MPI2_WRSEQ_5TH_KEY_VALUE (0x7) #define MPI2_WRSEQ_6TH_KEY_VALUE (0xD) #ifndef MPI2_POINTER #define MPI2_POINTER * #endif /*************************************** * MPI2 Structures ***************************************/ typedef struct _MPI25_IEEE_SGE_CHAIN64 { u_int64_t Address; u_int32_t Length; u_int16_t Reserved1; u_int8_t NextChainOffset; u_int8_t Flags; } MPI25_IEEE_SGE_CHAIN64, MPI2_POINTER PTR_MPI25_IEEE_SGE_CHAIN64, Mpi25IeeeSgeChain64_t, MPI2_POINTER pMpi25IeeeSgeChain64_t; typedef struct _MPI2_SGE_SIMPLE_UNION { u_int32_t FlagsLength; union { u_int32_t Address32; u_int64_t Address64; } u; } MPI2_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_SIMPLE_UNION, Mpi2SGESimpleUnion_t, MPI2_POINTER pMpi2SGESimpleUnion_t; typedef struct { u_int8_t CDB[20]; /* 0x00 */ u_int32_t PrimaryReferenceTag; /* 0x14 */ u_int16_t PrimaryApplicationTag;/* 0x18 */ u_int16_t PrimaryApplicationTagMask; /* 0x1A */ u_int32_t TransferLength; /* 0x1C */ } MPI2_SCSI_IO_CDB_EEDP32, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_EEDP32, Mpi2ScsiIoCdbEedp32_t, MPI2_POINTER pMpi2ScsiIoCdbEedp32_t; typedef struct _MPI2_SGE_CHAIN_UNION { u_int16_t Length; u_int8_t NextChainOffset; u_int8_t Flags; union { u_int32_t Address32; u_int64_t Address64; } u; } MPI2_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_SGE_CHAIN_UNION, Mpi2SGEChainUnion_t, MPI2_POINTER pMpi2SGEChainUnion_t; typedef struct _MPI2_IEEE_SGE_SIMPLE32 { u_int32_t Address; u_int32_t FlagsLength; } MPI2_IEEE_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE32, Mpi2IeeeSgeSimple32_t, MPI2_POINTER pMpi2IeeeSgeSimple32_t; typedef struct _MPI2_IEEE_SGE_SIMPLE64 { u_int64_t Address; u_int32_t Length; u_int16_t Reserved1; u_int8_t Reserved2; u_int8_t Flags; } MPI2_IEEE_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE64, Mpi2IeeeSgeSimple64_t, MPI2_POINTER pMpi2IeeeSgeSimple64_t; typedef union _MPI2_IEEE_SGE_SIMPLE_UNION { MPI2_IEEE_SGE_SIMPLE32 Simple32; MPI2_IEEE_SGE_SIMPLE64 Simple64; } MPI2_IEEE_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE_UNION, Mpi2IeeeSgeSimpleUnion_t, MPI2_POINTER pMpi2IeeeSgeSimpleUnion_t; typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32; typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64; typedef union _MPI2_IEEE_SGE_CHAIN_UNION { MPI2_IEEE_SGE_CHAIN32 Chain32; MPI2_IEEE_SGE_CHAIN64 Chain64; } MPI2_IEEE_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_CHAIN_UNION, 
Mpi2IeeeSgeChainUnion_t, MPI2_POINTER pMpi2IeeeSgeChainUnion_t; typedef union _MPI2_SGE_IO_UNION { MPI2_SGE_SIMPLE_UNION MpiSimple; MPI2_SGE_CHAIN_UNION MpiChain; MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; } MPI2_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_SGE_IO_UNION, Mpi2SGEIOUnion_t, MPI2_POINTER pMpi2SGEIOUnion_t; typedef union { u_int8_t CDB32[32]; MPI2_SCSI_IO_CDB_EEDP32 EEDP32; MPI2_SGE_SIMPLE_UNION SGE; } MPI2_SCSI_IO_CDB_UNION, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_UNION, Mpi2ScsiIoCdb_t, MPI2_POINTER pMpi2ScsiIoCdb_t; /**************************************************************************** * * SCSI Task Management messages * ****************************************************************************/ /*SCSI Task Management Request Message */ typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST { u_int16_t DevHandle; /*0x00 */ u_int8_t ChainOffset; /*0x02 */ u_int8_t Function; /*0x03 */ u_int8_t Reserved1; /*0x04 */ u_int8_t TaskType; /*0x05 */ u_int8_t Reserved2; /*0x06 */ u_int8_t MsgFlags; /*0x07 */ u_int8_t VP_ID; /*0x08 */ u_int8_t VF_ID; /*0x09 */ u_int16_t Reserved3; /*0x0A */ u_int8_t LUN[8]; /*0x0C */ u_int32_t Reserved4[7]; /*0x14 */ u_int16_t TaskMID; /*0x30 */ u_int16_t Reserved5; /*0x32 */ } MPI2_SCSI_TASK_MANAGE_REQUEST; /*SCSI Task Management Reply Message */ typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY { u_int16_t DevHandle; /*0x00 */ u_int8_t MsgLength; /*0x02 */ u_int8_t Function; /*0x03 */ u_int8_t ResponseCode; /*0x04 */ u_int8_t TaskType; /*0x05 */ u_int8_t Reserved1; /*0x06 */ u_int8_t MsgFlags; /*0x07 */ u_int8_t VP_ID; /*0x08 */ u_int8_t VF_ID; /*0x09 */ u_int16_t Reserved2; /*0x0A */ u_int16_t Reserved3; /*0x0C */ u_int16_t IOCStatus; /*0x0E */ u_int32_t IOCLogInfo; /*0x10 */ u_int32_t TerminationCount; /*0x14 */ u_int32_t ResponseInfo; /*0x18 */ } MPI2_SCSI_TASK_MANAGE_REPLY; typedef struct _MR_TM_REQUEST { char request[128]; } MR_TM_REQUEST; typedef struct _MR_TM_REPLY { char reply[128]; } MR_TM_REPLY; /* SCSI Task Management Request Message */ typedef struct _MR_TASK_MANAGE_REQUEST { /*To be type casted to struct MPI2_SCSI_TASK_MANAGE_REQUEST */ MR_TM_REQUEST TmRequest; union { struct { #if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t isTMForLD:1; u_int32_t isTMForPD:1; u_int32_t reserved1:30; #else u_int32_t reserved1:30; u_int32_t isTMForPD:1; u_int32_t isTMForLD:1; #endif u_int32_t reserved2; } tmReqFlags; MR_TM_REPLY TMReply; } uTmReqReply; } MR_TASK_MANAGE_REQUEST; /* TaskType values */ #define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01) #define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02) #define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03) #define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05) #define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06) #define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07) #define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08) #define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09) #define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A) /* ResponseCode values */ #define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00) #define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02) #define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04) #define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05) #define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08) #define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09) #define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A) #define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80) /* * RAID SCSI IO Request Message Total SGE count will be one less than * _MPI2_SCSI_IO_REQUEST */ typedef struct 
_MPI2_RAID_SCSI_IO_REQUEST { u_int16_t DevHandle; /* 0x00 */ u_int8_t ChainOffset; /* 0x02 */ u_int8_t Function; /* 0x03 */ u_int16_t Reserved1; /* 0x04 */ u_int8_t Reserved2; /* 0x06 */ u_int8_t MsgFlags; /* 0x07 */ u_int8_t VP_ID; /* 0x08 */ u_int8_t VF_ID; /* 0x09 */ u_int16_t Reserved3; /* 0x0A */ u_int32_t SenseBufferLowAddress;/* 0x0C */ u_int16_t SGLFlags; /* 0x10 */ u_int8_t SenseBufferLength; /* 0x12 */ u_int8_t Reserved4; /* 0x13 */ u_int8_t SGLOffset0; /* 0x14 */ u_int8_t SGLOffset1; /* 0x15 */ u_int8_t SGLOffset2; /* 0x16 */ u_int8_t SGLOffset3; /* 0x17 */ u_int32_t SkipCount; /* 0x18 */ u_int32_t DataLength; /* 0x1C */ u_int32_t BidirectionalDataLength; /* 0x20 */ u_int16_t IoFlags; /* 0x24 */ u_int16_t EEDPFlags; /* 0x26 */ u_int32_t EEDPBlockSize; /* 0x28 */ u_int32_t SecondaryReferenceTag;/* 0x2C */ u_int16_t SecondaryApplicationTag; /* 0x30 */ u_int16_t ApplicationTagTranslationMask; /* 0x32 */ u_int8_t LUN[8]; /* 0x34 */ u_int32_t Control; /* 0x3C */ MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */ RAID_CONTEXT_UNION RaidContext; /* 0x60 */ MPI2_SGE_IO_UNION SGL; /* 0x80 */ } MRSAS_RAID_SCSI_IO_REQUEST, MPI2_POINTER PTR_MRSAS_RAID_SCSI_IO_REQUEST, MRSASRaidSCSIIORequest_t, MPI2_POINTER pMRSASRaidSCSIIORequest_t; /* * MPT RAID MFA IO Descriptor. */ typedef struct _MRSAS_RAID_MFA_IO_DESCRIPTOR { u_int32_t RequestFlags:8; u_int32_t MessageAddress1:24; /* bits 31:8 */ u_int32_t MessageAddress2; /* bits 61:32 */ } MRSAS_RAID_MFA_IO_REQUEST_DESCRIPTOR, *PMRSAS_RAID_MFA_IO_REQUEST_DESCRIPTOR; /* Default Request Descriptor */ typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR { u_int8_t RequestFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t SMID; /* 0x02 */ u_int16_t LMID; /* 0x04 */ u_int16_t DescriptorTypeDependent; /* 0x06 */ } MPI2_DEFAULT_REQUEST_DESCRIPTOR, MPI2_POINTER PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR, Mpi2DefaultRequestDescriptor_t, MPI2_POINTER pMpi2DefaultRequestDescriptor_t; /* High Priority Request Descriptor */ typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR { u_int8_t RequestFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t SMID; /* 0x02 */ u_int16_t LMID; /* 0x04 */ u_int16_t Reserved1; /* 0x06 */ } MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, MPI2_POINTER PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, Mpi2HighPriorityRequestDescriptor_t, MPI2_POINTER pMpi2HighPriorityRequestDescriptor_t; /* SCSI IO Request Descriptor */ typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR { u_int8_t RequestFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t SMID; /* 0x02 */ u_int16_t LMID; /* 0x04 */ u_int16_t DevHandle; /* 0x06 */ } MPI2_SCSI_IO_REQUEST_DESCRIPTOR, MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR, Mpi2SCSIIORequestDescriptor_t, MPI2_POINTER pMpi2SCSIIORequestDescriptor_t; /* SCSI Target Request Descriptor */ typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR { u_int8_t RequestFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t SMID; /* 0x02 */ u_int16_t LMID; /* 0x04 */ u_int16_t IoIndex; /* 0x06 */ } MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, MPI2_POINTER PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, Mpi2SCSITargetRequestDescriptor_t, MPI2_POINTER pMpi2SCSITargetRequestDescriptor_t; /* RAID Accelerator Request Descriptor */ typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR { u_int8_t RequestFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t SMID; /* 0x02 */ u_int16_t LMID; /* 0x04 */ u_int16_t Reserved; /* 0x06 */ } MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, MPI2_POINTER PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, Mpi2RAIDAcceleratorRequestDescriptor_t, 
MPI2_POINTER pMpi2RAIDAcceleratorRequestDescriptor_t; /* union of Request Descriptors */ typedef union _MRSAS_REQUEST_DESCRIPTOR_UNION { MPI2_DEFAULT_REQUEST_DESCRIPTOR Default; MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority; MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO; MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget; MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator; MRSAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo; union { struct { u_int32_t low; u_int32_t high; } u; u_int64_t Words; } addr; } MRSAS_REQUEST_DESCRIPTOR_UNION; /* Default Reply Descriptor */ typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR { u_int8_t ReplyFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t DescriptorTypeDependent1; /* 0x02 */ u_int32_t DescriptorTypeDependent2; /* 0x04 */ } MPI2_DEFAULT_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR, Mpi2DefaultReplyDescriptor_t, MPI2_POINTER pMpi2DefaultReplyDescriptor_t; /* Address Reply Descriptor */ typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR { u_int8_t ReplyFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t SMID; /* 0x02 */ u_int32_t ReplyFrameAddress; /* 0x04 */ } MPI2_ADDRESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR, Mpi2AddressReplyDescriptor_t, MPI2_POINTER pMpi2AddressReplyDescriptor_t; /* SCSI IO Success Reply Descriptor */ typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR { u_int8_t ReplyFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t SMID; /* 0x02 */ u_int16_t TaskTag; /* 0x04 */ u_int16_t Reserved1; /* 0x06 */ } MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, Mpi2SCSIIOSuccessReplyDescriptor_t, MPI2_POINTER pMpi2SCSIIOSuccessReplyDescriptor_t; /* TargetAssist Success Reply Descriptor */ typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR { u_int8_t ReplyFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t SMID; /* 0x02 */ u_int8_t SequenceNumber; /* 0x04 */ u_int8_t Reserved1; /* 0x05 */ u_int16_t IoIndex; /* 0x06 */ } MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, Mpi2TargetAssistSuccessReplyDescriptor_t, MPI2_POINTER pMpi2TargetAssistSuccessReplyDescriptor_t; /* Target Command Buffer Reply Descriptor */ typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR { u_int8_t ReplyFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int8_t VP_ID; /* 0x02 */ u_int8_t Flags; /* 0x03 */ u_int16_t InitiatorDevHandle; /* 0x04 */ u_int16_t IoIndex; /* 0x06 */ } MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, Mpi2TargetCommandBufferReplyDescriptor_t, MPI2_POINTER pMpi2TargetCommandBufferReplyDescriptor_t; /* RAID Accelerator Success Reply Descriptor */ typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR { u_int8_t ReplyFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t SMID; /* 0x02 */ u_int32_t Reserved; /* 0x04 */ } MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, Mpi2RAIDAcceleratorSuccessReplyDescriptor_t, MPI2_POINTER pMpi2RAIDAcceleratorSuccessReplyDescriptor_t; /* union of Reply Descriptors */ typedef union _MPI2_REPLY_DESCRIPTORS_UNION { MPI2_DEFAULT_REPLY_DESCRIPTOR Default; MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply; MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess; MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess; MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer; MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR 
RAIDAcceleratorSuccess; u_int64_t Words; } MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION, Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t; typedef union { volatile unsigned int val; unsigned int val_rdonly; } mrsas_atomic_t; #define mrsas_atomic_read(v) atomic_load_acq_int(&(v)->val) #define mrsas_atomic_set(v,i) atomic_store_rel_int(&(v)->val, i) #define mrsas_atomic_dec(v) atomic_subtract_int(&(v)->val, 1) #define mrsas_atomic_inc(v) atomic_add_int(&(v)->val, 1) static inline int mrsas_atomic_inc_return(mrsas_atomic_t *v) { return 1 + atomic_fetchadd_int(&(v)->val, 1); } /* IOCInit Request message */ typedef struct _MPI2_IOC_INIT_REQUEST { u_int8_t WhoInit; /* 0x00 */ u_int8_t Reserved1; /* 0x01 */ u_int8_t ChainOffset; /* 0x02 */ u_int8_t Function; /* 0x03 */ u_int16_t Reserved2; /* 0x04 */ u_int8_t Reserved3; /* 0x06 */ u_int8_t MsgFlags; /* 0x07 */ u_int8_t VP_ID; /* 0x08 */ u_int8_t VF_ID; /* 0x09 */ u_int16_t Reserved4; /* 0x0A */ u_int16_t MsgVersion; /* 0x0C */ u_int16_t HeaderVersion; /* 0x0E */ u_int32_t Reserved5; /* 0x10 */ u_int16_t Reserved6; /* 0x14 */ u_int8_t HostPageSize; /* 0x16 */ u_int8_t HostMSIxVectors; /* 0x17 */ u_int16_t Reserved8; /* 0x18 */ u_int16_t SystemRequestFrameSize; /* 0x1A */ u_int16_t ReplyDescriptorPostQueueDepth; /* 0x1C */ u_int16_t ReplyFreeQueueDepth; /* 0x1E */ u_int32_t SenseBufferAddressHigh; /* 0x20 */ u_int32_t SystemReplyAddressHigh; /* 0x24 */ u_int64_t SystemRequestFrameBaseAddress; /* 0x28 */ u_int64_t ReplyDescriptorPostQueueAddress; /* 0x30 */ u_int64_t ReplyFreeQueueAddress;/* 0x38 */ u_int64_t TimeStamp; /* 0x40 */ } MPI2_IOC_INIT_REQUEST, MPI2_POINTER PTR_MPI2_IOC_INIT_REQUEST, Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest_t; /* * MR private defines */ #define MR_PD_INVALID 0xFFFF #define MR_DEVHANDLE_INVALID 0xFFFF #define MAX_SPAN_DEPTH 8 #define MAX_QUAD_DEPTH MAX_SPAN_DEPTH #define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH) #define MAX_ROW_SIZE 32 #define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE) #define MAX_LOGICAL_DRIVES 64 #define MAX_LOGICAL_DRIVES_EXT 256 #define MAX_LOGICAL_DRIVES_DYN 512 #define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES) #define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES) #define MAX_ARRAYS 128 #define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS) #define MAX_ARRAYS_EXT 256 #define MAX_API_ARRAYS_EXT MAX_ARRAYS_EXT #define MAX_API_ARRAYS_DYN 512 #define MAX_PHYSICAL_DEVICES 256 #define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES) #define MAX_RAIDMAP_PHYSICAL_DEVICES_DYN 512 #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101 #define MR_DCMD_SYSTEM_PD_MAP_GET_INFO 0x0200e102 #define MR_DCMD_PD_MFI_TASK_MGMT 0x0200e100 #define MR_DCMD_PD_GET_INFO 0x02020000 #define MRSAS_MAX_PD_CHANNELS 1 #define MRSAS_MAX_LD_CHANNELS 1 #define MRSAS_MAX_DEV_PER_CHANNEL 256 #define MRSAS_DEFAULT_INIT_ID -1 #define MRSAS_MAX_LUN 8 #define MRSAS_DEFAULT_CMD_PER_LUN 256 #define MRSAS_MAX_PD (MRSAS_MAX_PD_CHANNELS * \ MRSAS_MAX_DEV_PER_CHANNEL) #define MRSAS_MAX_LD_IDS (MRSAS_MAX_LD_CHANNELS * \ MRSAS_MAX_DEV_PER_CHANNEL) #define VD_EXT_DEBUG 0 #define TM_DEBUG 1 /******************************************************************* * RAID map related structures ********************************************************************/ #pragma pack(1) typedef struct _MR_DEV_HANDLE_INFO { u_int16_t curDevHdl; u_int8_t validHandles; u_int8_t interfaceType; u_int16_t devHandle[2]; } MR_DEV_HANDLE_INFO; #pragma pack() typedef struct _MR_ARRAY_INFO { u_int16_t pd[MAX_RAIDMAP_ROW_SIZE]; } MR_ARRAY_INFO; 
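/*
 * Illustrative note (not normative): a quad element translates a logical
 * row into a span-relative row roughly as
 *
 *	span_row = (row - logStart) / diff + offsetInSpan
 *
 * for logStart <= row <= logEnd; see the RAID map code (e.g.
 * MR_GetSpanBlock()) for the authoritative arithmetic.
 */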
typedef struct _MR_QUAD_ELEMENT { u_int64_t logStart; u_int64_t logEnd; u_int64_t offsetInSpan; u_int32_t diff; u_int32_t reserved1; } MR_QUAD_ELEMENT; typedef struct _MR_SPAN_INFO { u_int32_t noElements; u_int32_t reserved1; MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH]; } MR_SPAN_INFO; typedef struct _MR_LD_SPAN_ { u_int64_t startBlk; u_int64_t numBlks; u_int16_t arrayRef; u_int8_t spanRowSize; u_int8_t spanRowDataSize; u_int8_t reserved[4]; } MR_LD_SPAN; typedef struct _MR_SPAN_BLOCK_INFO { u_int64_t num_rows; MR_LD_SPAN span; MR_SPAN_INFO block_span_info; } MR_SPAN_BLOCK_INFO; typedef struct _MR_LD_RAID { struct { #if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t fpCapable:1; u_int32_t raCapable:1; u_int32_t reserved5:2; u_int32_t ldPiMode:4; u_int32_t pdPiMode:4; u_int32_t encryptionType:8; u_int32_t fpWriteCapable:1; u_int32_t fpReadCapable:1; u_int32_t fpWriteAcrossStripe:1; u_int32_t fpReadAcrossStripe:1; u_int32_t fpNonRWCapable:1; u_int32_t tmCapable:1; u_int32_t fpCacheBypassCapable:1; u_int32_t reserved4:5; #else u_int32_t reserved4:5; u_int32_t fpCacheBypassCapable:1; u_int32_t tmCapable:1; u_int32_t fpNonRWCapable:1; u_int32_t fpReadAcrossStripe:1; u_int32_t fpWriteAcrossStripe:1; u_int32_t fpReadCapable:1; u_int32_t fpWriteCapable:1; u_int32_t encryptionType:8; u_int32_t pdPiMode:4; u_int32_t ldPiMode:4; u_int32_t reserved5:2; u_int32_t raCapable:1; u_int32_t fpCapable:1; #endif } capability; u_int32_t reserved6; u_int64_t size; u_int8_t spanDepth; u_int8_t level; u_int8_t stripeShift; u_int8_t rowSize; u_int8_t rowDataSize; u_int8_t writeMode; u_int8_t PRL; u_int8_t SRL; u_int16_t targetId; u_int8_t ldState; u_int8_t regTypeReqOnWrite; u_int8_t modFactor; u_int8_t regTypeReqOnRead; u_int16_t seqNum; struct { #if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t reserved:30; u_int32_t regTypeReqOnReadLsValid:1; u_int32_t ldSyncRequired:1; #else u_int32_t ldSyncRequired:1; u_int32_t regTypeReqOnReadLsValid:1; u_int32_t reserved:30; #endif } flags; u_int8_t LUN[8]; u_int8_t fpIoTimeoutForLd; u_int8_t reserved2[3]; u_int32_t logicalBlockLength; struct { #if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t reserved1:24; u_int32_t LdLogicalBlockExp:4; u_int32_t LdPiExp:4; #else u_int32_t LdPiExp:4; u_int32_t LdLogicalBlockExp:4; u_int32_t reserved1:24; #endif } exponent; u_int8_t reserved3[0x80 - 0x38]; } MR_LD_RAID; typedef struct _MR_LD_SPAN_MAP { MR_LD_RAID ldRaid; u_int8_t dataArmMap[MAX_RAIDMAP_ROW_SIZE]; MR_SPAN_BLOCK_INFO spanBlock[MAX_RAIDMAP_SPAN_DEPTH]; } MR_LD_SPAN_MAP; typedef struct _MR_FW_RAID_MAP { u_int32_t totalSize; union { struct { u_int32_t maxLd; u_int32_t maxSpanDepth; u_int32_t maxRowSize; u_int32_t maxPdCount; u_int32_t maxArrays; } validationInfo; u_int32_t version[5]; u_int32_t reserved1[5]; } raid_desc; u_int32_t ldCount; u_int32_t Reserved1; /* * This maps a FW LD target Id to an LD number; slots with no LD are * filled with 0xFF. For example: if tgt Id is 4 and FW LD is 2, and * there is only one LD, FW will populate the array like this: [0xFF, * 0xFF, 0xFF, 0xFF, 0x0,.....]. This is to help reduce the entire * structure size if there are few LDs or the driver is looking up * info for one LD only.
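*
* The lookup is then a single index, conceptually:
*
*	ld = raidMap.ldTgtIdToLd[ldTgtId];	(0xFF meaning no such LD)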
*/ u_int8_t ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS]; u_int8_t fpPdIoTimeoutSec; u_int8_t reserved2[7]; MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS]; MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES]; MR_LD_SPAN_MAP ldSpanMap[1]; } MR_FW_RAID_MAP; typedef struct _MR_FW_RAID_MAP_EXT { /* Not used in new map */ u_int32_t reserved; union { struct { u_int32_t maxLd; u_int32_t maxSpanDepth; u_int32_t maxRowSize; u_int32_t maxPdCount; u_int32_t maxArrays; } validationInfo; u_int32_t version[5]; u_int32_t reserved1[5]; } fw_raid_desc; u_int8_t fpPdIoTimeoutSec; u_int8_t reserved2[7]; u_int16_t ldCount; u_int16_t arCount; u_int16_t spanCount; u_int16_t reserve3; MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES]; u_int8_t ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT]; MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT]; MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT]; } MR_FW_RAID_MAP_EXT; typedef struct _MR_DRV_RAID_MAP { /* * Total size of this structure, including this field. This field * will be manipulated by driver for ext raid map, else pick the * value from firmware raid map. */ u_int32_t totalSize; union { struct { u_int32_t maxLd; u_int32_t maxSpanDepth; u_int32_t maxRowSize; u_int32_t maxPdCount; u_int32_t maxArrays; } validationInfo; u_int32_t version[5]; u_int32_t reserved1[5]; } drv_raid_desc; /* timeout value used by driver in FP IOs */ u_int8_t fpPdIoTimeoutSec; u_int8_t reserved2[7]; u_int16_t ldCount; u_int16_t arCount; u_int16_t spanCount; u_int16_t reserve3; MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN]; u_int16_t ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN]; MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN]; MR_LD_SPAN_MAP ldSpanMap[1]; } MR_DRV_RAID_MAP; /* * The driver raid map size is the same as the ext raid map; * MR_DRV_RAID_MAP_ALL is created to stay in sync with the old raid map, * mainly for code re-use.
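*
* Sizing-wise, MR_DRV_RAID_MAP above declares ldSpanMap[1] and
* MR_DRV_RAID_MAP_ALL below appends MAX_LOGICAL_DRIVES_DYN - 1 more
* entries, so the combined buffer always covers the dynamic LD limit.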
#pragma pack(1) typedef struct _MR_DRV_RAID_MAP_ALL { MR_DRV_RAID_MAP raidMap; MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN - 1]; } MR_DRV_RAID_MAP_ALL; #pragma pack() typedef struct _LD_LOAD_BALANCE_INFO { u_int8_t loadBalanceFlag; u_int8_t reserved1; mrsas_atomic_t scsi_pending_cmds[MAX_PHYSICAL_DEVICES]; u_int64_t last_accessed_block[MAX_PHYSICAL_DEVICES]; } LD_LOAD_BALANCE_INFO, *PLD_LOAD_BALANCE_INFO; /* SPAN_SET is info calculated from the span info in the RAID map, per LD */ typedef struct _LD_SPAN_SET { u_int64_t log_start_lba; u_int64_t log_end_lba; u_int64_t span_row_start; u_int64_t span_row_end; u_int64_t data_strip_start; u_int64_t data_strip_end; u_int64_t data_row_start; u_int64_t data_row_end; u_int8_t strip_offset[MAX_SPAN_DEPTH]; u_int32_t span_row_data_width; u_int32_t diff; u_int32_t reserved[2]; } LD_SPAN_SET, *PLD_SPAN_SET; typedef struct LOG_BLOCK_SPAN_INFO { LD_SPAN_SET span_set[MAX_SPAN_DEPTH]; } LD_SPAN_INFO, *PLD_SPAN_INFO; #pragma pack(1) typedef struct _MR_FW_RAID_MAP_ALL { MR_FW_RAID_MAP raidMap; MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1]; } MR_FW_RAID_MAP_ALL; #pragma pack() struct IO_REQUEST_INFO { u_int64_t ldStartBlock; u_int32_t numBlocks; u_int16_t ldTgtId; u_int8_t isRead; u_int16_t devHandle; u_int8_t pdInterface; u_int64_t pdBlock; u_int8_t fpOkForIo; u_int8_t IoforUnevenSpan; u_int8_t start_span; u_int8_t reserved; u_int64_t start_row; /* span[7:5], arm[4:0] */ u_int8_t span_arm; u_int8_t pd_after_lb; boolean_t raCapable; u_int16_t r1_alt_dev_handle; }; /* * define MR_PD_CFG_SEQ structure for system PDs */ struct MR_PD_CFG_SEQ { u_int16_t seqNum; u_int16_t devHandle; struct { #if _BYTE_ORDER == _LITTLE_ENDIAN u_int8_t tmCapable:1; u_int8_t reserved:7; #else u_int8_t reserved:7; u_int8_t tmCapable:1; #endif } capability; u_int8_t reserved; u_int16_t pdTargetId; } __packed; struct MR_PD_CFG_SEQ_NUM_SYNC { u_int32_t size; u_int32_t count; struct MR_PD_CFG_SEQ seq[1]; } __packed; typedef struct _STREAM_DETECT { u_int64_t nextSeqLBA; struct megasas_cmd_fusion *first_cmd_fusion; struct megasas_cmd_fusion *last_cmd_fusion; u_int32_t countCmdsInStream; u_int16_t numSGEsInGroup; u_int8_t isRead; u_int8_t groupDepth; boolean_t groupFlush; u_int8_t reserved[7]; } STREAM_DETECT, *PTR_STREAM_DETECT; typedef struct _LD_STREAM_DETECT { boolean_t writeBack; boolean_t FPWriteEnabled; boolean_t membersSSDs; boolean_t fpCacheBypassCapable; u_int32_t mruBitMap; volatile long iosToFware; volatile long writeBytesOutstanding; STREAM_DETECT streamTrack[MAX_STREAMS_TRACKED]; } LD_STREAM_DETECT, *PTR_LD_STREAM_DETECT; typedef struct _MR_LD_TARGET_SYNC { u_int8_t targetId; u_int8_t reserved; u_int16_t seqNum; } MR_LD_TARGET_SYNC;
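/*
 * [Editor's note: illustrative sketch, not the driver's actual balancing
 * routine.] LD_LOAD_BALANCE_INFO above keeps a pending-command counter and
 * the last accessed block per physical arm. A minimal read balancer for a
 * two-arm mirror could pick the arm with fewer outstanding commands,
 * assuming the mrsas_atomic_read() accessor defined elsewhere in this
 * header:
 */
static __inline u_int8_t
example_pick_mirror_arm(LD_LOAD_BALANCE_INFO *lbinfo, u_int8_t arm0,
    u_int8_t arm1)
{
	int pend0 = mrsas_atomic_read(&lbinfo->scsi_pending_cmds[arm0]);
	int pend1 = mrsas_atomic_read(&lbinfo->scsi_pending_cmds[arm1]);

	return ((pend0 <= pend1) ? arm0 : arm1);
}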
/* * RAID Map descriptor Types. * Each element should uniquely identify one data structure in the RAID map */ typedef enum _MR_RAID_MAP_DESC_TYPE { RAID_MAP_DESC_TYPE_DEVHDL_INFO = 0, /* MR_DEV_HANDLE_INFO data */ RAID_MAP_DESC_TYPE_TGTID_INFO = 1, /* target to Ld num Index map */ RAID_MAP_DESC_TYPE_ARRAY_INFO = 2, /* MR_ARRAY_INFO data */ RAID_MAP_DESC_TYPE_SPAN_INFO = 3, /* MR_LD_SPAN_MAP data */ RAID_MAP_DESC_TYPE_COUNT, } MR_RAID_MAP_DESC_TYPE; /* * This table defines the offset, size and num elements of each descriptor * type in the RAID Map buffer */ typedef struct _MR_RAID_MAP_DESC_TABLE { /* Raid map descriptor type */ u_int32_t raidMapDescType; /* Offset into the RAID map buffer where descriptor data is saved */ u_int32_t raidMapDescOffset; /* total size of the descriptor buffer */ u_int32_t raidMapDescBufferSize; /* Number of elements contained in the descriptor buffer */ u_int32_t raidMapDescElements; } MR_RAID_MAP_DESC_TABLE; /* * Dynamic Raid Map Structure. */ typedef struct _MR_FW_RAID_MAP_DYNAMIC { u_int32_t raidMapSize; u_int32_t descTableOffset; u_int32_t descTableSize; u_int32_t descTableNumElements; u_int64_t PCIThresholdBandwidth; u_int32_t reserved2[3]; u_int8_t fpPdIoTimeoutSec; u_int8_t reserved3[3]; u_int32_t rmwFPSeqNum; u_int16_t ldCount; u_int16_t arCount; u_int16_t spanCount; u_int16_t reserved4[3]; /* * The below structure of pointers is only to be used by the driver. * This is added in the API to reduce the amount of code changes needed in * the driver to support dynamic RAID map. * Firmware should not update these pointers while preparing the raid map */ union { struct { MR_DEV_HANDLE_INFO *devHndlInfo; u_int16_t *ldTgtIdToLd; MR_ARRAY_INFO *arMapInfo; MR_LD_SPAN_MAP *ldSpanMap; } ptrStruct; u_int64_t ptrStructureSize[RAID_MAP_DESC_TYPE_COUNT]; } RaidMapDescPtrs; /* * RAID Map descriptor table defines the layout of data in the RAID Map. * The size of the descriptor table itself could change. */ /* Variable Size descriptor Table. */
MR_RAID_MAP_DESC_TABLE raidMapDescTable[RAID_MAP_DESC_TYPE_COUNT]; /* Variable Size buffer containing all data */ u_int32_t raidMapDescData[1]; } MR_FW_RAID_MAP_DYNAMIC; #define IEEE_SGE_FLAGS_ADDR_MASK (0x03) #define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) #define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) #define IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02) #define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) #define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80) #define IEEE_SGE_FLAGS_END_OF_LIST (0x40) /* A few NVMe flag defines */ #define MPI2_SGE_FLAGS_SHIFT (0x02) #define IEEE_SGE_FLAGS_FORMAT_MASK (0xC0) #define IEEE_SGE_FLAGS_FORMAT_IEEE (0x00) #define IEEE_SGE_FLAGS_FORMAT_PQI (0x01) #define IEEE_SGE_FLAGS_FORMAT_NVME (0x02) #define IEEE_SGE_FLAGS_FORMAT_AHCI (0x03) #define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C) #define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00) #define MPI26_IEEE_SGE_FLAGS_NSF_PQI (0x04) #define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08) #define MPI26_IEEE_SGE_FLAGS_NSF_AHCI_PRDT (0x0C) #define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10) union desc_value { u_int64_t word; struct { u_int32_t low; u_int32_t high; } u; }; /******************************************************************* * Temporary command ********************************************************************/ struct mrsas_tmp_dcmd { bus_dma_tag_t tmp_dcmd_tag; bus_dmamap_t tmp_dcmd_dmamap; void *tmp_dcmd_mem; bus_addr_t tmp_dcmd_phys_addr; }; #define MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT 16 #define MR_MAX_RAID_MAP_SIZE_MASK 0x1FF #define MR_MIN_MAP_SIZE 0x10000 /******************************************************************* * Register set, including legacy controllers 1068 and 1078; * structure extended for 1078 registers *******************************************************************/ #pragma pack(1) typedef struct _mrsas_register_set { u_int32_t doorbell; /* 0000h */ u_int32_t fusion_seq_offset; /* 0004h */ u_int32_t fusion_host_diag; /* 0008h */ u_int32_t reserved_01; /* 000Ch */ u_int32_t inbound_msg_0; /* 0010h */ u_int32_t inbound_msg_1; /* 0014h */ u_int32_t outbound_msg_0; /* 0018h */ u_int32_t outbound_msg_1; /* 001Ch */ u_int32_t inbound_doorbell; /* 0020h */ u_int32_t inbound_intr_status; /* 0024h */ u_int32_t inbound_intr_mask; /* 0028h */ u_int32_t outbound_doorbell; /* 002Ch */ u_int32_t outbound_intr_status; /* 0030h */ u_int32_t outbound_intr_mask; /* 0034h */ u_int32_t reserved_1[2]; /* 0038h */ u_int32_t inbound_queue_port; /* 0040h */ u_int32_t outbound_queue_port; /* 0044h */ u_int32_t reserved_2[9]; /* 0048h */ u_int32_t reply_post_host_index;/* 006Ch */ u_int32_t reserved_2_2[12]; /* 0070h */ u_int32_t outbound_doorbell_clear; /* 00A0h */ u_int32_t reserved_3[3]; /* 00A4h */ u_int32_t outbound_scratch_pad; /* 00B0h */ u_int32_t outbound_scratch_pad_2; /* 00B4h */ u_int32_t outbound_scratch_pad_3; /* 00B8h */ u_int32_t outbound_scratch_pad_4; /* 00BCh */ u_int32_t inbound_low_queue_port; /* 00C0h */ u_int32_t inbound_high_queue_port; /* 00C4h */ u_int32_t inbound_single_queue_port; /* 00C8h */ u_int32_t res_6[11]; /* CCh */ u_int32_t host_diag; u_int32_t seq_offset; u_int32_t index_registers[807]; /* 00CCh */ } mrsas_reg_set; #pragma pack() /******************************************************************* * Firmware Interface Defines ******************************************************************* * MFI stands for MegaRAID SAS FW Interface. This is just a moniker * for the protocol between the software and firmware. Commands are * issued using "message frames".
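/*
 * [Editor's note: illustrative sketch, not driver code.] The dynamic RAID
 * map above is self-describing: each MR_RAID_MAP_DESC_TABLE entry records
 * where one descriptor type's data sits inside the variable-size buffer.
 * Locating a descriptor, e.g. RAID_MAP_DESC_TYPE_SPAN_INFO, is a linear
 * scan (endian conversion of firmware-written fields is omitted for
 * clarity):
 */
static __inline void *
example_find_raid_map_desc(MR_FW_RAID_MAP_DYNAMIC *dyn_map,
    u_int32_t desc_type)
{
	MR_RAID_MAP_DESC_TABLE *tbl = dyn_map->raidMapDescTable;
	u_int32_t i;

	for (i = 0; i < dyn_map->descTableNumElements; i++) {
		if (tbl[i].raidMapDescType == desc_type)
			/* offsets are relative to the start of the map */
			return ((u_int8_t *)dyn_map +
			    tbl[i].raidMapDescOffset);
	}
	return (NULL);
}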
******************************************************************/ /* * FW posts its state in upper 4 bits of outbound_msg_0 register */ #define MFI_STATE_MASK 0xF0000000 #define MFI_STATE_UNDEFINED 0x00000000 #define MFI_STATE_BB_INIT 0x10000000 #define MFI_STATE_FW_INIT 0x40000000 #define MFI_STATE_WAIT_HANDSHAKE 0x60000000 #define MFI_STATE_FW_INIT_2 0x70000000 #define MFI_STATE_DEVICE_SCAN 0x80000000 #define MFI_STATE_BOOT_MESSAGE_PENDING 0x90000000 #define MFI_STATE_FLUSH_CACHE 0xA0000000 #define MFI_STATE_READY 0xB0000000 #define MFI_STATE_OPERATIONAL 0xC0000000 #define MFI_STATE_FAULT 0xF0000000 #define MFI_RESET_REQUIRED 0x00000001 #define MFI_RESET_ADAPTER 0x00000002 #define MEGAMFI_FRAME_SIZE 64 #define MRSAS_MFI_FRAME_SIZE 1024 #define MRSAS_MFI_SENSE_SIZE 128 /* * During FW init, clear pending cmds & reset state using inbound_msg_0 * * ABORT : Abort all pending cmds READY : Move from OPERATIONAL to * READY state; discard queue info MFIMODE : Discard (possible) low MFA * posted in 64-bit mode (??) CLR_HANDSHAKE: FW is waiting for HANDSHAKE from * BIOS or Driver HOTPLUG : Resume from Hotplug MFI_STOP_ADP : Send * signal to FW to stop processing */ #define WRITE_SEQUENCE_OFFSET (0x0000000FC) #define HOST_DIAGNOSTIC_OFFSET (0x000000F8) #define DIAG_WRITE_ENABLE (0x00000080) #define DIAG_RESET_ADAPTER (0x00000004) #define MFI_ADP_RESET 0x00000040 #define MFI_INIT_ABORT 0x00000001 #define MFI_INIT_READY 0x00000002 #define MFI_INIT_MFIMODE 0x00000004 #define MFI_INIT_CLEAR_HANDSHAKE 0x00000008 #define MFI_INIT_HOTPLUG 0x00000010 #define MFI_STOP_ADP 0x00000020 #define MFI_RESET_FLAGS MFI_INIT_READY| \ MFI_INIT_MFIMODE| \ MFI_INIT_ABORT /* * MFI frame flags */ #define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000 #define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE 0x0001 #define MFI_FRAME_SGL32 0x0000 #define MFI_FRAME_SGL64 0x0002 #define MFI_FRAME_SENSE32 0x0000 #define MFI_FRAME_SENSE64 0x0004 #define MFI_FRAME_DIR_NONE 0x0000 #define MFI_FRAME_DIR_WRITE 0x0008 #define MFI_FRAME_DIR_READ 0x0010 #define MFI_FRAME_DIR_BOTH 0x0018 #define MFI_FRAME_IEEE 0x0020 /* * Definition for cmd_status */ #define MFI_CMD_STATUS_POLL_MODE 0xFF /* * MFI command opcodes */ #define MFI_CMD_INIT 0x00 #define MFI_CMD_LD_READ 0x01 #define MFI_CMD_LD_WRITE 0x02 #define MFI_CMD_LD_SCSI_IO 0x03 #define MFI_CMD_PD_SCSI_IO 0x04 #define MFI_CMD_DCMD 0x05 #define MFI_CMD_ABORT 0x06 #define MFI_CMD_SMP 0x07 #define MFI_CMD_STP 0x08 #define MFI_CMD_INVALID 0xff #define MR_DCMD_CTRL_GET_INFO 0x01010000 #define MR_DCMD_LD_GET_LIST 0x03010000 #define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000 #define MR_FLUSH_CTRL_CACHE 0x01 #define MR_FLUSH_DISK_CACHE 0x02 #define MR_DCMD_CTRL_SHUTDOWN 0x01050000 #define MR_DCMD_HIBERNATE_SHUTDOWN 0x01060000 #define MR_ENABLE_DRIVE_SPINDOWN 0x01 #define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100 #define MR_DCMD_CTRL_EVENT_GET 0x01040300 #define MR_DCMD_CTRL_EVENT_WAIT 0x01040500 #define MR_DCMD_LD_GET_PROPERTIES 0x03030000 #define MR_DCMD_CLUSTER 0x08000000 #define MR_DCMD_CLUSTER_RESET_ALL 0x08010100 #define MR_DCMD_CLUSTER_RESET_LD 0x08010200 #define MR_DCMD_PD_LIST_QUERY 0x02010100 #define MR_DCMD_CTRL_MISC_CPX 0x0100e200 #define MR_DCMD_CTRL_MISC_CPX_INIT_DATA_GET 0x0100e201 #define MR_DCMD_CTRL_MISC_CPX_QUEUE_DATA 0x0100e202 #define MR_DCMD_CTRL_MISC_CPX_UNREGISTER 0x0100e203 #define MAX_MR_ROW_SIZE 32 #define MR_CPX_DIR_WRITE 1 #define MR_CPX_DIR_READ 0 #define MR_CPX_VERSION 1 #define MR_DCMD_CTRL_IO_METRICS_GET 0x01170200 #define MR_EVT_CFG_CLEARED 0x0004 #define MR_EVT_LD_STATE_CHANGE 0x0051 #define 
MR_EVT_PD_INSERTED 0x005b #define MR_EVT_PD_REMOVED 0x0070 #define MR_EVT_LD_CREATED 0x008a #define MR_EVT_LD_DELETED 0x008b #define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db #define MR_EVT_LD_OFFLINE 0x00fc #define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152 #define MR_EVT_CTRL_PERF_COLLECTION 0x017e /* * MFI command completion codes */ enum MFI_STAT { MFI_STAT_OK = 0x00, MFI_STAT_INVALID_CMD = 0x01, MFI_STAT_INVALID_DCMD = 0x02, MFI_STAT_INVALID_PARAMETER = 0x03, MFI_STAT_INVALID_SEQUENCE_NUMBER = 0x04, MFI_STAT_ABORT_NOT_POSSIBLE = 0x05, MFI_STAT_APP_HOST_CODE_NOT_FOUND = 0x06, MFI_STAT_APP_IN_USE = 0x07, MFI_STAT_APP_NOT_INITIALIZED = 0x08, MFI_STAT_ARRAY_INDEX_INVALID = 0x09, MFI_STAT_ARRAY_ROW_NOT_EMPTY = 0x0a, MFI_STAT_CONFIG_RESOURCE_CONFLICT = 0x0b, MFI_STAT_DEVICE_NOT_FOUND = 0x0c, MFI_STAT_DRIVE_TOO_SMALL = 0x0d, MFI_STAT_FLASH_ALLOC_FAIL = 0x0e, MFI_STAT_FLASH_BUSY = 0x0f, MFI_STAT_FLASH_ERROR = 0x10, MFI_STAT_FLASH_IMAGE_BAD = 0x11, MFI_STAT_FLASH_IMAGE_INCOMPLETE = 0x12, MFI_STAT_FLASH_NOT_OPEN = 0x13, MFI_STAT_FLASH_NOT_STARTED = 0x14, MFI_STAT_FLUSH_FAILED = 0x15, MFI_STAT_HOST_CODE_NOT_FOUNT = 0x16, MFI_STAT_LD_CC_IN_PROGRESS = 0x17, MFI_STAT_LD_INIT_IN_PROGRESS = 0x18, MFI_STAT_LD_LBA_OUT_OF_RANGE = 0x19, MFI_STAT_LD_MAX_CONFIGURED = 0x1a, MFI_STAT_LD_NOT_OPTIMAL = 0x1b, MFI_STAT_LD_RBLD_IN_PROGRESS = 0x1c, MFI_STAT_LD_RECON_IN_PROGRESS = 0x1d, MFI_STAT_LD_WRONG_RAID_LEVEL = 0x1e, MFI_STAT_MAX_SPARES_EXCEEDED = 0x1f, MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20, MFI_STAT_MFC_HW_ERROR = 0x21, MFI_STAT_NO_HW_PRESENT = 0x22, MFI_STAT_NOT_FOUND = 0x23, MFI_STAT_NOT_IN_ENCL = 0x24, MFI_STAT_PD_CLEAR_IN_PROGRESS = 0x25, MFI_STAT_PD_TYPE_WRONG = 0x26, MFI_STAT_PR_DISABLED = 0x27, MFI_STAT_ROW_INDEX_INVALID = 0x28, MFI_STAT_SAS_CONFIG_INVALID_ACTION = 0x29, MFI_STAT_SAS_CONFIG_INVALID_DATA = 0x2a, MFI_STAT_SAS_CONFIG_INVALID_PAGE = 0x2b, MFI_STAT_SAS_CONFIG_INVALID_TYPE = 0x2c, MFI_STAT_SCSI_DONE_WITH_ERROR = 0x2d, MFI_STAT_SCSI_IO_FAILED = 0x2e, MFI_STAT_SCSI_RESERVATION_CONFLICT = 0x2f, MFI_STAT_SHUTDOWN_FAILED = 0x30, MFI_STAT_TIME_NOT_SET = 0x31, MFI_STAT_WRONG_STATE = 0x32, MFI_STAT_LD_OFFLINE = 0x33, MFI_STAT_PEER_NOTIFICATION_REJECTED = 0x34, MFI_STAT_PEER_NOTIFICATION_FAILED = 0x35, MFI_STAT_RESERVATION_IN_PROGRESS = 0x36, MFI_STAT_I2C_ERRORS_DETECTED = 0x37, MFI_STAT_PCI_ERRORS_DETECTED = 0x38, MFI_STAT_CONFIG_SEQ_MISMATCH = 0x67, MFI_STAT_INVALID_STATUS = 0xFF }; /* * Number of mailbox bytes in DCMD message frame */ #define MFI_MBOX_SIZE 12 enum MR_EVT_CLASS { MR_EVT_CLASS_DEBUG = -2, MR_EVT_CLASS_PROGRESS = -1, MR_EVT_CLASS_INFO = 0, MR_EVT_CLASS_WARNING = 1, MR_EVT_CLASS_CRITICAL = 2, MR_EVT_CLASS_FATAL = 3, MR_EVT_CLASS_DEAD = 4, }; enum MR_EVT_LOCALE { MR_EVT_LOCALE_LD = 0x0001, MR_EVT_LOCALE_PD = 0x0002, MR_EVT_LOCALE_ENCL = 0x0004, MR_EVT_LOCALE_BBU = 0x0008, MR_EVT_LOCALE_SAS = 0x0010, MR_EVT_LOCALE_CTRL = 0x0020, MR_EVT_LOCALE_CONFIG = 0x0040, MR_EVT_LOCALE_CLUSTER = 0x0080, MR_EVT_LOCALE_ALL = 0xffff, }; enum MR_EVT_ARGS { MR_EVT_ARGS_NONE, MR_EVT_ARGS_CDB_SENSE, MR_EVT_ARGS_LD, MR_EVT_ARGS_LD_COUNT, MR_EVT_ARGS_LD_LBA, MR_EVT_ARGS_LD_OWNER, MR_EVT_ARGS_LD_LBA_PD_LBA, MR_EVT_ARGS_LD_PROG, MR_EVT_ARGS_LD_STATE, MR_EVT_ARGS_LD_STRIP, MR_EVT_ARGS_PD, MR_EVT_ARGS_PD_ERR, MR_EVT_ARGS_PD_LBA, MR_EVT_ARGS_PD_LBA_LD, MR_EVT_ARGS_PD_PROG, MR_EVT_ARGS_PD_STATE, MR_EVT_ARGS_PCI, MR_EVT_ARGS_RATE, MR_EVT_ARGS_STR, MR_EVT_ARGS_TIME, MR_EVT_ARGS_ECC, MR_EVT_ARGS_LD_PROP, MR_EVT_ARGS_PD_SPARE, MR_EVT_ARGS_PD_INDEX, MR_EVT_ARGS_DIAG_PASS, MR_EVT_ARGS_DIAG_FAIL, MR_EVT_ARGS_PD_LBA_LBA, 
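/*
 * [Editor's note: two illustrative sketches, not part of the driver
 * source.] First, the MFI_STATE_* values above occupy the upper 4 bits of
 * the outbound_msg_0 register, so reading the firmware state is a single
 * mask:
 */
static __inline u_int32_t
example_fw_state(u_int32_t outbound_msg_0)
{
	return (outbound_msg_0 & MFI_STATE_MASK);	/* e.g. MFI_STATE_READY */
}

/*
 * Second, AEN registration combines an event class and locale into one
 * 32-bit word; on little-endian hosts the locale fills the low 16 bits and
 * the (signed) class the top byte, matching the mrsas_evt_class_locale
 * union later in this header. Passing MR_EVT_CLASS_DEBUG and
 * MR_EVT_LOCALE_ALL yields the widest possible filter.
 */
static __inline u_int32_t
example_aen_class_locale_word(int8_t cls, u_int16_t locale)
{
	return ((u_int32_t)locale | ((u_int32_t)(u_int8_t)cls << 24));
}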
MR_EVT_ARGS_PORT_PHY, MR_EVT_ARGS_PD_MISSING, MR_EVT_ARGS_PD_ADDRESS, MR_EVT_ARGS_BITMAP, MR_EVT_ARGS_CONNECTOR, MR_EVT_ARGS_PD_PD, MR_EVT_ARGS_PD_FRU, MR_EVT_ARGS_PD_PATHINFO, MR_EVT_ARGS_PD_POWER_STATE, MR_EVT_ARGS_GENERIC, }; /* * Thunderbolt (and later) Defines */ #define MEGASAS_CHAIN_FRAME_SZ_MIN 1024 #define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009) #define MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256 #define MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0 #define MRSAS_MPI2_FUNCTION_LD_IO_REQUEST 0xF1 #define MRSAS_LOAD_BALANCE_FLAG 0x1 #define MRSAS_DCMD_MBOX_PEND_FLAG 0x1 #define HOST_DIAG_WRITE_ENABLE 0x80 #define HOST_DIAG_RESET_ADAPTER 0x4 #define MRSAS_TBOLT_MAX_RESET_TRIES 3 #define MRSAS_MAX_MFI_CMDS 16 #define MRSAS_MAX_IOCTL_CMDS 3 /* * Invader Defines */ #define MPI2_TYPE_CUDA 0x2 #define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000 #define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00 #define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10 #define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80 #define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8 #define MR_RL_WRITE_THROUGH_MODE 0x00 #define MR_RL_WRITE_BACK_MODE 0x01 /* * T10 PI defines */ #define MR_PROT_INFO_TYPE_CONTROLLER 0x8 #define MRSAS_SCSI_VARIABLE_LENGTH_CMD 0x7f #define MRSAS_SCSI_SERVICE_ACTION_READ32 0x9 #define MRSAS_SCSI_SERVICE_ACTION_WRITE32 0xB #define MRSAS_SCSI_ADDL_CDB_LEN 0x18 #define MRSAS_RD_WR_PROTECT_CHECK_ALL 0x20 #define MRSAS_RD_WR_PROTECT_CHECK_NONE 0x60 #define MRSAS_SCSIBLOCKSIZE 512 /* * Raid context flags */ #define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT 0x4 #define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK 0x30 typedef enum MR_RAID_FLAGS_IO_SUB_TYPE { MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0, MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1, MR_RAID_FLAGS_IO_SUB_TYPE_RMW_DATA = 2, MR_RAID_FLAGS_IO_SUB_TYPE_RMW_P = 3, MR_RAID_FLAGS_IO_SUB_TYPE_RMW_Q = 4, MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS = 6, MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7 } MR_RAID_FLAGS_IO_SUB_TYPE; /* * Request descriptor types */ #define MRSAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7 #define MRSAS_REQ_DESCRIPT_FLAGS_MFA 0x1 #define MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2 #define MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1 #define MRSAS_FP_CMD_LEN 16 #define MRSAS_FUSION_IN_RESET 0 #define RAID_CTX_SPANARM_ARM_SHIFT (0) #define RAID_CTX_SPANARM_ARM_MASK (0x1f) #define RAID_CTX_SPANARM_SPAN_SHIFT (5) #define RAID_CTX_SPANARM_SPAN_MASK (0xE0) /* * Define region lock types */ typedef enum _REGION_TYPE { REGION_TYPE_UNUSED = 0, REGION_TYPE_SHARED_READ = 1, REGION_TYPE_SHARED_WRITE = 2, REGION_TYPE_EXCLUSIVE = 3, } REGION_TYPE; /* * SCSI-CAM Related Defines */ #define MRSAS_SCSI_MAX_LUNS 0 #define MRSAS_SCSI_INITIATOR_ID 255 #define MRSAS_SCSI_MAX_CMDS 8 #define MRSAS_SCSI_MAX_CDB_LEN 16 #define MRSAS_SCSI_SENSE_BUFFERSIZE 96 #define MRSAS_INTERNAL_CMDS 32 #define MRSAS_FUSION_INT_CMDS 8 #define MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK 0x400000 #define MEGASAS_MAX_CHAIN_SIZE_MASK 0x3E0 #define MEGASAS_256K_IO 128 #define MEGASAS_1MB_IO (MEGASAS_256K_IO * 4) /* Request types */ #define MRSAS_REQ_TYPE_INTERNAL_CMD 0x0 #define MRSAS_REQ_TYPE_AEN_FETCH 0x1 #define MRSAS_REQ_TYPE_PASSTHRU 0x2 #define MRSAS_REQ_TYPE_GETSET_PARAM 0x3 #define MRSAS_REQ_TYPE_SCSI_IO 0x4 /* Request states */ #define MRSAS_REQ_STATE_FREE 0 #define MRSAS_REQ_STATE_BUSY 1 #define MRSAS_REQ_STATE_TRAN 2 #define MRSAS_REQ_STATE_COMPLETE 3 typedef enum _MR_SCSI_CMD_TYPE { READ_WRITE_LDIO = 0, NON_READ_WRITE_LDIO = 1, READ_WRITE_SYSPDIO = 2, NON_READ_WRITE_SYSPDIO = 3, } MR_SCSI_CMD_TYPE; enum mrsas_req_flags { MRSAS_DIR_UNKNOWN 
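/*
 * [Editor's note: illustrative sketch, not part of the driver source.]
 * The RAID_CTX_SPANARM_* masks above decode the packed span_arm byte noted
 * earlier in struct IO_REQUEST_INFO (span in bits 7:5, arm in bits 4:0):
 */
static __inline u_int8_t
example_spanarm_span(u_int8_t span_arm)
{
	return ((span_arm & RAID_CTX_SPANARM_SPAN_MASK) >>
	    RAID_CTX_SPANARM_SPAN_SHIFT);
}

static __inline u_int8_t
example_spanarm_arm(u_int8_t span_arm)
{
	return ((span_arm & RAID_CTX_SPANARM_ARM_MASK) >>
	    RAID_CTX_SPANARM_ARM_SHIFT);
}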
= 0x1, MRSAS_DIR_IN = 0x2, MRSAS_DIR_OUT = 0x4, MRSAS_DIR_NONE = 0x8, }; /* * Adapter Reset States */ enum { MRSAS_HBA_OPERATIONAL = 0, MRSAS_ADPRESET_SM_INFAULT = 1, MRSAS_ADPRESET_SM_FW_RESET_SUCCESS = 2, MRSAS_ADPRESET_SM_OPERATIONAL = 3, MRSAS_HW_CRITICAL_ERROR = 4, MRSAS_ADPRESET_INPROG_SIGN = 0xDEADDEAD, }; /* * MPT Command Structure */ struct mrsas_mpt_cmd { MRSAS_RAID_SCSI_IO_REQUEST *io_request; bus_addr_t io_request_phys_addr; MPI2_SGE_IO_UNION *chain_frame; bus_addr_t chain_frame_phys_addr; u_int32_t sge_count; u_int8_t *sense; bus_addr_t sense_phys_addr; u_int8_t retry_for_fw_reset; MRSAS_REQUEST_DESCRIPTOR_UNION *request_desc; u_int32_t sync_cmd_idx; u_int32_t index; u_int8_t flags; u_int8_t pd_r1_lb; u_int8_t load_balance; bus_size_t length; u_int32_t error_code; bus_dmamap_t data_dmamap; void *data; union ccb *ccb_ptr; struct callout cm_callout; struct mrsas_softc *sc; boolean_t tmCapable; u_int16_t r1_alt_dev_handle; boolean_t cmd_completed; struct mrsas_mpt_cmd *peer_cmd; bool callout_owner; TAILQ_ENTRY(mrsas_mpt_cmd) next; u_int8_t pdInterface; }; /* * MFI Command Structure */ struct mrsas_mfi_cmd { union mrsas_frame *frame; bus_dmamap_t frame_dmamap; void *frame_mem; bus_addr_t frame_phys_addr; u_int8_t *sense; bus_dmamap_t sense_dmamap; void *sense_mem; bus_addr_t sense_phys_addr; u_int32_t index; u_int8_t sync_cmd; u_int8_t cmd_status; u_int8_t abort_aen; u_int8_t retry_for_fw_reset; struct mrsas_softc *sc; union ccb *ccb_ptr; union { struct { u_int16_t smid; u_int16_t resvd; } context; u_int32_t frame_count; } cmd_id; TAILQ_ENTRY(mrsas_mfi_cmd) next; }; /* * define constants for device list query options */ enum MR_PD_QUERY_TYPE { MR_PD_QUERY_TYPE_ALL = 0, MR_PD_QUERY_TYPE_STATE = 1, MR_PD_QUERY_TYPE_POWER_STATE = 2, MR_PD_QUERY_TYPE_MEDIA_TYPE = 3, MR_PD_QUERY_TYPE_SPEED = 4, MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5, }; #define MR_EVT_CFG_CLEARED 0x0004 #define MR_EVT_LD_STATE_CHANGE 0x0051 #define MR_EVT_PD_INSERTED 0x005b #define MR_EVT_PD_REMOVED 0x0070 #define MR_EVT_LD_CREATED 0x008a #define MR_EVT_LD_DELETED 0x008b #define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db #define MR_EVT_LD_OFFLINE 0x00fc #define MR_EVT_CTRL_PROP_CHANGED 0x012f #define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152 enum MR_PD_STATE { MR_PD_STATE_UNCONFIGURED_GOOD = 0x00, MR_PD_STATE_UNCONFIGURED_BAD = 0x01, MR_PD_STATE_HOT_SPARE = 0x02, MR_PD_STATE_OFFLINE = 0x10, MR_PD_STATE_FAILED = 0x11, MR_PD_STATE_REBUILD = 0x14, MR_PD_STATE_ONLINE = 0x18, MR_PD_STATE_COPYBACK = 0x20, MR_PD_STATE_SYSTEM = 0x40, }; /* * defines the physical drive address structure */ #pragma pack(1) struct MR_PD_ADDRESS { u_int16_t deviceId; u_int16_t enclDeviceId; union { struct { u_int8_t enclIndex; u_int8_t slotNumber; } mrPdAddress; struct { u_int8_t enclPosition; u_int8_t enclConnectorIndex; } mrEnclAddress; } u1; u_int8_t scsiDevType; union { u_int8_t connectedPortBitmap; u_int8_t connectedPortNumbers; } u2; u_int64_t sasAddr[2]; }; #pragma pack() /* * defines the physical drive list structure */ #pragma pack(1) struct MR_PD_LIST { u_int32_t size; u_int32_t count; struct MR_PD_ADDRESS addr[1]; }; #pragma pack() #pragma pack(1) struct mrsas_pd_list { u_int16_t tid; u_int8_t driveType; u_int8_t driveState; }; #pragma pack() /* * defines the logical drive reference structure */ typedef union _MR_LD_REF { struct { u_int8_t targetId; u_int8_t reserved; u_int16_t seqNum; } ld_context; u_int32_t ref; } MR_LD_REF; /* * defines the logical drive list structure */ #pragma pack(1) struct MR_LD_LIST { u_int32_t ldCount; u_int32_t 
reserved; struct { MR_LD_REF ref; u_int8_t state; u_int8_t reserved[3]; u_int64_t size; } ldList[MAX_LOGICAL_DRIVES_EXT]; }; #pragma pack() /* * SAS controller properties */ #pragma pack(1) struct mrsas_ctrl_prop { u_int16_t seq_num; u_int16_t pred_fail_poll_interval; u_int16_t intr_throttle_count; u_int16_t intr_throttle_timeouts; u_int8_t rebuild_rate; u_int8_t patrol_read_rate; u_int8_t bgi_rate; u_int8_t cc_rate; u_int8_t recon_rate; u_int8_t cache_flush_interval; u_int8_t spinup_drv_count; u_int8_t spinup_delay; u_int8_t cluster_enable; u_int8_t coercion_mode; u_int8_t alarm_enable; u_int8_t disable_auto_rebuild; u_int8_t disable_battery_warn; u_int8_t ecc_bucket_size; u_int16_t ecc_bucket_leak_rate; u_int8_t restore_hotspare_on_insertion; u_int8_t expose_encl_devices; u_int8_t maintainPdFailHistory; u_int8_t disallowHostRequestReordering; u_int8_t abortCCOnError; u_int8_t loadBalanceMode; u_int8_t disableAutoDetectBackplane; u_int8_t snapVDSpace; /* * Add properties that can be controlled by a bit in the following * structure. */ struct { #if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t copyBackDisabled:1; u_int32_t SMARTerEnabled:1; u_int32_t prCorrectUnconfiguredAreas:1; u_int32_t useFdeOnly:1; u_int32_t disableNCQ:1; u_int32_t SSDSMARTerEnabled:1; u_int32_t SSDPatrolReadEnabled:1; u_int32_t enableSpinDownUnconfigured:1; u_int32_t autoEnhancedImport:1; u_int32_t enableSecretKeyControl:1; u_int32_t disableOnlineCtrlReset:1; u_int32_t allowBootWithPinnedCache:1; u_int32_t disableSpinDownHS:1; u_int32_t enableJBOD:1; u_int32_t disableCacheBypass:1; u_int32_t useDiskActivityForLocate:1; u_int32_t enablePI:1; u_int32_t preventPIImport:1; u_int32_t useGlobalSparesForEmergency:1; u_int32_t useUnconfGoodForEmergency:1; u_int32_t useEmergencySparesforSMARTer:1; u_int32_t forceSGPIOForQuadOnly:1; u_int32_t enableConfigAutoBalance:1; u_int32_t enableVirtualCache:1; u_int32_t enableAutoLockRecovery:1; u_int32_t disableImmediateIO:1; u_int32_t disableT10RebuildAssist:1; u_int32_t ignore64ldRestriction:1; u_int32_t enableSwZone:1; u_int32_t limitMaxRateSATA3G:1; u_int32_t reserved:2; #else u_int32_t reserved:2; u_int32_t limitMaxRateSATA3G:1; u_int32_t enableSwZone:1; u_int32_t ignore64ldRestriction:1; u_int32_t disableT10RebuildAssist:1; u_int32_t disableImmediateIO:1; u_int32_t enableAutoLockRecovery:1; u_int32_t enableVirtualCache:1; u_int32_t enableConfigAutoBalance:1; u_int32_t forceSGPIOForQuadOnly:1; u_int32_t useEmergencySparesforSMARTer:1; u_int32_t useUnconfGoodForEmergency:1; u_int32_t useGlobalSparesForEmergency:1; u_int32_t preventPIImport:1; u_int32_t enablePI:1; u_int32_t useDiskActivityForLocate:1; u_int32_t disableCacheBypass:1; u_int32_t enableJBOD:1; u_int32_t disableSpinDownHS:1; u_int32_t allowBootWithPinnedCache:1; u_int32_t disableOnlineCtrlReset:1; u_int32_t enableSecretKeyControl:1; u_int32_t autoEnhancedImport:1; u_int32_t enableSpinDownUnconfigured:1; u_int32_t SSDPatrolReadEnabled:1; u_int32_t SSDSMARTerEnabled:1; u_int32_t disableNCQ:1; u_int32_t useFdeOnly:1; u_int32_t prCorrectUnconfiguredAreas:1; u_int32_t SMARTerEnabled:1; u_int32_t copyBackDisabled:1; #endif } OnOffProperties; u_int8_t autoSnapVDSpace; u_int8_t viewSpace; u_int16_t spinDownTime; u_int8_t reserved[24]; }; #pragma pack() /* * SAS controller information */ struct mrsas_ctrl_info { /* * PCI device information */ struct { u_int16_t vendor_id; u_int16_t device_id; u_int16_t sub_vendor_id; u_int16_t sub_device_id; u_int8_t reserved[24]; } __packed pci; /* * Host interface information */ struct { u_int8_t 
PCIX:1; u_int8_t PCIE:1; u_int8_t iSCSI:1; u_int8_t SAS_3G:1; u_int8_t reserved_0:4; u_int8_t reserved_1[6]; u_int8_t port_count; u_int64_t port_addr[8]; } __packed host_interface; /* * Device (backend) interface information */ struct { u_int8_t SPI:1; u_int8_t SAS_3G:1; u_int8_t SATA_1_5G:1; u_int8_t SATA_3G:1; u_int8_t reserved_0:4; u_int8_t reserved_1[6]; u_int8_t port_count; u_int64_t port_addr[8]; } __packed device_interface; u_int32_t image_check_word; u_int32_t image_component_count; struct { char name[8]; char version[32]; char build_date[16]; char built_time[16]; } __packed image_component[8]; u_int32_t pending_image_component_count; struct { char name[8]; char version[32]; char build_date[16]; char build_time[16]; } __packed pending_image_component[8]; u_int8_t max_arms; u_int8_t max_spans; u_int8_t max_arrays; u_int8_t max_lds; char product_name[80]; char serial_no[32]; /* * Other physical/controller/operation information. Indicates the * presence of the hardware */ struct { u_int32_t bbu:1; u_int32_t alarm:1; u_int32_t nvram:1; u_int32_t uart:1; u_int32_t reserved:28; } __packed hw_present; u_int32_t current_fw_time; /* * Maximum data transfer sizes */ u_int16_t max_concurrent_cmds; u_int16_t max_sge_count; u_int32_t max_request_size; /* * Logical and physical device counts */ u_int16_t ld_present_count; u_int16_t ld_degraded_count; u_int16_t ld_offline_count; u_int16_t pd_present_count; u_int16_t pd_disk_present_count; u_int16_t pd_disk_pred_failure_count; u_int16_t pd_disk_failed_count; /* * Memory size information */ u_int16_t nvram_size; u_int16_t memory_size; u_int16_t flash_size; /* * Error counters */ u_int16_t mem_correctable_error_count; u_int16_t mem_uncorrectable_error_count; /* * Cluster information */ u_int8_t cluster_permitted; u_int8_t cluster_active; /* * Additional max data transfer sizes */ u_int16_t max_strips_per_io; /* * Controller capabilities structures */ struct { u_int32_t raid_level_0:1; u_int32_t raid_level_1:1; u_int32_t raid_level_5:1; u_int32_t raid_level_1E:1; u_int32_t raid_level_6:1; u_int32_t reserved:27; } __packed raid_levels; struct { u_int32_t rbld_rate:1; u_int32_t cc_rate:1; u_int32_t bgi_rate:1; u_int32_t recon_rate:1; u_int32_t patrol_rate:1; u_int32_t alarm_control:1; u_int32_t cluster_supported:1; u_int32_t bbu:1; u_int32_t spanning_allowed:1; u_int32_t dedicated_hotspares:1; u_int32_t revertible_hotspares:1; u_int32_t foreign_config_import:1; u_int32_t self_diagnostic:1; u_int32_t mixed_redundancy_arr:1; u_int32_t global_hot_spares:1; u_int32_t reserved:17; } __packed adapter_operations; struct { u_int32_t read_policy:1; u_int32_t write_policy:1; u_int32_t io_policy:1; u_int32_t access_policy:1; u_int32_t disk_cache_policy:1; u_int32_t reserved:27; } __packed ld_operations; struct { u_int8_t min; u_int8_t max; u_int8_t reserved[2]; } __packed stripe_sz_ops; struct { u_int32_t force_online:1; u_int32_t force_offline:1; u_int32_t force_rebuild:1; u_int32_t reserved:29; } __packed pd_operations; struct { u_int32_t ctrl_supports_sas:1; u_int32_t ctrl_supports_sata:1; u_int32_t allow_mix_in_encl:1; u_int32_t allow_mix_in_ld:1; u_int32_t allow_sata_in_cluster:1; u_int32_t reserved:27; } __packed pd_mix_support; /* * Define ECC single-bit-error bucket information */ u_int8_t ecc_bucket_count; u_int8_t reserved_2[11]; /* * Include the controller properties (changeable items) */ struct mrsas_ctrl_prop properties; /* * Define FW pkg version (set in envt v'bles on OEM basis) */ char package_version[0x60]; u_int64_t deviceInterfacePortAddr2[8]; 
u_int8_t reserved3[128]; struct { u_int16_t minPdRaidLevel_0:4; u_int16_t maxPdRaidLevel_0:12; u_int16_t minPdRaidLevel_1:4; u_int16_t maxPdRaidLevel_1:12; u_int16_t minPdRaidLevel_5:4; u_int16_t maxPdRaidLevel_5:12; u_int16_t minPdRaidLevel_1E:4; u_int16_t maxPdRaidLevel_1E:12; u_int16_t minPdRaidLevel_6:4; u_int16_t maxPdRaidLevel_6:12; u_int16_t minPdRaidLevel_10:4; u_int16_t maxPdRaidLevel_10:12; u_int16_t minPdRaidLevel_50:4; u_int16_t maxPdRaidLevel_50:12; u_int16_t minPdRaidLevel_60:4; u_int16_t maxPdRaidLevel_60:12; u_int16_t minPdRaidLevel_1E_RLQ0:4; u_int16_t maxPdRaidLevel_1E_RLQ0:12; u_int16_t minPdRaidLevel_1E0_RLQ0:4; u_int16_t maxPdRaidLevel_1E0_RLQ0:12; u_int16_t reserved[6]; } pdsForRaidLevels; u_int16_t maxPds; /* 0x780 */ u_int16_t maxDedHSPs; /* 0x782 */ u_int16_t maxGlobalHSPs; /* 0x784 */ u_int16_t ddfSize; /* 0x786 */ u_int8_t maxLdsPerArray; /* 0x788 */ u_int8_t partitionsInDDF; /* 0x789 */ u_int8_t lockKeyBinding; /* 0x78a */ u_int8_t maxPITsPerLd; /* 0x78b */ u_int8_t maxViewsPerLd; /* 0x78c */ u_int8_t maxTargetId; /* 0x78d */ u_int16_t maxBvlVdSize; /* 0x78e */ u_int16_t maxConfigurableSSCSize; /* 0x790 */ u_int16_t currentSSCsize; /* 0x792 */ char expanderFwVersion[12]; /* 0x794 */ u_int16_t PFKTrialTimeRemaining;/* 0x7A0 */ u_int16_t cacheMemorySize; /* 0x7A2 */ struct { /* 0x7A4 */ #if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t supportPIcontroller:1; u_int32_t supportLdPIType1:1; u_int32_t supportLdPIType2:1; u_int32_t supportLdPIType3:1; u_int32_t supportLdBBMInfo:1; u_int32_t supportShieldState:1; u_int32_t blockSSDWriteCacheChange:1; u_int32_t supportSuspendResumeBGops:1; u_int32_t supportEmergencySpares:1; u_int32_t supportSetLinkSpeed:1; u_int32_t supportBootTimePFKChange:1; u_int32_t supportJBOD:1; u_int32_t disableOnlinePFKChange:1; u_int32_t supportPerfTuning:1; u_int32_t supportSSDPatrolRead:1; u_int32_t realTimeScheduler:1; u_int32_t supportResetNow:1; u_int32_t supportEmulatedDrives:1; u_int32_t headlessMode:1; u_int32_t dedicatedHotSparesLimited:1; u_int32_t supportUnevenSpans:1; u_int32_t reserved:11; #else u_int32_t reserved:11; u_int32_t supportUnevenSpans:1; u_int32_t dedicatedHotSparesLimited:1; u_int32_t headlessMode:1; u_int32_t supportEmulatedDrives:1; u_int32_t supportResetNow:1; u_int32_t realTimeScheduler:1; u_int32_t supportSSDPatrolRead:1; u_int32_t supportPerfTuning:1; u_int32_t disableOnlinePFKChange:1; u_int32_t supportJBOD:1; u_int32_t supportBootTimePFKChange:1; u_int32_t supportSetLinkSpeed:1; u_int32_t supportEmergencySpares:1; u_int32_t supportSuspendResumeBGops:1; u_int32_t blockSSDWriteCacheChange:1; u_int32_t supportShieldState:1; u_int32_t supportLdBBMInfo:1; u_int32_t supportLdPIType3:1; u_int32_t supportLdPIType2:1; u_int32_t supportLdPIType1:1; u_int32_t supportPIcontroller:1; #endif } adapterOperations2; u_int8_t driverVersion[32]; /* 0x7A8 */ u_int8_t maxDAPdCountSpinup60; /* 0x7C8 */ u_int8_t temperatureROC; /* 0x7C9 */ u_int8_t temperatureCtrl; /* 0x7CA */ u_int8_t reserved4; /* 0x7CB */ u_int16_t maxConfigurablePds; /* 0x7CC */ u_int8_t reserved5[2]; /* 0x7CD reserved */ struct { #if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t peerIsPresent:1; u_int32_t peerIsIncompatible:1; u_int32_t hwIncompatible:1; u_int32_t fwVersionMismatch:1; u_int32_t ctrlPropIncompatible:1; u_int32_t premiumFeatureMismatch:1; u_int32_t reserved:26; #else u_int32_t reserved:26; u_int32_t premiumFeatureMismatch:1; u_int32_t ctrlPropIncompatible:1; u_int32_t fwVersionMismatch:1; u_int32_t hwIncompatible:1; u_int32_t peerIsIncompatible:1; u_int32_t 
peerIsPresent:1; #endif } cluster; char clusterId[16]; /* 0x7D4 */ char reserved6[4]; /* 0x7E4 RESERVED FOR IOV */ struct { /* 0x7E8 */ #if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t supportPersonalityChange:2; u_int32_t supportThermalPollInterval:1; u_int32_t supportDisableImmediateIO:1; u_int32_t supportT10RebuildAssist:1; u_int32_t supportMaxExtLDs:1; u_int32_t supportCrashDump:1; u_int32_t supportSwZone:1; u_int32_t supportDebugQueue:1; u_int32_t supportNVCacheErase:1; u_int32_t supportForceTo512e:1; u_int32_t supportHOQRebuild:1; u_int32_t supportAllowedOpsforDrvRemoval:1; u_int32_t supportDrvActivityLEDSetting:1; u_int32_t supportNVDRAM:1; u_int32_t supportForceFlash:1; u_int32_t supportDisableSESMonitoring:1; u_int32_t supportCacheBypassModes:1; u_int32_t supportSecurityonJBOD:1; u_int32_t discardCacheDuringLDDelete:1; u_int32_t supportTTYLogCompression:1; u_int32_t supportCPLDUpdate:1; u_int32_t supportDiskCacheSettingForSysPDs:1; u_int32_t supportExtendedSSCSize:1; u_int32_t useSeqNumJbodFP:1; u_int32_t reserved:7; #else u_int32_t reserved:7; u_int32_t useSeqNumJbodFP:1; u_int32_t supportExtendedSSCSize:1; u_int32_t supportDiskCacheSettingForSysPDs:1; u_int32_t supportCPLDUpdate:1; u_int32_t supportTTYLogCompression:1; u_int32_t discardCacheDuringLDDelete:1; u_int32_t supportSecurityonJBOD:1; u_int32_t supportCacheBypassModes:1; u_int32_t supportDisableSESMonitoring:1; u_int32_t supportForceFlash:1; u_int32_t supportNVDRAM:1; u_int32_t supportDrvActivityLEDSetting:1; u_int32_t supportAllowedOpsforDrvRemoval:1; u_int32_t supportHOQRebuild:1; u_int32_t supportForceTo512e:1; u_int32_t supportNVCacheErase:1; u_int32_t supportDebugQueue:1; u_int32_t supportSwZone:1; u_int32_t supportCrashDump:1; u_int32_t supportMaxExtLDs:1; u_int32_t supportT10RebuildAssist:1; u_int32_t supportDisableImmediateIO:1; u_int32_t supportThermalPollInterval:1; u_int32_t supportPersonalityChange:2; #endif } adapterOperations3; u_int8_t pad_cpld[16]; struct { #if _BYTE_ORDER == _LITTLE_ENDIAN u_int16_t ctrlInfoExtSupported:1; u_int16_t supportIbuttonLess:1; u_int16_t supportedEncAlgo:1; u_int16_t supportEncryptedMfc:1; u_int16_t imageUploadSupported:1; u_int16_t supportSESCtrlInMultipathCfg:1; u_int16_t supportPdMapTargetId:1; u_int16_t FWSwapsBBUVPDInfo:1; u_int16_t reserved:8; #else u_int16_t reserved:8; u_int16_t FWSwapsBBUVPDInfo:1; u_int16_t supportPdMapTargetId:1; u_int16_t supportSESCtrlInMultipathCfg:1; u_int16_t imageUploadSupported:1; u_int16_t supportEncryptedMfc:1; u_int16_t supportedEncAlgo:1; u_int16_t supportIbuttonLess:1; u_int16_t ctrlInfoExtSupported:1; #endif } adapterOperations4; u_int8_t pad[0x800 - 0x7FE]; /* 0x7FE */ } __packed; /* * When SCSI mid-layer calls driver's reset routine, driver waits for * MRSAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note * that the driver cannot _actually_ abort or reset pending commands. 
While * it is waiting for the commands to complete, it prints a diagnostic message * every MRSAS_RESET_NOTICE_INTERVAL seconds */ #define MRSAS_RESET_WAIT_TIME 180 #define MRSAS_INTERNAL_CMD_WAIT_TIME 180 #define MRSAS_RESET_NOTICE_INTERVAL 5 #define MRSAS_IOCTL_CMD 0 #define MRSAS_DEFAULT_CMD_TIMEOUT 90 #define MRSAS_THROTTLE_QUEUE_DEPTH 16 /* * MSI-x register offset defines */ #define MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C) #define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C) #define MR_MAX_REPLY_QUEUES_OFFSET (0x0000001F) #define MR_MAX_REPLY_QUEUES_EXT_OFFSET (0x003FC000) #define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14 #define MR_MAX_MSIX_REG_ARRAY 16 /* * SYNC CACHE offset define */ #define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000 #define MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET (1 << 24) /* * FW reports the maximum number of commands that it can accept (maximum * commands that can be outstanding) at any time. The driver must report a * lower number to the mid layer because it can issue a few internal commands * itself (e.g., AEN, abort cmd, IOCTLs, etc.). The number of commands it needs * is shown below */ #define MRSAS_INT_CMDS 32 #define MRSAS_SKINNY_INT_CMDS 5 #define MRSAS_MAX_MSIX_QUEUES 128 /* * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit SGLs * based on the size of bus_addr_t */ #define IS_DMA64 (sizeof(bus_addr_t) == 8) #define MFI_XSCALE_OMR0_CHANGE_INTERRUPT 0x00000001 #define MFI_INTR_FLAG_REPLY_MESSAGE 0x00000001 #define MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE 0x00000002 #define MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT 0x00000004 #define MFI_OB_INTR_STATUS_MASK 0x00000002 #define MFI_POLL_TIMEOUT_SECS 60 #define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 #define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001 #define MFI_GEN2_ENABLE_INTERRUPT_MASK 0x00000001 #define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT 0x40000000 #define MFI_SKINNY_ENABLE_INTERRUPT_MASK (0x00000001) #define MFI_1068_PCSR_OFFSET 0x84 #define MFI_1068_FW_HANDSHAKE_OFFSET 0x64 #define MFI_1068_FW_READY 0xDDDD0000 typedef union _MFI_CAPABILITIES { struct { #if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t support_fp_remote_lun:1; u_int32_t support_additional_msix:1; u_int32_t support_fastpath_wb:1; u_int32_t support_max_255lds:1; u_int32_t support_ndrive_r1_lb:1; u_int32_t support_core_affinity:1; u_int32_t security_protocol_cmds_fw:1; u_int32_t support_ext_queue_depth:1; u_int32_t support_ext_io_size:1; u_int32_t reserved:23; #else u_int32_t reserved:23; u_int32_t support_ext_io_size:1; u_int32_t support_ext_queue_depth:1; u_int32_t security_protocol_cmds_fw:1; u_int32_t support_core_affinity:1; u_int32_t support_ndrive_r1_lb:1; u_int32_t support_max_255lds:1; u_int32_t support_fastpath_wb:1; u_int32_t support_additional_msix:1; u_int32_t support_fp_remote_lun:1; #endif } mfi_capabilities; u_int32_t reg; } MFI_CAPABILITIES; #pragma pack(1) struct mrsas_sge32 { u_int32_t phys_addr; u_int32_t length; }; #pragma pack() #pragma pack(1) struct mrsas_sge64 { u_int64_t phys_addr; u_int32_t length; }; #pragma pack() #pragma pack(1) union mrsas_sgl { struct mrsas_sge32 sge32[1]; struct mrsas_sge64 sge64[1]; }; #pragma pack() #pragma pack(1) struct mrsas_header { u_int8_t cmd; /* 00h */ u_int8_t sense_len; /* 01h */ u_int8_t cmd_status; /* 02h */ u_int8_t scsi_status; /* 03h */ u_int8_t target_id; /* 04h */ u_int8_t lun; /* 05h */ u_int8_t cdb_len; /* 06h */ u_int8_t sge_count; /* 07h */ u_int32_t context; /* 08h */ u_int32_t pad_0; /* 0Ch */ u_int16_t flags; /* 10h */ u_int16_t timeout; /* 12h */
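/*
 * [Editor's note: two illustrative sketches, not driver code.] The
 * extended MSI-x field above is carved out of a scratch-pad register value
 * with the mask/shift pair; the +1 reflects that the field stores
 * (queue count - 1):
 */
static __inline u_int32_t
example_msix_queue_count(u_int32_t scratch_pad)
{
	return (((scratch_pad & MR_MAX_REPLY_QUEUES_EXT_OFFSET) >>
	    MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1);
}

/*
 * The reset-wait constants pair naturally with a polling loop of the shape
 * used by mrsas_wait_for_outstanding(): give outstanding I/O up to
 * MRSAS_RESET_WAIT_TIME seconds to drain, nagging every
 * MRSAS_RESET_NOTICE_INTERVAL seconds. mrsas_atomic_read() is the counter
 * accessor defined elsewhere in this header.
 */
static __inline int
example_wait_for_outstanding(mrsas_atomic_t *fw_outstanding)
{
	int i;

	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
		if (mrsas_atomic_read(fw_outstanding) == 0)
			return (0);		/* all I/O drained */
		if ((i % MRSAS_RESET_NOTICE_INTERVAL) == 0)
			printf("mrsas: %d commands still outstanding\n",
			    mrsas_atomic_read(fw_outstanding));
		DELAY(1000 * 1000);		/* one second */
	}
	return (1);				/* timed out */
}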
u_int32_t data_xferlen; /* 14h */ }; #pragma pack() #pragma pack(1) struct mrsas_init_frame { u_int8_t cmd; /* 00h */ u_int8_t reserved_0; /* 01h */ u_int8_t cmd_status; /* 02h */ u_int8_t reserved_1; /* 03h */ MFI_CAPABILITIES driver_operations; /* 04h */ u_int32_t context; /* 08h */ u_int32_t pad_0; /* 0Ch */ u_int16_t flags; /* 10h */ u_int16_t reserved_3; /* 12h */ u_int32_t data_xfer_len; /* 14h */ u_int32_t queue_info_new_phys_addr_lo; /* 18h */ u_int32_t queue_info_new_phys_addr_hi; /* 1Ch */ u_int32_t queue_info_old_phys_addr_lo; /* 20h */ u_int32_t queue_info_old_phys_addr_hi; /* 24h */ u_int32_t driver_ver_lo; /* 28h */ u_int32_t driver_ver_hi; /* 2Ch */ u_int32_t reserved_4[4]; /* 30h */ }; #pragma pack() #pragma pack(1) struct mrsas_io_frame { u_int8_t cmd; /* 00h */ u_int8_t sense_len; /* 01h */ u_int8_t cmd_status; /* 02h */ u_int8_t scsi_status; /* 03h */ u_int8_t target_id; /* 04h */ u_int8_t access_byte; /* 05h */ u_int8_t reserved_0; /* 06h */ u_int8_t sge_count; /* 07h */ u_int32_t context; /* 08h */ u_int32_t pad_0; /* 0Ch */ u_int16_t flags; /* 10h */ u_int16_t timeout; /* 12h */ u_int32_t lba_count; /* 14h */ u_int32_t sense_buf_phys_addr_lo; /* 18h */ u_int32_t sense_buf_phys_addr_hi; /* 1Ch */ u_int32_t start_lba_lo; /* 20h */ u_int32_t start_lba_hi; /* 24h */ union mrsas_sgl sgl; /* 28h */ }; #pragma pack() #pragma pack(1) struct mrsas_pthru_frame { u_int8_t cmd; /* 00h */ u_int8_t sense_len; /* 01h */ u_int8_t cmd_status; /* 02h */ u_int8_t scsi_status; /* 03h */ u_int8_t target_id; /* 04h */ u_int8_t lun; /* 05h */ u_int8_t cdb_len; /* 06h */ u_int8_t sge_count; /* 07h */ u_int32_t context; /* 08h */ u_int32_t pad_0; /* 0Ch */ u_int16_t flags; /* 10h */ u_int16_t timeout; /* 12h */ u_int32_t data_xfer_len; /* 14h */ u_int32_t sense_buf_phys_addr_lo; /* 18h */ u_int32_t sense_buf_phys_addr_hi; /* 1Ch */ u_int8_t cdb[16]; /* 20h */ union mrsas_sgl sgl; /* 30h */ }; #pragma pack() #pragma pack(1) struct mrsas_dcmd_frame { u_int8_t cmd; /* 00h */ u_int8_t reserved_0; /* 01h */ u_int8_t cmd_status; /* 02h */ u_int8_t reserved_1[4]; /* 03h */ u_int8_t sge_count; /* 07h */ u_int32_t context; /* 08h */ u_int32_t pad_0; /* 0Ch */ u_int16_t flags; /* 10h */ u_int16_t timeout; /* 12h */ u_int32_t data_xfer_len; /* 14h */ u_int32_t opcode; /* 18h */ union { /* 1Ch */ u_int8_t b[12]; u_int16_t s[6]; u_int32_t w[3]; } mbox; union mrsas_sgl sgl; /* 28h */ }; #pragma pack() #pragma pack(1) struct mrsas_abort_frame { u_int8_t cmd; /* 00h */ u_int8_t reserved_0; /* 01h */ u_int8_t cmd_status; /* 02h */ u_int8_t reserved_1; /* 03h */ MFI_CAPABILITIES driver_operations; /* 04h */ u_int32_t context; /* 08h */ u_int32_t pad_0; /* 0Ch */ u_int16_t flags; /* 10h */ u_int16_t reserved_3; /* 12h */ u_int32_t reserved_4; /* 14h */ u_int32_t abort_context; /* 18h */ u_int32_t pad_1; /* 1Ch */ u_int32_t abort_mfi_phys_addr_lo; /* 20h */ u_int32_t abort_mfi_phys_addr_hi; /* 24h */ u_int32_t reserved_5[6]; /* 28h */ }; #pragma pack() #pragma pack(1) struct mrsas_smp_frame { u_int8_t cmd; /* 00h */ u_int8_t reserved_1; /* 01h */ u_int8_t cmd_status; /* 02h */ u_int8_t connection_status; /* 03h */ u_int8_t reserved_2[3]; /* 04h */ u_int8_t sge_count; /* 07h */ u_int32_t context; /* 08h */ u_int32_t pad_0; /* 0Ch */ u_int16_t flags; /* 10h */ u_int16_t timeout; /* 12h */ u_int32_t data_xfer_len; /* 14h */ u_int64_t sas_addr; /* 18h */ union { struct mrsas_sge32 sge32[2]; /* [0]: resp [1]: req */ struct mrsas_sge64 sge64[2]; /* [0]: resp [1]: req */ } sgl; }; #pragma pack() #pragma pack(1) struct 
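/*
 * [Editor's note: illustrative sketch, not the driver's code.] A typical
 * use of mrsas_dcmd_frame above: filling a poll-mode DCMD that asks the
 * controller for its info page, in the shape mrsas_get_ctrl_info() uses.
 * "ci_phys" stands in for the bus address of a DMA-able mrsas_ctrl_info
 * buffer (hypothetical parameter).
 */
static __inline void
example_fill_ctrl_info_dcmd(struct mrsas_dcmd_frame *dcmd, bus_addr_t ci_phys)
{
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = MFI_CMD_STATUS_POLL_MODE; /* 0xFF until FW completes */
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;	/* data flows controller -> host */
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t)ci_phys;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
}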
mrsas_stp_frame { u_int8_t cmd; /* 00h */ u_int8_t reserved_1; /* 01h */ u_int8_t cmd_status; /* 02h */ u_int8_t reserved_2; /* 03h */ u_int8_t target_id; /* 04h */ u_int8_t reserved_3[2]; /* 05h */ u_int8_t sge_count; /* 07h */ u_int32_t context; /* 08h */ u_int32_t pad_0; /* 0Ch */ u_int16_t flags; /* 10h */ u_int16_t timeout; /* 12h */ u_int32_t data_xfer_len; /* 14h */ u_int16_t fis[10]; /* 18h */ u_int32_t stp_flags; union { struct mrsas_sge32 sge32[2]; /* [0]: resp [1]: data */ struct mrsas_sge64 sge64[2]; /* [0]: resp [1]: data */ } sgl; }; #pragma pack() union mrsas_frame { struct mrsas_header hdr; struct mrsas_init_frame init; struct mrsas_io_frame io; struct mrsas_pthru_frame pthru; struct mrsas_dcmd_frame dcmd; struct mrsas_abort_frame abort; struct mrsas_smp_frame smp; struct mrsas_stp_frame stp; u_int8_t raw_bytes[64]; }; #pragma pack(1) union mrsas_evt_class_locale { struct { #if _BYTE_ORDER == _LITTLE_ENDIAN u_int16_t locale; u_int8_t reserved; int8_t class; #else int8_t class; u_int8_t reserved; u_int16_t locale; #endif } __packed members; u_int32_t word; } __packed; #pragma pack() #pragma pack(1) struct mrsas_evt_log_info { u_int32_t newest_seq_num; u_int32_t oldest_seq_num; u_int32_t clear_seq_num; u_int32_t shutdown_seq_num; u_int32_t boot_seq_num; } __packed; #pragma pack() struct mrsas_progress { u_int16_t progress; u_int16_t elapsed_seconds; } __packed; struct mrsas_evtarg_ld { u_int16_t target_id; u_int8_t ld_index; u_int8_t reserved; } __packed; struct mrsas_evtarg_pd { u_int16_t device_id; u_int8_t encl_index; u_int8_t slot_number; } __packed; struct mrsas_evt_detail { u_int32_t seq_num; u_int32_t time_stamp; u_int32_t code; union mrsas_evt_class_locale cl; u_int8_t arg_type; u_int8_t reserved1[15]; union { struct { struct mrsas_evtarg_pd pd; u_int8_t cdb_length; u_int8_t sense_length; u_int8_t reserved[2]; u_int8_t cdb[16]; u_int8_t sense[64]; } __packed cdbSense; struct mrsas_evtarg_ld ld; struct { struct mrsas_evtarg_ld ld; u_int64_t count; } __packed ld_count; struct { u_int64_t lba; struct mrsas_evtarg_ld ld; } __packed ld_lba; struct { struct mrsas_evtarg_ld ld; u_int32_t prevOwner; u_int32_t newOwner; } __packed ld_owner; struct { u_int64_t ld_lba; u_int64_t pd_lba; struct mrsas_evtarg_ld ld; struct mrsas_evtarg_pd pd; } __packed ld_lba_pd_lba; struct { struct mrsas_evtarg_ld ld; struct mrsas_progress prog; } __packed ld_prog; struct { struct mrsas_evtarg_ld ld; u_int32_t prev_state; u_int32_t new_state; } __packed ld_state; struct { u_int64_t strip; struct mrsas_evtarg_ld ld; } __packed ld_strip; struct mrsas_evtarg_pd pd; struct { struct mrsas_evtarg_pd pd; u_int32_t err; } __packed pd_err; struct { u_int64_t lba; struct mrsas_evtarg_pd pd; } __packed pd_lba; struct { u_int64_t lba; struct mrsas_evtarg_pd pd; struct mrsas_evtarg_ld ld; } __packed pd_lba_ld; struct { struct mrsas_evtarg_pd pd; struct mrsas_progress prog; } __packed pd_prog; struct { struct mrsas_evtarg_pd pd; u_int32_t prevState; u_int32_t newState; } __packed pd_state; struct { u_int16_t vendorId; u_int16_t deviceId; u_int16_t subVendorId; u_int16_t subDeviceId; } __packed pci; u_int32_t rate; char str[96]; struct { u_int32_t rtc; u_int32_t elapsedSeconds; } __packed time; struct { u_int32_t ecar; u_int32_t elog; char str[64]; } __packed ecc; u_int8_t b[96]; u_int16_t s[48]; u_int32_t w[24]; u_int64_t d[12]; } args; char description[128]; } __packed; struct mrsas_irq_context { struct mrsas_softc *sc; uint32_t MSIxIndex; }; enum MEGASAS_OCR_REASON { FW_FAULT_OCR = 0, MFI_DCMD_TIMEOUT_OCR = 
1, }; /* Controller management info added to support Linux Emulator */ #define MAX_MGMT_ADAPTERS 1024 struct mrsas_mgmt_info { u_int16_t count; struct mrsas_softc *sc_ptr[MAX_MGMT_ADAPTERS]; int max_index; }; #define PCI_TYPE0_ADDRESSES 6 #define PCI_TYPE1_ADDRESSES 2 #define PCI_TYPE2_ADDRESSES 5 typedef struct _MRSAS_DRV_PCI_COMMON_HEADER { u_int16_t vendorID; //(ro) u_int16_t deviceID; //(ro) u_int16_t command; //Device control u_int16_t status; u_int8_t revisionID; //(ro) u_int8_t progIf; //(ro) u_int8_t subClass; //(ro) u_int8_t baseClass; //(ro) u_int8_t cacheLineSize; //(ro +) u_int8_t latencyTimer; //(ro +) u_int8_t headerType; //(ro) u_int8_t bist; //Built in self test union { struct _MRSAS_DRV_PCI_HEADER_TYPE_0 { u_int32_t baseAddresses[PCI_TYPE0_ADDRESSES]; u_int32_t cis; u_int16_t subVendorID; u_int16_t subSystemID; u_int32_t romBaseAddress; u_int8_t capabilitiesPtr; u_int8_t reserved1[3]; u_int32_t reserved2; u_int8_t interruptLine; u_int8_t interruptPin; //(ro) u_int8_t minimumGrant; //(ro) u_int8_t maximumLatency; //(ro) } type0; /* * PCI to PCI Bridge */ struct _MRSAS_DRV_PCI_HEADER_TYPE_1 { u_int32_t baseAddresses[PCI_TYPE1_ADDRESSES]; u_int8_t primaryBus; u_int8_t secondaryBus; u_int8_t subordinateBus; u_int8_t secondaryLatency; u_int8_t ioBase; u_int8_t ioLimit; u_int16_t secondaryStatus; u_int16_t memoryBase; u_int16_t memoryLimit; u_int16_t prefetchBase; u_int16_t prefetchLimit; u_int32_t prefetchBaseUpper32; u_int32_t prefetchLimitUpper32; u_int16_t ioBaseUpper16; u_int16_t ioLimitUpper16; u_int8_t capabilitiesPtr; u_int8_t reserved1[3]; u_int32_t romBaseAddress; u_int8_t interruptLine; u_int8_t interruptPin; u_int16_t bridgeControl; } type1; /* * PCI to CARDBUS Bridge */ struct _MRSAS_DRV_PCI_HEADER_TYPE_2 { u_int32_t socketRegistersBaseAddress; u_int8_t capabilitiesPtr; u_int8_t reserved; u_int16_t secondaryStatus; u_int8_t primaryBus; u_int8_t secondaryBus; u_int8_t subordinateBus; u_int8_t secondaryLatency; struct { u_int32_t base; u_int32_t limit; } range [PCI_TYPE2_ADDRESSES - 1]; u_int8_t interruptLine; u_int8_t interruptPin; u_int16_t bridgeControl; } type2; } u; } MRSAS_DRV_PCI_COMMON_HEADER, *PMRSAS_DRV_PCI_COMMON_HEADER; #define MRSAS_DRV_PCI_COMMON_HEADER_SIZE sizeof(MRSAS_DRV_PCI_COMMON_HEADER) //64 bytes typedef struct _MRSAS_DRV_PCI_LINK_CAPABILITY { union { struct { #if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t linkSpeed:4; u_int32_t linkWidth:6; u_int32_t aspmSupport:2; u_int32_t losExitLatency:3; u_int32_t l1ExitLatency:3; u_int32_t rsvdp:6; u_int32_t portNumber:8; #else u_int32_t portNumber:8; u_int32_t rsvdp:6; u_int32_t l1ExitLatency:3; u_int32_t losExitLatency:3; u_int32_t aspmSupport:2; u_int32_t linkWidth:6; u_int32_t linkSpeed:4; #endif } bits; u_int32_t asUlong; } u; } MRSAS_DRV_PCI_LINK_CAPABILITY, *PMRSAS_DRV_PCI_LINK_CAPABILITY; #define MRSAS_DRV_PCI_LINK_CAPABILITY_SIZE sizeof(MRSAS_DRV_PCI_LINK_CAPABILITY) typedef struct _MRSAS_DRV_PCI_LINK_STATUS_CAPABILITY { union { struct { #if _BYTE_ORDER == _LITTLE_ENDIAN u_int16_t linkSpeed:4; u_int16_t negotiatedLinkWidth:6; u_int16_t linkTrainingError:1; u_int16_t linkTraning:1; u_int16_t slotClockConfig:1; u_int16_t rsvdZ:3; #else u_int16_t rsvdZ:3; u_int16_t slotClockConfig:1; u_int16_t linkTraning:1; u_int16_t linkTrainingError:1; u_int16_t negotiatedLinkWidth:6; u_int16_t linkSpeed:4; #endif } bits; u_int16_t asUshort; } u; u_int16_t reserved; } MRSAS_DRV_PCI_LINK_STATUS_CAPABILITY, *PMRSAS_DRV_PCI_LINK_STATUS_CAPABILITY; #define MRSAS_DRV_PCI_LINK_STATUS_CAPABILITY_SIZE 
sizeof(MRSAS_DRV_PCI_LINK_STATUS_CAPABILITY)

typedef struct _MRSAS_DRV_PCI_CAPABILITIES {
	MRSAS_DRV_PCI_LINK_CAPABILITY linkCapability;
	MRSAS_DRV_PCI_LINK_STATUS_CAPABILITY linkStatusCapability;
}	MRSAS_DRV_PCI_CAPABILITIES, *PMRSAS_DRV_PCI_CAPABILITIES;

#define MRSAS_DRV_PCI_CAPABILITIES_SIZE sizeof(MRSAS_DRV_PCI_CAPABILITIES)

/* PCI information */
typedef struct _MRSAS_DRV_PCI_INFORMATION {
	u_int32_t busNumber;
	u_int8_t deviceNumber;
	u_int8_t functionNumber;
	u_int8_t interruptVector;
	u_int8_t reserved1;
	MRSAS_DRV_PCI_COMMON_HEADER pciHeaderInfo;
	MRSAS_DRV_PCI_CAPABILITIES capability;
	u_int32_t domainID;
	u_int8_t reserved2[28];
}	MRSAS_DRV_PCI_INFORMATION, *PMRSAS_DRV_PCI_INFORMATION;

typedef enum _MR_PD_TYPE {
	UNKNOWN_DRIVE = 0,
	PARALLEL_SCSI = 1,
	SAS_PD = 2,
	SATA_PD = 3,
	FC_PD = 4,
	NVME_PD = 5,
}	MR_PD_TYPE;

typedef union _MR_PD_REF {
	struct {
		u_int16_t deviceId;
		u_int16_t seqNum;
	} mrPdRef;
	u_int32_t ref;
}	MR_PD_REF;

/*
 * define the DDF Type bit structure
 */
union MR_PD_DDF_TYPE {
	struct {
		union {
			struct {
#if _BYTE_ORDER == _LITTLE_ENDIAN
				u_int16_t forcedPDGUID:1;
				u_int16_t inVD:1;
				u_int16_t isGlobalSpare:1;
				u_int16_t isSpare:1;
				u_int16_t isForeign:1;
				u_int16_t reserved:7;
				u_int16_t intf:4;
#else
				u_int16_t intf:4;
				u_int16_t reserved:7;
				u_int16_t isForeign:1;
				u_int16_t isSpare:1;
				u_int16_t isGlobalSpare:1;
				u_int16_t inVD:1;
				u_int16_t forcedPDGUID:1;
#endif
			} pdType;
			u_int16_t type;
		};
		u_int16_t reserved;
	} ddf;
	struct {
		u_int32_t reserved;
	} nonDisk;
	u_int32_t type;
} __packed;

/*
 * defines the progress structure
 */
union MR_PROGRESS {
	struct {
		u_int16_t progress;
		union {
			u_int16_t elapsedSecs;
			u_int16_t elapsedSecsForLastPercent;
		};
	} mrProgress;
	u_int32_t w;
} __packed;

/*
 * defines the physical drive progress structure
 */
struct MR_PD_PROGRESS {
	struct {
#if _BYTE_ORDER == _LITTLE_ENDIAN
		u_int32_t rbld:1;
		u_int32_t patrol:1;
		u_int32_t clear:1;
		u_int32_t copyBack:1;
		u_int32_t erase:1;
		u_int32_t locate:1;
		u_int32_t reserved:26;
#else
		u_int32_t reserved:26;
		u_int32_t locate:1;
		u_int32_t erase:1;
		u_int32_t copyBack:1;
		u_int32_t clear:1;
		u_int32_t patrol:1;
		u_int32_t rbld:1;
#endif
	} active;
	union MR_PROGRESS rbld;
	union MR_PROGRESS patrol;
	union {
		union MR_PROGRESS clear;
		union MR_PROGRESS erase;
	};
	struct {
#if _BYTE_ORDER == _LITTLE_ENDIAN
		u_int32_t rbld:1;
		u_int32_t patrol:1;
		u_int32_t clear:1;
		u_int32_t copyBack:1;
		u_int32_t erase:1;
		u_int32_t reserved:27;
#else
		u_int32_t reserved:27;
		u_int32_t erase:1;
		u_int32_t copyBack:1;
		u_int32_t clear:1;
		u_int32_t patrol:1;
		u_int32_t rbld:1;
#endif
	} pause;
	union MR_PROGRESS reserved[3];
} __packed;

struct mrsas_pd_info {
	MR_PD_REF ref;
	u_int8_t inquiryData[96];
	u_int8_t vpdPage83[64];
	u_int8_t notSupported;
	u_int8_t scsiDevType;
	union {
		u_int8_t connectedPortBitmap;
		u_int8_t connectedPortNumbers;
	};
	u_int8_t deviceSpeed;
	u_int32_t mediaErrCount;
	u_int32_t otherErrCount;
	u_int32_t predFailCount;
	u_int32_t lastPredFailEventSeqNum;
	u_int16_t fwState;
	u_int8_t disabledForRemoval;
	u_int8_t linkSpeed;
	union MR_PD_DDF_TYPE state;
	struct {
		u_int8_t count;
#if _BYTE_ORDER == _LITTLE_ENDIAN
		u_int8_t isPathBroken:4;
		u_int8_t reserved3:3;
		u_int8_t widePortCapable:1;
#else
		u_int8_t widePortCapable:1;
		u_int8_t reserved3:3;
		u_int8_t isPathBroken:4;
#endif
		u_int8_t connectorIndex[2];
		u_int8_t reserved[4];
		u_int64_t sasAddr[2];
		u_int8_t reserved2[16];
	} pathInfo;
	u_int64_t rawSize;
	u_int64_t nonCoercedSize;
	u_int64_t coercedSize;
	u_int16_t enclDeviceId;
	u_int8_t enclIndex;
	union {
		u_int8_t slotNumber;
		u_int8_t enclConnectorIndex;
	};
	struct MR_PD_PROGRESS progInfo;
	u_int8_t badBlockTableFull;
	u_int8_t unusableInCurrentConfig;
	u_int8_t vpdPage83Ext[64];
	u_int8_t powerState;
	u_int8_t enclPosition;
	u_int32_t allowedOps;
	u_int16_t copyBackPartnerId;
	u_int16_t enclPartnerDeviceId;
	struct {
#if _BYTE_ORDER == _LITTLE_ENDIAN
		u_int16_t fdeCapable:1;
		u_int16_t fdeEnabled:1;
		u_int16_t secured:1;
		u_int16_t locked:1;
		u_int16_t foreign:1;
		u_int16_t needsEKM:1;
		u_int16_t reserved:10;
#else
		u_int16_t reserved:10;
		u_int16_t needsEKM:1;
		u_int16_t foreign:1;
		u_int16_t locked:1;
		u_int16_t secured:1;
		u_int16_t fdeEnabled:1;
		u_int16_t fdeCapable:1;
#endif
	} security;
	u_int8_t mediaType;
	u_int8_t notCertified;
	u_int8_t bridgeVendor[8];
	u_int8_t bridgeProductIdentification[16];
	u_int8_t bridgeProductRevisionLevel[4];
	u_int8_t satBridgeExists;
	u_int8_t interfaceType;
	u_int8_t temperature;
	u_int8_t emulatedBlockSize;
	u_int16_t userDataBlockSize;
	u_int16_t reserved2;
	struct {
#if _BYTE_ORDER == _LITTLE_ENDIAN
		u_int32_t piType:3;
		u_int32_t piFormatted:1;
		u_int32_t piEligible:1;
		u_int32_t NCQ:1;
		u_int32_t WCE:1;
		u_int32_t commissionedSpare:1;
		u_int32_t emergencySpare:1;
		u_int32_t ineligibleForSSCD:1;
		u_int32_t ineligibleForLd:1;
		u_int32_t useSSEraseType:1;
		u_int32_t wceUnchanged:1;
		u_int32_t supportScsiUnmap:1;
		u_int32_t reserved:18;
#else
		u_int32_t reserved:18;
		u_int32_t supportScsiUnmap:1;
		u_int32_t wceUnchanged:1;
		u_int32_t useSSEraseType:1;
		u_int32_t ineligibleForLd:1;
		u_int32_t ineligibleForSSCD:1;
		u_int32_t emergencySpare:1;
		u_int32_t commissionedSpare:1;
		u_int32_t WCE:1;
		u_int32_t NCQ:1;
		u_int32_t piEligible:1;
		u_int32_t piFormatted:1;
		u_int32_t piType:3;
#endif
	} properties;
	u_int64_t shieldDiagCompletionTime;
	u_int8_t shieldCounter;
	u_int8_t linkSpeedOther;
	u_int8_t reserved4[2];
	struct {
#if _BYTE_ORDER == _LITTLE_ENDIAN
		u_int32_t bbmErrCountSupported:1;
		u_int32_t bbmErrCount:31;
#else
		u_int32_t bbmErrCount:31;
		u_int32_t bbmErrCountSupported:1;
#endif
	} bbmErr;
	u_int8_t reserved1[512-428];
} __packed;

struct mrsas_target {
	u_int16_t target_id;
	u_int32_t queue_depth;
	u_int8_t interface_type;
	u_int32_t max_io_size_kb;
} __packed;

#define MR_NVME_PAGE_SIZE_MASK		0x000000FF
#define MR_DEFAULT_NVME_PAGE_SIZE	4096
#define MR_DEFAULT_NVME_PAGE_SHIFT	12

/*******************************************************************
 * per-instance data
 ********************************************************************/
struct mrsas_softc {
	device_t mrsas_dev;
	struct cdev *mrsas_cdev;
	struct intr_config_hook mrsas_ich;
	struct cdev *mrsas_linux_emulator_cdev;
	uint16_t device_id;
	struct resource *reg_res;
	int	reg_res_id;
	bus_space_tag_t bus_tag;
	bus_space_handle_t bus_handle;
	bus_dma_tag_t mrsas_parent_tag;
	bus_dma_tag_t verbuf_tag;
	bus_dmamap_t verbuf_dmamap;
	void	*verbuf_mem;
	bus_addr_t verbuf_phys_addr;
	bus_dma_tag_t sense_tag;
	bus_dmamap_t sense_dmamap;
	void	*sense_mem;
	bus_addr_t sense_phys_addr;
	bus_dma_tag_t io_request_tag;
	bus_dmamap_t io_request_dmamap;
	void	*io_request_mem;
	bus_addr_t io_request_phys_addr;
	bus_dma_tag_t chain_frame_tag;
	bus_dmamap_t chain_frame_dmamap;
	void	*chain_frame_mem;
	bus_addr_t chain_frame_phys_addr;
	bus_dma_tag_t reply_desc_tag;
	bus_dmamap_t reply_desc_dmamap;
	void	*reply_desc_mem;
	bus_addr_t reply_desc_phys_addr;
	bus_dma_tag_t ioc_init_tag;
	bus_dmamap_t ioc_init_dmamap;
	void	*ioc_init_mem;
	bus_addr_t ioc_init_phys_mem;
	bus_dma_tag_t data_tag;
	struct cam_sim *sim_0;
	struct cam_sim *sim_1;
	struct cam_path *path_0;
	struct cam_path *path_1;
	struct mtx sim_lock;
	struct mtx pci_lock;
	struct mtx io_lock;
	struct mtx ioctl_lock;
	struct mtx mpt_cmd_pool_lock;
	struct mtx mfi_cmd_pool_lock;
	struct mtx raidmap_lock;
	struct mtx aen_lock;
	struct mtx stream_lock;
	struct selinfo mrsas_select;
	uint32_t mrsas_aen_triggered;
	uint32_t mrsas_poll_waiting;
	struct sema ioctl_count_sema;
	uint32_t max_fw_cmds;
	uint16_t max_scsi_cmds;
	uint32_t max_num_sge;
	struct resource *mrsas_irq[MAX_MSIX_COUNT];
	void	*intr_handle[MAX_MSIX_COUNT];
	int	irq_id[MAX_MSIX_COUNT];
	struct mrsas_irq_context irq_context[MAX_MSIX_COUNT];
	int	msix_vectors;
	int	msix_enable;
	uint32_t msix_reg_offset[16];
	uint8_t	mask_interrupts;
	uint16_t max_chain_frame_sz;
	struct mrsas_mpt_cmd **mpt_cmd_list;
	struct mrsas_mfi_cmd **mfi_cmd_list;
	TAILQ_HEAD(, mrsas_mpt_cmd) mrsas_mpt_cmd_list_head;
	TAILQ_HEAD(, mrsas_mfi_cmd) mrsas_mfi_cmd_list_head;
	bus_addr_t req_frames_desc_phys;
	u_int8_t *req_frames_desc;
	u_int8_t *req_desc;
	bus_addr_t io_request_frames_phys;
	u_int8_t *io_request_frames;
	bus_addr_t reply_frames_desc_phys;
	u_int16_t last_reply_idx[MAX_MSIX_COUNT];
	u_int32_t reply_q_depth;
	u_int32_t request_alloc_sz;
	u_int32_t reply_alloc_sz;
	u_int32_t io_frames_alloc_sz;
	u_int32_t chain_frames_alloc_sz;
	u_int16_t max_sge_in_main_msg;
	u_int16_t max_sge_in_chain;
	u_int8_t chain_offset_io_request;
	u_int8_t chain_offset_mfi_pthru;
	u_int32_t map_sz;
	u_int64_t map_id;
	u_int64_t pd_seq_map_id;
	struct mrsas_mfi_cmd *map_update_cmd;
	struct mrsas_mfi_cmd *jbod_seq_cmd;
	struct mrsas_mfi_cmd *aen_cmd;
	u_int8_t fast_path_io;
	void	*chan;
	void	*ocr_chan;
	u_int8_t adprecovery;
	u_int8_t remove_in_progress;
	u_int8_t ocr_thread_active;
	u_int8_t do_timedout_reset;
	u_int32_t reset_in_progress;
	u_int32_t reset_count;
	u_int32_t block_sync_cache;
	u_int32_t drv_stream_detection;
	u_int8_t fw_sync_cache_support;
	mrsas_atomic_t target_reset_outstanding;
#define MRSAS_MAX_TM_TARGETS (MRSAS_MAX_PD + MRSAS_MAX_LD_IDS)
	struct mrsas_mpt_cmd *target_reset_pool[MRSAS_MAX_TM_TARGETS];
	bus_dma_tag_t jbodmap_tag[2];
	bus_dmamap_t jbodmap_dmamap[2];
	void	*jbodmap_mem[2];
	bus_addr_t jbodmap_phys_addr[2];
	bus_dma_tag_t raidmap_tag[2];
	bus_dmamap_t raidmap_dmamap[2];
	void	*raidmap_mem[2];
	bus_addr_t raidmap_phys_addr[2];
	bus_dma_tag_t mficmd_frame_tag;
	bus_dma_tag_t mficmd_sense_tag;
	bus_addr_t evt_detail_phys_addr;
	bus_dma_tag_t evt_detail_tag;
	bus_dmamap_t evt_detail_dmamap;
	struct mrsas_evt_detail *evt_detail_mem;
	bus_addr_t pd_info_phys_addr;
	bus_dma_tag_t pd_info_tag;
	bus_dmamap_t pd_info_dmamap;
	struct mrsas_pd_info *pd_info_mem;
	struct mrsas_ctrl_info *ctrl_info;
	bus_dma_tag_t ctlr_info_tag;
	bus_dmamap_t ctlr_info_dmamap;
	void	*ctlr_info_mem;
	bus_addr_t ctlr_info_phys_addr;
	u_int32_t max_sectors_per_req;
	u_int32_t disableOnlineCtrlReset;
	mrsas_atomic_t fw_outstanding;
	mrsas_atomic_t prp_count;
	mrsas_atomic_t sge_holes;
	u_int32_t mrsas_debug;
	u_int32_t mrsas_io_timeout;
	u_int32_t mrsas_fw_fault_check_delay;
	u_int32_t io_cmds_highwater;
	u_int8_t UnevenSpanSupport;
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
	struct proc *ocr_thread;
	u_int32_t last_seq_num;
	bus_dma_tag_t el_info_tag;
	bus_dmamap_t el_info_dmamap;
	void	*el_info_mem;
	bus_addr_t el_info_phys_addr;
	struct mrsas_pd_list pd_list[MRSAS_MAX_PD];
	struct mrsas_pd_list local_pd_list[MRSAS_MAX_PD];
	struct mrsas_target target_list[MRSAS_MAX_TM_TARGETS];
	u_int8_t ld_ids[MRSAS_MAX_LD_IDS];
	struct taskqueue *ev_tq;
	struct task ev_task;
	u_int32_t CurLdCount;
	u_int64_t reset_flags;
	int	lb_pending_cmds;
	LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES_EXT];
	LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
	u_int8_t mrsas_gen3_ctrl;
	u_int8_t secure_jbod_support;
	u_int8_t use_seqnum_jbod_fp;
	/* FW support for more than 256 PD/JBOD */
	u_int32_t support_morethan256jbod;
	u_int8_t max256vdSupport;
	u_int16_t fw_supported_vd_count;
	u_int16_t fw_supported_pd_count;
	u_int16_t drv_supported_vd_count;
	u_int16_t drv_supported_pd_count;
	u_int32_t max_map_sz;
	u_int32_t current_map_sz;
	u_int32_t old_map_sz;
	u_int32_t new_map_sz;
	u_int32_t drv_map_sz;
	u_int32_t nvme_page_size;
	boolean_t is_ventura;
	boolean_t is_aero;
	boolean_t msix_combined;
	boolean_t atomic_desc_support;
	u_int16_t maxRaidMapSize;

	/* Non dma-able memory. Driver local copy. */
	MR_DRV_RAID_MAP_ALL *ld_drv_map[2];
	PTR_LD_STREAM_DETECT *streamDetectByLD;
};

/* Compatibility shims for different OS versions */
#define mrsas_kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
	kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
#define mrsas_kproc_exit(arg) kproc_exit(arg)

static __inline void
mrsas_clear_bit(int b, volatile void *p)
{
	atomic_clear_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f));
}

static __inline void
mrsas_set_bit(int b, volatile void *p)
{
	atomic_set_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f));
}

static __inline int
mrsas_test_bit(int b, volatile void *p)
{
	return ((volatile int *)p)[b >> 5] & (1 << (b & 0x1f));
}

+#include "mrsas_ioctl.h"
+extern int mrsas_user_command(struct mrsas_softc *, struct mfi_ioc_passthru *);
+
#endif					/* MRSAS_H */
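/*
 * Illustrative sketch, not part of this change: the helpers above treat any
 * word-aligned buffer as an atomic bitmap. The bitmap array below is
 * hypothetical; ld_id stands for any logical-drive index:
 *
 *	static int ld_present[MRSAS_MAX_LD_IDS / 32];
 *
 *	mrsas_set_bit(ld_id, ld_present);
 *	if (mrsas_test_bit(ld_id, ld_present))
 *		mrsas_clear_bit(ld_id, ld_present);
 */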
diff --git a/sys/dev/mrsas/mrsas_ioctl.c b/sys/dev/mrsas/mrsas_ioctl.c
index 1044fcbbcf92..b8d88c164e81 100644
--- a/sys/dev/mrsas/mrsas_ioctl.c
+++ b/sys/dev/mrsas/mrsas_ioctl.c
@@ -1,530 +1,784 @@
/*
 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
 * Support: freebsdraid@avagotech.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer. 2. Redistributions
 * in binary form must reproduce the above copyright notice, this list of
 * conditions and the following disclaimer in the documentation and/or other
 * materials provided with the distribution. 3. Neither the name of the
 * nor the names of its contributors may be used to endorse or
 * promote products derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing
 * official policies,either expressed or implied, of the FreeBSD Project.
 *
 * Send feedback to: Mail to: AVAGO TECHNOLOGIES, 1621
 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
 *
 */

#include
__FBSDID("$FreeBSD$");

#include
#include

+struct mrsas_passthru_cmd {
+	struct iovec *kern_sge;
+	struct mrsas_softc *sc;
+	struct mrsas_mfi_cmd *cmd;
+	bus_dma_tag_t ioctl_data_tag;
+	bus_dmamap_t ioctl_data_dmamap;
+
+	u_int32_t error_code;
+	u_int32_t sge_count;
+	int	complete;
+};
+
/*
 * Function prototypes
 */
int	mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
int	mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
void	mrsas_free_ioc_cmd(struct mrsas_softc *sc);
void	mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void	*mrsas_alloc_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
static int mrsas_create_frame_pool(struct mrsas_softc *sc);
static void
mrsas_alloc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);

extern struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
extern void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
extern int mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd);

+/*
+ * mrsas_passthru_load_cb:	Callback entry point for bus_dmamap_load()
+ * input:			Pointer to the passthru command packet,
+ *				Pointer to the DMA segment array,
+ *				Number of segments,
+ *				Error code
+ *
+ * This is the callback function of the bus dma map load. It builds the SG
+ * list.
+ */
+static void
+mrsas_passthru_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+	struct mrsas_passthru_cmd *cb = (struct mrsas_passthru_cmd *)arg;
+	struct mrsas_softc *sc = cb->sc;
+	int i = 0;
+
+	if (error) {
+		cb->error_code = error;
+		if (error == EFBIG) {
+			device_printf(sc->mrsas_dev, "mrsas_passthru_load_cb: "
+			    "error=%d EFBIG\n", error);
+			cb->complete = 1;
+			return;
+		} else {
+			device_printf(sc->mrsas_dev, "mrsas_passthru_load_cb: "
+			    "error=%d UNKNOWN\n", error);
+		}
+	}
+	if (nseg > MAX_IOCTL_SGE) {
+		cb->error_code = EFBIG;
+		device_printf(sc->mrsas_dev, "mrsas_passthru_load_cb: "
+		    "too many segments: %d\n", nseg);
+		cb->complete = 1;
+		return;
+	}
+
+	/* Each iov_base carries the bus address of one DMA segment. */
+	for (i = 0; i < nseg; i++) {
+		cb->kern_sge[i].iov_base = PTRIN(segs[i].ds_addr);
+		cb->kern_sge[i].iov_len = segs[i].ds_len;
+	}
+	cb->sge_count = nseg;
+
+	bus_dmamap_sync(cb->ioctl_data_tag, cb->ioctl_data_dmamap,
+	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+	cb->complete = 1;
+}
/*
 * mrsas_passthru:	Handle pass-through commands
 * input:		Adapter instance soft state,
 *			argument pointer
 *
 * This function is called from mrsas_ioctl() to handle pass-through and
 * ioctl commands to Firmware.
 */
int
mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd)
{
	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
#ifdef COMPAT_FREEBSD32
	struct mrsas_iocpacket32 *user_ioc32 = (struct mrsas_iocpacket32 *)arg;
#endif
	union mrsas_frame *in_cmd = (union mrsas_frame *)&(user_ioc->frame.raw);
	struct mrsas_mfi_cmd *cmd = NULL;
	bus_dma_tag_t ioctl_data_tag[MAX_IOCTL_SGE];
	bus_dmamap_t ioctl_data_dmamap[MAX_IOCTL_SGE];
	void	*ioctl_data_mem[MAX_IOCTL_SGE];
	bus_addr_t ioctl_data_phys_addr[MAX_IOCTL_SGE];
	bus_dma_tag_t ioctl_sense_tag = 0;
	bus_dmamap_t ioctl_sense_dmamap = 0;
	void	*ioctl_sense_mem = NULL;
	bus_addr_t ioctl_sense_phys_addr = 0;
	int	i, ioctl_data_size = 0, ioctl_sense_size, ret = 0;
	struct mrsas_sge32 *kern_sge32;
	unsigned long *sense_ptr;
	uint8_t *iov_base_ptrin = NULL;
	size_t	iov_len = 0;

	/*
	 * Check for NOP from MegaCli... MegaCli can issue a DCMD of 0. In
	 * this case do nothing and return 0 to it as status.
	 */
	if (in_cmd->dcmd.opcode == 0) {
		device_printf(sc->mrsas_dev, "In %s() Got a NOP\n", __func__);
		user_ioc->frame.hdr.cmd_status = MFI_STAT_OK;
		return (0);
	}
	/* Validate SGL length */
	if (user_ioc->sge_count > MAX_IOCTL_SGE) {
		device_printf(sc->mrsas_dev, "In %s() SGL is too long (%d > %d).\n",
		    __func__, user_ioc->sge_count, MAX_IOCTL_SGE);
		return (ENOENT);
	}
	/* Get a command */
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd for IOCTL\n");
		return (ENOMEM);
	}
	/*
	 * User's IOCTL packet has 2 frames (maximum). Copy those two frames
	 * into our cmd's frames. cmd->frame's context will get overwritten
	 * when we copy from the user's frames, so that value is set
	 * separately afterwards.
	 */
	memcpy(cmd->frame, user_ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
	cmd->frame->hdr.context = cmd->index;
	cmd->frame->hdr.pad_0 = 0;
	cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
	    MFI_FRAME_SENSE64);

	/*
	 * The management interface between applications and the fw uses MFI
	 * frames. E.g., RAID configuration changes, LD property changes etc.
	 * are accomplished through different kinds of MFI frames. The driver
	 * needs to care only about substituting user buffers with kernel
	 * buffers in SGLs. The location of the SGL is embedded in the struct
	 * iocpacket itself.
	 */
	kern_sge32 = (struct mrsas_sge32 *)
	    ((uintptr_t)cmd->frame + user_ioc->sgl_off);

	memset(ioctl_data_tag, 0, (sizeof(bus_dma_tag_t) * MAX_IOCTL_SGE));
	memset(ioctl_data_dmamap, 0, (sizeof(bus_dmamap_t) * MAX_IOCTL_SGE));
	memset(ioctl_data_mem, 0, (sizeof(void *) * MAX_IOCTL_SGE));
	memset(ioctl_data_phys_addr, 0, (sizeof(bus_addr_t) * MAX_IOCTL_SGE));

	/*
	 * For each user buffer, create a mirror buffer and copy in
	 */
	for (i = 0; i < user_ioc->sge_count; i++) {
		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			if (!user_ioc->sgl[i].iov_len)
				continue;
			ioctl_data_size = user_ioc->sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
		} else {
			if (!user_ioc32->sgl[i].iov_len)
				continue;
			ioctl_data_size = user_ioc32->sgl[i].iov_len;
#endif
		}
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    1, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    ioctl_data_size,
		    1,
		    ioctl_data_size,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &ioctl_data_tag[i])) {
			device_printf(sc->mrsas_dev, "Cannot allocate ioctl data tag\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamem_alloc(ioctl_data_tag[i], (void **)&ioctl_data_mem[i],
		    (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_data_dmamap[i])) {
			device_printf(sc->mrsas_dev, "Cannot allocate ioctl data mem\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamap_load(ioctl_data_tag[i], ioctl_data_dmamap[i],
		    ioctl_data_mem[i], ioctl_data_size, mrsas_alloc_cb,
		    &ioctl_data_phys_addr[i], BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load ioctl data mem\n");
			ret = ENOMEM;
			goto out;
		}
		/* Save the physical address and length */
		kern_sge32[i].phys_addr = (u_int32_t)ioctl_data_phys_addr[i];

		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			kern_sge32[i].length = user_ioc->sgl[i].iov_len;
			iov_base_ptrin = user_ioc->sgl[i].iov_base;
			iov_len = user_ioc->sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
		} else {
			kern_sge32[i].length = user_ioc32->sgl[i].iov_len;
			iov_base_ptrin = PTRIN(user_ioc32->sgl[i].iov_base);
			iov_len = user_ioc32->sgl[i].iov_len;
#endif
		}
		/* Copy in data from user space */
		ret = copyin(iov_base_ptrin, ioctl_data_mem[i], iov_len);
		if (ret) {
			device_printf(sc->mrsas_dev, "IOCTL copyin failed!\n");
			goto out;
		}
	}

	ioctl_sense_size = user_ioc->sense_len;

	if (user_ioc->sense_len) {
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    1, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    ioctl_sense_size,
		    1,
		    ioctl_sense_size,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &ioctl_sense_tag)) {
			device_printf(sc->mrsas_dev, "Cannot allocate ioctl sense tag\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamem_alloc(ioctl_sense_tag, (void **)&ioctl_sense_mem,
		    (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_sense_dmamap)) {
			device_printf(sc->mrsas_dev, "Cannot allocate ioctl sense mem\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamap_load(ioctl_sense_tag, ioctl_sense_dmamap,
		    ioctl_sense_mem, ioctl_sense_size, mrsas_alloc_cb,
		    &ioctl_sense_phys_addr, BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load ioctl sense mem\n");
			ret = ENOMEM;
			goto out;
		}
		sense_ptr = (unsigned long *)((uintptr_t)cmd->frame +
		    user_ioc->sense_off);
		*sense_ptr = ioctl_sense_phys_addr;
	}
	/*
	 * Set the sync_cmd flag so that the ISR knows not to complete this
	 * cmd to the SCSI mid-layer
	 */
	cmd->sync_cmd = 1;
	ret = mrsas_issue_blocked_cmd(sc, cmd);
	if (ret == ETIMEDOUT) {
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOCTL command is timed out, initiating OCR\n");
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
		ret = EAGAIN;
		goto out;
	}
	cmd->sync_cmd = 0;

	/*
	 * copy out the kernel buffers to user buffers
	 */
	for (i = 0; i < user_ioc->sge_count; i++) {
		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			iov_base_ptrin = user_ioc->sgl[i].iov_base;
			iov_len = user_ioc->sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
		} else {
			iov_base_ptrin = PTRIN(user_ioc32->sgl[i].iov_base);
			iov_len = user_ioc32->sgl[i].iov_len;
#endif
		}
		ret = copyout(ioctl_data_mem[i], iov_base_ptrin, iov_len);
		if (ret) {
			device_printf(sc->mrsas_dev, "IOCTL copyout failed!\n");
			goto out;
		}
	}
	/*
	 * copy out the sense
	 */
	if (user_ioc->sense_len) {
		/*
		 * sense_ptr points to the location that has the user sense
		 * buffer address
		 */
		sense_ptr = (unsigned long *)((uintptr_t)user_ioc->frame.raw +
		    user_ioc->sense_off);
		ret = copyout(ioctl_sense_mem,
		    (unsigned long *)(uintptr_t)*sense_ptr, user_ioc->sense_len);
		if (ret) {
			device_printf(sc->mrsas_dev, "IOCTL sense copyout failed!\n");
			goto out;
		}
	}
	/*
	 * Return command status to user space
	 */
	memcpy(&user_ioc->frame.hdr.cmd_status, &cmd->frame->hdr.cmd_status,
	    sizeof(u_int8_t));

out:
	/*
	 * Release sense buffer
	 */
	if (user_ioc->sense_len) {
		if (ioctl_sense_phys_addr)
			bus_dmamap_unload(ioctl_sense_tag, ioctl_sense_dmamap);
		if (ioctl_sense_mem != NULL)
			bus_dmamem_free(ioctl_sense_tag, ioctl_sense_mem,
			    ioctl_sense_dmamap);
		if (ioctl_sense_tag != NULL)
			bus_dma_tag_destroy(ioctl_sense_tag);
	}
	/*
	 * Release data buffers
	 */
	for (i = 0; i < user_ioc->sge_count; i++) {
		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			if (!user_ioc->sgl[i].iov_len)
				continue;
#ifdef COMPAT_FREEBSD32
		} else {
			if (!user_ioc32->sgl[i].iov_len)
				continue;
#endif
		}
		if (ioctl_data_phys_addr[i])
			bus_dmamap_unload(ioctl_data_tag[i], ioctl_data_dmamap[i]);
		if (ioctl_data_mem[i] != NULL)
			bus_dmamem_free(ioctl_data_tag[i], ioctl_data_mem[i],
			    ioctl_data_dmamap[i]);
		if (ioctl_data_tag[i] != NULL)
			bus_dma_tag_destroy(ioctl_data_tag[i]);
	}
	/* Free command */
	mrsas_release_mfi_cmd(cmd);

	return (ret);
}
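/*
 * Illustrative sketch, not part of this change: a userland caller of the
 * pre-existing MRSAS_IOC_FIRMWARE_PASS_THROUGH64 path fills a struct
 * mrsas_iocpacket with a raw MFI frame plus an SGL describing its own
 * buffers; sgl_off and sense_off locate the SGL and sense pointer within
 * the frame. The offsets and sizes below are hypothetical, and fd is an
 * open descriptor for the controller's device node:
 *
 *	struct mrsas_iocpacket ioc = { 0 };
 *	uint8_t buf[4096];
 *
 *	ioc.sgl_off = offsetof(struct mrsas_dcmd_frame, sgl);
 *	ioc.sge_count = 1;
 *	ioc.sgl[0].iov_base = buf;
 *	ioc.sgl[0].iov_len = sizeof(buf);
 *	(ioc.frame.raw[] carries the MFI command itself)
 *	if (ioctl(fd, MRSAS_IOC_FIRMWARE_PASS_THROUGH64, &ioc) == -1)
 *		err(1, "passthru");
 */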
+/*
+ * mrsas_user_command:	Handle user-mode DCMD and buffer
+ * input:		Adapter instance soft state,
+ *			argument pointer
+ *
+ * This function is called from mrsas_ioctl() to issue user-mode DCMDs to
+ * the firmware on behalf of mfiutil.
+ */
+int
+mrsas_user_command(struct mrsas_softc *sc, struct mfi_ioc_passthru *ioc)
+{
+	struct mrsas_mfi_cmd *cmd;
+	struct mrsas_dcmd_frame *dcmd;
+	struct mrsas_passthru_cmd *passcmd;
+	bus_dma_tag_t ioctl_data_tag;
+	bus_dmamap_t ioctl_data_dmamap;
+	struct iovec *kern_sge;
+	int	ret, ioctl_data_size;
+	char	*ioctl_temp_data_mem;
+
+	ret = 0;
+	ioctl_temp_data_mem = NULL;
+	passcmd = NULL;
+	dcmd = NULL;
+	cmd = NULL;
+	ioctl_data_tag = NULL;
+	ioctl_data_dmamap = NULL;
+
+	/* Get a command */
+	cmd = mrsas_get_mfi_cmd(sc);
+	if (!cmd) {
+		device_printf(sc->mrsas_dev,
+		    "Failed to get a free cmd for IOCTL\n");
+		return (ENOMEM);
+	}
+
+	/*
+	 * Frame is DCMD
+	 */
+	dcmd = (struct mrsas_dcmd_frame *)cmd->frame;
+	memcpy(dcmd, &ioc->ioc_frame, sizeof(struct mrsas_dcmd_frame));
+
+	ioctl_data_size = ioc->buf_size;
+
+	cmd->frame->hdr.context = cmd->index;
+	cmd->frame->hdr.pad_0 = 0;
+	cmd->frame->hdr.flags = MFI_FRAME_DIR_BOTH;
+	if (sizeof(bus_addr_t) == 8)
+		cmd->frame->hdr.flags |= MFI_FRAME_SGL64 | MFI_FRAME_SENSE64;
+
+	kern_sge = (struct iovec *)(&dcmd->sgl);
+
+	if (ioctl_data_size == 0) {
+		kern_sge[0].iov_base = 0;
+		kern_sge[0].iov_len = 0;
+	} else {
+		ioctl_temp_data_mem = malloc(ioc->buf_size, M_MRSAS, M_WAITOK);
+		if (ioctl_temp_data_mem == NULL) {
+			device_printf(sc->mrsas_dev, "Could not allocate "
+			    "%d bytes for temporary passthrough ioctl\n",
+			    ioc->buf_size);
+			ret = ENOMEM;
+			goto out;
+		}
+
+		/* Copy in data from user space */
+		ret = copyin(ioc->buf, ioctl_temp_data_mem, ioc->buf_size);
+		if (ret) {
+			device_printf(sc->mrsas_dev, "IOCTL copyin failed!\n");
+			goto out;
+		}
+
+		/*
+		 * Allocate a temporary struct to hold parameters for the
+		 * callback
+		 */
+		passcmd = malloc(sizeof(struct mrsas_passthru_cmd), M_MRSAS,
+		    M_WAITOK);
+		if (passcmd == NULL) {
+			device_printf(sc->mrsas_dev, "Could not allocate "
+			    "memory for temporary passthrough cb struct\n");
+			ret = ENOMEM;
+			goto out;
+		}
+		passcmd->complete = 0;
+		passcmd->sc = sc;
+		passcmd->cmd = cmd;
+		passcmd->kern_sge = kern_sge;
+
+		/*
+		 * Create a dma tag for passthru buffers
+		 */
+		if (bus_dma_tag_create(sc->mrsas_parent_tag,	/* parent */
+		    1, 0,			/* algnmnt, boundary */
+		    BUS_SPACE_MAXADDR,		/* lowaddr */
+		    BUS_SPACE_MAXADDR,		/* highaddr */
+		    NULL, NULL,			/* filter, filterarg */
+		    ioctl_data_size,		/* maxsize */
+		    MAX_IOCTL_SGE,		/* nsegments */
+		    ioctl_data_size,		/* maxsegsize */
+		    BUS_DMA_ALLOCNOW,		/* flags */
+		    busdma_lock_mutex,		/* lockfunc */
+		    &sc->ioctl_lock,		/* lockarg */
+		    &ioctl_data_tag)) {
+			device_printf(sc->mrsas_dev,
+			    "Cannot allocate ioctl data tag %d\n",
+			    ioc->buf_size);
+			ret = ENOMEM;
+			goto out;
+		}
+
+		/* Create the dmamap */
+		if (bus_dmamap_create(ioctl_data_tag, 0, &ioctl_data_dmamap)) {
+			device_printf(sc->mrsas_dev, "Cannot create ioctl "
+			    "passthru dmamap\n");
+			ret = ENOMEM;
+			goto out;
+		}
+
+		passcmd->ioctl_data_tag = ioctl_data_tag;
+		passcmd->ioctl_data_dmamap = ioctl_data_dmamap;
+
+		/* Map data buffer into bus space */
+		if (bus_dmamap_load(ioctl_data_tag, ioctl_data_dmamap,
+		    ioctl_temp_data_mem, ioc->buf_size, mrsas_passthru_load_cb,
+		    passcmd, BUS_DMA_NOWAIT)) {
+			device_printf(sc->mrsas_dev, "Cannot load ioctl "
+			    "passthru data mem: %s %d\n", curproc->p_comm,
+			    ioctl_data_size);
+			ret = ENOMEM;
+			goto out;
+		}
+
+		/* Wait for the load callback to fill in the SGL. */
+		while (passcmd->complete == 0) {
+			pause("mrsas_passthru", hz);
+		}
+
+		cmd->frame->dcmd.sge_count = passcmd->sge_count;
+	}
+
+	/*
+	 * Set the sync_cmd flag so that the ISR knows not to complete this
+	 * cmd to the SCSI mid-layer
+	 */
+	cmd->sync_cmd = 1;
+	mrsas_issue_blocked_cmd(sc, cmd);
+	cmd->sync_cmd = 0;
+
+	if (ioctl_data_size != 0) {
+		bus_dmamap_sync(ioctl_data_tag, ioctl_data_dmamap,
+		    BUS_DMASYNC_POSTREAD);
+		/*
+		 * copy out the kernel buffers to user buffers
+		 */
+		ret = copyout(ioctl_temp_data_mem, ioc->buf, ioc->buf_size);
+		if (ret) {
+			device_printf(sc->mrsas_dev,
+			    "IOCTL copyout failed!\n");
+			goto out;
+		}
+	}
+
+	/*
+	 * Return command status to user space
+	 */
+	memcpy(&ioc->ioc_frame.cmd_status, &cmd->frame->hdr.cmd_status,
+	    sizeof(u_int8_t));
+
+out:
+	/*
+	 * Release the temporary passthrough ioctl buffers
+	 */
+	if (ioctl_temp_data_mem)
+		free(ioctl_temp_data_mem, M_MRSAS);
+	if (passcmd)
+		free(passcmd, M_MRSAS);
+
+	/*
+	 * Release data buffers
+	 */
+	if (ioctl_data_dmamap != NULL) {
+		bus_dmamap_unload(ioctl_data_tag, ioctl_data_dmamap);
+		bus_dmamap_destroy(ioctl_data_tag, ioctl_data_dmamap);
+	}
+	if (ioctl_data_tag != NULL)
+		bus_dma_tag_destroy(ioctl_data_tag);
+	/* Free command */
+	mrsas_release_mfi_cmd(cmd);
+
+	return (ret);
+}
+
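/*
 * Illustrative sketch, not part of this change: mfiutil-style userland code
 * would drive the new MFIIO_PASSTHRU path roughly as below. The DCMD
 * opcode, buffer size, and fd (an open descriptor for the controller's
 * device node) are all hypothetical:
 *
 *	struct mfi_ioc_passthru iop;
 *	uint8_t info[1024];
 *
 *	memset(&iop, 0, sizeof(iop));
 *	iop.ioc_frame.cmd = MFI_CMD_DCMD;
 *	iop.ioc_frame.opcode = opcode;
 *	iop.ioc_frame.data_xfer_len = sizeof(info);
 *	iop.buf = info;
 *	iop.buf_size = sizeof(info);
 *	if (ioctl(fd, MFIIO_PASSTHRU, &iop) == -1)
 *		err(1, "MFIIO_PASSTHRU");
 */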
/*
 * mrsas_alloc_mfi_cmds:	Allocates the command packets
 * input:			Adapter instance soft state
 *
 * Each IOCTL or passthru command that is issued to the FW is wrapped in a
 * local data structure called mrsas_mfi_cmd. The frame embedded in this
 * mrsas_mfi_cmd is issued to the FW. The array is used only to look up the
 * mrsas_mfi_cmd given the context. The free commands are maintained in a
 * linked list.
 */
int
mrsas_alloc_mfi_cmds(struct mrsas_softc *sc)
{
	int	i, j;
	u_int32_t max_cmd;
	struct mrsas_mfi_cmd *cmd;

	max_cmd = MRSAS_MAX_MFI_CMDS;

	/*
	 * sc->mfi_cmd_list is an array of struct mrsas_mfi_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->mfi_cmd_list = malloc(sizeof(struct mrsas_mfi_cmd *) * max_cmd,
	    M_MRSAS, M_NOWAIT);
	if (!sc->mfi_cmd_list) {
		device_printf(sc->mrsas_dev, "Cannot alloc memory for mfi_cmd cmd_list.\n");
		return (ENOMEM);
	}
	memset(sc->mfi_cmd_list, 0, sizeof(struct mrsas_mfi_cmd *) * max_cmd);
	for (i = 0; i < max_cmd; i++) {
		sc->mfi_cmd_list[i] = malloc(sizeof(struct mrsas_mfi_cmd),
		    M_MRSAS, M_NOWAIT);
		if (!sc->mfi_cmd_list[i]) {
			for (j = 0; j < i; j++)
				free(sc->mfi_cmd_list[j], M_MRSAS);
			free(sc->mfi_cmd_list, M_MRSAS);
			sc->mfi_cmd_list = NULL;
			return (ENOMEM);
		}
	}

	for (i = 0; i < max_cmd; i++) {
		cmd = sc->mfi_cmd_list[i];
		memset(cmd, 0, sizeof(struct mrsas_mfi_cmd));
		cmd->index = i;
		cmd->ccb_ptr = NULL;
		cmd->sc = sc;
		TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
	}

	/* create a frame pool and assign one frame to each command */
	if (mrsas_create_frame_pool(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate DMA frame pool.\n");
		/* Free the frames */
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, cmd);
		}
		if (sc->mficmd_frame_tag != NULL)
			bus_dma_tag_destroy(sc->mficmd_frame_tag);
		return (ENOMEM);
	}
	return (0);
}
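/*
 * Illustrative sketch, not part of this change: the comment above notes
 * that mfi_cmd_list exists only to map a frame's context value back to its
 * wrapping command. A hypothetical lookup helper would be:
 *
 *	static struct mrsas_mfi_cmd *
 *	mrsas_ctx_to_cmd(struct mrsas_softc *sc, u_int32_t context)
 *	{
 *		if (context >= MRSAS_MAX_MFI_CMDS)
 *			return (NULL);
 *		return (sc->mfi_cmd_list[context]);
 *	}
 */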
/*
 * mrsas_create_frame_pool:	Creates DMA pool for cmd frames
 * input:			Adapter soft state
 *
 * Each command packet has an embedded DMA memory buffer that is used for
 * filling the MFI frame and the SG list that immediately follows the frame.
 * This function creates those DMA memory buffers for each command packet by
 * using the PCI pool facility. pad_0 is initialized to 0 to prevent it from
 * corrupting the value of context, which could cause a FW crash.
 */
static int
mrsas_create_frame_pool(struct mrsas_softc *sc)
{
	int	i;
	struct mrsas_mfi_cmd *cmd;

	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    MRSAS_MFI_FRAME_SIZE,
	    1,
	    MRSAS_MFI_FRAME_SIZE,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->mficmd_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create MFI frame tag\n");
		return (ENOMEM);
	}
	for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
		cmd = sc->mfi_cmd_list[i];
		cmd->frame = mrsas_alloc_frame(sc, cmd);
		if (cmd->frame == NULL) {
			device_printf(sc->mrsas_dev, "Cannot alloc MFI frame memory\n");
			return (ENOMEM);
		}
		/*
		 * For MFI controllers:
		 * max_num_sge = 60
		 * max_sge_sz = 16 bytes (sizeof megasas_sge_skinny)
		 * Total 960 bytes (15 MFI frames of 64 bytes)
		 *
		 * Fusion adapters require only 3 extra frames:
		 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
		 * max_sge_sz = 12 bytes (sizeof megasas_sge64)
		 * Total 192 bytes (3 MFI frames of 64 bytes)
		 */
		memset(cmd->frame, 0, MRSAS_MFI_FRAME_SIZE);
		cmd->frame->io.context = cmd->index;
		cmd->frame->io.pad_0 = 0;
	}
	return (0);
}

/*
 * mrsas_alloc_frame:	Allocates MFI Frames
 * input:		Adapter soft state
 *
 * Allocates DMA memory for an MFI frame from the frame pool tag and loads
 * it, saving the bus address in cmd->frame_phys_addr. Returns a virtual
 * memory pointer to the allocated region.
 */
void *
mrsas_alloc_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int32_t frame_size = MRSAS_MFI_FRAME_SIZE;

	if (bus_dmamem_alloc(sc->mficmd_frame_tag, (void **)&cmd->frame_mem,
	    BUS_DMA_NOWAIT, &cmd->frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc MFI frame memory\n");
		return (NULL);
	}
	if (bus_dmamap_load(sc->mficmd_frame_tag, cmd->frame_dmamap,
	    cmd->frame_mem, frame_size, mrsas_alloc_cb,
	    &cmd->frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (NULL);
	}
	return (cmd->frame_mem);
}

/*
 * mrsas_alloc_cb:	Callback function of bus_dmamap_load()
 * input:		callback argument,
 *			machine dependent type that describes DMA segments,
 *			number of segments,
 *			error code
 *
 * This function is for the driver to receive mapping information resultant
 * of the bus_dmamap_load(). Only the first segment's address is saved,
 * through the opaque callback argument.
 */
static void
mrsas_alloc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

/*
 * mrsas_free_frame:	Frees memory for MFI frames
 * input:		Adapter soft state
 *
 * Deallocates MFI frame memory. Called from mrsas_free_mem() during detach
 * and in the error case during creation of the frame pool.
 */
void
mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	if (cmd->frame_phys_addr)
		bus_dmamap_unload(sc->mficmd_frame_tag, cmd->frame_dmamap);
	if (cmd->frame_mem != NULL)
		bus_dmamem_free(sc->mficmd_frame_tag, cmd->frame_mem,
		    cmd->frame_dmamap);
}
diff --git a/sys/dev/mrsas/mrsas_ioctl.h b/sys/dev/mrsas/mrsas_ioctl.h
index 07ee211714cc..3851de710b5a 100644
--- a/sys/dev/mrsas/mrsas_ioctl.h
+++ b/sys/dev/mrsas/mrsas_ioctl.h
@@ -1,124 +1,133 @@
/*
 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
 * Support: freebsdraid@avagotech.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer. 2. Redistributions
 * in binary form must reproduce the above copyright notice, this list of
 * conditions and the following disclaimer in the documentation and/or other
 * materials provided with the distribution. 3. Neither the name of the
 * nor the names of its contributors may be used to endorse or
 * promote products derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing
 * official policies,either expressed or implied, of the FreeBSD Project.
 *
 * Send feedback to: Mail to: AVAGO TECHNOLOGIES, 1621
 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
 *
 */

#include
__FBSDID("$FreeBSD$");

#ifndef MRSAS_IOCTL_H
#define MRSAS_IOCTL_H

#ifndef _IOWR
#include
#endif					/* !_IOWR */

#ifdef COMPAT_FREEBSD32
/* Compilation error FIX */
#include
#include
#endif

/*
 * We need to use the same values as the mfi driver until MegaCli adds
 * support for this (mrsas) driver:
 *
 *	'M'			is for MegaRAID (typically the vendor or
 *				product initial).
 *	1			is arbitrary; it may be used to segment kinds
 *				of commands (1-9 status, 10-20 policy, etc.).
 *	struct mrsas_iocpacket	supplies the size (sizeof() of this parameter
 *				will be used).
 *
 * These three values are encoded into a somewhat unique, 32-bit value.
 */
#define MRSAS_IOC_GET_PCI_INFO			_IOR('M', 7, MRSAS_DRV_PCI_INFORMATION)
#define MRSAS_IOC_FIRMWARE_PASS_THROUGH64	_IOWR('M', 1, struct mrsas_iocpacket)
#ifdef COMPAT_FREEBSD32
#define MRSAS_IOC_FIRMWARE_PASS_THROUGH32	_IOWR('M', 1, struct mrsas_iocpacket32)
#endif

#define MRSAS_IOC_SCAN_BUS		_IO('M', 10)

#define MRSAS_LINUX_CMD32		0xc1144d01

#define MAX_IOCTL_SGE			16
#define MFI_FRAME_DIR_READ		0x0010
#define MFI_CMD_LD_SCSI_IO		0x03

#define INQUIRY_CMD			0x12
#define INQUIRY_CMDLEN			6
#define INQUIRY_REPLY_LEN		96
#define INQUIRY_VENDOR			8	/* Offset in reply data to vendor name */
#define SCSI_SENSE_BUFFERSIZE		96

#define MEGAMFI_RAW_FRAME_SIZE		128

#pragma pack(1)
struct mrsas_iocpacket {
	u_int16_t host_no;
	u_int16_t __pad1;
	u_int32_t sgl_off;
	u_int32_t sge_count;
	u_int32_t sense_off;
	u_int32_t sense_len;
	union {
		u_int8_t raw[MEGAMFI_RAW_FRAME_SIZE];
		struct mrsas_header hdr;
	} frame;
	struct iovec sgl[MAX_IOCTL_SGE];
};
#pragma pack()

#ifdef COMPAT_FREEBSD32
#pragma pack(1)
struct mrsas_iocpacket32 {
	u_int16_t host_no;
	u_int16_t __pad1;
	u_int32_t sgl_off;
	u_int32_t sge_count;
	u_int32_t sense_off;
	u_int32_t sense_len;
	union {
		u_int8_t raw[MEGAMFI_RAW_FRAME_SIZE];
		struct mrsas_header hdr;
	} frame;
	struct iovec32 sgl[MAX_IOCTL_SGE];
};
#pragma pack()
#endif					/* COMPAT_FREEBSD32 */

+struct mfi_ioc_passthru {
+	struct mrsas_dcmd_frame ioc_frame;
+	uint32_t pad_skinny_flag;
+	uint32_t buf_size;
+	uint8_t *buf;
+} __packed;
+
+#define MFIIO_PASSTHRU		_IOWR('C', 102, struct mfi_ioc_passthru)
+
#endif					/* MRSAS_IOCTL_H */
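A minimal sketch of how the ioctl entry point is expected to route the new
request (the corresponding mrsas_ioctl() hunk is not shown above, so the
case shape below is an assumption based on the prototypes in this patch):

	case MFIIO_PASSTHRU:
		ret = mrsas_user_command(sc, (struct mfi_ioc_passthru *)arg);
		break;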