D41550.diff
diff --git a/share/man/man4/smartpqi.4 b/share/man/man4/smartpqi.4
--- a/share/man/man4/smartpqi.4
+++ b/share/man/man4/smartpqi.4
@@ -1,5 +1,7 @@
-.\" Copyright (c) 2018 Murthy Bhat
-.\" All rights reserved.
+.\" Copyright (C) 2019-2023, Microchip Technology Inc. and its subsidiaries
+.\" Copyright (C) 2016-2018, Microsemi Corporation
+.\" Copyright (C) 2016, PMC-Sierra, Inc.
+.\" Written by John Hall <john.hall@microchip.com>
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
@@ -22,25 +24,23 @@
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD$ stable/10/share/man/man4/smartpqi.4 195614 2017-01-11 08:10:18Z jkim $
-.Dd April 6, 2018
+.\" $Id$
+.Dd $Mdocdate$
.Dt SMARTPQI 4
.Os
.Sh NAME
.Nm smartpqi
-.Nd Microsemi smartpqi SCSI driver for PQI controllers
+.Nd "Microchip Smart Storage SCSI driver"
.Sh SYNOPSIS
-To compile this driver into the kernel,
-place the following lines in your
-kernel configuration file:
+To compile this driver into the kernel, place these lines in the kernel
+configuration file:
.Bd -ragged -offset indent
.Cd device pci
.Cd device scbus
.Cd device smartpqi
.Ed
.Pp
-Alternatively, to load the driver as a
-module at boot time, place the following line in
+The driver can be loaded as a module at boot time by placing this line in
.Xr loader.conf 5 :
.Bd -literal -offset indent
smartpqi_load="YES"
@@ -48,36 +48,33 @@
.Sh DESCRIPTION
The
.Nm
-SCSI driver provides support for the new generation of PQI controllers from
-Microsemi.
-The
-.Nm
-driver is the first SCSI driver to implement the PQI queuing model.
-.Pp
-The
-.Nm
-driver will replace the aacraid driver for Adaptec Series 9 controllers.
-.Pp
-The
-.Pa /dev/smartpqi?
-device nodes provide access to the management interface of the controller.
-One node exists per installed card.
+driver provides support for Microchip Technology Inc. / Adaptec SmartRaid and
+SmartHBA SATA/SAS/NVME PCIe controllers.
.Sh HARDWARE
Controllers supported by the
.Nm
-driver include:
+driver include, but are not limited to:
.Pp
.Bl -bullet -compact
.It
HPE Gen10 Smart Array Controller Family
.It
-OEM Controllers based on the Microsemi Chipset
+Adaptec SmartRaid and SmartHBA Controllers
+.It
+OEM Controllers based on the Microchip Technology Inc. SmartROC
+and SmartIOC Chipsets
.El
.Sh FILES
-.Bl -tag -width /boot/kernel/aac.ko -compact
+.Bl -tag -width /boot/kernel/smartpqi.ko -compact
.It Pa /dev/smartpqi?
smartpqi management interface
.El
+.Sh NOTES
+.Ss Configuration
+To configure a Microchip Smart Storage controller,
+refer to the User Guide for the controller,
+which can be found by searching for the specific controller at
+https://www.microchip.com/design-centers/storage
.Sh SEE ALSO
.Xr kld 4 ,
.Xr linux 4 ,
@@ -87,17 +84,13 @@
.Xr loader.conf 5 ,
.Xr camcontrol 8 ,
.Xr kldload 8
-.Rs
-.%T "Microsemi Website"
-.%U https://www.microsemi.com/
-.Re
.Sh HISTORY
The
.Nm
driver first appeared in
.Fx 11.1 .
.Sh AUTHORS
-.An Murthy Bhat
-.Aq murthy.bhat@microsemi.com
+.An John Hall
+.Aq john.hall@microchip.com
.Sh BUGS
The controller is not actually paused on suspend/resume.
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -372,6 +372,7 @@
dev/smartpqi/smartpqi_cmd.c optional smartpqi
dev/smartpqi/smartpqi_discovery.c optional smartpqi
dev/smartpqi/smartpqi_event.c optional smartpqi
+dev/smartpqi/smartpqi_features.c optional smartpqi
dev/smartpqi/smartpqi_helper.c optional smartpqi
dev/smartpqi/smartpqi_init.c optional smartpqi
dev/smartpqi/smartpqi_intr.c optional smartpqi
diff --git a/sys/dev/smartpqi/smartpqi_cam.c b/sys/dev/smartpqi/smartpqi_cam.c
--- a/sys/dev/smartpqi/smartpqi_cam.c
+++ b/sys/dev/smartpqi/smartpqi_cam.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -49,12 +49,15 @@
cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
cpi->hba_eng_cnt = 0;
cpi->max_lun = PQI_MAX_MULTILUN;
- cpi->max_target = 1088;
+ cpi->max_target = MAX_TARGET_DEVICES;
cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE;
cpi->initiator_id = 255;
- strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
- strlcpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
- strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
+ strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN-1);
+ cpi->sim_vid[sizeof(cpi->sim_vid)-1] = '\0';
+ strncpy(cpi->hba_vid, "Microsemi", HBA_IDLEN-1);
+ cpi->hba_vid[sizeof(cpi->hba_vid)-1] = '\0';
+ strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN-1);
+ cpi->dev_name[sizeof(cpi->dev_name)-1] = '\0';
cpi->unit_number = cam_sim_unit(sim);
cpi->bus_id = cam_sim_bus(sim);
cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */
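
The hunk above trades strlcpy() for strncpy() plus an explicit terminator. A
minimal sketch of that pattern in isolation (the 16-byte buffer is a stand-in
for a SIM_IDLEN-sized field): strncpy() does not NUL-terminate when the source
fills the destination, so the last byte is cleared by hand.

    #include <string.h>

    char sim_vid[16];                                  /* stand-in buffer */
    strncpy(sim_vid, "FreeBSD", sizeof(sim_vid) - 1);  /* copy at most n-1 bytes */
    sim_vid[sizeof(sim_vid) - 1] = '\0';               /* guarantee termination */
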
@@ -73,7 +76,7 @@
}
/*
- * Get transport settings of the smartpqi adapter
+ * Get transport settings of the smartpqi adapter.
*/
static void
get_transport_settings(struct pqisrc_softstate *softs,
@@ -84,7 +87,7 @@
struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
DBG_FUNC("IN\n");
-
+
cts->protocol = PROTO_SCSI;
cts->protocol_version = SCSI_REV_SPC4;
cts->transport = XPORT_SPI;
@@ -106,10 +109,12 @@
os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
union ccb *ccb;
+ uint64_t lun;
DBG_FUNC("IN\n");
- if(softs->os_specific.sim_registered) {
+ lun = (device->is_multi_lun) ? CAM_LUN_WILDCARD : device->lun;
+ if(softs->os_specific.sim_registered) {
if ((ccb = xpt_alloc_ccb_nowait()) == NULL) {
DBG_ERR("rescan failed (can't allocate CCB)\n");
return;
@@ -117,7 +122,7 @@
if (xpt_create_path(&ccb->ccb_h.path, NULL,
cam_sim_path(softs->os_specific.sim),
- device->target, device->lun) != CAM_REQ_CMP) {
+ device->target, lun) != CAM_REQ_CMP) {
DBG_ERR("rescan failed (can't create path)\n");
xpt_free_ccb(ccb);
return;
@@ -134,20 +139,25 @@
void
os_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
- struct cam_path *tmppath;
+ struct cam_path *tmppath = NULL;
+ uint64_t lun;
DBG_FUNC("IN\n");
-
+
+ lun = (device->is_multi_lun) ? CAM_LUN_WILDCARD : device->lun;
if(softs->os_specific.sim_registered) {
- if (xpt_create_path(&tmppath, NULL,
+ if (xpt_create_path(&tmppath, NULL,
cam_sim_path(softs->os_specific.sim),
- device->target, device->lun) != CAM_REQ_CMP) {
- DBG_ERR("unable to create path for async event");
+ device->target, lun) != CAM_REQ_CMP) {
+ DBG_ERR("unable to create path for async event\n");
return;
}
xpt_async(AC_LOST_DEVICE, tmppath, NULL);
xpt_free_path(tmppath);
- softs->device_list[device->target][device->lun] = NULL;
+ /* softs->device_list[device->target][device->lun] = NULL; */
+ int index = pqisrc_find_device_list_index(softs,device);
+ if (index >= 0 && index < PQI_MAX_DEVICES)
+ softs->dev_list[index] = NULL;
pqisrc_free_device(softs, device);
}
@@ -191,22 +201,20 @@
return;
if (rcb->bcount != 0 ) {
- if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
+ if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
- rcb->cm_datamap,
- BUS_DMASYNC_POSTREAD);
- if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
+ rcb->cm_datamap,BUS_DMASYNC_POSTREAD);
+ if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
- rcb->cm_datamap,
- BUS_DMASYNC_POSTWRITE);
+ rcb->cm_datamap,BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat,
- rcb->cm_datamap);
+ rcb->cm_datamap);
}
rcb->cm_flags &= ~PQI_CMD_MAPPED;
if(rcb->sgt && rcb->nseg)
os_mem_free(rcb->softs, (void*)rcb->sgt,
- rcb->nseg*sizeof(sgt_t));
+ rcb->nseg*sizeof(sgt_t));
DBG_IO("OUT\n");
}
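
The unmap path above now derives the sync direction from the CCB's CAM_DIR
flags instead of the driver-private data_dir field it used to carry. The
completion-side pattern in isolation (dmat and map are placeholder names):

    int dir = ccb->ccb_h.flags & CAM_DIR_MASK;

    if (dir == CAM_DIR_IN)          /* device-to-host: sync before CPU reads */
            bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
    else if (dir == CAM_DIR_OUT)    /* host-to-device: close out the write */
            bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(dmat, map);   /* release the mapping either way */
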
@@ -242,6 +250,7 @@
cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
(uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;
+
if(cdb[0] == INQUIRY &&
(cdb[1] & SI_EVPD) == 0 &&
(csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
@@ -249,23 +258,31 @@
inq = (struct scsi_inquiry_data *)csio->data_ptr;
- device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun];
+ /* device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun]; */
+ int target = csio->ccb_h.target_id;
+ int lun = csio->ccb_h.target_lun;
+ int index = pqisrc_find_btl_list_index(softs,softs->bus_id,target,lun);
+ if (index != INVALID_ELEM)
+ device = softs->dev_list[index];
/* Let the disks be probed and dealt with via CAM. Only for LD
let it fall through and inquiry be tweaked */
- if (!device || !pqisrc_is_logical_device(device) ||
- (device->devtype != DISK_DEVICE) ||
+ if( !device || !pqisrc_is_logical_device(device) ||
+ (device->devtype != DISK_DEVICE) ||
pqisrc_is_external_raid_device(device)) {
return;
}
strncpy(inq->vendor, device->vendor,
- SID_VENDOR_SIZE);
+ SID_VENDOR_SIZE-1);
+ inq->vendor[sizeof(inq->vendor)-1] = '\0';
strncpy(inq->product,
pqisrc_raidlevel_to_string(device->raid_level),
- SID_PRODUCT_SIZE);
+ SID_PRODUCT_SIZE-1);
+ inq->product[sizeof(inq->product)-1] = '\0';
strncpy(inq->revision, device->volume_offline?"OFF":"OK",
- SID_REVISION_SIZE);
+ SID_REVISION_SIZE-1);
+ inq->revision[sizeof(inq->revision)-1] = '\0';
}
DBG_FUNC("OUT\n");
@@ -308,7 +325,7 @@
if (csio == NULL)
panic("csio is null");
- rcb->status = REQUEST_SUCCESS;
+ rcb->status = PQI_STATUS_SUCCESS;
csio->ccb_h.status = CAM_REQ_CMP;
pqi_complete_scsi_io(csio, rcb);
@@ -383,10 +400,11 @@
uint8_t *sense_data = NULL;
if (sense_data_len)
sense_data = err_info->data;
+
copy_sense_data_to_csio(csio, sense_data, sense_data_len);
csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
- | CAM_AUTOSNS_VALID
- | CAM_REQ_CMP_ERR;
+ | CAM_AUTOSNS_VALID
+ | CAM_REQ_CMP_ERR;
}
break;
@@ -425,7 +443,7 @@
if (rcb == NULL)
panic("rcb is null");
- rcb->status = REQUEST_SUCCESS;
+ rcb->status = PQI_STATUS_SUCCESS;
csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
if (csio == NULL)
panic("csio is null");
@@ -462,7 +480,7 @@
/* Timed out TMF response comes here */
if (rcb->tm_req) {
rcb->req_pending = false;
- rcb->status = REQUEST_SUCCESS;
+ rcb->status = PQI_STATUS_SUCCESS;
DBG_ERR("AIO Disabled for TMF\n");
return;
}
@@ -484,14 +502,14 @@
case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
(err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_COMPLETE) ? "COMPLETE" : "SUCCEEDED");
- rcb->status = REQUEST_SUCCESS;
+ rcb->status = PQI_STATUS_SUCCESS;
rcb->req_pending = false;
return;
case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
(err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_REJECTED) ? "REJECTED" : "INCORRECT LUN");
- rcb->status = REQUEST_FAILED;
+ rcb->status = PQI_STATUS_TIMEOUT;
rcb->req_pending = false;
return;
default:
@@ -536,8 +554,9 @@
pqisrc_softstate_t *softs = rcb->softs;
union ccb *ccb;
- if (error || nseg > softs->pqi_cap.max_sg_elem) {
- DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n",
+ if (error || nseg > softs->pqi_cap.max_sg_elem)
+ {
+ DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%u)\n",
error, nseg, softs->pqi_cap.max_sg_elem);
goto error_io;
}
@@ -556,15 +575,15 @@
rcb->sgt[i].flags = 0;
}
- if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
- bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
- rcb->cm_datamap, BUS_DMASYNC_PREREAD);
- if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
- bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
- rcb->cm_datamap, BUS_DMASYNC_PREWRITE);
+ if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
+ bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
+ rcb->cm_datamap, BUS_DMASYNC_PREREAD);
+ if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
+ bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
+ rcb->cm_datamap, BUS_DMASYNC_PREWRITE);
/* Call IO functions depending on pd or ld */
- rcb->status = REQUEST_PENDING;
+ rcb->status = PQI_STATUS_FAILURE;
error = pqisrc_build_send_io(softs, rcb);
@@ -607,7 +626,7 @@
bsd_status = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
if (bsd_status != BSD_SUCCESS && bsd_status != EINPROGRESS) {
- DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed, return status = %d transfer length = %d\n",
+ DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed, return status = %d transfer length = %u\n",
bsd_status, rcb->bcount);
return bsd_status;
}
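
The check above treats EINPROGRESS as success because, with the flags argument
0, bus_dmamap_load_ccb() is allowed to defer the mapping and invoke the
supplied callback once resources free up. A sketch of that contract (tag, map,
and callback names are placeholders):

    int rc = bus_dmamap_load_ccb(dmat, map, ccb, map_callback, arg, 0);

    if (rc != 0 && rc != EINPROGRESS)
            return (rc);    /* genuine failure */
    /* rc == 0: the callback already ran; EINPROGRESS: it will run later */
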
@@ -618,7 +637,7 @@
* busdma.
*/
/* Call IO functions depending on pd or ld */
- rcb->status = REQUEST_PENDING;
+ rcb->status = PQI_STATUS_FAILURE;
if (pqisrc_build_send_io(softs, rcb) != PQI_STATUS_SUCCESS) {
bsd_status = EIO;
@@ -695,7 +714,7 @@
return;
}
- bzero(ccb, sizeof(union ccb));
+ memset(ccb, 0, sizeof(union ccb));
xpt_setup_ccb(&ccb->ccb_h, path, 5);
ccb->ccb_h.func_code = XPT_SCAN_LUN;
ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb;
@@ -712,15 +731,17 @@
void
smartpqi_target_rescan(struct pqisrc_softstate *softs)
{
- int target = 0, lun = 0;
+ pqi_scsi_dev_t *device;
+ int index;
DBG_FUNC("IN\n");
- for(target = 0; target < PQI_MAX_DEVICES; target++){
- for(lun = 0; lun < PQI_MAX_MULTILUN; lun++){
- if(softs->device_list[target][lun]){
- smartpqi_lun_rescan(softs, target, lun);
- }
+ for(index = 0; index < PQI_MAX_DEVICES; index++){
+ /* if(softs->device_list[target][lun]){ */
+ if(softs->dev_list[index] != NULL) {
+ device = softs->dev_list[index];
+ DBG_INFO("calling smartpqi_lun_rescan with TL = %d:%d\n",device->target,device->lun);
+ smartpqi_lun_rescan(softs, device->target, device->lun);
}
}
@@ -758,7 +779,7 @@
os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
{
int tag = 0;
- pqi_scsi_dev_t *dvp = NULL;
+ pqi_scsi_dev_t *dvp = NULL;
DBG_FUNC("IN\n");
@@ -771,7 +792,6 @@
pqi_complete_scsi_io(&prcb->cm_ccb->csio, prcb);
if (dvp)
pqisrc_decrement_device_active_io(softs, dvp);
-
}
}
@@ -785,21 +805,36 @@
pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
{
rcb_t *rcb;
- uint32_t tag, no_transfer = 0;
+ uint32_t tag;
pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
cam_sim_softc(sim);
int32_t error;
pqi_scsi_dev_t *dvp;
+ int target, lun, index;
DBG_FUNC("IN\n");
- if (softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL) {
+ /* if( softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL ) { */
+ target = ccb->ccb_h.target_id;
+ lun = ccb->ccb_h.target_lun;
+ index = pqisrc_find_btl_list_index(softs,softs->bus_id,target,lun);
+
+ if (index == INVALID_ELEM) {
+ ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+ DBG_INFO("Invalid index/device!!!, Device BTL %u:%d:%d\n", softs->bus_id, target, lun);
+ return ENXIO;
+ }
+
+ if( softs->dev_list[index] == NULL ) {
ccb->ccb_h.status = CAM_DEV_NOT_THERE;
DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id);
return ENXIO;
}
- dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
+ /* DBG_INFO("starting IO on BTL = %d:%d:%d index = %d\n",softs->bus_id,target,lun,index); */
+
+ /* dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; */
+ dvp = softs->dev_list[index];
/* Check controller state */
if (IN_PQI_RESET(softs)) {
ccb->ccb_h.status = CAM_SCSI_BUS_RESET
@@ -827,7 +862,7 @@
}
tag = pqisrc_get_tag(&softs->taglist);
- if (tag == INVALID_ELEM) {
+ if( tag == INVALID_ELEM ) {
DBG_ERR("Get Tag failed\n");
xpt_freeze_simq(softs->os_specific.sim, 1);
softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
@@ -835,7 +870,7 @@
return EIO;
}
- DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist);
+ DBG_IO("tag = %u &softs->taglist : %p\n", tag, &softs->taglist);
rcb = &softs->rcb[tag];
os_reset_rcb(rcb);
@@ -844,30 +879,13 @@
rcb->cmdlen = ccb->csio.cdb_len;
ccb->ccb_h.sim_priv.entries[0].ptr = rcb;
- switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
- case CAM_DIR_IN:
- rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE;
- break;
- case CAM_DIR_OUT:
- rcb->data_dir = SOP_DATA_DIR_TO_DEVICE;
- break;
- case CAM_DIR_NONE:
- no_transfer = 1;
- break;
- default:
- DBG_ERR("Unknown Dir\n");
- break;
- }
rcb->cm_ccb = ccb;
- rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
+ /* rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; */
+ rcb->dvp = softs->dev_list[index];
+
+ rcb->cm_data = (void *)ccb->csio.data_ptr;
+ rcb->bcount = ccb->csio.dxfer_len;
- if (!no_transfer) {
- rcb->cm_data = (void *)ccb->csio.data_ptr;
- rcb->bcount = ccb->csio.dxfer_len;
- } else {
- rcb->cm_data = NULL;
- rcb->bcount = 0;
- }
/*
* Submit the request to the adapter.
*
@@ -900,7 +918,7 @@
pqi_tmf_status_to_bsd_tmf_status(int pqi_status, rcb_t *rcb)
{
if (PQI_STATUS_SUCCESS == pqi_status &&
- REQUEST_SUCCESS == rcb->status)
+ PQI_STATUS_SUCCESS == rcb->status)
return BSD_SUCCESS;
else
return EIO;
@@ -912,8 +930,8 @@
static int
pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb)
{
- struct ccb_hdr *ccb_h = &ccb->ccb_h;
rcb_t *rcb = NULL;
+ struct ccb_hdr *ccb_h = &ccb->ccb_h;
rcb_t *prcb = ccb->ccb_h.sim_priv.entries[0].ptr;
uint32_t tag;
int rval;
@@ -924,7 +942,7 @@
rcb = &softs->rcb[tag];
rcb->tag = tag;
- if (!rcb->dvp) {
+ if (rcb->dvp == NULL) {
DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
rval = ENXIO;
goto error_tmf;
@@ -963,8 +981,9 @@
tag = pqisrc_get_tag(&softs->taglist);
rcb = &softs->rcb[tag];
rcb->tag = tag;
+ rcb->cm_ccb = ccb;
- if (!rcb->dvp) {
+ if (rcb->dvp == NULL) {
DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
rval = ENXIO;
goto error_tmf;
@@ -992,24 +1011,38 @@
static int
pqisrc_target_reset( pqisrc_softstate_t *softs, union ccb *ccb)
{
+
+ /* pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; */
struct ccb_hdr *ccb_h = &ccb->ccb_h;
- pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
rcb_t *rcb = NULL;
uint32_t tag;
int rval;
+ int bus, target, lun;
+ int index;
+
DBG_FUNC("IN\n");
+ bus = softs->bus_id;
+ target = ccb->ccb_h.target_id;
+ lun = ccb->ccb_h.target_lun;
+
+ index = pqisrc_find_btl_list_index(softs,bus,target,lun);
+ if (index == INVALID_ELEM) {
+ DBG_ERR("device not found at BTL %d:%d:%d\n",bus,target,lun);
+ return (-1);
+ }
+
+ pqi_scsi_dev_t *devp = softs->dev_list[index];
if (devp == NULL) {
DBG_ERR("bad target %d, tmf type : 0x%x\n", ccb_h->target_id, ccb_h->func_code);
- return ENXIO;
+ return (-1);
}
tag = pqisrc_get_tag(&softs->taglist);
rcb = &softs->rcb[tag];
rcb->tag = tag;
-
- devp->reset_in_progress = true;
+ rcb->cm_ccb = ccb;
rcb->tm_req = true;
@@ -1017,6 +1050,7 @@
SOP_TASK_MANAGEMENT_LUN_RESET);
rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb);
+
devp->reset_in_progress = false;
os_reset_rcb(rcb);
@@ -1137,9 +1171,9 @@
{
struct ccb_relsim crs;
- DBG_INFO("IN\n");
+ DBG_FUNC("IN\n");
- memset(&crs, 0, sizeof(crs));
+ memset(&crs, 0, sizeof(struct ccb_relsim));
xpt_setup_ccb(&crs.ccb_h, path, 5);
crs.ccb_h.func_code = XPT_REL_SIMQ;
crs.ccb_h.flags = CAM_DEV_QFREEZE;
@@ -1150,7 +1184,7 @@
printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status);
}
- DBG_INFO("OUT\n");
+ DBG_FUNC("OUT\n");
}
/*
@@ -1175,15 +1209,20 @@
}
uint32_t t_id = cgd->ccb_h.target_id;
- if (t_id <= (PQI_CTLR_INDEX - 1)) {
+ /* if (t_id <= (PQI_CTLR_INDEX - 1)) { */
+ if (t_id >= PQI_CTLR_INDEX) {
if (softs != NULL) {
- pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun];
- if (dvp == NULL) {
- DBG_ERR("Target is null, target id=%d\n", t_id);
- break;
+ /* pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun]; */
+ int lun = cgd->ccb_h.target_lun;
+ int index = pqisrc_find_btl_list_index(softs,softs->bus_id,t_id,lun);
+ if (index != INVALID_ELEM) {
+ pqi_scsi_dev_t *dvp = softs->dev_list[index];
+ if (dvp == NULL) {
+ DBG_ERR("Target is null, target id=%u\n", t_id);
+ break;
+ }
+ smartpqi_adjust_queue_depth(path, dvp->queue_depth);
}
- smartpqi_adjust_queue_depth(path,
- dvp->queue_depth);
}
}
break;
@@ -1203,7 +1242,7 @@
{
int max_transactions;
union ccb *ccb = NULL;
- int error;
+ cam_status status = 0;
struct ccb_setasync csa;
struct cam_sim *sim;
@@ -1230,9 +1269,9 @@
softs->os_specific.sim = sim;
mtx_lock(&softs->os_specific.cam_lock);
- error = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0);
- if (error != CAM_SUCCESS) {
- DBG_ERR("xpt_bus_register failed errno %d\n", error);
+ status = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0);
+ if (status != CAM_SUCCESS) {
+ DBG_ERR("xpt_bus_register failed status=%d\n", status);
cam_sim_free(softs->os_specific.sim, FALSE);
cam_simq_free(softs->os_specific.devq);
mtx_unlock(&softs->os_specific.cam_lock);
@@ -1258,11 +1297,11 @@
return ENXIO;
}
/*
- * Callback to set the queue depth per target which is
+ * Callback to set the queue depth per target which is
* derived from the FW.
- */
+ */
softs->os_specific.path = ccb->ccb_h.path;
- memset(&csa, 0, sizeof(csa));
+ memset(&csa, 0, sizeof(struct ccb_setasync));
xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
csa.ccb_h.func_code = XPT_SASYNC_CB;
csa.event_enable = AC_FOUND_DEVICE;
@@ -1270,12 +1309,12 @@
csa.callback_arg = softs;
xpt_action((union ccb *)&csa);
if (csa.ccb_h.status != CAM_REQ_CMP) {
- DBG_ERR("Unable to register smartpqi_aysnc handler: %d!\n",
+ DBG_ERR("Unable to register smartpqi_aysnc handler: %d!\n",
csa.ccb_h.status);
}
mtx_unlock(&softs->os_specific.cam_lock);
- DBG_INFO("OUT\n");
+ DBG_FUNC("OUT\n");
return BSD_SUCCESS;
}
@@ -1287,15 +1326,14 @@
deregister_sim(struct pqisrc_softstate *softs)
{
struct ccb_setasync csa;
-
+
DBG_FUNC("IN\n");
if (softs->os_specific.mtx_init) {
mtx_lock(&softs->os_specific.cam_lock);
}
-
- memset(&csa, 0, sizeof(csa));
+ memset(&csa, 0, sizeof(struct ccb_setasync));
xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
csa.ccb_h.func_code = XPT_SASYNC_CB;
csa.event_enable = 0;
@@ -1331,23 +1369,23 @@
void
os_rescan_target(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
- struct cam_path *tmppath;
+ struct cam_path *tmppath = NULL;
- DBG_FUNC("IN\n");
+ DBG_FUNC("IN\n");
- if(softs->os_specific.sim_registered) {
- if (xpt_create_path(&tmppath, NULL,
- cam_sim_path(softs->os_specific.sim),
- device->target, device->lun) != CAM_REQ_CMP) {
- DBG_ERR("unable to create path for async event!!! Bus: %d Target: %d Lun: %d\n",
- device->bus, device->target, device->lun);
- return;
- }
- xpt_async(AC_INQ_CHANGED, tmppath, NULL);
- xpt_free_path(tmppath);
- }
+ if(softs->os_specific.sim_registered) {
+ if (xpt_create_path(&tmppath, NULL,
+ cam_sim_path(softs->os_specific.sim),
+ device->target, device->lun) != CAM_REQ_CMP) {
+ DBG_ERR("unable to create path for async event!!! Bus: %d Target: %d Lun: %d\n",
+ device->bus, device->target, device->lun);
+ return;
+ }
+ xpt_async(AC_INQ_CHANGED, tmppath, NULL);
+ xpt_free_path(tmppath);
+ }
- device->scsi_rescan = false;
+ device->scsi_rescan = false;
- DBG_FUNC("OUT\n");
+ DBG_FUNC("OUT\n");
}
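
Throughout this file the 2-D softs->device_list[target][lun] table is replaced
by a flat softs->dev_list[] array plus lookup helpers whose bodies are not part
of this diff. A minimal model of the consumer-side pattern, assuming the helper
returns INVALID_ELEM when no slot matches the B:T:L address:

    static pqi_scsi_dev_t *
    lookup_device(pqisrc_softstate_t *softs, int bus, int target, int lun)
    {
            int index = pqisrc_find_btl_list_index(softs, bus, target, lun);

            if (index == INVALID_ELEM || index >= PQI_MAX_DEVICES)
                    return (NULL);          /* no device at this B:T:L */
            return (softs->dev_list[index]);
    }
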
diff --git a/sys/dev/smartpqi/smartpqi_cmd.c b/sys/dev/smartpqi/smartpqi_cmd.c
--- a/sys/dev/smartpqi/smartpqi_cmd.c
+++ b/sys/dev/smartpqi/smartpqi_cmd.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -36,10 +36,14 @@
char *slot = NULL;
uint32_t offset;
iu_header_t *hdr = (iu_header_t *)req;
+	/* TODO: Could a fixed-size copy of the IU be used here? */
uint32_t iu_len = hdr->iu_length + 4 ; /* header size */
int i = 0;
DBG_FUNC("IN\n");
+ /* The code below assumes we only take 1 element (no spanning) */
+ ASSERT(iu_len <= ib_q->elem_size);
+
PQI_LOCK(&ib_q->lock);
/* Check queue full */
@@ -55,15 +59,15 @@
/* Copy the IU */
memcpy(slot, req, iu_len);
- DBG_INFO("IU : \n");
+ DBG_IO("IU : \n");
for(i = 0; i< iu_len; i++)
- DBG_INFO(" IU [ %d ] : %x\n", i, *((unsigned char *)(slot + i)));
+ DBG_IO(" IU [ %d ] : %x\n", i, *((unsigned char *)(slot + i)));
/* Update the local PI */
ib_q->pi_local = (ib_q->pi_local + 1) % ib_q->num_elem;
- DBG_INFO("ib_q->pi_local : %x IU size : %d\n",
+ DBG_IO("ib_q->pi_local : %x IU size : %d\n",
ib_q->pi_local, hdr->iu_length);
- DBG_INFO("*ib_q->ci_virt_addr: %x\n",
+ DBG_IO("*ib_q->ci_virt_addr: %x\n",
*(ib_q->ci_virt_addr));
/* Inform the fw about the new IU */
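
The submit path copies one IU per queue element and advances a local producer
index, and the new ASSERT makes the no-spanning assumption explicit. A
self-contained toy model of that ring discipline (the struct and names are
illustrative, not the driver's ib_queue type):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    struct toy_ibq {
            char     *base;         /* element array */
            uint32_t  elem_size;    /* bytes per element */
            uint32_t  num_elem;     /* ring capacity */
            uint32_t  pi_local;     /* local producer index */
    };

    static void
    toy_ibq_submit(struct toy_ibq *q, const void *iu, uint32_t iu_len)
    {
            assert(iu_len <= q->elem_size); /* one IU fits one element */
            memcpy(q->base + (size_t)q->pi_local * q->elem_size, iu, iu_len);
            q->pi_local = (q->pi_local + 1) % q->num_elem;
            /* the driver then writes pi_local to the PI register */
    }
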
diff --git a/sys/dev/smartpqi/smartpqi_defines.h b/sys/dev/smartpqi/smartpqi_defines.h
--- a/sys/dev/smartpqi/smartpqi_defines.h
+++ b/sys/dev/smartpqi/smartpqi_defines.h
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,34 +27,65 @@
#ifndef _PQI_DEFINES_H
#define _PQI_DEFINES_H
-#define PQI_STATUS_FAILURE -1
-#define PQI_STATUS_TIMEOUT -2
-#define PQI_STATUS_QFULL -3
-#define PQI_STATUS_SUCCESS 0
+#define SIS_POLL_WAIT
+#define DEVICE_HINT
+
+#ifndef CT_ASSERT
+/* If the OS hasn't specified a preferred compile time assert, create one */
+#if !defined(__C_ASSERT__)
+ #define CT_ASSERT(e) extern char __assert_test_case[1 - (2*(!(e)))]
+#else
+ #define CT_ASSERT(e) typedef char __C_ASSERT__[(e)?1:-1]
+#endif
+#endif
+#define PQI_STATUS_FAILURE -1
+#define PQI_STATUS_TIMEOUT -2
+#define PQI_STATUS_QFULL -3
+#define PQI_STATUS_SUCCESS 0
+
+#define BITS_PER_BYTE 8
+#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0
+#define PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE 1
+#define PQI_REQUEST_HEADER_LENGTH 4
/* Maximum timeout for internal command completion */
-#define TIMEOUT_INFINITE ((uint32_t) (-1))
-#define PQISRC_CMD_TIMEOUT TIMEOUT_INFINITE
+#define TIMEOUT_INFINITE ((uint32_t) (-1))
+#define PQISRC_CMD_TIMEOUT TIMEOUT_INFINITE
#define PQISRC_PASSTHROUGH_CMD_TIMEOUT PQISRC_CMD_TIMEOUT
/* Delay in milli seconds */
-#define PQISRC_TMF_TIMEOUT (OS_TMF_TIMEOUT_SEC * 1000)
+#define PQISRC_TMF_TIMEOUT (OS_TMF_TIMEOUT_SEC * 1000)
/* Delay in micro seconds */
-#define PQISRC_PENDING_IO_TIMEOUT_USEC 30000000 /* 30 seconds */
+#define PQISRC_PENDING_IO_TIMEOUT_USEC 30000000 /* 30 seconds */
/* If want to disable atomic operations on device active io, then set to zero */
-#define PQISRC_DEVICE_IO_COUNTER 1
+#define PQISRC_DEVICE_IO_COUNTER 1
+
+/* #define SHARE_EVENT_QUEUE_FOR_IO 1 */
-#define INVALID_ELEM 0xffff
+#define INVALID_ELEM 0xffff
#ifndef MIN
-#define MIN(a,b) ((a) < (b) ? (a) : (b))
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif
#ifndef MAX
-#define MAX(a,b) ((a) > (b) ? (a) : (b))
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+#endif
+
+/* defines for stream detection */
+#define TICKS ticks
+
+#ifndef INT_MAX
+#define INT_MAX 0x7FFFFFFF
#endif
-#define PQISRC_ROUNDUP(x, y) (((x) + (y) - 1) / (y) * (y))
-#define PQISRC_DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
+#define PQISRC_ROUND_UP(x, y) (((x) + (y) - 1) / (y) * (y))
+#define PQISRC_ROUND_DOWN(x, y) (((x) / (y)) * (y))
+#define PQISRC_DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
+
+#if !defined(offsetofend)
+#define offsetofend(TYPE, MEMBER) \
+ (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
+#endif
#define ALIGN_BOUNDARY(a, n) { \
if (a % n) \
@@ -98,7 +129,7 @@
LOCK_SLEEP
};
-#define LOCKNAME_SIZE 32
+#define LOCKNAME_SIZE 32
#define INTR_TYPE_NONE 0x0
#define INTR_TYPE_FIXED 0x1
@@ -108,17 +139,12 @@
#define SIS_ENABLE_INTX 0x80
#define PQISRC_LEGACY_INTX_MASK 0x1
-#define DMA_TO_VIRT(mem) ((mem)->virt_addr)
-#define DMA_PHYS_LOW(mem) (((mem)->dma_addr) & 0x00000000ffffffff)
-#define DMA_PHYS_HIGH(mem) ((((mem)->dma_addr) & 0xffffffff00000000) >> 32)
-
+#define DMA_TO_VIRT(mem) ((mem)->virt_addr)
+#define DMA_PHYS_LOW(mem) (((mem)->dma_addr) & 0x00000000ffffffff)
+#define DMA_PHYS_HIGH(mem) ((((mem)->dma_addr) & 0xffffffff00000000) >> 32)
-typedef enum REQUEST_STATUS {
- REQUEST_SUCCESS = 0,
- REQUEST_PENDING = -1,
- REQUEST_FAILED = -2,
-}REQUEST_STATUS_T;
typedef enum IO_PATH {
+ UNKNOWN_PATH,
AIO_PATH,
RAID_PATH
}IO_PATH_T;
@@ -179,44 +205,84 @@
#define PQI_CTRL_KERNEL_UP_AND_RUNNING 0x80
#define PQI_CTRL_KERNEL_PANIC 0x100
-#define SIS_CTL_TO_HOST_DB_DISABLE_ALL 0xFFFFFFFF
-#define SIS_CTL_TO_HOST_DB_CLEAR 0x00001000
-#define SIS_CMD_SUBMIT 0x00000200 /* Bit 9 */
-#define SIS_CMD_COMPLETE 0x00001000 /* Bit 12 */
+#define SIS_CTL_TO_HOST_DB_DISABLE_ALL 0xFFFFFFFF
+#define SIS_CTL_TO_HOST_DB_CLEAR 0x00001000
+#define SIS_CMD_SUBMIT 0x00000200 /* Bit 9 */
+#define SIS_CMD_COMPLETE 0x00001000 /* Bit 12 */
#define SIS_CMD_STATUS_SUCCESS 0x1
/* PQI specific */
/* defines */
-#define PQISRC_PQI_REG_OFFSET 0x4000
-#define PQISRC_MAX_OUTSTANDING_REQ 4096
-#define PQISRC_MAX_ADMIN_IB_QUEUE_ELEM_NUM 16
-#define PQISRC_MAX_ADMIN_OB_QUEUE_ELEM_NUM 16
-
-
+#define PQISRC_PQI_REG_OFFSET 0x4000
-#define PQI_MIN_OP_IB_QUEUE_ID 1
-#define PQI_OP_EVENT_QUEUE_ID 1
-#define PQI_MIN_OP_OB_QUEUE_ID 2
-#define PQISRC_MAX_SUPPORTED_OP_IB_Q 128
+/* Number of queues this driver build can potentially support */
+#define PQISRC_MAX_SUPPORTED_OP_IB_Q 128
#define PQISRC_MAX_SUPPORTED_OP_RAID_IB_Q (PQISRC_MAX_SUPPORTED_OP_IB_Q / 2)
#define PQISRC_MAX_SUPPORTED_OP_AIO_IB_Q (PQISRC_MAX_SUPPORTED_OP_RAID_IB_Q)
-#define PQISRC_MAX_OP_IB_QUEUE_ELEM_NUM (PQISRC_MAX_OUTSTANDING_REQ / PQISRC_MAX_SUPPORTED_OP_IB_Q)
-#define PQISRC_MAX_OP_OB_QUEUE_ELEM_NUM PQISRC_MAX_OUTSTANDING_REQ
-#define PQISRC_MIN_OP_OB_QUEUE_ELEM_NUM 2
-#define PQISRC_MAX_SUPPORTED_OP_OB_Q 64
-#define PQISRC_OP_MAX_IBQ_ELEM_SIZE 8 /* 8 * 16 = 128 bytes */
-#define PQISRC_OP_MIN_IBQ_ELEM_SIZE 2 /* 2 * 16 = 32 bytes */
-#define PQISRC_OP_OBQ_ELEM_SIZE 1 /* 16 bytes */
-#define PQISRC_ADMIN_IBQ_ELEM_SIZE 2 /* 2 * 16 = 32 bytes */
-#define PQISRC_INTR_COALSC_GRAN 0
-#define PQISRC_PROTO_BIT_MASK 0
-#define PQISRC_SGL_SUPPORTED_BIT_MASK 0
-
-#define PQISRC_NUM_EVENT_Q_ELEM 32
-#define PQISRC_EVENT_Q_ELEM_SIZE 32
+#define PQISRC_MAX_SUPPORTED_OP_OB_Q 64
+
+
+/* PQI Capability maxes (from controller) */
+#define PQISRC_MAX_ELEMENTS 8192
+#define PQISRC_OP_MIN_ELEM_SIZE 1 /* 16 bytes */
+#define PQISRC_OP_MAX_ELEM_SIZE 8 /* 8 * 16 = 128 bytes */
+#define PQISRC_MAX_SPANNING_IU_LENGTH 1152
+#define PQISRC_MAX_OUTSTANDING_REQ 4096
+/* #define PQISRC_MAX_OP_IB_QUEUE_ELEM_NUM (PQISRC_MAX_OUTSTANDING_REQ / PQISRC_MAX_SUPPORTED_OP_IB_Q) */
+/* #define PQISRC_MAX_OP_OB_QUEUE_ELEM_NUM PQISRC_MAX_OUTSTANDING_REQ */
+/* #define PQISRC_MIN_OP_OB_QUEUE_ELEM_NUM 2 */
+
+#ifdef DEVICE_HINT
+#define PQISRC_MIN_OUTSTANDING_REQ (PQI_RESERVED_IO_SLOTS_CNT + OS_MIN_OUTSTANDING_REQ)
+#endif
+
+
+/* Queue IDs Enumeration */
+#define PQI_ADMIN_IB_QUEUE_ID 0
+#define PQI_ADMIN_OB_QUEUE_ID 0
+#define PQI_MIN_OP_IB_QUEUE_ID 1
+#define PQI_OP_EVENT_QUEUE_ID 1
+#define PQI_MIN_OP_OB_QUEUE_ID 2
+
+
+/* PQI IU Element Sizes */
+#define PQISRC_ADMIN_IBQ_ELEM_SIZE_BYTES 64
+#define PQISRC_ADMIN_OBQ_ELEM_SIZE_BYTES 64
+#define PQISRC_OP_IBQ_ELEM_SIZE_BYTES 128
+#define PQISRC_OP_OBQ_ELEM_SIZE_BYTES 16
+#define PQISRC_EVENT_Q_ELEM_SIZE_BYTES 32
+
+
+/* Number of elements this driver build will potentially use */
+#define PQISRC_MAX_ADMIN_IB_QUEUE_ELEM_NUM 16
+#define PQISRC_MAX_ADMIN_OB_QUEUE_ELEM_NUM 16
+#define PQISRC_MAX_EVENT_QUEUE_ELEM_NUM 32
+#define PQISRC_MAX_SPANNING_ELEMS 9
+
+/* setting maximums for adv aio */
+#define PQISRC_MAX_AIO_RAID5_OR_6_WRITE (8*1024) /* 8 KiB */
+#define PQISRC_MAX_AIO_RAID1_OR_10_WRITE_2DRV 0x0000 /* No Limit */
+#define PQISRC_MAX_AIO_RAID1_OR_10_WRITE_3DRV 0x0000 /* No Limit */
+#define PQISRC_MAX_AIO_NVME_CRYPTO (32*1024) /* 32 KiB */
+#define PQISRC_MAX_AIO_NO_LIMIT 0x0000 /* No Limit */
+#define PQISRC_MAX_AIO_RW_XFER_SAS_SATA_CRYPTO 0x0000 /* No Limit */
+#define PQISRC_MAX_AIO_RW_XFER_NVME_CRYPTO (32*1024)
+
+#define SENSE_FEATURES_CRYPTO_OFFSET offsetof(bmic_sense_feature_page_io_aio_subpage_t, max_aio_rw_xfer_crypto_sas_sata)
+#define MINIMUM_AIO_SUBPAGE_LENGTH \
+ (offsetofend(bmic_sense_feature_page_io_aio_subpage_t, \
+ max_aio_write_raid1_10_3drv) - \
+ (sizeof(((bmic_sense_feature_page_io_aio_subpage_t *)0)->header)))
+
+/* Not used or useful yet */
+/* #define PQISRC_INTR_COALSC_GRAN 0 */
+/* #define PQISRC_PROTO_BIT_MASK 0 */
+/* #define PQISRC_SGL_SUPPORTED_BIT_MASK 0 */
+
+#define PQISRC_MAX_SUPPORTED_MIRRORS 3
/* PQI Registers state status */
#define PQI_RESET_ACTION_RESET 0x1
@@ -249,12 +315,20 @@
PQI_DEV_STATE_ADMIN_QUEUE_PAIR_READY )
#define PQISRC_PQI_DEVICE_SIGNATURE "PQI DREG"
-#define PQI_ADMINQ_ELEM_ARRAY_ALIGN 64
-#define PQI_ADMINQ_CI_PI_ALIGN 64
-#define PQI_OPQ_ELEM_ARRAY_ALIGN 64
-#define PQI_OPQ_CI_PI_ALIGN 4
-#define PQI_ADDR_ALIGN_MASK_64 0x3F /* lsb 6 bits */
-#define PQI_ADDR_ALIGN_MASK_4 0x3 /* lsb 2 bits */
+
+#define PQI_ADDR_ALIGN_MASK_4K 0xFFF/* lsb 12 bits */
+#define PQI_ADDR_ALIGN_MASK_1K 0x3FF/* lsb 10 bits */
+#define PQI_ADDR_ALIGN_MASK_64 0x3F /* lsb 6 bits */
+#define PQI_ADDR_ALIGN_MASK_4 0x3 /* lsb 2 bits */
+#define PQI_ADDR_ALIGN 4096
+#define PQI_ADDR_ALIGN_MASK PQI_ADDR_ALIGN_MASK_4K
+
+
+#define PQI_FORCE_IQ_ELEMENTS 32 /* 4096/128 = 32 (see PQISRC_OP_IBQ_ELEM_SIZE_BYTES) */
+#define PQI_FORCE_OQ_ELEMENTS 256 /* 4096/16 = 256 (see PQISRC_OP_OBQ_ELEM_SIZE_BYTES) */
+
+#define PQI_CI_PI_ALIGN 64
+#define PQI_CI_PI_ALIGN_MASK PQI_ADDR_ALIGN_MASK_64
#define PQISRC_PQIMODE_READY_TIMEOUT (30 * 1000 ) /* 30 secs */
#define PQISRC_MODE_READY_POLL_INTERVAL 1000 /* 1 msec */
@@ -271,37 +345,38 @@
/* PQI configuration table section IDs */
#define PQI_CONF_TABLE_ALL_SECTIONS (-1)
-#define PQI_CONF_TABLE_SECTION_GENERAL_INFO 0
+#define PQI_CONF_TABLE_SECTION_GENERAL_INFO 0
#define PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES 1
#define PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA 2
-#define PQI_CONF_TABLE_SECTION_DEBUG 3
-#define PQI_CONF_TABLE_SECTION_HEARTBEAT 4
-
-
-#define PQI_FIRMWARE_FEATURE_OFA 0
-#define PQI_FIRMWARE_FEATURE_SMP 1
-#define PQI_FIRMWARE_FEATURE_MAX_KNOWN 2
-#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_0 3
-#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_1_10 4
-#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_5_50 5
-#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_6_60 6
-#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_0 7
-#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_1_10 8
-#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_5_50 9
-#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_6_60 10
-#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11
-#define PQI_FIRMWARE_FEATURE_SATA_WWN_FOR_DEV_UNIQUE_ID 12
-#define PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT 13
-#define PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT 14
-#define PQI_FIRMWARE_FEATURE_MAXIMUM 14
-
-#define CTRLR_HEARTBEAT_CNT(softs) \
- LE_64(PCI_MEM_GET64(softs, softs->heartbeat_counter_abs_addr, softs->heartbeat_counter_off))
+#define PQI_CONF_TABLE_SECTION_DEBUG 3
+#define PQI_CONF_TABLE_SECTION_HEARTBEAT 4
+#define PQI_CONF_TABLE_SOFT_RESET 5
+
+/* PQI feature bits as defined in PQI_SPEC.doc */
+#define PQI_FIRMWARE_FEATURE_OFA 0
+#define PQI_FIRMWARE_FEATURE_SMP 1
+#define PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE 2
+#define PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS 3
+#define PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS 4
+#define PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS 5
+#define PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS 6
+#define PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS 7
+#define PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS 8
+#define PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS 9
+#define PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS 10
+#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11
+#define PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN 12
+#define PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT 13
+#define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT 14
+#define PQI_FIRMWARE_FEATURE_MAXIMUM 14
+#define PQI_FIRMWARE_FEATURE_PAGE83_IDENTIFIER_FOR_RPL_WWID 16
+
+#define CTRLR_HEARTBEAT_CNT(softs) LE_64(PCI_MEM_GET64(softs, softs->heartbeat_counter_abs_addr, softs->heartbeat_counter_off))
#define PQI_HEARTBEAT_TIMEOUT_SEC (10) /* 10 sec interval */
#define PQI_HOST_WELLNESS_TIMEOUT_SEC (24*3600)
/* pqi-2r00a table 36 */
-#define PQI_ADMIN_QUEUE_MSIX_DISABLE (0x80000000)
+#define PQI_ADMIN_QUEUE_MSIX_DISABLE (0x80000000)
#define PQI_ADMIN_QUEUE_MSIX_ENABLE (0 << 31)
#define PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR 0x01
@@ -326,6 +401,10 @@
#define PQI_IU_TYPE_RAID_PATH_IO_REQUEST 0x14
#define PQI_IU_TYPE_AIO_PATH_IO_REQUEST 0x15
#define PQI_REQUEST_IU_AIO_TASK_MANAGEMENT 0x16
+#define PQI_IU_TYPE_RAID5_WRITE_BYPASS_REQUEST 0x18
+#define PQI_IU_TYPE_RAID6_WRITE_BYPASS_REQUEST 0x19
+#define PQI_IU_TYPE_RAID1_WRITE_BYPASS_REQUEST 0x1A
+#define PQI_REQUEST_IU_AIO_BYPASS_TASK_MGMT 0x20
#define PQI_REQUEST_IU_GENERAL_ADMIN 0x60
#define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG 0x72
#define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG 0x73
@@ -349,13 +428,13 @@
#define PQI_FUNCTION_CHANGE_OPERATIONAL_IQ_PROP 0x14
#define PQI_CHANGE_OP_IQ_PROP_ASSIGN_AIO 1
-#define PQI_DEFAULT_IB_QUEUE 0
+#define PQI_DEFAULT_IB_QUEUE 0
#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0
-#define PQI_VENDOR_RESPONSE_IU_SUCCESS 0
-#define PQI_VENDOR_RESPONSE_IU_UNSUCCESS 1
+#define PQI_VENDOR_RESPONSE_IU_SUCCESS 0
+#define PQI_VENDOR_RESPONSE_IU_UNSUCCESS 1
#define PQI_VENDOR_RESPONSE_IU_INVALID_PARAM 2
-#define PQI_VENDOR_RESPONSE_IU_INSUFF_RESRC 3
+#define PQI_VENDOR_RESPONSE_IU_INSUFF_RESRC 3
/* Interface macros */
@@ -371,16 +450,30 @@
#define PQI_GET_CTRL_MODE(softs) \
(PCI_MEM_GET32(softs, &softs->ioa_reg->scratchpad0, LEGACY_SIS_SCR0))
-#define PQI_SAVE_CTRL_MODE(softs, mode) \
- PCI_MEM_PUT32(softs, &softs->ioa_reg->scratchpad0, LEGACY_SIS_SCR0, mode)
+#define PQI_SAVE_CTRL_MODE(softs, mode) { \
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->scratchpad0, LEGACY_SIS_SCR0, mode); \
+ OS_SLEEP(1000); \
+ }
-#define PQISRC_MAX_TARGETID 1024
-#define PQISRC_MAX_TARGETLUN 64
+#define LEGACY_SIS_SCR_REG_LENGTH 4
+#define LEGACY_SIS_SCR1 LEGACY_SIS_SCR0 + LEGACY_SIS_SCR_REG_LENGTH
+#define PQI_GET_CTRL_TYPE(softs) \
+ ((PCI_MEM_GET32(softs, &softs->ioa_reg->scratchpad1, LEGACY_SIS_SCR1)) \
+ & 0x0000FFFF)
+
+/* smart raid-hba pqi functional spec, scratchpad register 1 spec */
+#define PQI_CTRL_PRODUCT_ID_GEN1 0x0000
+#define PQI_CTRL_PRODUCT_ID_GEN2_REV_A 0x0007
+#define PQI_CTRL_PRODUCT_ID_GEN2_REV_B 0x0107
+
+#define PQISRC_MAX_TARGETID 1024
+#define PQISRC_MAX_TARGETLUN 64
/* Vendor specific IU Type for Event config Cmds */
-#define PQI_REQUEST_IU_REPORT_EVENT_CONFIG 0x72
-#define PQI_REQUEST_IU_SET_EVENT_CONFIG 0x73
-#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6
+#define PQI_REQUEST_IU_REPORT_EVENT_CONFIG 0x72
+#define PQI_REQUEST_IU_SET_EVENT_CONFIG 0x73
+#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6
+
#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81
#define PQI_MANAGEMENT_CMD_RESP_TIMEOUT 3000
@@ -409,40 +502,42 @@
/* Device flags */
-#define PQISRC_DFLAG_VALID (1 << 0)
-#define PQISRC_DFLAG_CONFIGURING (1 << 1)
+#define PQISRC_DFLAG_VALID (1 << 0)
+#define PQISRC_DFLAG_CONFIGURING (1 << 1)
-#define MAX_EMBEDDED_SG_IN_FIRST_IU 4
-#define MAX_EMBEDDED_SG_IN_IU 8
+#define MAX_EMBEDDED_SG_IN_FIRST_IU_DEFAULT 4
+#define MAX_EMBEDDED_SG_IN_FIRST_IU_RAID56_AIO 3
+#define MAX_EMBEDDED_SG_IN_IU 8
#define SG_FLAG_LAST 0x40000000
#define SG_FLAG_CHAIN 0x80000000
-#define IN_PQI_RESET(softs) (softs->ctlr_state & PQI_BUS_RESET)
-#define DEV_GONE(dev) (!dev || (dev->invalid == true))
-#define IS_AIO_PATH(dev) (dev->aio_enabled)
-#define IS_RAID_PATH(dev) (!dev->aio_enabled)
+#define IN_PQI_RESET(softs) (softs->ctlr_state & PQI_BUS_RESET)
+#define DEV_GONE(dev) (!dev || (dev->invalid == true))
+#define IS_AIO_PATH(dev) (dev->aio_enabled)
+#define IS_RAID_PATH(dev) (!dev->aio_enabled)
-#define DEVICE_RESET(dvp) (dvp->reset_in_progress)
+#define DEVICE_RESET(dvp) (dvp->reset_in_progress)
/* SOP data direction flags */
-#define SOP_DATA_DIR_NONE 0x00
-#define SOP_DATA_DIR_FROM_DEVICE 0x01
-#define SOP_DATA_DIR_TO_DEVICE 0x02
-#define SOP_DATA_DIR_BIDIRECTIONAL 0x03
-#define SOP_PARTIAL_DATA_BUFFER 0x04
+#define SOP_DATA_DIR_UNKNOWN 0xFF
+#define SOP_DATA_DIR_NONE 0x00
+#define SOP_DATA_DIR_FROM_DEVICE 0x01
+#define SOP_DATA_DIR_TO_DEVICE 0x02
+#define SOP_DATA_DIR_BIDIRECTIONAL 0x03
+#define SOP_PARTIAL_DATA_BUFFER 0x04
-#define PQISRC_DMA_VALID (1 << 0)
-#define PQISRC_CMD_NO_INTR (1 << 1)
+#define PQISRC_DMA_VALID (1 << 0)
+#define PQISRC_CMD_NO_INTR (1 << 1)
-#define SOP_TASK_ATTRIBUTE_SIMPLE 0
+#define SOP_TASK_ATTRIBUTE_SIMPLE 0
#define SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE 1
-#define SOP_TASK_ATTRIBUTE_ORDERED 2
-#define SOP_TASK_ATTRIBUTE_ACA 4
+#define SOP_TASK_ATTRIBUTE_ORDERED 2
+#define SOP_TASK_ATTRIBUTE_ACA 4
-#define SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE 0x0
-#define SOP_TASK_MANAGEMENT_FUNCTION_REJECTED 0x4
+#define SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE 0x0
+#define SOP_TASK_MANAGEMENT_FUNCTION_REJECTED 0x4
#define SOP_TASK_MANAGEMENT_FUNCTION_FAILED 0x5
-#define SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED 0x8
+#define SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED 0x8
#define SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK 0x01
#define SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET 0x02
#define SOP_TASK_MANAGEMENT_LUN_RESET 0x8
@@ -499,13 +594,15 @@
#define PQI_RAID_STATUS_UNDERRUN PQI_AIO_STATUS_UNDERRUN
#define PQI_RAID_STATUS_OVERRUN PQI_AIO_STATUS_OVERRUN
+#define NUM_STREAMS_PER_LUN 8
+
/* VPD inquiry pages */
#define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */
-#define SCSI_VPD_DEVICE_ID 0x83 /* standard page */
+#define SCSI_VPD_DEVICE_ID 0x83 /* standard page */
#define SA_VPD_PHYS_DEVICE_ID 0xc0 /* vendor-specific page */
#define SA_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */
#define SA_VPD_LV_IOACCEL_STATUS 0xc2 /* vendor-specific page */
-#define SA_VPD_LV_STATUS 0xc3 /* vendor-specific page */
+#define SA_VPD_LV_STATUS 0xc3 /* vendor-specific page */
#define VPD_PAGE (1 << 8)
@@ -555,10 +652,17 @@
#define MAX_RAW_M16_QDEPTH 2032
#define PQI_PTRAID_UPDATE_ON_RESCAN_LUNS 0x80000000
-#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
+#define RAID_CTLR_LUNID ((uint8_t *) "\0\0\0\0\0\0\0\0")
+
+/* SCSI Cmds @todo: move SCMD_READ_6, etc. into library */
+#define SCSI_INQUIRY 0x12
+#define SCSI_MODE_SENSE 0x1a
+#define SCSI_REPORT_LUNS 0xa0
+#define SCSI_LOG_SENSE 0x4d
+#define SCSI_ATA_PASSTHRU16 0x85
-#define SA_CACHE_FLUSH 0x1
#define PQISRC_INQUIRY_TIMEOUT 30
+
#define SA_INQUIRY 0x12
#define SA_REPORT_LOG 0xc2 /* Report Logical LUNs */
#define SA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
@@ -570,10 +674,8 @@
#define SCSI_SENSE_RESPONSE_72 0x72
#define SCSI_SENSE_RESPONSE_73 0x73
-#define SA_REPORT_LOG_EXTENDED 0x1
-#define SA_REPORT_PHYS_EXTENDED 0x2
-
-#define SA_CACHE_FLUSH_BUF_LEN 4
+#define SA_REPORT_LOG_EXTENDED 0x1
+#define SA_REPORT_PHYS_EXTENDED 0x2
#define GET_SCSI_SNO(cmd) (cmd->cmdId.serialNumber)
@@ -636,11 +738,14 @@
#define PQI_MAX_DEVICES (PQI_MAX_LOGICALS + PQI_MAX_PHYSICALS + 1) /* 1 for controller device entry */
#define PQI_MAX_EXT_TARGETS 32
-#define PQI_CTLR_INDEX (PQI_MAX_DEVICES - 1)
+#define PQI_CTLR_INDEX 0
#define PQI_PD_INDEX(t) (t + PQI_MAX_LOGICALS)
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define MAX_TARGET_DEVICES 1024
+#define MAX_TARGET_BIT 1025
+#define SLOT_AVAILABLE false
+#define SLOT_TAKEN true
#define PQI_NO_MEM 2
@@ -648,6 +753,7 @@
DEVICE_NOT_FOUND,
DEVICE_CHANGED,
DEVICE_UNCHANGED,
+ DEVICE_IN_REMOVE,
} device_status_t;
#define SA_RAID_0 0
@@ -666,19 +772,27 @@
#define BIT3 (1 << 3)
#define BITS_PER_BYTE 8
+
+
+/* Vendor Specific (BMIC) Op Code */
+#define BMIC_READ 0x26
+#define BMIC_WRITE 0x27
+#define IS_BMIC_OPCODE(opcode) (opcode == BMIC_READ || opcode == BMIC_WRITE)
/* BMIC commands */
-#define BMIC_IDENTIFY_CONTROLLER 0x11
+#define BMIC_IDENTIFY_CONTROLLER 0x11
#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15
-#define BMIC_READ 0x26
-#define BMIC_WRITE 0x27
+#define BMIC_SENSE_FEATURE 0x61
#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64
#define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66
-#define BMIC_CACHE_FLUSH 0xc2
-#define BMIC_FLASH_FIRMWARE 0xf7
-#define BMIC_WRITE_HOST_WELLNESS 0xa5
-#define BMIC_SET_DIAGS_OPTIONS 0xf4
-#define BMIC_SENSE_DIAGS_OPTIONS 0xf5
+#define BMIC_WRITE_HOST_WELLNESS 0xa5
+#define BMIC_CACHE_FLUSH 0xc2
+#define BMIC_SET_DIAGS_OPTIONS 0xf4
+#define BMIC_SENSE_DIAGS_OPTIONS 0xf5
+#define BMIC_FLASH_FIRMWARE 0xf7
+/* Sense Feature Pages/Subpages */
+#define IO_SENSE_FEATURES_PAGE 0x08
+#define SENSE_FEATURES_AIO_SUBPAGE 0x02
#define MASKED_DEVICE(lunid) ((lunid)[3] & 0xC0)
#define BMIC_GET_LEVEL_2_BUS(lunid) ((lunid)[7] & 0x3F)
@@ -700,9 +814,20 @@
PQI_RESERVED_IO_SLOTS_TMF + \
PQI_RESERVED_IO_SLOTS_SYNC_REQUESTS)
+/* Defines for counter flags */
+#define COUNTER_FLAG_CLEAR_COUNTS 0x0001
+#define COUNTER_FLAG_ONLY_NON_ZERO 0x0002
+
/* Defines for print flags */
-#define PRINT_FLAG_HDR_COLUMN 0x0001
+#define PRINT_FLAG_HDR_COLUMN 0x0001
+
+/* Function-specific debug flags */
+#if 0
+#define DEBUG_AIO /* show AIO eligibility, IU, etc. (very spammy!) */
+#define DEBUG_AIO_LOCATOR /* show AIO row/column etc. calc. */
+#define DEBUG_RAID_MAP /* show AIO raid map content from FW */
+#endif
static inline uint16_t GET_LE16(const uint8_t *p)
{
@@ -754,8 +879,27 @@
PUT_BE32(val, p + 4);
}
-#define OS_FREEBSD
-#define SIS_POLL_WAIT
+
+/* Calculates percentage of val vs total, i.e. 20 out of 100 --> 20% */
+static inline uint64_t CALC_PERCENT_TOTAL(uint64_t val, uint64_t total)
+{
+ uint64_t percent = 0;
+ if (total)
+ percent = (val * 100) / total;
+ return percent;
+}
+
+/* Calculates percentage of a vs b, i.e. 50 vs 100 -> 50/150 -> 33% */
+#define CALC_PERCENT_VS(a, b) (CALC_PERCENT_TOTAL(a, (a+b)))
+
+#define STREAM_DETECTION "stream_disable"
+#define SATA_UNIQUE_WWN "sata_unique_wwn_disable"
+#define AIO_RAID1_WRITE_BYPASS "aio_raid1_write_disable"
+#define AIO_RAID5_WRITE_BYPASS "aio_raid5_write_disable"
+#define AIO_RAID6_WRITE_BYPASS "aio_raid6_write_disable"
+#define ADAPTER_QUEUE_DEPTH "queue_depth"
+#define SCATTER_GATHER_COUNT "sg_count"
+#define QUEUE_COUNT "queue_count"
#define OS_ATTRIBUTE_PACKED __attribute__((__packed__))
#define OS_ATTRIBUTE_ALIGNED(n) __attribute__((aligned(n)))
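
A quick self-check of the percent helpers added above; the arithmetic is pure
integer division, so thirds truncate to 33 (assert-based illustration, not
driver code):

    #include <assert.h>

    static void
    calc_percent_selfcheck(void)
    {
            assert(CALC_PERCENT_TOTAL(20, 100) == 20); /* 20 of 100 -> 20% */
            assert(CALC_PERCENT_VS(50, 100) == 33);    /* 50/(50+100) -> 33% */
            assert(CALC_PERCENT_TOTAL(5, 0) == 0);     /* zero total guarded */
    }
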
@@ -793,19 +937,18 @@
typedef uint8_t *passthru_buf_type_t;
-
-#define PQISRC_OS_VERSION 1
-#define PQISRC_FEATURE_VERSION 4014
-#define PQISRC_PATCH_VERSION 0
-#define PQISRC_BUILD_VERSION 105
+#define PQISRC_DRIVER_MAJOR __FreeBSD__
+#define PQISRC_DRIVER_MINOR 4410
+#define PQISRC_DRIVER_RELEASE 0
+#define PQISRC_DRIVER_REVISION 2005
#define STR(s) # s
-#define PQISRC_VERSION(a, b, c, d) STR(a.b.c.d)
-#define PQISRC_DRIVER_VERSION PQISRC_VERSION(PQISRC_OS_VERSION, \
- PQISRC_FEATURE_VERSION, \
- PQISRC_PATCH_VERSION, \
- PQISRC_BUILD_VERSION)
-
+#define PQISRC_VERSION(a, b, c, d) STR(a.b.c-d)
+#define PQISRC_DRIVER_VERSION PQISRC_VERSION(PQISRC_DRIVER_MAJOR, \
+ PQISRC_DRIVER_MINOR, \
+ PQISRC_DRIVER_RELEASE, \
+ PQISRC_DRIVER_REVISION)
+
/* End Management interface */
#ifdef ASSERT
@@ -880,7 +1023,6 @@
#define LEGACY_SIS_SRCV_MAILBOX 0x1000 /* mailbox (20 bytes) */
#define LEGACY_SIS_SRCV_OFFSET_MAILBOX_7 0x101C /* mailbox 7 register offset */
-
#define LEGACY_SIS_ODR_SHIFT 12 /* outbound doorbell shift */
#define LEGACY_SIS_IDR_SHIFT 9 /* inbound doorbell shift */
@@ -892,7 +1034,7 @@
#define PQI_SIGNATURE 0x4000
#define PQI_ADMINQ_CONFIG 0x4008
#define PQI_ADMINQ_CAP 0x4010
-#define PQI_LEGACY_INTR_STATUS 0x4018
+#define PQI_LEGACY_INTR_STATUS 0x4018
#define PQI_LEGACY_INTR_MASK_SET 0x401C
#define PQI_LEGACY_INTR_MASK_CLR 0x4020
#define PQI_DEV_STATUS 0x4040
@@ -912,7 +1054,7 @@
#define OS_BUSYWAIT(x) DELAY(x)
#define OS_SLEEP(timeout) \
DELAY(timeout);
-
+
/* TMF request timeout is 600 Sec */
#define OS_TMF_TIMEOUT_SEC (10 * 60)
@@ -950,14 +1092,14 @@
typedef uint8_t os_dev_info_t;
typedef struct OS_SPECIFIC {
- device_t pqi_dev;
+ device_t pqi_dev;
struct resource *pqi_regs_res0; /* reg. if. window */
int pqi_regs_rid0; /* resource ID */
bus_dma_tag_t pqi_parent_dmat; /* parent DMA tag */
bus_dma_tag_t pqi_buffer_dmat;
/* controller hardware interface */
- int pqi_hwif;
+ int pqi_hwif;
struct resource *pqi_irq[PQI_MAX_MSIX]; /* interrupt */
int pqi_irq_rid[PQI_MAX_MSIX];
void *intrcookie[PQI_MAX_MSIX];
@@ -980,49 +1122,61 @@
struct callout heartbeat_timeout_id; /* heart beat event handling */
} OS_SPECIFIC_T;
-typedef bus_addr_t dma_addr_t;
+
+typedef struct device_hints {
+ uint8_t stream_status: 1;
+ uint8_t sata_unique_wwn_status: 1;
+ uint8_t aio_raid1_write_status: 1;
+ uint8_t aio_raid5_write_status: 1;
+ uint8_t aio_raid6_write_status: 1;
+ uint32_t queue_depth;
+ uint32_t sg_segments;
+ uint32_t cpu_count;
+} device_hint;
+
+typedef bus_addr_t dma_addr_t;
/* Register access macros */
#define PCI_MEM_GET32( _softs, _absaddr, _offset ) \
- bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
- _softs->pci_mem_handle.pqi_bhandle, _offset)
+ bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset)
#if defined(__i386__)
#define PCI_MEM_GET64( _softs, _absaddr, _offset ) ({ \
(uint64_t)bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
- _softs->pci_mem_handle.pqi_bhandle, _offset) + \
- ((uint64_t)bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
- _softs->pci_mem_handle.pqi_bhandle, _offset + 4) << 32); \
+ _softs->pci_mem_handle.pqi_bhandle, _offset) + \
+ ((uint64_t)bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset + 4) << 32); \
})
#else
#define PCI_MEM_GET64(_softs, _absaddr, _offset ) \
- bus_space_read_8(_softs->pci_mem_handle.pqi_btag, \
- _softs->pci_mem_handle.pqi_bhandle, _offset)
+ bus_space_read_8(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset)
#endif
#define PCI_MEM_PUT32( _softs, _absaddr, _offset, _val ) \
- bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \
- _softs->pci_mem_handle.pqi_bhandle, _offset, _val)
+ bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset, _val)
#if defined(__i386__)
#define PCI_MEM_PUT64( _softs, _absaddr, _offset, _val ) \
bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \
- _softs->pci_mem_handle.pqi_bhandle, _offset, _val); \
+ _softs->pci_mem_handle.pqi_bhandle, _offset, _val); \
bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \
_softs->pci_mem_handle.pqi_bhandle, _offset + 4, _val >> 32);
#else
#define PCI_MEM_PUT64( _softs, _absaddr, _offset, _val ) \
- bus_space_write_8(_softs->pci_mem_handle.pqi_btag, \
- _softs->pci_mem_handle.pqi_bhandle, _offset, _val)
+ bus_space_write_8(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset, _val)
#endif
#define PCI_MEM_GET_BUF(_softs, _absaddr, _offset, buf, size) \
bus_space_read_region_1(_softs->pci_mem_handle.pqi_btag,\
_softs->pci_mem_handle.pqi_bhandle, _offset, buf, size)
-
+
/* Lock */
typedef struct mtx OS_LOCK_T;
typedef struct sema OS_SEMA_LOCK_T;
@@ -1038,8 +1192,7 @@
#define PQI_LOCK(_lock) OS_ACQUIRE_SPINLOCK(_lock)
#define PQI_UNLOCK(_lock) OS_RELEASE_SPINLOCK(_lock)
-#define OS_GET_CDBP(rcb) \
- ((rcb->cm_ccb->ccb_h.flags & CAM_CDB_POINTER) ? rcb->cm_ccb->csio.cdb_io.cdb_ptr : rcb->cm_ccb->csio.cdb_io.cdb_bytes)
+#define OS_GET_CDBP(rcb) ((rcb->cm_ccb->ccb_h.flags & CAM_CDB_POINTER) ? rcb->cm_ccb->csio.cdb_io.cdb_ptr : rcb->cm_ccb->csio.cdb_io.cdb_bytes)
#define GET_SCSI_BUFFLEN(rcb) (rcb->cm_ccb->csio.dxfer_len)
#define IS_OS_SCSICMD(rcb) (rcb && !rcb->tm_req && rcb->cm_ccb)
@@ -1070,8 +1223,14 @@
#define SCMD_WRITE_16 WRITE_16
/* FreeBSD status macros */
-#define BSD_SUCCESS 0
+#define BSD_SUCCESS 0
+#define DEVICE_HINT_SUCCESS 0
+/* Minimum outstanding commands the driver can register with the CAM layer. */
+#define OS_MIN_OUTSTANDING_REQ 6
+#define BSD_MIN_SG_SEGMENTS 16
+
+#define DISABLE_ERR_RESP_VERBOSE 1
/* Debug facility */
@@ -1110,17 +1269,17 @@
} \
}while(0);
-#define DBG_TRACEIO(fmt,args...) \
+#define DBG_DISC(fmt,args...) \
do { \
- if (logging_level & PQISRC_FLAGS_TRACEIO) { \
- printf("[TRACEIO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \
+ if (logging_level & PQISRC_FLAGS_DISC) { \
+ printf("[DISC]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \
} \
}while(0);
-#define DBG_DISC(fmt,args...) \
+#define DBG_TRACEIO(fmt,args...) \
do { \
- if (logging_level & PQISRC_FLAGS_DISC) { \
- printf("[DISC]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \
+ if (logging_level & PQISRC_FLAGS_TRACEIO) { \
+ printf("[TRACEIO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \
} \
}while(0);
@@ -1137,6 +1296,12 @@
printf("[ERROR]::[%u:%u.%u][CPU %d][%s][%d]:"fmt,softs->bus_id,softs->device_id,softs->func_id,curcpu,__func__,__LINE__,##args); \
} \
}while(0);
+#define DBG_ERR_NO_SOFTS(fmt,args...) \
+ do { \
+ if (logging_level & PQISRC_FLAGS_ERROR) { \
+ printf("[ERROR]::[CPU %d][%s][%d]:"fmt,curcpu,__func__,__LINE__,##args); \
+ } \
+ }while(0);
#define DBG_IO(fmt,args...) \
do { \
if (logging_level & PQISRC_FLAGS_TRACEIO) { \
@@ -1147,21 +1312,21 @@
#define DBG_ERR_BTL(device,fmt,args...) \
do { \
if (logging_level & PQISRC_FLAGS_ERROR) { \
- printf("[ERROR]::[%u:%u.%u][%u,%u,%u][CPU %d][%s][%d]:"fmt, softs->bus_id, softs->device_id, softs->func_id, device->bus, device->target, device->lun,curcpu,__func__,__LINE__,##args); \
+ printf("[ERROR]::[%u:%u.%u][%d,%d,%d][CPU %d][%s][%d]:"fmt, softs->bus_id, softs->device_id, softs->func_id, device->bus, device->target, device->lun,curcpu,__func__,__LINE__,##args); \
} \
}while(0);
#define DBG_WARN_BTL(device,fmt,args...) \
do { \
if (logging_level & PQISRC_FLAGS_WARN) { \
- printf("[WARN]:[%u:%u.%u][%u,%u,%u][CPU %d][%s][%d]:"fmt, softs->bus_id, softs->device_id, softs->func_id, device->bus, device->target, device->lun,curcpu,__func__,__LINE__,##args);\
+ printf("[WARN]:[%u:%u.%u][%d,%d,%d][CPU %d][%s][%d]:"fmt, softs->bus_id, softs->device_id, softs->func_id, device->bus, device->target, device->lun,curcpu,__func__,__LINE__,##args);\
} \
}while(0);
#define DBG_NOTE(fmt,args...) \
do { \
if (logging_level & PQISRC_FLAGS_NOTE) { \
- printf("[INFO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \
+ printf("[NOTE]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \
} \
}while(0);
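
The CT_ASSERT fallback defined near the top of this header fails the build by
producing a negative array (or typedef) size whenever the predicate is false.
Illustrative uses:

    CT_ASSERT(sizeof(uint32_t) == 4);       /* true: compiles cleanly */
    /* CT_ASSERT(sizeof(uint32_t) == 8);       false: would not compile */
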
diff --git a/sys/dev/smartpqi/smartpqi_discovery.c b/sys/dev/smartpqi/smartpqi_discovery.c
--- a/sys/dev/smartpqi/smartpqi_discovery.c
+++ b/sys/dev/smartpqi/smartpqi_discovery.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,153 +26,398 @@
#include "smartpqi_includes.h"
-#define MAX_RETRIES 3
-#define PQISRC_INQUIRY_TIMEOUT 30
-
-/* Validate the scsi sense response code */
-static inline
-boolean_t pqisrc_scsi_sense_valid(const struct sense_header_scsi *sshdr)
+/*
+ * Populate the controller's advanced aio features via BMIC cmd.
+ */
+int
+pqisrc_QuerySenseFeatures(pqisrc_softstate_t *softs)
{
- DBG_FUNC("IN\n");
+ bmic_sense_feature_aio_buffer_t *features;
+ int ret;
+ pqisrc_raid_req_t request;
- if (!sshdr)
- return false;
+ /* Initializing defaults for AIO support subpage */
+ softs->max_aio_write_raid5_6 =
+ PQISRC_MAX_AIO_RAID5_OR_6_WRITE;
+ softs->max_aio_write_raid1_10_2drv =
+ PQISRC_MAX_AIO_RAID1_OR_10_WRITE_2DRV;
+ softs->max_aio_write_raid1_10_3drv =
+ PQISRC_MAX_AIO_RAID1_OR_10_WRITE_3DRV;
+ softs->max_aio_rw_xfer_crypto_nvme =
+ PQISRC_MAX_AIO_RW_XFER_NVME_CRYPTO;
+ softs->max_aio_rw_xfer_crypto_sas_sata =
+ PQISRC_MAX_AIO_RW_XFER_SAS_SATA_CRYPTO;
+
+#ifdef DEVICE_HINT
+ softs->enable_stream_detection = softs->hint.stream_status;
+#endif
- DBG_FUNC("OUT\n");
+ /* Implement SENSE_FEATURE BMIC to populate AIO limits */
+ features = os_mem_alloc(softs, sizeof(*features));
+ if (!features) {
+ DBG_ERR("Failed to allocate memory for sense aio features.\n");
+ goto err;
+ }
+ memset(features, 0, sizeof(*features));
+
+ memset(&request, 0, sizeof(request));
+ request.data_direction = SOP_DATA_DIR_TO_DEVICE;
+ request.cmd.bmic_cdb.op_code = BMIC_READ;
+ request.cmd.cdb[2] = IO_SENSE_FEATURES_PAGE;
+ request.cmd.cdb[3] = SENSE_FEATURES_AIO_SUBPAGE;
+ request.cmd.bmic_cdb.cmd = BMIC_SENSE_FEATURE;
+ request.cmd.bmic_cdb.xfer_len = BE_16(sizeof(*features));
+ ret = pqisrc_prepare_send_ctrlr_request(softs, &request,
+ features, sizeof(*features));
- return (sshdr->response_code & 0x70) == 0x70;
+ if (ret)
+ goto free_err;
+
+ /* If AIO subpage was valid, use values from that page */
+ if (features->aio_subpage.header.total_length >=
+ MINIMUM_AIO_SUBPAGE_LENGTH) {
+ DBG_INIT("AIO support subpage valid. total_length = 0x%0x.\n",
+ features->aio_subpage.header.total_length);
+ softs->adv_aio_capable = true;
+
+	/* AIO transfer limits are reported in kbytes, so multiply by 1024.
+	 * Values of 0 mean 'no limit'.
+	 */
+
+ softs->max_aio_write_raid5_6 =
+ (features->aio_subpage.max_aio_write_raid5_6 == 0) ?
+ PQISRC_MAX_AIO_NO_LIMIT :
+ features->aio_subpage.max_aio_write_raid5_6 * 1024;
+ softs->max_aio_write_raid1_10_2drv =
+ (features->aio_subpage.max_aio_write_raid1_10_2drv
+ == 0) ? PQISRC_MAX_AIO_NO_LIMIT :
+ features->aio_subpage.max_aio_write_raid1_10_2drv
+ * 1024;
+ softs->max_aio_write_raid1_10_3drv =
+ (features->aio_subpage.max_aio_write_raid1_10_3drv
+ == 0) ? PQISRC_MAX_AIO_NO_LIMIT :
+ features->aio_subpage.max_aio_write_raid1_10_3drv
+ * 1024;
+ softs->max_aio_rw_xfer_crypto_nvme =
+ (features->aio_subpage.max_aio_rw_xfer_crypto_nvme
+ == 0) ? PQISRC_MAX_AIO_NO_LIMIT :
+ features->aio_subpage.max_aio_rw_xfer_crypto_nvme
+ * 1024;
+ softs->max_aio_rw_xfer_crypto_sas_sata =
+ (features->aio_subpage.max_aio_rw_xfer_crypto_sas_sata
+ == 0) ? PQISRC_MAX_AIO_NO_LIMIT :
+ features->aio_subpage.max_aio_rw_xfer_crypto_sas_sata
+ * 1024;
+
+ DBG_INIT("softs->max_aio_write_raid5_6: 0x%x\n",
+ softs->max_aio_write_raid5_6);
+ DBG_INIT("softs->max_aio_write_raid1_10_2drv: 0x%x\n",
+ softs->max_aio_write_raid1_10_2drv);
+ DBG_INIT("softs->max_aio_write_raid1_10_3drv: 0x%x\n",
+ softs->max_aio_write_raid1_10_3drv);
+ DBG_INIT("softs->max_aio_rw_xfer_crypto_nvme: 0x%x\n",
+ softs->max_aio_rw_xfer_crypto_nvme);
+ DBG_INIT("softs->max_aio_rw_xfer_crypto_sas_sata: 0x%x\n",
+ softs->max_aio_rw_xfer_crypto_sas_sata);
+
+ } else {
+ DBG_WARN("Problem getting AIO support subpage settings. "
+ "Disabling advanced AIO writes.\n");
+ softs->adv_aio_capable = false;
+ }
+
+
+ os_mem_free(softs, features, sizeof(*features));
+ return ret;
+free_err:
+ os_mem_free(softs, features, sizeof(*features));
+err:
+ return PQI_STATUS_FAILURE;
}
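The per-field conversion above can be read in isolation: the subpage reports each limit in kilobytes, and 0 means no limit. A minimal sketch, assuming a 16-bit reported field and an illustrative PQISRC_MAX_AIO_NO_LIMIT value (the real constants live in the driver headers):

#include <stdint.h>
#include <stdio.h>

#define PQISRC_MAX_AIO_NO_LIMIT 0xffffffffu	/* assumed value, for illustration */

/* Convert a subpage limit in kbytes to bytes; 0 means "no limit". */
static uint32_t aio_limit_bytes(uint16_t reported_kb)
{
	return reported_kb ? (uint32_t)reported_kb * 1024 : PQISRC_MAX_AIO_NO_LIMIT;
}

int main(void)
{
	printf("0x%x\n", aio_limit_bytes(0));	/* no limit */
	printf("0x%x\n", aio_limit_bytes(64));	/* 65536 bytes */
	return 0;
}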
/*
- * Initialize target ID pool for HBA/PDs .
+ * Initialize target ID pool for exposed physical devices.
*/
void
-pqisrc_init_targetid_pool(pqisrc_softstate_t *softs)
+pqisrc_init_bitmap(pqisrc_softstate_t *softs)
{
- int i, tid = PQI_MAX_PHYSICALS + PQI_MAX_LOGICALS - 1;
+ memset(&softs->bit_map, SLOT_AVAILABLE, sizeof(softs->bit_map));
+}
- for(i = 0; i < PQI_MAX_PHYSICALS; i++) {
- softs->tid_pool.tid[i] = tid--;
+void
+pqisrc_remove_target_bit(pqisrc_softstate_t *softs, int target)
+{
+ if((target == PQI_CTLR_INDEX) || (target == INVALID_ELEM)) {
+ DBG_ERR("Invalid target ID\n");
+ return;
}
- softs->tid_pool.index = i - 1;
+ DBG_DISC("Giving back target %d\n", target);
+ softs->bit_map.bit_vector[target] = SLOT_AVAILABLE;
}
+/* Use bit map to find available targets */
int
-pqisrc_alloc_tid(pqisrc_softstate_t *softs)
+pqisrc_find_avail_target(pqisrc_softstate_t *softs)
{
- if(softs->tid_pool.index <= -1) {
- DBG_ERR("Target ID exhausted\n");
- return INVALID_ELEM;
+ int avail_target;
+ for(avail_target = 1; avail_target < MAX_TARGET_BIT; avail_target++) {
+ if(softs->bit_map.bit_vector[avail_target] == SLOT_AVAILABLE){
+ softs->bit_map.bit_vector[avail_target] = SLOT_TAKEN;
+ DBG_DISC("Avail_target is %d\n", avail_target);
+ return avail_target;
+ }
}
-
- return softs->tid_pool.tid[softs->tid_pool.index--];
+ DBG_ERR("No available targets\n");
+ return INVALID_ELEM;
}
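Taken together, pqisrc_init_bitmap(), pqisrc_find_avail_target() and pqisrc_remove_target_bit() implement a byte-per-slot target allocator in which slot 0 stays reserved for the controller. The same scheme in a standalone sketch, with illustrative sizes:

#include <stdio.h>
#include <string.h>

#define MAX_TARGET_BIT	64	/* illustrative pool size */
#define SLOT_AVAILABLE	0
#define SLOT_TAKEN	1
#define INVALID_ELEM	(-1)

static unsigned char bit_vector[MAX_TARGET_BIT];

/* First-fit allocation starting at 1; 0 is the controller's slot. */
static int find_avail_target(void)
{
	for (int t = 1; t < MAX_TARGET_BIT; t++) {
		if (bit_vector[t] == SLOT_AVAILABLE) {
			bit_vector[t] = SLOT_TAKEN;
			return t;
		}
	}
	return INVALID_ELEM;
}

int main(void)
{
	memset(bit_vector, SLOT_AVAILABLE, sizeof(bit_vector));
	int t = find_avail_target();		/* 1 */
	printf("allocated target %d\n", t);
	bit_vector[t] = SLOT_AVAILABLE;		/* give it back */
	return 0;
}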
-void
-pqisrc_free_tid(pqisrc_softstate_t *softs, int tid)
+/* Subroutine used to set Bus-Target-Lun for the requested device */
+static inline void
+pqisrc_set_btl(pqi_scsi_dev_t *device, int bus, int target, int lun)
{
- if(softs->tid_pool.index >= (PQI_MAX_PHYSICALS - 1)) {
- DBG_ERR("Target ID queue is full\n");
- return;
- }
+ DBG_FUNC("IN\n");
+
+ device->bus = bus;
+ device->target = target;
+ device->lun = lun;
- softs->tid_pool.index++;
- softs->tid_pool.tid[softs->tid_pool.index] = tid;
+ DBG_FUNC("OUT\n");
}
-/* Update scsi sense info to a local buffer*/
+/* Add all exposed physical devices, logical devices, controller devices,
+ * PT RAID devices and multi-lun devices */
boolean_t
-pqisrc_update_scsi_sense(const uint8_t *buff, int len,
- struct sense_header_scsi *header)
+pqisrc_add_softs_entry(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device,
+ uint8_t *scsi3addr)
{
+	/* Assign targets to exposed physical devices that
+	 * still need them */
+ int j;
+ int tid = 0;
+ unsigned char addr1[8], addr2[8];
+ pqi_scsi_dev_t *temp_device;
+
+ /* If controller device, add it to list because its lun/bus/target
+ * values are already set */
+ if(pqisrc_is_hba_lunid(scsi3addr))
+ goto add_device_to_dev_list;
+
+ /* If exposed physical device give it a target then add it
+ * to the dev list */
+ if(!pqisrc_is_logical_device(device)) {
+ tid = pqisrc_find_avail_target(softs);
+ if(INVALID_ELEM != tid){
+ pqisrc_set_btl(device, PQI_PHYSICAL_DEVICE_BUS, tid, 0);
+ goto add_device_to_dev_list;
+ }
+ }
- DBG_FUNC("IN\n");
-
- if (!buff || !len)
- return false;
+	/* If this is an external RAID device, assign a target from the
+	 * target pool. For a non-zero lun device, search the list for the
+	 * device with the same target (byte 2 of the LUN address) and
+	 * assign that same target to the new lun. */
+ if (pqisrc_is_external_raid_device(device)) {
+ memcpy(addr1, device->scsi3addr, 8);
+ for(j = 0; j < PQI_MAX_DEVICES; j++) {
+ if(softs->dev_list[j] == NULL)
+ continue;
+ temp_device = softs->dev_list[j];
+ memcpy(addr2, temp_device->scsi3addr, 8);
+ if (addr1[2] == addr2[2]) {
+ pqisrc_set_btl(device, PQI_EXTERNAL_RAID_VOLUME_BUS,
+ temp_device->target,device->scsi3addr[0]);
+ goto add_device_to_dev_list;
+ }
+ }
+ tid = pqisrc_find_avail_target(softs);
+ if(INVALID_ELEM != tid){
+ pqisrc_set_btl(device, PQI_EXTERNAL_RAID_VOLUME_BUS, tid, device->scsi3addr[0]);
+ goto add_device_to_dev_list;
+ }
+ }
- memset(header, 0, sizeof(struct sense_header_scsi));
+ /* If logical device, add it to list because its lun/bus/target
+ * values are already set */
+ if(pqisrc_is_logical_device(device) && !pqisrc_is_external_raid_device(device))
+ goto add_device_to_dev_list;
+
+ /* This is a non-zero lun of a multi-lun device.
+ * Search through our list and find the device which
+ * has the same 8 byte LUN address, except with bytes 4 and 5.
+ * Assign the same bus and target for this new LUN.
+ * Use the logical unit number from the firmware. */
+ memcpy(addr1, device->scsi3addr, 8);
+ addr1[4] = 0;
+ addr1[5] = 0;
+ for(j = 0; j < PQI_MAX_DEVICES; j++) {
+ if(softs->dev_list[j] == NULL)
+ continue;
+ temp_device = softs->dev_list[j];
+ memcpy(addr2, temp_device->scsi3addr, 8);
+ addr2[4] = 0;
+ addr2[5] = 0;
+ /* If addresses are the same, except for bytes 4 and 5
+ * then the passed-in device is an additional lun of a
+ * previously added multi-lun device. Use the same target
+ * id as that previous device. Otherwise, use the new
+ * target id */
+ if(memcmp(addr1, addr2, 8) == 0) {
+ pqisrc_set_btl(device, temp_device->bus,
+ temp_device->target, temp_device->scsi3addr[4]);
+ goto add_device_to_dev_list;
+ }
+ }
+	DBG_ERR("The device is not a physical, lun or ptraid device "
+		"B %d: T %d: L %d\n", device->bus, device->target,
+		device->lun);
+ return false;
+
+add_device_to_dev_list:
+ /* Actually add the device to the driver list
+ * softs->dev_list */
+ softs->num_devs++;
+ for(j = 0; j < PQI_MAX_DEVICES; j++) {
+ if(softs->dev_list[j])
+ continue;
+ softs->dev_list[j] = device;
+ break;
+ }
+ DBG_NOTE("Added device [%d of %d]: B %d: T %d: L %d\n",
+ j, softs->num_devs, device->bus, device->target,
+ device->lun);
+ return true;
+}
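The multi-lun branch above treats two 8-byte LUN addresses as the same device when they match with the per-lun bytes (4 and 5) masked off. Just that predicate, as a self-contained sketch with made-up addresses:

#include <stdio.h>
#include <string.h>

/* True if a and b differ at most in bytes 4 and 5 (the per-lun part). */
static int same_multilun_device(const unsigned char a[8], const unsigned char b[8])
{
	unsigned char a1[8], a2[8];

	memcpy(a1, a, 8);
	memcpy(a2, b, 8);
	a1[4] = a1[5] = 0;
	a2[4] = a2[5] = 0;
	return memcmp(a1, a2, 8) == 0;
}

int main(void)
{
	unsigned char lun0[8] = { 1, 2, 3, 4, 0, 0, 7, 8 };
	unsigned char lun2[8] = { 1, 2, 3, 4, 0, 2, 7, 8 };

	printf("%s\n", same_multilun_device(lun0, lun2) ? "same device" : "different");
	return 0;
}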
- header->response_code = (buff[0] & 0x7f);
+/* Return the index for a specific bus, target, lun within the
+ * softs dev_list (this function is specific to FreeBSD) */
+int
+pqisrc_find_btl_list_index(pqisrc_softstate_t *softs,
+ int bus, int target, int lun)
+{
- if (!pqisrc_scsi_sense_valid(header))
- return false;
+ int index;
+ pqi_scsi_dev_t *temp_device;
+ for(index = 0; index < PQI_MAX_DEVICES; index++) {
+ if(softs->dev_list[index] == NULL)
+ continue;
+ temp_device = softs->dev_list[index];
+		/* Match the devices then return the location
+		 * of that device for further use */
+ if(bus == softs->bus_id &&
+ target == temp_device->target &&
+ lun == temp_device->lun){
+ DBG_DISC("Returning device list index %d\n", index);
+ return index;
- if (header->response_code >= 0x72) {
- /* descriptor format */
- if (len > 1)
- header->sense_key = (buff[1] & 0xf);
- if (len > 2)
- header->asc = buff[2];
- if (len > 3)
- header->ascq = buff[3];
- if (len > 7)
- header->additional_length = buff[7];
- } else {
- /* fixed format */
- if (len > 2)
- header->sense_key = (buff[2] & 0xf);
- if (len > 7) {
- len = (len < (buff[7] + 8)) ?
- len : (buff[7] + 8);
- if (len > 12)
- header->asc = buff[12];
- if (len > 13)
- header->ascq = buff[13];
+ }
+ if ((temp_device->is_physical_device) && (target == temp_device->target)
+ && (temp_device->is_multi_lun)) {
+ return index;
}
}
-
- DBG_FUNC("OUT\n");
-
- return true;
+ return INVALID_ELEM;
}
-/*
- * Function used to build the internal raid request and analyze the response
- */
+/* Return the index for a specific device within the
+ * softs dev_list */
int
-pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t *request,
- void *buff, size_t datasize, uint8_t cmd, uint16_t vpd_page, uint8_t *scsi3addr,
- raid_path_error_info_elem_t *error_info)
+pqisrc_find_device_list_index(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
- uint8_t *cdb;
- int ret = PQI_STATUS_SUCCESS;
- uint32_t tag = 0;
- struct dma_mem device_mem;
- sgt_t *sgd;
+ int index;
+ pqi_scsi_dev_t *temp_device;
+ for(index = 0; index < PQI_MAX_DEVICES; index++) {
+ if(softs->dev_list[index] == NULL)
+ continue;
+ temp_device = softs->dev_list[index];
+		/* Match the devices then return the location
+		 * of that device for further use */
+ if(device->bus == temp_device->bus &&
+ device->target == temp_device->target
+ && device->lun == temp_device->lun){
+ DBG_DISC("Returning device list index %d\n", index);
+ return index;
- ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
- ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
+ }
+ }
+ return INVALID_ELEM;
+}
- rcb_t *rcb = NULL;
+/* Delete a given device from the softs dev_list */
+int
+pqisrc_delete_softs_entry(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
- DBG_FUNC("IN\n");
+ int index;
+ index = pqisrc_find_device_list_index(softs, device);
+ if (0 <= index && index < MAX_TARGET_BIT) {
+ softs->dev_list[index] = NULL;
+ softs->num_devs--;
+ DBG_NOTE("Removing device : B %d: T %d: L %d positioned at %d\n",
+ device->bus, device->target, device->lun, softs->num_devs);
+ return PQI_STATUS_SUCCESS;
+ }
+ if (index == INVALID_ELEM) {
+ DBG_NOTE("Invalid device, either it was already removed "
+ "or never added\n");
+ return PQI_STATUS_FAILURE;
+ }
+ DBG_ERR("This is a bogus device\n");
+ return PQI_STATUS_FAILURE;
+}
- memset(&device_mem, 0, sizeof(struct dma_mem));
+int
+pqisrc_simple_dma_alloc(pqisrc_softstate_t *softs, struct dma_mem *device_mem,
+ size_t datasize, sgt_t *sgd)
+{
+ int ret = PQI_STATUS_SUCCESS;
+
+ memset(device_mem, 0, sizeof(struct dma_mem));
/* for TUR datasize: 0 buff: NULL */
if (datasize) {
- device_mem.tag = "device_mem";
- device_mem.size = datasize;
- device_mem.align = PQISRC_DEFAULT_DMA_ALIGN;
- ret = os_dma_mem_alloc(softs, &device_mem);
+ os_strlcpy(device_mem->tag, "device_mem", sizeof(device_mem->tag));
+ device_mem->size = datasize;
+ device_mem->align = PQISRC_DEFAULT_DMA_ALIGN;
+
+ ret = os_dma_mem_alloc(softs, device_mem);
if (ret) {
DBG_ERR("failed to allocate dma memory for device_mem return code %d\n", ret);
return ret;
}
- sgd = (sgt_t *)&request->sg_descriptors[0];
+ ASSERT(device_mem->size == datasize);
- sgd->addr = device_mem.dma_addr;
+ sgd->addr = device_mem->dma_addr;
sgd->len = datasize;
sgd->flags = SG_FLAG_LAST;
}
+ return ret;
+}
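The helper above hands the caller a single scatter-gather element covering the whole buffer. A simplified sketch of that contract, with stand-in types for the driver's sgt_t and an assumed SG_FLAG_LAST value:

#include <stdint.h>
#include <stdio.h>

struct sg_desc { uint64_t addr; uint32_t len; uint32_t flags; };
#define SG_FLAG_LAST 0x40000000u	/* assumed flag value, for illustration */

/* One element describes the whole buffer, so it is also the last. */
static void fill_single_sg(struct sg_desc *sgd, uint64_t dma_addr, uint32_t datasize)
{
	sgd->addr  = dma_addr;		/* bus address of the DMA buffer */
	sgd->len   = datasize;
	sgd->flags = SG_FLAG_LAST;
}

int main(void)
{
	struct sg_desc sgd;

	fill_single_sg(&sgd, 0x1000, 512);
	printf("addr=0x%llx len=%u\n", (unsigned long long)sgd.addr, sgd.len);
	return 0;
}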
+
+/*
+ * Function used to build the internal raid request and analyze the response
+ */
+static int
+pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, struct dma_mem device_mem,
+ pqisrc_raid_req_t *request, void *buff,
+ size_t datasize, uint8_t cmd, uint8_t *scsi3addr,
+ raid_path_error_info_elem_t *error_info)
+{
+
+ uint32_t tag = 0;
+ int ret = PQI_STATUS_SUCCESS;
+
+ ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
+ ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
+
+ rcb_t *rcb = NULL;
+
/* Build raid path request */
request->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
@@ -183,86 +428,6 @@
request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
request->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;
- cdb = request->cdb;
-
- switch (cmd) {
- case SA_INQUIRY:
- request->data_direction = SOP_DATA_DIR_TO_DEVICE;
- cdb[0] = SA_INQUIRY;
- if (vpd_page & VPD_PAGE) {
- cdb[1] = 0x1;
- cdb[2] = (uint8_t)vpd_page;
- }
- cdb[4] = (uint8_t)datasize;
- if (softs->timeout_in_passthrough) {
- request->timeout_in_sec = PQISRC_INQUIRY_TIMEOUT;
- }
- break;
- case SA_REPORT_LOG:
- case SA_REPORT_PHYS:
- request->data_direction = SOP_DATA_DIR_TO_DEVICE;
- cdb[0] = cmd;
- if (cmd == SA_REPORT_PHYS)
- cdb[1] = SA_REPORT_PHYS_EXTENDED;
- else
- cdb[1] = SA_REPORT_LOG_EXTENDED;
- cdb[8] = (uint8_t)((datasize) >> 8);
- cdb[9] = (uint8_t)datasize;
- break;
- case PQI_LOG_EXT_QUEUE_ENABLE:
- request->data_direction = SOP_DATA_DIR_TO_DEVICE;
- cdb[0] = SA_REPORT_LOG;
- cdb[1] = (PQI_LOG_EXT_QUEUE_DEPTH_ENABLED | SA_REPORT_LOG_EXTENDED);
- cdb[8] = (uint8_t)((datasize) >> 8);
- cdb[9] = (uint8_t)datasize;
- break;
- case TEST_UNIT_READY:
- request->data_direction = SOP_DATA_DIR_NONE;
- break;
- case SA_GET_RAID_MAP:
- request->data_direction = SOP_DATA_DIR_TO_DEVICE;
- cdb[0] = SA_CISS_READ;
- cdb[1] = cmd;
- cdb[8] = (uint8_t)((datasize) >> 8);
- cdb[9] = (uint8_t)datasize;
- break;
- case SA_CACHE_FLUSH:
- request->data_direction = SOP_DATA_DIR_FROM_DEVICE;
- memcpy(device_mem.virt_addr, buff, datasize);
- cdb[0] = BMIC_WRITE;
- cdb[6] = BMIC_CACHE_FLUSH;
- cdb[7] = (uint8_t)((datasize) << 8);
- cdb[8] = (uint8_t)((datasize) >> 8);
- break;
- case BMIC_IDENTIFY_CONTROLLER:
- case BMIC_IDENTIFY_PHYSICAL_DEVICE:
- request->data_direction = SOP_DATA_DIR_TO_DEVICE;
- cdb[0] = BMIC_READ;
- cdb[6] = cmd;
- cdb[7] = (uint8_t)((datasize) << 8);
- cdb[8] = (uint8_t)((datasize) >> 8);
- break;
- case BMIC_WRITE_HOST_WELLNESS:
- request->data_direction = SOP_DATA_DIR_FROM_DEVICE;
- memcpy(device_mem.virt_addr, buff, datasize);
- cdb[0] = BMIC_WRITE;
- cdb[6] = cmd;
- cdb[7] = (uint8_t)((datasize) << 8);
- cdb[8] = (uint8_t)((datasize) >> 8);
- break;
- case BMIC_SENSE_SUBSYSTEM_INFORMATION:
- request->data_direction = SOP_DATA_DIR_TO_DEVICE;
- cdb[0] = BMIC_READ;
- cdb[6] = cmd;
- cdb[7] = (uint8_t)((datasize) << 8);
- cdb[8] = (uint8_t)((datasize) >> 8);
- break;
- default:
- DBG_ERR("unknown command 0x%x", cmd);
- ret = PQI_STATUS_FAILURE;
- return ret;
- }
-
tag = pqisrc_get_tag(&softs->taglist);
if (INVALID_ELEM == tag) {
DBG_ERR("Tag not available\n");
@@ -312,7 +477,7 @@
ret = PQI_STATUS_SUCCESS;
}
else{
- DBG_DISC("Error!! Bus=%u Target=%u, Cmd=0x%x,"
+		DBG_WARN("Bus=%u Target=%u, Cmd=0x%x, "
"Ret=%d\n", BMIC_GET_LEVEL_2_BUS(scsi3addr),
BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
cmd, ret);
@@ -344,7 +509,44 @@
return ret;
}
-/* common function used to send report physical and logical luns cmnds*/
+/* Use this if you need to specify a specific target or want error info */
+int
+pqisrc_prepare_send_raid(pqisrc_softstate_t *softs, pqisrc_raid_req_t *request,
+ void *buff, size_t datasize, uint8_t *scsi3addr,
+ raid_path_error_info_elem_t *error_info)
+{
+ struct dma_mem device_mem;
+ int ret = PQI_STATUS_SUCCESS;
+ uint8_t cmd = IS_BMIC_OPCODE(request->cmd.cdb[0]) ? request->cmd.cdb[6] : request->cmd.cdb[0];
+
+ ret = pqisrc_simple_dma_alloc(softs, &device_mem, datasize, request->sg_descriptors);
+ if (PQI_STATUS_SUCCESS != ret){
+ DBG_ERR("failed to allocate dma memory for device_mem return code %d\n", ret);
+ return ret;
+ }
+
+ /* If we are sending out data, copy it over to dma buf */
+ if (datasize && buff && request->data_direction == SOP_DATA_DIR_FROM_DEVICE)
+ memcpy(device_mem.virt_addr, buff, datasize);
+
+ ret = pqisrc_build_send_raid_request(softs, device_mem, request, buff, datasize,
+ cmd, scsi3addr, error_info);
+
+ return ret;
+}
+
+/* Use this to target the controller when error info is not needed */
+int
+pqisrc_prepare_send_ctrlr_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t *request,
+ void *buff, size_t datasize)
+{
+ raid_path_error_info_elem_t error_info; /* will be thrown away */
+ uint8_t *scsi3addr = RAID_CTLR_LUNID;
+
+ return pqisrc_prepare_send_raid(softs, request, buff, datasize, scsi3addr, &error_info);
+}
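A sketch of the caller pattern these two wrappers enable: fill in only the data direction and the BMIC CDB fields, then delegate DMA setup and submission. The struct below is a simplified stand-in for the request's bmic_cdb view, and the opcode values are assumptions to be checked against the driver headers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BMIC_READ			0x26	/* assumed opcode values */
#define BMIC_IDENTIFY_CONTROLLER	0x11
#define BE_16(x) ((uint16_t)((((x) & 0xff) << 8) | (((x) >> 8) & 0xff)))

struct fake_bmic_cdb { uint8_t op_code; uint8_t cmd; uint16_t xfer_len; };

/* Mirrors the identify-controller setup used later in this patch. */
static void build_identify_ctrl(struct fake_bmic_cdb *cdb, uint16_t len)
{
	memset(cdb, 0, sizeof(*cdb));
	cdb->op_code  = BMIC_READ;
	cdb->cmd      = BMIC_IDENTIFY_CONTROLLER;
	cdb->xfer_len = BE_16(len);	/* BMIC transfer lengths are big-endian */
}

int main(void)
{
	struct fake_bmic_cdb cdb;

	build_identify_ctrl(&cdb, 512);
	printf("op=0x%02x cmd=0x%02x len=0x%04x\n", cdb.op_code, cdb.cmd, cdb.xfer_len);
	return 0;
}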
+
+/* Common function used to send report physical and logical LUNs commands */
static int
pqisrc_report_luns(pqisrc_softstate_t *softs, uint8_t cmd,
void *buff, size_t buf_len)
@@ -355,8 +557,29 @@
DBG_FUNC("IN\n");
memset(&request, 0, sizeof(request));
- ret = pqisrc_build_send_raid_request(softs, &request, buff,
- buf_len, cmd, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
+
+ request.data_direction = SOP_DATA_DIR_TO_DEVICE;
+
+ switch (cmd) {
+ case SA_REPORT_LOG:
+ request.cmd.cdb[0] = SA_REPORT_LOG;
+ request.cmd.cdb[1] = SA_REPORT_LOG_EXTENDED;
+ break;
+ case SA_REPORT_PHYS:
+ request.cmd.cdb[0] = SA_REPORT_PHYS;
+ request.cmd.cdb[1] = SA_REPORT_PHYS_EXTENDED;
+ break;
+ /* @todo: 0x56 does not exist, this is kludgy, need to pass in options */
+ case PQI_LOG_EXT_QUEUE_ENABLE:
+ request.cmd.cdb[0] = SA_REPORT_LOG;
+ request.cmd.cdb[1] = (PQI_LOG_EXT_QUEUE_DEPTH_ENABLED | SA_REPORT_LOG_EXTENDED);
+ break;
+ }
+
+ request.cmd.cdb[8] = (uint8_t)((buf_len) >> 8);
+ request.cmd.cdb[9] = (uint8_t)buf_len;
+
+ ret = pqisrc_prepare_send_ctrlr_request(softs, &request, buff, buf_len);
DBG_FUNC("OUT\n");
@@ -464,7 +687,7 @@
}
if (list_len == 0) {
- DBG_INFO("list_len is 0\n");
+ DBG_DISC("list_len is 0\n");
memcpy(lun_data, &report_lun_header, sizeof(report_lun_header));
goto out;
}
@@ -527,11 +750,22 @@
return ret;
}
+#ifdef PQI_NEED_RESCAN_TIMER_FOR_RBOD_HOTPLUG
+	/* Save the report_log_dev buffer so the OS driver can decide whether a rescan is required */
+ if(softs->log_dev_data_length != *log_data_length) {
+ if(softs->log_dev_list)
+ os_mem_free(softs, softs->log_dev_list, softs->log_dev_data_length);
+		softs->log_dev_list = os_mem_alloc(softs, *log_data_length);
+		softs->log_dev_data_length = softs->log_dev_list ? *log_data_length : 0;
+	}
+	/* Guard against dereferencing a failed allocation */
+	if (softs->log_dev_list)
+		memcpy(softs->log_dev_list, *logical_dev_list, *log_data_length);
+#endif
+
ret = pqisrc_get_queue_lun_list(softs, PQI_LOG_EXT_QUEUE_ENABLE, queue_dev_list, queue_data_length);
- if (ret) {
- DBG_ERR("report logical LUNs failed");
- return ret;
- }
+	if (ret) {
+		DBG_ERR("report logical LUNs failed");
+		return ret;
+	}
logdev_data = *logical_dev_list;
@@ -573,34 +807,21 @@
return ret;
}
-/* Subroutine used to set Bus-Target-Lun for the requested device */
-static inline void
-pqisrc_set_btl(pqi_scsi_dev_t *device,
- int bus, int target, int lun)
-{
- DBG_FUNC("IN\n");
-
- device->bus = bus;
- device->target = target;
- device->lun = lun;
-
- DBG_FUNC("OUT\n");
-}
-
-inline
-boolean_t pqisrc_is_external_raid_device(pqi_scsi_dev_t *device)
+inline boolean_t
+pqisrc_is_external_raid_device(pqi_scsi_dev_t *device)
{
return device->is_external_raid_device;
}
-static inline boolean_t pqisrc_is_external_raid_addr(uint8_t *scsi3addr)
+static inline boolean_t
+pqisrc_is_external_raid_addr(uint8_t *scsi3addr)
{
return scsi3addr[2] != 0;
}
/* Function used to assign Bus-Target-Lun for the requested device */
static void
-pqisrc_assign_btl(pqi_scsi_dev_t *device)
+pqisrc_assign_btl(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
uint8_t *scsi3addr;
uint32_t lunid;
@@ -613,26 +834,22 @@
lunid = GET_LE32(scsi3addr);
if (pqisrc_is_hba_lunid(scsi3addr)) {
- /* The specified device is the controller. */
- pqisrc_set_btl(device, PQI_HBA_BUS, PQI_CTLR_INDEX, (lunid & 0x3fff) + 1);
+ /* The specified device is the controller. */
+ pqisrc_set_btl(device, PQI_HBA_BUS, PQI_CTLR_INDEX, (lunid & 0x3fff));
device->target_lun_valid = true;
return;
}
+	/* When the specified device is a logical volume, physical
+	 * devices are assigned targets in pqisrc_update_device_list()
+	 * during pqisrc_scan_devices(). */
if (pqisrc_is_logical_device(device)) {
- if (pqisrc_is_external_raid_device(device)) {
- DBG_DISC("External Raid Device!!!");
- bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
- target = (lunid >> 16) & 0x3fff;
- lun = lunid & 0xff;
- } else {
bus = PQI_RAID_VOLUME_BUS;
lun = (lunid & 0x3fff) + 1;
target = 0;
- }
- pqisrc_set_btl(device, bus, target, lun);
- device->target_lun_valid = true;
- return;
+ pqisrc_set_btl(device, bus, target, lun);
+ device->target_lun_valid = true;
+ return;
}
DBG_FUNC("OUT\n");
@@ -650,38 +867,25 @@
DBG_FUNC("IN\n");
memset(&request, 0, sizeof(request));
- ret = pqisrc_build_send_raid_request(softs, &request, buff, buf_len,
- SA_INQUIRY, vpd_page, scsi3addr, &error_info);
-
- DBG_FUNC("OUT\n");
- return ret;
-}
-
-#if 0
-/* Function used to parse the sense information from response */
-static void
-pqisrc_fetch_sense_info(const uint8_t *sense_data,
- unsigned sense_data_length, uint8_t *sense_key, uint8_t *asc, uint8_t *ascq)
-{
- struct sense_header_scsi header;
- DBG_FUNC("IN\n");
-
- *sense_key = 0;
- *ascq = 0;
- *asc = 0;
+ request.data_direction = SOP_DATA_DIR_TO_DEVICE;
+ request.cmd.cdb[0] = SA_INQUIRY;
+ if (vpd_page & VPD_PAGE) {
+ request.cmd.cdb[1] = 0x1;
+ request.cmd.cdb[2] = (uint8_t)vpd_page;
+ }
+ ASSERT(buf_len < 256);
+ request.cmd.cdb[4] = (uint8_t)buf_len;
- if (pqisrc_update_scsi_sense(sense_data, sense_data_length, &header)) {
- *sense_key = header.sense_key;
- *asc = header.asc;
- *ascq = header.ascq;
+ if (softs->timeout_in_passthrough) {
+ request.timeout_in_sec = PQISRC_INQUIRY_TIMEOUT;
}
- DBG_DISC("sense_key: %x asc: %x ascq: %x\n", *sense_key, *asc, *ascq);
+	ret = pqisrc_prepare_send_raid(softs, &request, buff, buf_len, scsi3addr, &error_info);
DBG_FUNC("OUT\n");
+ return ret;
}
-#endif
/* Determine logical volume status from vpd buffer.*/
static void pqisrc_get_dev_vol_status(pqisrc_softstate_t *softs,
@@ -732,6 +936,7 @@
return;
}
+
/* Validate the RAID map parameters */
static int
pqisrc_raid_map_validation(pqisrc_softstate_t *softs,
@@ -740,6 +945,7 @@
char *error_msg;
uint32_t raidmap_size;
uint32_t r5or6_blocks_per_row;
+/* unsigned phys_dev_num; */
DBG_FUNC("IN\n");
@@ -792,48 +998,45 @@
pqisrc_get_device_raidmap(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
int ret = PQI_STATUS_SUCCESS;
- int raidmap_size;
+ int raidmap_alloc_size = sizeof(pqisrc_raid_map_t);
+ int raidmap_reported_size;
+ int structure_size;
+ int ii;
+ int *next_offload_to_mirror;
pqisrc_raid_req_t request;
pqisrc_raid_map_t *raid_map;
DBG_FUNC("IN\n");
- raid_map = os_mem_alloc(softs, sizeof(*raid_map));
- if (!raid_map)
- return PQI_STATUS_FAILURE;
-
- memset(&request, 0, sizeof(request));
- ret = pqisrc_build_send_raid_request(softs, &request, raid_map, sizeof(*raid_map),
- SA_GET_RAID_MAP, 0, device->scsi3addr, NULL);
-
- if (ret) {
- DBG_ERR("error in build send raid req ret=%d\n", ret);
- goto err_out;
- }
-
- raidmap_size = LE_32(raid_map->structure_size);
- if (raidmap_size > sizeof(*raid_map)) {
- DBG_NOTE("Raid map is larger than 1024 entries, request once again");
- os_mem_free(softs, (char*)raid_map, sizeof(*raid_map));
-
- raid_map = os_mem_alloc(softs, raidmap_size);
+ for (ii = 0; ii < 2; ii++)
+ {
+ raid_map = os_mem_alloc(softs, raidmap_alloc_size);
if (!raid_map)
return PQI_STATUS_FAILURE;
+
memset(&request, 0, sizeof(request));
+ request.data_direction = SOP_DATA_DIR_TO_DEVICE;
+ request.cmd.cdb[0] = SA_CISS_READ;
+ request.cmd.cdb[1] = SA_GET_RAID_MAP;
+ request.cmd.cdb[8] = (uint8_t)((raidmap_alloc_size) >> 8);
+ request.cmd.cdb[9] = (uint8_t)(raidmap_alloc_size);
+
+ ret = pqisrc_prepare_send_raid(softs, &request, raid_map, raidmap_alloc_size, device->scsi3addr, NULL);
- ret = pqisrc_build_send_raid_request(softs, &request, raid_map, raidmap_size,
- SA_GET_RAID_MAP, 0, device->scsi3addr, NULL);
if (ret) {
DBG_ERR("error in build send raid req ret=%d\n", ret);
goto err_out;
}
- if(LE_32(raid_map->structure_size) != raidmap_size) {
- DBG_WARN("Expected raid map size %d bytes and got %d bytes\n",
- raidmap_size,LE_32(raid_map->structure_size));
- goto err_out;
- }
+ raidmap_reported_size = LE_32(raid_map->structure_size);
+ if (raidmap_reported_size <= raidmap_alloc_size)
+ break;
+
+		DBG_NOTE("Raid map is larger than 1024 entries, requesting again\n");
+ os_mem_free(softs, (char*)raid_map, raidmap_alloc_size);
+
+ raidmap_alloc_size = raidmap_reported_size;
}
ret = pqisrc_raid_map_validation(softs, device, raid_map);
@@ -842,7 +1045,15 @@
goto err_out;
}
+ structure_size = raid_map->data_disks_per_row * sizeof(*next_offload_to_mirror);
+ next_offload_to_mirror = os_mem_alloc(softs, structure_size);
+ if (!next_offload_to_mirror) {
+ ret = PQI_STATUS_FAILURE;
+ goto err_out;
+ }
+
device->raid_map = raid_map;
+ device->offload_to_mirror = next_offload_to_mirror;
DBG_FUNC("OUT\n");
return 0;
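The loop above is a grow-and-retry pattern: allocate a default-sized raid map, let the firmware report the structure's true size, and retry once with the larger allocation. The same shape in isolation, with generic names and a stubbed query:

#include <stdio.h>
#include <stdlib.h>

/* Stub for the firmware query: pretends the real data is 2048 bytes. */
static size_t fake_query(void *buf, size_t len)
{
	(void)buf; (void)len;
	return 2048;
}

static int fetch_with_retry(size_t default_size, void **out, size_t *out_len)
{
	size_t alloc = default_size;

	for (int pass = 0; pass < 2; pass++) {
		void *buf = calloc(1, alloc);
		if (buf == NULL)
			return -1;
		size_t reported = fake_query(buf, alloc);
		if (reported <= alloc) {	/* fits: done */
			*out = buf;
			*out_len = reported;
			return 0;
		}
		free(buf);
		alloc = reported;		/* grow once and retry */
	}
	return -1;
}

int main(void)
{
	void *map;
	size_t len;

	if (fetch_with_retry(1024, &map, &len) == 0) {
		printf("got %zu bytes\n", len);
		free(map);
	}
	return 0;
}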
@@ -928,7 +1139,7 @@
{
int ret = PQI_STATUS_SUCCESS;
uint8_t *inq_buff;
- int retry = MAX_RETRIES;
+ int retry = 3;
DBG_FUNC("IN\n");
@@ -996,8 +1207,14 @@
DBG_FUNC("IN\n");
memset(&request, 0, sizeof(request));
- ret = pqisrc_build_send_raid_request(softs, &request, buff, sizeof(*buff),
- BMIC_IDENTIFY_CONTROLLER, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
+
+ request.data_direction = SOP_DATA_DIR_TO_DEVICE;
+ request.cmd.bmic_cdb.op_code = BMIC_READ;
+ request.cmd.bmic_cdb.cmd = BMIC_IDENTIFY_CONTROLLER;
+ request.cmd.bmic_cdb.xfer_len = BE_16(sizeof(*buff));
+
+ ret = pqisrc_prepare_send_ctrlr_request(softs, &request, buff, sizeof(*buff));
+
DBG_FUNC("OUT\n");
return ret;
@@ -1055,11 +1272,16 @@
memset(&request, 0, sizeof(request));
bmic_device_index = BMIC_GET_DRIVE_NUMBER(device->scsi3addr);
- request.cdb[2] = (uint8_t)bmic_device_index;
- request.cdb[9] = (uint8_t)(bmic_device_index >> 8);
- ret = pqisrc_build_send_raid_request(softs, &request, buff, buf_len,
- BMIC_IDENTIFY_PHYSICAL_DEVICE, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
+ request.data_direction = SOP_DATA_DIR_TO_DEVICE;
+ request.cmd.bmic_cdb.op_code = BMIC_READ;
+ request.cmd.bmic_cdb.cmd = BMIC_IDENTIFY_PHYSICAL_DEVICE;
+ request.cmd.bmic_cdb.xfer_len = BE_16(buf_len);
+ request.cmd.cdb[2] = (uint8_t)bmic_device_index;
+ request.cmd.cdb[9] = (uint8_t)(bmic_device_index >> 8);
+
+ ret = pqisrc_prepare_send_ctrlr_request(softs, &request, buff, buf_len);
+
DBG_FUNC("OUT\n");
return ret;
}
@@ -1097,6 +1319,9 @@
&id_phys->alternate_paths_phys_connector,
sizeof(device->phys_connector));
device->bay = id_phys->phys_bay_in_box;
+ if (id_phys->multi_lun_device_lun_count) {
+ device->is_multi_lun = true;
+ }
DBG_DISC("BMIC DEV_TYPE: %x QUEUE DEPTH: 0x%x \n", device->device_type, device->queue_depth);
DBG_FUNC("OUT\n");
@@ -1104,28 +1329,28 @@
/* Function used to find the entry of the device in a list */
-static
-device_status_t pqisrc_scsi_find_entry(pqisrc_softstate_t *softs,
+static device_status_t
+pqisrc_scsi_find_entry(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device_to_find, pqi_scsi_dev_t **same_device)
{
pqi_scsi_dev_t *device;
- int i,j;
+ int i;
DBG_FUNC("IN\n");
for(i = 0; i < PQI_MAX_DEVICES; i++) {
- for(j = 0; j < PQI_MAX_MULTILUN; j++) {
- if(softs->device_list[i][j] == NULL)
- continue;
- device = softs->device_list[i][j];
- if (pqisrc_scsi3addr_equal(device_to_find->scsi3addr,
- device->scsi3addr)) {
- *same_device = device;
- if (pqisrc_device_equal(device_to_find, device)) {
- if (device_to_find->volume_offline)
- return DEVICE_CHANGED;
- return DEVICE_UNCHANGED;
- }
- return DEVICE_CHANGED;
+ device = softs->dev_list[i];
+ if(device == NULL)
+ continue;
+ if (pqisrc_scsi3addr_equal(device_to_find->scsi3addr,
+ device->scsi3addr)) {
+ *same_device = device;
+ if (device->in_remove == true)
+ return DEVICE_IN_REMOVE;
+ if (pqisrc_device_equal(device_to_find, device)) {
+ if (device_to_find->volume_offline)
+ return DEVICE_CHANGED;
+ return DEVICE_UNCHANGED;
}
+ return DEVICE_CHANGED;
}
}
DBG_FUNC("OUT\n");
@@ -1148,10 +1373,10 @@
device_exist->is_physical_device = new_device->is_physical_device;
device_exist->is_external_raid_device =
new_device->is_external_raid_device;
-
- if ((device_exist->volume_status == SA_LV_QUEUED_FOR_EXPANSION ||
- device_exist->volume_status == SA_LV_UNDERGOING_EXPANSION) &&
- new_device->volume_status == SA_LV_OK) {
+	/* Whenever a logical device expansion happens, a reprobe of
+	 * all existing LDs is triggered, which updates their size
+	 * to the OS. */
+ if ((softs->ld_rescan) && (pqisrc_is_logical_device(device_exist))) {
device_exist->scsi_rescan = true;
}
@@ -1170,105 +1395,19 @@
device_exist->offload_config = new_device->offload_config;
device_exist->offload_enabled_pending =
new_device->offload_enabled_pending;
- device_exist->offload_to_mirror = 0;
+ if (device_exist->offload_to_mirror)
+ os_mem_free(softs,
+ (int *) device_exist->offload_to_mirror,
+ sizeof(*(device_exist->offload_to_mirror)));
+ device_exist->offload_to_mirror = new_device->offload_to_mirror;
if (device_exist->raid_map)
os_mem_free(softs,
- (char *)device_exist->raid_map,
- sizeof(*device_exist->raid_map));
+ (char *)device_exist->raid_map,
+ sizeof(*device_exist->raid_map));
device_exist->raid_map = new_device->raid_map;
- /* To prevent this from being freed later. */
+ /* To prevent these from being freed later. */
new_device->raid_map = NULL;
- DBG_FUNC("OUT\n");
-}
-
-/* Validate the ioaccel_handle for a newly added device */
-static
-pqi_scsi_dev_t *pqisrc_identify_device_via_ioaccel(
- pqisrc_softstate_t *softs, uint32_t ioaccel_handle)
-{
- pqi_scsi_dev_t *device;
- int i,j;
- DBG_FUNC("IN\n");
- for(i = 0; i < PQI_MAX_DEVICES; i++) {
- for(j = 0; j < PQI_MAX_MULTILUN; j++) {
- if(softs->device_list[i][j] == NULL)
- continue;
- device = softs->device_list[i][j];
- if (device->devtype != DISK_DEVICE)
- continue;
- if (pqisrc_is_logical_device(device))
- continue;
- if (device->ioaccel_handle == ioaccel_handle)
- return device;
- }
- }
- DBG_FUNC("OUT\n");
-
- return NULL;
-}
-
-/* Get the scsi device queue depth */
-static void
-pqisrc_update_log_dev_qdepth(pqisrc_softstate_t *softs)
-{
- unsigned i;
- unsigned phys_dev_num;
- unsigned num_raidmap_entries;
- unsigned queue_depth;
- pqisrc_raid_map_t *raid_map;
- pqi_scsi_dev_t *device;
- raidmap_data_t *dev_data;
- pqi_scsi_dev_t *phys_disk;
- unsigned j;
- unsigned k;
-
- DBG_FUNC("IN\n");
-
- for(i = 0; i < PQI_MAX_DEVICES; i++) {
- for(j = 0; j < PQI_MAX_MULTILUN; j++) {
- if(softs->device_list[i][j] == NULL)
- continue;
- device = softs->device_list[i][j];
- if (device->devtype != DISK_DEVICE)
- continue;
- if (!pqisrc_is_logical_device(device))
- continue;
- if (pqisrc_is_external_raid_device(device))
- continue;
- device->queue_depth = PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
- raid_map = device->raid_map;
- if (!raid_map)
- return;
- dev_data = raid_map->dev_data;
- phys_dev_num = LE_16(raid_map->layout_map_count) *
- (LE_16(raid_map->data_disks_per_row) +
- LE_16(raid_map->metadata_disks_per_row));
- num_raidmap_entries = phys_dev_num *
- LE_16(raid_map->row_cnt);
-
- queue_depth = 0;
- for (k = 0; k < num_raidmap_entries; k++) {
- phys_disk = pqisrc_identify_device_via_ioaccel(softs,
- dev_data[k].ioaccel_handle);
-
- if (!phys_disk) {
- DBG_WARN(
- "Failed to find physical disk handle for logical drive %016llx\n",
- (unsigned long long)BE_64(device->scsi3addr[0]));
- device->offload_enabled = false;
- device->offload_enabled_pending = false;
- if (raid_map)
- os_mem_free(softs, (char *)raid_map, sizeof(*raid_map));
- device->raid_map = NULL;
- return;
- }
-
- queue_depth += phys_disk->queue_depth;
- }
-
- device->queue_depth = queue_depth;
- } /* end inner loop */
- }/* end outer loop */
+ new_device->offload_to_mirror = NULL;
DBG_FUNC("OUT\n");
}
@@ -1281,6 +1420,9 @@
device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
device->invalid = false;
+ device->schedule_rescan = false;
+ device->softs = softs;
+ device->in_remove = false;
if(device->expose_device) {
pqisrc_init_device_active_io(softs, device);
@@ -1299,7 +1441,6 @@
DBG_FUNC("IN\n");
DBG_NOTE("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
-
device->invalid = true;
if (device->expose_device == false) {
/*Masked physical devices are not been exposed to storage stack.
@@ -1307,6 +1448,7 @@
*device memory, Target ID,etc., here.
*/
DBG_NOTE("Deallocated Masked Device Resources.\n");
+ /* softs->device_list[device->target][device->lun] = NULL; */
pqisrc_free_device(softs,device);
return;
}
@@ -1317,6 +1459,7 @@
DBG_FUNC("OUT\n");
}
+
/*
* When exposing new device to OS fails then adjst list according to the
* mid scsi list
@@ -1324,6 +1467,9 @@
static void
pqisrc_adjust_list(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
+ int i;
+ unsigned char addr1[8], addr2[8];
+ pqi_scsi_dev_t *temp_device;
DBG_FUNC("IN\n");
if (!device) {
@@ -1332,7 +1478,36 @@
}
OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
- softs->device_list[device->target][device->lun] = NULL;
+ uint8_t *scsi3addr;
+	/* For external RAID devices, multiple luns can share a target.
+	 * When freeing such a device, free the target only after all
+	 * luns with that target have been removed. */
+ if (pqisrc_is_external_raid_device(device)) {
+ memcpy(addr1, device->scsi3addr, 8);
+ for(i = 0; i < PQI_MAX_DEVICES; i++) {
+ if(softs->dev_list[i] == NULL)
+ continue;
+ temp_device = softs->dev_list[i];
+ memcpy(addr2, temp_device->scsi3addr, 8);
+ if(memcmp(addr1, addr2, 8) == 0) {
+ continue;
+ }
+ if (addr1[2] == addr2[2]) {
+ break;
+ }
+ }
+ if(i == PQI_MAX_DEVICES) {
+ pqisrc_remove_target_bit(softs, device->target);
+ }
+ }
+
+ if(pqisrc_delete_softs_entry(softs, device) == PQI_STATUS_SUCCESS){
+ scsi3addr = device->scsi3addr;
+ if (!pqisrc_is_logical_device(device) && !MASKED_DEVICE(scsi3addr)){
+ DBG_NOTE("About to remove target bit %d \n", device->target);
+ pqisrc_remove_target_bit(softs, device->target);
+ }
+ }
OS_RELEASE_SPINLOCK(&softs->devlist_lock);
pqisrc_device_mem_free(softs, device);
@@ -1341,7 +1516,7 @@
/* Debug routine used to display the RAID volume status of the device */
static void
-pqisrc_display_volume_status(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+pqisrc_display_volume_status(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
char *status;
@@ -1391,6 +1566,7 @@
break;
case SA_LV_QUEUED_FOR_EXPANSION:
status = "Volume queued for expansion";
+ break;
case SA_LV_EJECTED:
status = "Volume ejected";
break;
@@ -1414,7 +1590,7 @@
break;
}
- DBG_DISC("scsi BTL %d:%d:%d %s\n",
+ DBG_NOTE("scsi BTL %d:%d:%d %s\n",
device->bus, device->target, device->lun, status);
DBG_FUNC("OUT\n");
}
@@ -1428,6 +1604,9 @@
if (device->raid_map) {
os_mem_free(softs, (char *)device->raid_map, sizeof(pqisrc_raid_map_t));
}
+ if (device->offload_to_mirror) {
+ os_mem_free(softs, (int *)device->offload_to_mirror, sizeof(*(device->offload_to_mirror)));
+ }
os_mem_free(softs, (char *)device,sizeof(*device));
DBG_FUNC("OUT\n");
@@ -1435,11 +1614,13 @@
/* OS should call this function to free the scsi device */
void
-pqisrc_free_device(pqisrc_softstate_t * softs,pqi_scsi_dev_t *device)
+pqisrc_free_device(pqisrc_softstate_t * softs, pqi_scsi_dev_t *device)
{
rcb_t *rcb;
- int i;
-
+ uint8_t *scsi3addr;
+ int i, index;
+ pqi_scsi_dev_t *temp_device;
+ unsigned char addr1[8], addr2[8];
/* Clear the "device" field in the rcb.
* Response coming after device removal shouldn't access this field
*/
@@ -1451,21 +1632,53 @@
rcb->dvp = NULL;
}
}
+	/* Find the entry for this device in softs->dev_list and
+	 * clear it before freeing the device memory
+	 */
+ index = pqisrc_find_device_list_index(softs, device);
OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
-
- if (!pqisrc_is_logical_device(device)) {
- pqisrc_free_tid(softs,device->target);
+ scsi3addr = device->scsi3addr;
+ if (!pqisrc_is_logical_device(device) && !MASKED_DEVICE(scsi3addr)) {
+		DBG_NOTE("Giving back target %d\n", device->target);
+ pqisrc_remove_target_bit(softs, device->target);
+ }
+	/* For external RAID devices, multiple luns can share a target.
+	 * When freeing such a device, free the target only after all
+	 * luns with that target have been removed. */
+ if (pqisrc_is_external_raid_device(device)) {
+ memcpy(addr1, device->scsi3addr, 8);
+ for(i = 0; i < PQI_MAX_DEVICES; i++) {
+ if(softs->dev_list[i] == NULL)
+ continue;
+ temp_device = softs->dev_list[i];
+ memcpy(addr2, temp_device->scsi3addr, 8);
+ if(memcmp(addr1, addr2, 8) == 0) {
+ continue;
+ }
+ if (addr1[2] == addr2[2]) {
+ break;
+ }
+ }
+ if(i == PQI_MAX_DEVICES) {
+ pqisrc_remove_target_bit(softs, device->target);
+ }
}
- softs->device_list[device->target][device->lun] = NULL;
-
- pqisrc_device_mem_free(softs, device);
+ if (index >= 0 && index < PQI_MAX_DEVICES)
+ softs->dev_list[index] = NULL;
+ if (device->expose_device == true){
+ pqisrc_delete_softs_entry(softs, device);
+ DBG_NOTE("Removed memory for device : B %d: T %d: L %d\n",
+ device->bus, device->target, device->lun);
+ pqisrc_device_mem_free(softs, device);
+ }
OS_RELEASE_SPINLOCK(&softs->devlist_lock);
}
+
/* Update the newly added devices to the device list */
static void
pqisrc_update_device_list(pqisrc_softstate_t *softs,
@@ -1479,9 +1692,7 @@
pqi_scsi_dev_t **added = NULL;
pqi_scsi_dev_t **removed = NULL;
int nadded = 0, nremoved = 0;
- int j;
- int tid = 0;
- boolean_t driver_queue_depth_flag = false;
+ uint8_t *scsi3addr;
DBG_FUNC("IN\n");
@@ -1496,13 +1707,13 @@
OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
for(i = 0; i < PQI_MAX_DEVICES; i++) {
- for(j = 0; j < PQI_MAX_MULTILUN; j++) {
- if(softs->device_list[i][j] == NULL)
- continue;
- device = softs->device_list[i][j];
- device->device_gone = true;
- }
+ if(softs->dev_list[i] == NULL)
+ continue;
+ device = softs->dev_list[i];
+ device->device_gone = true;
}
+
+	/* TODO: Remove later */
DBG_IO("Device list used an array\n");
for (i = 0; i < num_new_devices; i++) {
device = new_device_list[i];
@@ -1525,21 +1736,33 @@
/* Actual device gone need to add device to list*/
device->new_device = true;
break;
+ case DEVICE_IN_REMOVE:
+			/* An older device with the same target/lun is still
+			 * being removed. The new device will be added and
+			 * scanned once the OS target free call drops the
+			 * old entry from the device list. */
+ device->new_device = false;
+ same_device->schedule_rescan = true;
+ break;
default:
break;
}
}
+
/* Process all devices that have gone away. */
- for(i = 0, nremoved = 0; i < PQI_MAX_DEVICES; i++) {
- for(j = 0; j < PQI_MAX_MULTILUN; j++) {
- if(softs->device_list[i][j] == NULL)
+ for(i = 0; i < PQI_MAX_DEVICES; i++) {
+ device = softs->dev_list[i];
+ if(device == NULL)
+ continue;
+ if (device->device_gone) {
+ if(device->in_remove == true)
+ {
continue;
- device = softs->device_list[i][j];
- if (device->device_gone) {
- softs->device_list[device->target][device->lun] = NULL;
- removed[nremoved] = device;
- nremoved++;
}
+ device->in_remove = true;
+ removed[nremoved] = device;
+ softs->num_devs--;
+ nremoved++;
}
}
@@ -1551,39 +1774,32 @@
if (device->volume_offline)
continue;
- /* physical device */
- if (!pqisrc_is_logical_device(device)) {
- tid = pqisrc_alloc_tid(softs);
- if(INVALID_ELEM != tid)
- pqisrc_set_btl(device, PQI_PHYSICAL_DEVICE_BUS, tid, 0);
- }
-
- /* This is not expected. We may lose the reference to the old device entry.
- * If the target & lun ids are same, it is supposed to detect as an existing
- * device, and not as a new device
- */
- if(softs->device_list[device->target][device->lun] != NULL) {
- DBG_WARN("Overwriting T : %d L :%d\n",device->target,device->lun);
+ /* Find out which devices to add to the driver list
+ * in softs->dev_list */
+ scsi3addr = device->scsi3addr;
+ if (device->expose_device || !MASKED_DEVICE(scsi3addr)){
+ if(pqisrc_add_softs_entry(softs, device, scsi3addr)){
+ /* To prevent this entry from being freed later. */
+ new_device_list[i] = NULL;
+ added[nadded] = device;
+ nadded++;
+ }
}
- softs->device_list[device->target][device->lun] = device;
-
- DBG_DISC("Added device %p at B : %d T : %d L : %d\n",device,
- device->bus,device->target,device->lun);
- /* To prevent this entry from being freed later. */
- new_device_list[i] = NULL;
- added[nadded] = device;
- nadded++;
}
-
for(i = 0; i < PQI_MAX_DEVICES; i++) {
- for(j = 0; j < PQI_MAX_MULTILUN; j++) {
- if(softs->device_list[i][j] == NULL)
- continue;
- device = softs->device_list[i][j];
- device->offload_enabled = device->offload_enabled_pending;
+ device = softs->dev_list[i];
+ if(device == NULL)
+ continue;
+ if (device->offload_enabled != device->offload_enabled_pending)
+ {
+			DBG_NOTE("[%d:%d:%d] Changing AIO to %d (was %d)\n",
+ device->bus, device->target, device->lun,
+ device->offload_enabled_pending,
+ device->offload_enabled);
}
+ device->offload_enabled = device->offload_enabled_pending;
}
OS_RELEASE_SPINLOCK(&softs->devlist_lock);
@@ -1594,36 +1810,34 @@
continue;
pqisrc_display_device_info(softs, "removed", device);
pqisrc_remove_device(softs, device);
-
}
+ OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
+
for(i = 0; i < PQI_MAX_DEVICES; i++) {
- for(j = 0; j < PQI_MAX_MULTILUN; j++) {
- if(softs->device_list[i][j] == NULL)
- continue;
- device = softs->device_list[i][j];
- /*
- * Notify the OS upper layer if the queue depth of any existing device has
- * changed.
- */
- if (device->queue_depth !=
- device->advertised_queue_depth) {
- device->advertised_queue_depth = device->queue_depth;
- /* TBD: Call OS upper layer function to change device Q depth */
- }
- if (device->firmware_queue_depth_set == false)
- driver_queue_depth_flag = true;
- if (device->scsi_rescan)
- os_rescan_target(softs, device);
+ if(softs->dev_list[i] == NULL)
+ continue;
+ device = softs->dev_list[i];
+ if (device->in_remove)
+ continue;
+		/*
+		 * If the firmware-reported queue depth is corrupt or
+		 * unusable, fall back to
+		 * PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH (0), which
+		 * means the queue depth is unlimited up to the
+		 * controller queue depth.
+		 */
+ if (pqisrc_is_logical_device(device) &&
+ device->firmware_queue_depth_set == false)
+ device->queue_depth = PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
+
+ if (device->scsi_rescan) {
+ os_rescan_target(softs, device);
}
}
- /*
- * If firmware queue depth is corrupt or not working
- * use driver method to re-calculate the queue depth
- * for all logical devices
- */
- if (driver_queue_depth_flag)
- pqisrc_update_log_dev_qdepth(softs);
+ softs->ld_rescan = false;
+
+ OS_RELEASE_SPINLOCK(&softs->devlist_lock);
for(i = 0; i < nadded; i++) {
device = added[i];
@@ -1631,8 +1845,7 @@
ret = pqisrc_add_device(softs, device);
if (ret) {
DBG_WARN("scsi %d:%d:%d addition failed, device not added\n",
- device->bus, device->target,
- device->lun);
+ device->bus, device->target, device->lun);
pqisrc_adjust_list(softs, device);
continue;
}
@@ -1654,6 +1867,15 @@
}
}
+ for (i = 0; i < PQI_MAX_DEVICES; i++) {
+ device = softs->dev_list[i];
+ if(device == NULL)
+ continue;
+ DBG_DISC("Current device %d : B%d:T%d:L%d\n",
+ i, device->bus, device->target,
+ device->lun);
+ }
+
free_and_out:
if (added)
os_mem_free(softs, (char *)added,
@@ -1701,16 +1923,21 @@
strncpy(host_wellness_driver_ver->driver_version + strlen(softs->os_name), PQISRC_DRIVER_VERSION,
sizeof(host_wellness_driver_ver->driver_version) - strlen(softs->os_name));
} else {
- DBG_DISC("OS name length(%lu) is longer than buffer of driver_version\n",
- strlen(softs->os_name));
+ DBG_DISC("OS name length(%u) is longer than buffer of driver_version\n",
+ (unsigned int)strlen(softs->os_name));
}
host_wellness_driver_ver->driver_version[sizeof(host_wellness_driver_ver->driver_version) - 1] = '\0';
host_wellness_driver_ver->end_tag[0] = 'Z';
host_wellness_driver_ver->end_tag[1] = 'Z';
- rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_driver_ver,data_length,
- BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
+
+ request.data_direction = SOP_DATA_DIR_FROM_DEVICE;
+ request.cmd.bmic_cdb.op_code = BMIC_WRITE;
+ request.cmd.bmic_cdb.cmd = BMIC_WRITE_HOST_WELLNESS;
+ request.cmd.bmic_cdb.xfer_len = BE_16(data_length);
+
+ rval = pqisrc_prepare_send_ctrlr_request(softs, &request, host_wellness_driver_ver, data_length);
os_mem_free(softs, (char *)host_wellness_driver_ver, data_length);
@@ -1757,15 +1984,27 @@
host_wellness_time->end_tag[0] = 'Z';
host_wellness_time->end_tag[1] = 'Z';
- rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_time,data_length,
- BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
+
+ request.data_direction = SOP_DATA_DIR_FROM_DEVICE;
+ request.cmd.bmic_cdb.op_code = BMIC_WRITE;
+ request.cmd.bmic_cdb.cmd = BMIC_WRITE_HOST_WELLNESS;
+ request.cmd.bmic_cdb.xfer_len = BE_16(data_length);
+
+ rval = pqisrc_prepare_send_ctrlr_request(softs, &request, host_wellness_time, data_length);
os_mem_free(softs, (char *)host_wellness_time, data_length);
DBG_FUNC("OUT");
return rval;
}
-
+static void
+pqisrc_get_device_vpd_info(pqisrc_softstate_t *softs,
+ bmic_ident_physdev_t *bmic_phy_info,pqi_scsi_dev_t *device)
+{
+ DBG_FUNC("IN\n");
+ memcpy(&device->wwid, &bmic_phy_info->padding[79], sizeof(device->wwid));
+ DBG_FUNC("OUT\n");
+}
/*
* Function used to perform a rescan of scsi devices
* for any config change events
@@ -1774,7 +2013,7 @@
pqisrc_scan_devices(pqisrc_softstate_t *softs)
{
boolean_t is_physical_device;
- int ret = PQI_STATUS_FAILURE;
+ int ret;
int i;
int new_dev_cnt;
int phy_log_dev_cnt;
@@ -1794,7 +2033,9 @@
bmic_ident_physdev_t *bmic_phy_info = NULL;
pqi_scsi_dev_t **new_device_list = NULL;
pqi_scsi_dev_t *device = NULL;
-
+#ifdef PQI_NEED_RESCAN_TIMER_FOR_RBOD_HOTPLUG
+ int num_ext_raid_devices = 0;
+#endif
DBG_FUNC("IN\n");
@@ -1815,7 +2056,7 @@
/ sizeof(logical_queue_dev_list->lun_entries[0]);
- DBG_DISC("physical_cnt %d logical_cnt %d queue_cnt %d\n", physical_cnt, logical_cnt, logical_queue_cnt);
+ DBG_DISC("physical_cnt %u logical_cnt %u queue_cnt %u\n", physical_cnt, logical_cnt, logical_queue_cnt);
if (physical_cnt) {
bmic_phy_info = os_mem_alloc(softs, sizeof(*bmic_phy_info));
@@ -1863,6 +2104,9 @@
/* Save the target sas adderess for external raid device */
if(lun_ext_entry->device_type == CONTROLLER_DEVICE) {
+#ifdef PQI_NEED_RESCAN_TIMER_FOR_RBOD_HOTPLUG
+ num_ext_raid_devices++;
+#endif
int target = lun_ext_entry->lunid[3] & 0x3f;
softs->target_sas_addr[target] = BE_64(lun_ext_entry->wwid);
}
@@ -1881,9 +2125,9 @@
device->is_external_raid_device =
pqisrc_is_external_raid_addr(scsi3addr);
/* The multiplier is the value we multiply the queue
- * depth value with to get the actual queue depth.
- * If multiplier is 1 multiply by 256 if
- * multiplier 0 then multiply by 16 */
+	 * depth value by to get the actual queue depth.
+	 * If the multiplier is 1, multiply by 256; if it
+	 * is 0, multiply by 16. */
multiplier = logical_queue_dev_list->lun_entries[i - physical_cnt].multiplier;
qdepth = logical_queue_dev_list->lun_entries[i - physical_cnt].queue_depth;
if (multiplier) {
@@ -1898,11 +2142,12 @@
device->queue_depth = softs->adapterQDepth;
}
if ((multiplier == 1) &&
- (qdepth <= 0 || qdepth >= MAX_RAW_M256_QDEPTH))
+ (qdepth >= MAX_RAW_M256_QDEPTH))
device->firmware_queue_depth_set = false;
if ((multiplier == 0) &&
- (qdepth <= 0 || qdepth >= MAX_RAW_M16_QDEPTH))
+ (qdepth >= MAX_RAW_M16_QDEPTH))
device->firmware_queue_depth_set = false;
+
}
@@ -1915,12 +2160,12 @@
continue;
}
/* Set controller queue depth to what
- * it was from the scsi midlayer */
+ * it was from the scsi midlayer */
if (device->devtype == RAID_DEVICE) {
device->firmware_queue_depth_set = true;
device->queue_depth = softs->adapterQDepth;
}
- pqisrc_assign_btl(device);
+ pqisrc_assign_btl(softs, device);
/*
* Expose all devices except for physical devices that
@@ -1954,9 +2199,12 @@
if (device->is_physical_device) {
device->ioaccel_handle =
lun_ext_entry->ioaccel_handle;
- device->sas_address = BE_64(lun_ext_entry->wwid);
pqisrc_get_physical_device_info(softs, device,
bmic_phy_info);
+				if (!softs->page83id_in_rpl && (bmic_phy_info->device_type == BMIC_DEVICE_TYPE_SATA)) {
+ pqisrc_get_device_vpd_info(softs, bmic_phy_info, device);
+ }
+ device->sas_address = BE_64(device->wwid);
}
new_dev_cnt++;
break;
@@ -1987,7 +2235,12 @@
}
}
DBG_DISC("new_dev_cnt %d\n", new_dev_cnt);
-
+#ifdef PQI_NEED_RESCAN_TIMER_FOR_RBOD_HOTPLUG
+ if(num_ext_raid_devices)
+ os_start_rescan_timer(softs);
+ else
+ os_stop_rescan_timer(softs);
+#endif
pqisrc_update_device_list(softs, new_device_list, new_dev_cnt);
err_out:
@@ -2002,7 +2255,7 @@
}
}
os_mem_free(softs, (char *)new_device_list,
- sizeof(*new_device_list) * ndev_allocated);
+ sizeof(*new_device_list) * ndev_allocated);
}
if(physical_dev_list)
os_mem_free(softs, (char *)physical_dev_list, phys_data_length);
@@ -2025,18 +2278,15 @@
void
pqisrc_cleanup_devices(pqisrc_softstate_t *softs)
{
-
- int i = 0,j = 0;
- pqi_scsi_dev_t *dvp = NULL;
+ int i = 0;
+ pqi_scsi_dev_t *device = NULL;
DBG_FUNC("IN\n");
-
- for(i = 0; i < PQI_MAX_DEVICES; i++) {
- for(j = 0; j < PQI_MAX_MULTILUN; j++) {
- if (softs->device_list[i][j] == NULL)
- continue;
- dvp = softs->device_list[i][j];
- pqisrc_device_mem_free(softs, dvp);
- }
+ for(i = 0; i < PQI_MAX_DEVICES; i++) {
+ if(softs->dev_list[i] == NULL)
+ continue;
+ device = softs->dev_list[i];
+ pqisrc_device_mem_free(softs, device);
}
+
DBG_FUNC("OUT\n");
}
diff --git a/sys/dev/smartpqi/smartpqi_event.c b/sys/dev/smartpqi/smartpqi_event.c
--- a/sys/dev/smartpqi/smartpqi_event.c
+++ b/sys/dev/smartpqi/smartpqi_event.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -62,6 +62,7 @@
struct pqi_event *event)
{
+ int ret;
pqi_event_acknowledge_request_t request;
ib_queue_t *ib_q = &softs->op_raid_ib_q[0];
int tmo = PQISRC_EVENT_ACK_RESP_TIMEOUT;
@@ -77,8 +78,11 @@
request.additional_event_id = event->additional_event_id;
/* Submit Event Acknowledge */
-
- pqisrc_submit_cmnd(softs, ib_q, &request);
+ ret = pqisrc_submit_cmnd(softs, ib_q, &request);
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Unable to submit acknowledge command\n");
+ goto out;
+ }
/*
* We have to special-case this type of request because the firmware
@@ -91,9 +95,10 @@
if (tmo <= 0) {
DBG_ERR("wait for event acknowledge timed out\n");
DBG_ERR("tmo : %d\n",tmo);
- }
+ }
- DBG_FUNC(" OUT\n");
+out:
+ DBG_FUNC("OUT\n");
}
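The special case in pqisrc_ack_event() polls instead of waiting on a completion, since the firmware never posts one for an acknowledge. A minimal sketch of that countdown, with a flag standing in for the driver's queue-index comparison:

#include <stdbool.h>
#include <stdio.h>

static bool ack_consumed;	/* stand-in for "the controller consumed the ACK" */

/* Returns 0 once the ACK is consumed, -1 on timeout. */
static int wait_for_ack_consumed(int tmo_ticks)
{
	while (tmo_ticks > 0 && !ack_consumed) {
		/* the driver sleeps a tick here; we just count down */
		tmo_ticks--;
	}
	return (tmo_ticks > 0) ? 0 : -1;
}

int main(void)
{
	ack_consumed = true;
	printf("%d\n", wait_for_ack_consumed(1000));	/* 0 */
	return 0;
}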
/*
@@ -178,11 +183,10 @@
event_q = &softs->event_q;
obq_ci = event_q->ci_local;
obq_pi = *(event_q->pi_virt_addr);
- DBG_INFO("Initial Event_q ci : %d Event_q pi : %d\n", obq_ci, obq_pi);
while(1) {
int event_index;
- DBG_INFO("queue_id : %d ci : %d pi : %d\n",obq_id, obq_ci, obq_pi);
+ DBG_INFO("Event queue_id : %d, ci : %u, pi : %u\n",obq_id, obq_ci, obq_pi);
if (obq_pi == obq_ci)
break;
@@ -191,10 +195,13 @@
/* Copy the response */
memcpy(&response, event_q->array_virt_addr + (obq_ci * event_q->elem_size),
sizeof(pqi_event_response_t));
- DBG_INFO("response.header.iu_type : 0x%x \n", response.header.iu_type);
- DBG_INFO("response.event_type : 0x%x \n", response.event_type);
+ DBG_INIT("event iu_type=0x%x event_type=0x%x\n",
+ response.header.iu_type, response.event_type);
event_index = pqisrc_event_type_to_event_index(response.event_type);
+		if (event_index == PQI_EVENT_LOGICAL_DEVICE) {
+ softs->ld_rescan = true;
+ }
if (event_index >= 0) {
if(response.request_acknowledge) {
@@ -224,6 +231,58 @@
}
+/*
+ * Function used to build and send the vendor general request
+ * Used for configuring PQI feature bits between firmware and driver
+ */
+int
+pqisrc_build_send_vendor_request(pqisrc_softstate_t *softs,
+ struct pqi_vendor_general_request *request)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ ib_queue_t *op_ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
+ ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
+
+ rcb_t *rcb = NULL;
+
+ /* Get the tag */
+ request->request_id = pqisrc_get_tag(&softs->taglist);
+ if (INVALID_ELEM == request->request_id) {
+ DBG_ERR("Tag not available\n");
+ ret = PQI_STATUS_FAILURE;
+ goto err_notag;
+ }
+
+ request->response_id = ob_q->q_id;
+
+ rcb = &softs->rcb[request->request_id];
+
+ rcb->req_pending = true;
+ rcb->tag = request->request_id;
+
+ ret = pqisrc_submit_cmnd(softs, op_ib_q, request);
+
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Unable to submit command\n");
+ goto err_out;
+ }
+
+ ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Management request timed out!\n");
+ goto err_out;
+ }
+
+ ret = rcb->status;
+
+err_out:
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist, request->request_id);
+err_notag:
+	DBG_FUNC("OUT\n");
+ return ret;
+}
+
/*
* Function used to send a general management request to adapter.
*/
@@ -248,6 +307,7 @@
rcb = &softs->rcb[request->request_id];
rcb->req_pending = true;
rcb->tag = request->request_id;
+
/* Submit command on operational raid ib queue */
ret = pqisrc_submit_cmnd(softs, op_ib_q, request);
if (ret != PQI_STATUS_SUCCESS) {
@@ -256,6 +316,7 @@
}
ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
+
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Management request timed out !!\n");
goto err_cmd;
@@ -331,7 +392,7 @@
DBG_FUNC(" IN\n");
memset(&buf_report_event, 0, sizeof(struct dma_mem));
- buf_report_event.tag = "pqi_report_event_buf" ;
+	os_strlcpy(buf_report_event.tag, "pqi_report_event_buf", sizeof(buf_report_event.tag));
buf_report_event.size = alloc_size;
buf_report_event.align = PQISRC_DEFAULT_DMA_ALIGN;
@@ -392,7 +453,7 @@
DBG_FUNC(" IN\n");
memset(&buf_set_event, 0, sizeof(struct dma_mem));
- buf_set_event.tag = "pqi_set_event_buf";
+ os_strlcpy(buf_set_event.tag, "pqi_set_event_buf", sizeof(buf_set_event.tag));
buf_set_event.size = alloc_size;
buf_set_event.align = PQISRC_DEFAULT_DMA_ALIGN;
diff --git a/sys/dev/smartpqi/smartpqi_features.c b/sys/dev/smartpqi/smartpqi_features.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_features.c
@@ -0,0 +1,520 @@
+/*-
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include "smartpqi_includes.h"
+
+/*
+ * Checks a firmware feature status, given bit position.
+ */
+static inline boolean_t
+pqi_is_firmware_feature_supported(
+ struct pqi_config_table_firmware_features *firmware_features,
+ unsigned int bit_position)
+{
+ unsigned int byte_index;
+
+ byte_index = bit_position / BITS_PER_BYTE;
+
+ if (byte_index >= firmware_features->num_elements) {
+ DBG_ERR_NO_SOFTS("Invalid byte index for bit position %u\n",
+ bit_position);
+ return false;
+ }
+
+ return (firmware_features->features_supported[byte_index] &
+ (1 << (bit_position % BITS_PER_BYTE))) ? true : false;
+}
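A worked example of the byte/bit arithmetic above, for a hypothetical feature bit 11:

#include <stdio.h>

#define BITS_PER_BYTE 8

int main(void)
{
	unsigned int bit_position = 11;		/* hypothetical feature bit */
	unsigned int byte_index = bit_position / BITS_PER_BYTE;		/* 1 */
	unsigned char mask = 1 << (bit_position % BITS_PER_BYTE);	/* 0x08 */

	printf("byte %u, mask 0x%02x\n", byte_index, mask);
	return 0;
}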
+
+/*
+ * Counts down into the enabled section of firmware
+ * features and reports current enabled status, given
+ * bit position.
+ */
+static inline boolean_t
+pqi_is_firmware_feature_enabled(
+ struct pqi_config_table_firmware_features *firmware_features,
+ uint8_t *firmware_features_iomem_addr,
+ unsigned int bit_position)
+{
+ unsigned int byte_index;
+ uint8_t *features_enabled_iomem_addr;
+
+ byte_index = (bit_position / BITS_PER_BYTE) +
+ (firmware_features->num_elements * 2);
+
+ features_enabled_iomem_addr = firmware_features_iomem_addr +
+ offsetof(struct pqi_config_table_firmware_features,
+ features_supported) + byte_index;
+
+ return (*features_enabled_iomem_addr &
+ (1 << (bit_position % BITS_PER_BYTE))) ? true : false;
+}
+
+/*
+ * Sets the given bit position for the driver to request the indicated
+ * firmware feature be enabled.
+ */
+static inline void
+pqi_request_firmware_feature(
+ struct pqi_config_table_firmware_features *firmware_features,
+ unsigned int bit_position)
+{
+ unsigned int byte_index;
+
+ /* byte_index adjusted to index into requested start bits */
+ byte_index = (bit_position / BITS_PER_BYTE) +
+ firmware_features->num_elements;
+
+ /* setting requested bits of local firmware_features */
+ firmware_features->features_supported[byte_index] |=
+ (1 << (bit_position % BITS_PER_BYTE));
+}
+
+/*
+ * Creates and sends the request for firmware to update the config
+ * table.
+ */
+static int
+pqi_config_table_update(pqisrc_softstate_t *softs,
+ uint16_t first_section, uint16_t last_section)
+{
+ struct pqi_vendor_general_request request;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+
+ request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
+ request.header.iu_length = sizeof(request) - PQI_REQUEST_HEADER_LENGTH;
+ request.function_code = PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE;
+ request.data.config_table_update.first_section = first_section;
+ request.data.config_table_update.last_section = last_section;
+
+ ret = pqisrc_build_send_vendor_request(softs, &request);
+
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Failed to submit vendor general request IU, Ret status: %d\n", ret);
+ }
+
+ return ret;
+}
+
+/*
+ * Copies requested features bits into firmware config table,
+ * checks for support, and returns status of updating the config table.
+ */
+static int
+pqi_enable_firmware_features(pqisrc_softstate_t *softs,
+ struct pqi_config_table_firmware_features *firmware_features,
+ uint8_t *firmware_features_abs_addr)
+{
+ uint8_t *features_requested;
+ uint8_t *features_requested_abs_addr;
+ uint16_t *host_max_known_feature_iomem_addr;
+ uint16_t pqi_max_feature = PQI_FIRMWARE_FEATURE_MAXIMUM;
+
+ features_requested = firmware_features->features_supported +
+ firmware_features->num_elements;
+
+ features_requested_abs_addr = firmware_features_abs_addr +
+ (features_requested - (uint8_t*)firmware_features);
+ /*
+ * NOTE: This memcpy is writing to a BAR-mapped address
+ * which may not be safe for all OSes without proper API
+ */
+ memcpy(features_requested_abs_addr, features_requested,
+ firmware_features->num_elements);
+
+ if (pqi_is_firmware_feature_supported(firmware_features,
+ PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
+ host_max_known_feature_iomem_addr =
+ (uint16_t*)(features_requested_abs_addr +
+ (firmware_features->num_elements * 2) + sizeof(uint16_t));
+ /*
+ * NOTE: This writes to a BAR-mapped address
+ * which may not be safe for all OSes without proper API
+ */
+ *host_max_known_feature_iomem_addr = pqi_max_feature;
+ }
+
+ return pqi_config_table_update(softs,
+ PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES,
+ PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES);
+}
+
+typedef struct pqi_firmware_feature pqi_firmware_feature_t;
+typedef void (*feature_status_fn)(pqisrc_softstate_t *softs,
+ pqi_firmware_feature_t *firmware_feature);
+
+struct pqi_firmware_feature {
+ char *feature_name;
+ unsigned int feature_bit;
+ boolean_t supported;
+ boolean_t enabled;
+ feature_status_fn feature_status;
+};
+
+static void
+pqi_firmware_feature_status(pqisrc_softstate_t *softs,
+ struct pqi_firmware_feature *firmware_feature)
+{
+ if (!firmware_feature->supported) {
+ DBG_NOTE("%s not supported by controller\n",
+ firmware_feature->feature_name);
+ return;
+ }
+
+ if (firmware_feature->enabled) {
+ DBG_NOTE("%s enabled\n", firmware_feature->feature_name);
+ return;
+ }
+
+ DBG_NOTE("failed to enable %s\n", firmware_feature->feature_name);
+}
+
+static void
+pqi_ctrl_update_feature_flags(pqisrc_softstate_t *softs,
+ struct pqi_firmware_feature *firmware_feature)
+{
+ switch (firmware_feature->feature_bit) {
+ case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
+ softs->aio_raid1_write_bypass = firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
+ softs->aio_raid5_write_bypass = firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
+ softs->aio_raid6_write_bypass = firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
+ softs->timeout_in_passthrough = true;
+ break;
+ case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
+ softs->timeout_in_tmf = true;
+ break;
+ case PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN:
+ break;
+ case PQI_FIRMWARE_FEATURE_PAGE83_IDENTIFIER_FOR_RPL_WWID:
+ softs->page83id_in_rpl = true;
+ break;
+ default:
+ DBG_NOTE("Nothing to do\n");
+ return;
+ }
+ /* for any valid feature, also go update the feature status. */
+ pqi_firmware_feature_status(softs, firmware_feature);
+}
+
+
+static inline void
+pqi_firmware_feature_update(pqisrc_softstate_t *softs,
+ struct pqi_firmware_feature *firmware_feature)
+{
+ if (firmware_feature->feature_status)
+ firmware_feature->feature_status(softs, firmware_feature);
+}
+
+/* Defines PQI features that driver wishes to support */
+static struct pqi_firmware_feature pqi_firmware_features[] = {
+#if 0
+ {
+ .feature_name = "Online Firmware Activation",
+ .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "Serial Management Protocol",
+ .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
+ .feature_status = pqi_firmware_feature_status,
+ },
+#endif
+ {
+ .feature_name = "SATA WWN Unique ID",
+ .feature_bit = PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "RAID IU Timeout",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "TMF IU Timeout",
+ .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "Support for RPL WWID filled by Page83 identifier",
+ .feature_bit = PQI_FIRMWARE_FEATURE_PAGE83_IDENTIFIER_FOR_RPL_WWID,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ /* Features independent of Maximum Known Feature should be added
+	   before Maximum Known Feature */
+ {
+ .feature_name = "Maximum Known Feature",
+ .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 0 Read Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 1 Read Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 5 Read Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 6 Read Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 0 Write Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 1 Write Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "RAID 5 Write Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "RAID 6 Write Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+#if 0
+ {
+ .feature_name = "New Soft Reset Handshake",
+ .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+#endif
+
+};
+
+static void
+pqi_process_firmware_features(pqisrc_softstate_t *softs,
+ void *features, void *firmware_features_abs_addr)
+{
+ int rc;
+ struct pqi_config_table_firmware_features *firmware_features = features;
+ unsigned int i;
+ unsigned int num_features_supported;
+
+	/* Iterate through the local PQI feature support list to
+	   see whether the controller also supports each feature */
+ for (i = 0, num_features_supported = 0;
+ i < ARRAY_SIZE(pqi_firmware_features); i++) {
+		/* The SATA_WWN_FOR_DEV_UNIQUE_ID feature is enabled via a module
+		   parameter; if it is not set, skip checking for this feature */
+ if ((pqi_firmware_features[i].feature_bit ==
+ PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN) &&
+ (!softs->sata_unique_wwn)) {
+ continue;
+ }
+ if (pqi_is_firmware_feature_supported(firmware_features,
+ pqi_firmware_features[i].feature_bit)) {
+ pqi_firmware_features[i].supported = true;
+ num_features_supported++;
+ } else {
+ DBG_WARN("Feature %s is not supported by firmware\n",
+ pqi_firmware_features[i].feature_name);
+ pqi_firmware_feature_update(softs,
+ &pqi_firmware_features[i]);
+
+ /* if max known feature bit isn't supported,
+ * then no other feature bits are supported.
+ */
+ if (pqi_firmware_features[i].feature_bit ==
+ PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)
+ break;
+ }
+ }
+
+ DBG_INFO("Num joint features supported : %u \n", num_features_supported);
+
+ if (num_features_supported == 0)
+ return;
+
+ /* request driver features that are also on firmware-supported list */
+ for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
+ if (!pqi_firmware_features[i].supported)
+ continue;
+#ifdef DEVICE_HINT
+ if (check_device_hint_status(softs, pqi_firmware_features[i].feature_bit))
+ continue;
+#endif
+ pqi_request_firmware_feature(firmware_features,
+ pqi_firmware_features[i].feature_bit);
+ }
+
+ /* enable the features that were successfully requested. */
+ rc = pqi_enable_firmware_features(softs, firmware_features,
+ firmware_features_abs_addr);
+ if (rc) {
+ DBG_ERR("failed to enable firmware features in PQI configuration table\n");
+ for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
+ if (!pqi_firmware_features[i].supported)
+ continue;
+ pqi_firmware_feature_update(softs,
+ &pqi_firmware_features[i]);
+ }
+ return;
+ }
+
+ /* report the features that were successfully enabled. */
+ for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
+ if (!pqi_firmware_features[i].supported)
+ continue;
+ if (pqi_is_firmware_feature_enabled(firmware_features,
+ firmware_features_abs_addr,
+ pqi_firmware_features[i].feature_bit)) {
+ pqi_firmware_features[i].enabled = true;
+ } else {
+ DBG_WARN("Feature %s could not be enabled.\n",
+ pqi_firmware_features[i].feature_name);
+ }
+ pqi_firmware_feature_update(softs,
+ &pqi_firmware_features[i]);
+ }
+}
+
+static void
+pqi_init_firmware_features(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
+ pqi_firmware_features[i].supported = false;
+ pqi_firmware_features[i].enabled = false;
+ }
+}
+
+static void
+pqi_process_firmware_features_section(pqisrc_softstate_t *softs,
+ void *features, void *firmware_features_abs_addr)
+{
+ pqi_init_firmware_features();
+ pqi_process_firmware_features(softs, features, firmware_features_abs_addr);
+}
+
+
+/*
+ * Get the PQI configuration table parameters.
+ * Currently using for heart-beat counter scratch-pad register.
+ */
+int
+pqisrc_process_config_table(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_FAILURE;
+ uint32_t config_table_size;
+ uint32_t section_off;
+ uint8_t *config_table_abs_addr;
+ struct pqi_conf_table *conf_table;
+ struct pqi_conf_table_section_header *section_hdr;
+
+ config_table_size = softs->pqi_cap.conf_tab_sz;
+
+ if (config_table_size < sizeof(*conf_table) ||
+ config_table_size > PQI_CONF_TABLE_MAX_LEN) {
+ DBG_ERR("Invalid PQI conf table length of %u\n",
+ config_table_size);
+ return ret;
+ }
+
+ conf_table = os_mem_alloc(softs, config_table_size);
+ if (!conf_table) {
+ DBG_ERR("Failed to allocate memory for PQI conf table\n");
+ return ret;
+ }
+
+ config_table_abs_addr = (uint8_t *)(softs->pci_mem_base_vaddr +
+ softs->pqi_cap.conf_tab_off);
+
+ PCI_MEM_GET_BUF(softs, config_table_abs_addr,
+ softs->pqi_cap.conf_tab_off,
+ (uint8_t*)conf_table, config_table_size);
+
+ if (memcmp(conf_table->sign, PQI_CONF_TABLE_SIGNATURE,
+ sizeof(conf_table->sign)) != 0) {
+ DBG_ERR("Invalid PQI config signature\n");
+ goto out;
+ }
+
+ section_off = LE_32(conf_table->first_section_off);
+
+ while (section_off) {
+
+		if (section_off + sizeof(*section_hdr) >= config_table_size) {
+ DBG_INFO("Reached end of PQI config table. Breaking off.\n");
+ break;
+ }
+
+ section_hdr = (struct pqi_conf_table_section_header *)((uint8_t *)conf_table + section_off);
+
+ switch (LE_16(section_hdr->section_id)) {
+ case PQI_CONF_TABLE_SECTION_GENERAL_INFO:
+ break;
+ case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES:
+ pqi_process_firmware_features_section(softs, section_hdr, (config_table_abs_addr + section_off));
+ break;
+ case PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA:
+ case PQI_CONF_TABLE_SECTION_DEBUG:
+ break;
+ case PQI_CONF_TABLE_SECTION_HEARTBEAT:
+ softs->heartbeat_counter_off = softs->pqi_cap.conf_tab_off +
+ section_off +
+ offsetof(struct pqi_conf_table_heartbeat, heartbeat_counter);
+ softs->heartbeat_counter_abs_addr = (uint64_t *)(softs->pci_mem_base_vaddr +
+ softs->heartbeat_counter_off);
+ ret = PQI_STATUS_SUCCESS;
+ break;
+ case PQI_CONF_TABLE_SOFT_RESET:
+ break;
+ default:
+ DBG_NOTE("unrecognized PQI config table section ID: 0x%x\n",
+ LE_16(section_hdr->section_id));
+ break;
+ }
+ section_off = LE_16(section_hdr->next_section_off);
+ }
+out:
+	os_mem_free(softs, (void *)conf_table, config_table_size);
+ return ret;
+}
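
smartpqi_features.c above indexes the firmware-features config-table section as three back-to-back bitmaps of num_elements bytes each: supported (written by firmware), requested (written by the host), and enabled (written back by firmware after the update request). The byte arithmetic can be checked in isolation; the struct below is a simplified stand-in for pqi_config_table_firmware_features, not the real layout:

#include <stdbool.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

/* simplified stand-in for the driver's config-table section */
struct fw_features_sketch {
	uint16_t num_elements;	/* bytes per bitmap */
	uint8_t  bitmaps[];	/* supported | requested | enabled */
};

static bool
feature_supported(const struct fw_features_sketch *f, unsigned int bit)
{
	unsigned int byte = bit / BITS_PER_BYTE;	/* first bitmap */

	if (byte >= f->num_elements)
		return (false);
	return ((f->bitmaps[byte] >> (bit % BITS_PER_BYTE)) & 1);
}

static void
feature_request(struct fw_features_sketch *f, unsigned int bit)
{
	/* second bitmap starts num_elements bytes in */
	unsigned int byte = bit / BITS_PER_BYTE + f->num_elements;

	f->bitmaps[byte] |= 1 << (bit % BITS_PER_BYTE);
}

static bool
feature_enabled(const struct fw_features_sketch *f, unsigned int bit)
{
	/* third bitmap starts 2 * num_elements bytes in */
	unsigned int byte = bit / BITS_PER_BYTE + 2 * f->num_elements;

	return ((f->bitmaps[byte] >> (bit % BITS_PER_BYTE)) & 1);
}

Note that the driver reads the enabled bitmap through the BAR-mapped copy of the table, which is why pqi_is_firmware_feature_enabled() takes the iomem base address rather than the host-side copy.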
diff --git a/sys/dev/smartpqi/smartpqi_cmd.c b/sys/dev/smartpqi/smartpqi_helper.h
copy from sys/dev/smartpqi/smartpqi_cmd.c
copy to sys/dev/smartpqi/smartpqi_helper.h
--- a/sys/dev/smartpqi/smartpqi_cmd.c
+++ b/sys/dev/smartpqi/smartpqi_helper.h
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,51 +24,43 @@
*/
-#include "smartpqi_includes.h"
+#ifndef _PQI_HELPER_H
+#define _PQI_HELPER_H
-/*
- * Function to submit the request to the adapter.
- */
-int
-pqisrc_submit_cmnd(pqisrc_softstate_t *softs, ib_queue_t *ib_q, void *req)
+inline uint64_t
+pqisrc_increment_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
- char *slot = NULL;
- uint32_t offset;
- iu_header_t *hdr = (iu_header_t *)req;
- uint32_t iu_len = hdr->iu_length + 4 ; /* header size */
- int i = 0;
- DBG_FUNC("IN\n");
-
- PQI_LOCK(&ib_q->lock);
-
- /* Check queue full */
- if ((ib_q->pi_local + 1) % ib_q->num_elem == *(ib_q->ci_virt_addr)) {
- DBG_WARN("OUT Q full\n");
- PQI_UNLOCK(&ib_q->lock);
- return PQI_STATUS_QFULL;
- }
-
- /* Get the slot */
- offset = ib_q->pi_local * ib_q->elem_size;
- slot = ib_q->array_virt_addr + offset;
+#if PQISRC_DEVICE_IO_COUNTER
+	/* Increment device active IO count by one */
+ return OS_ATOMIC64_INC(&device->active_requests);
+#endif
+}
- /* Copy the IU */
- memcpy(slot, req, iu_len);
- DBG_INFO("IU : \n");
- for(i = 0; i< iu_len; i++)
- DBG_INFO(" IU [ %d ] : %x\n", i, *((unsigned char *)(slot + i)));
+inline uint64_t
+pqisrc_decrement_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+#if PQISRC_DEVICE_IO_COUNTER
+	/* Decrement device active IO count by one */
+ return OS_ATOMIC64_DEC(&device->active_requests);
+#endif
+}
- /* Update the local PI */
- ib_q->pi_local = (ib_q->pi_local + 1) % ib_q->num_elem;
- DBG_INFO("ib_q->pi_local : %x IU size : %d\n",
- ib_q->pi_local, hdr->iu_length);
- DBG_INFO("*ib_q->ci_virt_addr: %x\n",
- *(ib_q->ci_virt_addr));
+inline void
+pqisrc_init_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+#if PQISRC_DEVICE_IO_COUNTER
+ /* Reset device count to Zero */
+ OS_ATOMIC64_INIT(&device->active_requests, 0);
+#endif
+}
- /* Inform the fw about the new IU */
- PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local);
- PQI_UNLOCK(&ib_q->lock);
- DBG_FUNC("OUT\n");
- return PQI_STATUS_SUCCESS;
+inline uint64_t
+pqisrc_read_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+#if PQISRC_DEVICE_IO_COUNTER
+	/* Read device active count */
+ return OS_ATOMIC64_READ(&device->active_requests);
+#endif
}
+#endif /* _PQI_HELPER_H */
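
The helper functions moved here are plain C99 inline definitions, and the smartpqi_helper.c hunk below supplies the matching extern inline declarations. That pairing matters: under the C99 inline model, a header-only inline definition leaves the symbol undefined at link time whenever the compiler emits an out-of-line call, so exactly one translation unit must provide the external definition. A minimal sketch of the pattern, with hypothetical names:

/* counter.h: inline definition visible to every includer */
#ifndef COUNTER_H
#define COUNTER_H

inline int
counter_next(int *c)
{
	return (++*c);	/* callers may get this body inlined */
}

#endif /* COUNTER_H */

/* counter.c: exactly one TU emits the out-of-line definition */
#include "counter.h"

extern inline int counter_next(int *c);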
diff --git a/sys/dev/smartpqi/smartpqi_helper.c b/sys/dev/smartpqi/smartpqi_helper.c
--- a/sys/dev/smartpqi/smartpqi_helper.c
+++ b/sys/dev/smartpqi/smartpqi_helper.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,57 +26,6 @@
#include "smartpqi_includes.h"
-/* read and modify controller diagnostic option - PQI_PTRAID_UPDATE_ON_RESCAN_LUNS */
-void
-pqisrc_ctrl_diagnostic_options(pqisrc_softstate_t *softs)
-{
- int ret = PQI_STATUS_SUCCESS;
- uint32_t diags_options = 0;
- pqisrc_raid_req_t request;
-
- DBG_NOTE("IN\n");
-
- memset(&request, 0, sizeof(request));
- /* read diags options of controller */
- ret = pqisrc_build_send_raid_request(softs, &request,
- (void*)&diags_options,
- sizeof(diags_options),
- BMIC_SENSE_DIAGS_OPTIONS,
- 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
- if (ret != PQI_STATUS_SUCCESS) {
- DBG_WARN("Request failed for BMIC Sense Diags Option command."
- "ret:%d\n",ret);
- return;
- }
- DBG_NOTE("diags options data after read: %#x\n",diags_options);
- diags_options |= PQI_PTRAID_UPDATE_ON_RESCAN_LUNS;
- DBG_NOTE("diags options data to write: %#x\n",diags_options);
- memset(&request, 0, sizeof(request));
- /* write specified diags options to controller */
- ret = pqisrc_build_send_raid_request(softs, &request,
- (void*)&diags_options,
- sizeof(diags_options),
- BMIC_SET_DIAGS_OPTIONS,
- 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
- if (ret != PQI_STATUS_SUCCESS)
- DBG_WARN("Request failed for BMIC Set Diags Option command."
- "ret:%d\n",ret);
-#if 0
- diags_options = 0;
- memset(&request, 0, sizeof(request));
- ret = pqisrc_build_send_raid_request(softs, &request,
- (void*)&diags_options,
- sizeof(diags_options),
- BMIC_SENSE_DIAGS_OPTIONS,
- 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
- if (ret != PQI_STATUS_SUCCESS)
- DBG_WARN("Request failed for BMIC Sense Diags Option command."
- "ret:%d\n",ret);
- DBG_NOTE("diags options after re-read: %#x\n",diags_options);
-#endif
- DBG_NOTE("OUT\n");
-}
-
/*
* Function used to validate the adapter health.
*/
@@ -89,7 +38,6 @@
return !softs->ctrl_online;
}
-
/* Function used set/clear legacy INTx bit in Legacy Interrupt INTx
* mask clear pqi register
*/
@@ -97,20 +45,14 @@
pqisrc_configure_legacy_intx(pqisrc_softstate_t *softs, boolean_t enable_intx)
{
uint32_t intx_mask;
- uint32_t *reg_addr __unused;
-
- DBG_FUNC("IN\n");
- if (enable_intx)
- reg_addr = &softs->pqi_reg->legacy_intr_mask_clr;
- else
- reg_addr = &softs->pqi_reg->legacy_intr_mask_set;
+ DBG_FUNC("IN\n");
- intx_mask = PCI_MEM_GET32(softs, reg_addr, PQI_LEGACY_INTR_MASK_CLR);
+ intx_mask = PCI_MEM_GET32(softs, 0, PQI_LEGACY_INTR_MASK_CLR);
intx_mask |= PQISRC_LEGACY_INTX_MASK;
- PCI_MEM_PUT32(softs, reg_addr, PQI_LEGACY_INTR_MASK_CLR ,intx_mask);
+	PCI_MEM_PUT32(softs, 0, PQI_LEGACY_INTR_MASK_CLR, intx_mask);
- DBG_FUNC("OUT\n");
+ DBG_FUNC("OUT\n");
}
/*
@@ -120,16 +62,14 @@
pqisrc_take_devices_offline(pqisrc_softstate_t *softs)
{
pqi_scsi_dev_t *device = NULL;
- int i,j;
+ int i;
DBG_FUNC("IN\n");
for(i = 0; i < PQI_MAX_DEVICES; i++) {
- for(j = 0; j < PQI_MAX_MULTILUN; j++) {
- if(softs->device_list[i][j] == NULL)
- continue;
- device = softs->device_list[i][j];
- pqisrc_remove_device(softs, device);
- }
+ device = softs->dev_list[i];
+ if(device == NULL)
+ continue;
+ pqisrc_remove_device(softs, device);
}
DBG_FUNC("OUT\n");
@@ -143,17 +83,17 @@
{
DBG_FUNC("IN\n");
- softs->ctrl_online = false;
-
int lockupcode = 0;
+ softs->ctrl_online = false;
+
if (SIS_IS_KERNEL_PANIC(softs)) {
- lockupcode = PCI_MEM_GET32(softs, &softs->ioa_reg->mb[7], LEGACY_SIS_SRCV_OFFSET_MAILBOX_7);
- DBG_ERR("Controller FW is not running, Lockup code = %x\n", lockupcode);
- }
- else {
- pqisrc_trigger_nmi_sis(softs);
- }
+ lockupcode = PCI_MEM_GET32(softs, &softs->ioa_reg->mb[7], LEGACY_SIS_SRCV_OFFSET_MAILBOX_7);
+ DBG_ERR("Controller FW is not running, Lockup code = %x\n", lockupcode);
+ }
+ else {
+ pqisrc_trigger_nmi_sis(softs);
+ }
os_complete_outstanding_cmds_nodevice(softs);
pqisrc_wait_for_rescan_complete(softs);
@@ -169,23 +109,34 @@
pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *softs)
{
uint8_t take_offline = false;
+ uint64_t new_heartbeat;
+ static uint32_t running_ping_cnt = 0;
DBG_FUNC("IN\n");
- if (CTRLR_HEARTBEAT_CNT(softs) == softs->prev_heartbeat_count) {
+ new_heartbeat = CTRLR_HEARTBEAT_CNT(softs);
+ DBG_IO("heartbeat old=%lx new=%lx\n", softs->prev_heartbeat_count, new_heartbeat);
+
+ if (new_heartbeat == softs->prev_heartbeat_count) {
take_offline = true;
goto take_ctrl_offline;
}
- softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs);
- DBG_INFO("CTRLR_HEARTBEAT_CNT(softs) = %lx \
- softs->prev_heartbeat_count = %lx\n",
- CTRLR_HEARTBEAT_CNT(softs), softs->prev_heartbeat_count);
+
+#if 1
+ /* print every 30 calls (should print once/minute) */
+ running_ping_cnt++;
+
+ if ((running_ping_cnt % 30) == 0)
+ print_all_counters(softs, COUNTER_FLAG_ONLY_NON_ZERO);
+#endif
+
+ softs->prev_heartbeat_count = new_heartbeat;
take_ctrl_offline:
if (take_offline){
DBG_ERR("controller is offline\n");
- pqisrc_take_ctrl_offline(softs);
os_stop_heartbeat_timer(softs);
+ pqisrc_take_ctrl_offline(softs);
}
DBG_FUNC("OUT\n");
}
@@ -253,7 +204,7 @@
boolean_t
pqisrc_is_hba_lunid(uint8_t *scsi3addr)
{
- return pqisrc_scsi3addr_equal(scsi3addr, (uint8_t*)RAID_CTLR_LUNID);
+ return pqisrc_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}
/* Function used to validate type of device */
@@ -287,8 +238,8 @@
"RAID 1(1+0)",
"RAID 5",
"RAID 5+1",
- "RAID ADG",
- "RAID 1(ADM)",
+ "RAID 6",
+ "RAID 1(Triple)",
};
/* Get the RAID level from the index */
@@ -417,6 +368,7 @@
}
+#if 0
uint32_t
pqisrc_count_num_scsi_active_requests_on_dev(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
@@ -436,7 +388,7 @@
check_device_pending_commands_to_complete(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
uint32_t tag = softs->max_outstanding_io, active_requests;
- uint64_t timeout = 0, delay_in_usec = 1000; //In micro Seconds
+	uint64_t timeout = 0, delay_in_usec = 1000; /* in microseconds */
rcb_t* rcb;
DBG_FUNC("IN\n");
@@ -451,7 +403,7 @@
do {
rcb = &softs->rcb[tag];
if(rcb && IS_OS_SCSICMD(rcb) && (rcb->dvp == device) && rcb->req_pending) {
- OS_BUSYWAIT(delay_in_usec);
+ OS_SLEEP(delay_in_usec);
timeout += delay_in_usec;
}
else
@@ -461,49 +413,25 @@
return;
}
} while(tag);
-
}
-
-inline uint64_t
-pqisrc_increment_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
-{
-#if PQISRC_DEVICE_IO_COUNTER
- /*Increment device active io count by one*/
- return OS_ATOMIC64_INC(&device->active_requests);
#endif
-}
-inline uint64_t
-pqisrc_decrement_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
-{
-#if PQISRC_DEVICE_IO_COUNTER
- /*Decrement device active io count by one*/
- return OS_ATOMIC64_DEC(&device->active_requests);
-#endif
-}
+extern inline uint64_t
+pqisrc_increment_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device);
-inline void
-pqisrc_init_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
-{
-#if PQISRC_DEVICE_IO_COUNTER
- /* Reset device count to Zero */
- OS_ATOMIC64_INIT(&device->active_requests, 0);
-#endif
-}
+extern inline uint64_t
+pqisrc_decrement_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device);
-inline uint64_t
-pqisrc_read_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
-{
-#if PQISRC_DEVICE_IO_COUNTER
- /* read device active count*/
- return OS_ATOMIC64_READ(&device->active_requests);
-#endif
-}
+extern inline void
+pqisrc_init_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device);
+
+extern inline uint64_t
+pqisrc_read_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device);
void
pqisrc_wait_for_device_commands_to_complete(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
- uint64_t timeout_in_usec = 0, delay_in_usec = 1000; //In microseconds
+ uint64_t timeout_in_usec = 0, delay_in_usec = 1000; /* In microseconds */
DBG_FUNC("IN\n");
@@ -511,16 +439,16 @@
return;
#if PQISRC_DEVICE_IO_COUNTER
- DBG_NOTE("Device Outstanding IO count = %ld\n", pqisrc_read_device_active_io(softs, device));
+ DBG_WARN_BTL(device,"Device Outstanding IO count = %lu\n", pqisrc_read_device_active_io(softs, device));
while(pqisrc_read_device_active_io(softs, device)) {
- OS_BUSYWAIT(delay_in_usec); // In microseconds
+ OS_BUSYWAIT(delay_in_usec); /* In microseconds */
if(!softs->ctrl_online) {
DBG_WARN("Controller Offline was detected.\n");
}
timeout_in_usec += delay_in_usec;
if(timeout_in_usec >= PQISRC_PENDING_IO_TIMEOUT_USEC) {
- DBG_WARN("timed out waiting for pending IO. DeviceOutStandingIo's=%ld\n",
+ DBG_WARN_BTL(device,"timed out waiting for pending IO. DeviceOutStandingIo's=%lu\n",
pqisrc_read_device_active_io(softs, device));
return;
}
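
The reworked heartbeat handler above samples CTRLR_HEARTBEAT_CNT() once per tick and now stops the timer before calling pqisrc_take_ctrl_offline(), so the handler cannot fire again while teardown is in progress. The stall detection itself reduces to a comparison against the previous sample, along these lines (names hypothetical):

#include <stdbool.h>
#include <stdint.h>

struct hb_state_sketch {
	uint64_t prev;	/* heartbeat counter from the previous tick */
};

/* Returns true when the firmware heartbeat stalled between samples. */
static bool
heartbeat_stalled(struct hb_state_sketch *s, uint64_t current)
{
	if (current == s->prev)
		return (true);	/* caller stops the timer, then offlines */
	s->prev = current;
	return (false);
}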
diff --git a/sys/dev/smartpqi/smartpqi_includes.h b/sys/dev/smartpqi/smartpqi_includes.h
--- a/sys/dev/smartpqi/smartpqi_includes.h
+++ b/sys/dev/smartpqi/smartpqi_includes.h
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -77,11 +77,10 @@
#include <vm/pmap.h>
-
#include "smartpqi_defines.h"
#include "smartpqi_structures.h"
#include "smartpqi_prototypes.h"
#include "smartpqi_ioctl.h"
+#include "smartpqi_helper.h"
-
-#endif // _PQI_INCLUDES_H
+#endif /* _PQI_INCLUDES_H */
diff --git a/sys/dev/smartpqi/smartpqi_init.c b/sys/dev/smartpqi/smartpqi_init.c
--- a/sys/dev/smartpqi/smartpqi_init.c
+++ b/sys/dev/smartpqi/smartpqi_init.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,9 +26,6 @@
#include "smartpqi_includes.h"
-/* 5 mins timeout for quiesce */
-#define PQI_QUIESCE_TIMEOUT 300000
-
/*
* Request the adapter to get PQI capabilities supported.
*/
@@ -36,7 +33,7 @@
pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
-
+
DBG_FUNC("IN\n");
gen_adm_req_iu_t admin_req;
@@ -49,7 +46,6 @@
capability = os_mem_alloc(softs, sizeof(*capability));
if (!capability) {
DBG_ERR("Failed to allocate memory for capability\n");
- ret = PQI_STATUS_FAILURE;
goto err_out;
}
@@ -57,7 +53,7 @@
memset(&admin_resp, 0, sizeof(admin_resp));
memset(&pqi_cap_dma_buf, 0, sizeof(struct dma_mem));
- pqi_cap_dma_buf.tag = "pqi_cap_buf";
+ os_strlcpy(pqi_cap_dma_buf.tag, "pqi_cap_buf", sizeof(pqi_cap_dma_buf.tag));
pqi_cap_dma_buf.size = REPORT_PQI_DEV_CAP_DATA_BUF_SIZE;
pqi_cap_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
@@ -110,6 +106,12 @@
DBG_INIT("softs->ib_spanning_supported: %d\n", softs->ib_spanning_supported);
DBG_INIT("softs->ob_spanning_supported: %d\n", softs->ob_spanning_supported);
+ /* Not expecting these to change, could cause problems if they do */
+ ASSERT(softs->pqi_dev_cap.max_iq_elem_len == PQISRC_OP_MAX_ELEM_SIZE);
+ ASSERT(softs->pqi_dev_cap.min_iq_elem_len == PQISRC_OP_MIN_ELEM_SIZE);
+ ASSERT(softs->max_ib_iu_length_per_fw == PQISRC_MAX_SPANNING_IU_LENGTH);
+ ASSERT(softs->ib_spanning_supported == true);
+
os_mem_free(softs, (void *)capability,
REPORT_PQI_DEV_CAP_DATA_BUF_SIZE);
@@ -135,6 +137,7 @@
void
pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count)
{
+
uint32_t num_req;
size_t size;
int i;
@@ -152,6 +155,7 @@
/*
* Allocate memory for rcb and SG descriptors.
+ * TODO: SG list should be created separately
*/
static int
pqisrc_allocate_rcb(pqisrc_softstate_t *softs)
@@ -168,11 +172,11 @@
/* Set maximum outstanding requests */
/* The valid tag values are from 1, 2, ..., softs->max_outstanding_io
* The rcb will be accessed by using the tag as index
- * As 0 tag index is not used, we need to allocate one extra.
+ * As 0 tag index is not used, we need to allocate one extra.
*/
softs->max_outstanding_io = softs->pqi_cap.max_outstanding_io;
num_req = softs->max_outstanding_io + 1;
- DBG_INIT("Max Outstanding IO reset to %d\n", num_req);
+ DBG_INIT("Max Outstanding IO reset to %u\n", num_req);
alloc_size = num_req * sizeof(rcb_t);
@@ -192,9 +196,10 @@
prcb = &softs->rcb[1];
/* Initialize rcb */
for(i=1; i < num_req; i++) {
+		/* TODO: tag is a local variable here */
char tag[15];
sprintf(tag, "sg_dma_buf%d", i);
- softs->sg_dma_desc[i].tag = tag;
+ os_strlcpy(softs->sg_dma_desc[i].tag, tag, sizeof(softs->sg_dma_desc[i].tag));
softs->sg_dma_desc[i].size = sg_buf_size;
softs->sg_dma_desc[i].align = PQISRC_DEFAULT_DMA_ALIGN;
@@ -231,7 +236,9 @@
DBG_INIT("softs->intr_count : %d softs->num_cpus_online : %d",
softs->intr_count, softs->num_cpus_online);
-
+
+	/* TODO: Get the number of IB and OB queues from OS layer */
+
if (softs->intr_count == 1 || softs->num_cpus_online == 1) {
/* Share the event and Operational queue. */
softs->num_op_obq = 1;
@@ -244,32 +251,33 @@
softs->share_opq_and_eventq = false;
}
/* If the available interrupt count is more than one,
- we dont need to share the interrupt for IO and event queue */
+	we don't need to share the interrupt for IO and event queue */
if (softs->intr_count > 1)
softs->share_opq_and_eventq = false;
- DBG_INIT("softs->num_op_obq : %d\n",softs->num_op_obq);
+ DBG_INIT("softs->num_op_obq : %u\n",softs->num_op_obq);
+
+	/* TODO: Reset the interrupt count based on number of queues */
softs->num_op_raid_ibq = softs->num_op_obq;
softs->num_op_aio_ibq = softs->num_op_raid_ibq;
- softs->ibq_elem_size = softs->pqi_dev_cap.max_iq_elem_len * 16;
- softs->obq_elem_size = softs->pqi_dev_cap.max_oq_elem_len * 16;
+ softs->max_ibq_elem_size = softs->pqi_dev_cap.max_iq_elem_len * 16;
+ softs->max_obq_elem_size = softs->pqi_dev_cap.max_oq_elem_len * 16;
if (softs->max_ib_iu_length_per_fw == 256 &&
softs->ob_spanning_supported) {
/* older f/w that doesn't actually support spanning. */
- softs->max_ib_iu_length = softs->ibq_elem_size;
+ softs->max_ib_iu_length = softs->max_ibq_elem_size;
} else {
 		/* max. inbound IU length is a multiple of our inbound element size. */
- softs->max_ib_iu_length =
- (softs->max_ib_iu_length_per_fw / softs->ibq_elem_size) *
- softs->ibq_elem_size;
-
+ softs->max_ib_iu_length = PQISRC_ROUND_DOWN(softs->max_ib_iu_length_per_fw,
+ softs->max_ibq_elem_size);
}
+
/* If Max. Outstanding IO came with Max. Spanning element count then,
needed elements per IO are multiplication of
Max.Outstanding IO and Max.Spanning element */
total_iq_elements = (softs->max_outstanding_io *
- (softs->max_ib_iu_length / softs->ibq_elem_size));
+ (softs->max_ib_iu_length / softs->max_ibq_elem_size));
softs->num_elem_per_op_ibq = total_iq_elements / softs->num_op_raid_ibq;
softs->num_elem_per_op_ibq = MIN(softs->num_elem_per_op_ibq,
@@ -279,15 +287,23 @@
softs->num_elem_per_op_obq = MIN(softs->num_elem_per_op_obq,
softs->pqi_dev_cap.max_oq_elements);
- softs->max_sg_per_iu = ((softs->max_ib_iu_length -
- softs->ibq_elem_size) /
- sizeof(sgt_t)) +
- MAX_EMBEDDED_SG_IN_FIRST_IU;
+ /* spanning elements should be 9 (1152/128) */
+	softs->max_spanning_elems = softs->max_ib_iu_length / softs->max_ibq_elem_size;
+ ASSERT(softs->max_spanning_elems == PQISRC_MAX_SPANNING_ELEMS);
+
+ /* max SGs should be 8 (128/16) */
+ softs->max_sg_per_single_iu_element = softs->max_ibq_elem_size / sizeof(sgt_t);
+ ASSERT(softs->max_sg_per_single_iu_element == MAX_EMBEDDED_SG_IN_IU);
- DBG_INIT("softs->max_ib_iu_length: %d\n", softs->max_ib_iu_length);
- DBG_INIT("softs->num_elem_per_op_ibq: %d\n", softs->num_elem_per_op_ibq);
- DBG_INIT("softs->num_elem_per_op_obq: %d\n", softs->num_elem_per_op_obq);
- DBG_INIT("softs->max_sg_per_iu: %d\n", softs->max_sg_per_iu);
+	/* max SGs for spanning cmd should be 68 */
+ softs->max_sg_per_spanning_cmd = (softs->max_spanning_elems - 1) * softs->max_sg_per_single_iu_element;
+ softs->max_sg_per_spanning_cmd += MAX_EMBEDDED_SG_IN_FIRST_IU_DEFAULT;
+
+ DBG_INIT("softs->max_ib_iu_length: %d\n", softs->max_ib_iu_length); /* 1152 per FW advertisement */
+ DBG_INIT("softs->num_elem_per_op_ibq: %u\n", softs->num_elem_per_op_ibq); /* 32 for xcal */
+ DBG_INIT("softs->num_elem_per_op_obq: %u\n", softs->num_elem_per_op_obq); /* 256 for xcal */
+ DBG_INIT("softs->max_spanning_elems: %d\n", softs->max_spanning_elems); /* 9 */
+ DBG_INIT("softs->max_sg_per_spanning_cmd: %u\n", softs->max_sg_per_spanning_cmd); /* 68 until we add AIO writes */
DBG_FUNC("OUT\n");
}
@@ -384,293 +400,6 @@
return ret;
}
-/* PQI Feature processing */
-static int
-pqisrc_config_table_update(struct pqisrc_softstate *softs,
- uint16_t first_section, uint16_t last_section)
-{
- pqi_vendor_general_request_t request;
- int ret = PQI_STATUS_FAILURE;
-
- memset(&request, 0, sizeof(request));
-
- request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
- request.header.iu_length = sizeof(request) - PQI_REQUEST_HEADER_LENGTH;
- request.function_code = PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE;
- request.data.config_table_update.first_section = first_section;
- request.data.config_table_update.last_section = last_section;
-
- ret = pqisrc_build_send_vendor_request(softs, &request, NULL);
-
- if (ret != PQI_STATUS_SUCCESS) {
- DBG_ERR("Failed to submit vendor general request IU, Ret status: %d\n", ret);
- return PQI_STATUS_FAILURE;
- }
-
- return PQI_STATUS_SUCCESS;
-}
-
-static inline
-boolean_t pqi_is_firmware_feature_supported(
- struct pqi_conf_table_firmware_features *firmware_feature_list,
- unsigned int bit_position)
-{
- unsigned int byte_index;
-
- byte_index = bit_position / BITS_PER_BYTE;
-
- if (byte_index >= firmware_feature_list->num_elements)
- return false;
-
- return firmware_feature_list->features_supported[byte_index] &
- (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
-}
-
-static inline
-boolean_t pqi_is_firmware_feature_enabled(
- struct pqi_conf_table_firmware_features *firmware_feature_list,
- uint8_t *firmware_features_addr, unsigned int bit_position)
-{
- unsigned int byte_index;
- uint8_t *feature_enabled_addr;
-
- byte_index = (bit_position / BITS_PER_BYTE) +
- (firmware_feature_list->num_elements * 2);
-
- feature_enabled_addr = firmware_features_addr +
- offsetof(struct pqi_conf_table_firmware_features,
- features_supported) + byte_index;
-
- return *feature_enabled_addr &
- (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
-}
-
-static inline void
-pqi_request_firmware_feature(
- struct pqi_conf_table_firmware_features *firmware_feature_list,
- unsigned int bit_position)
-{
- unsigned int byte_index;
-
- byte_index = (bit_position / BITS_PER_BYTE) +
- firmware_feature_list->num_elements;
-
- firmware_feature_list->features_supported[byte_index] |=
- (1 << (bit_position % BITS_PER_BYTE));
-}
-
-/* Update PQI config table firmware features section and inform the firmware */
-static int
-pqisrc_set_host_requested_firmware_feature(pqisrc_softstate_t *softs,
- struct pqi_conf_table_firmware_features *firmware_feature_list)
-{
- uint8_t *request_feature_addr;
- void *request_feature_abs_addr;
-
- request_feature_addr = firmware_feature_list->features_supported +
- firmware_feature_list->num_elements;
- request_feature_abs_addr = softs->fw_features_section_abs_addr +
- (request_feature_addr - (uint8_t*)firmware_feature_list);
-
- os_io_memcpy(request_feature_abs_addr, request_feature_addr,
- firmware_feature_list->num_elements);
-
- return pqisrc_config_table_update(softs,
- PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES,
- PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES);
-}
-
-/* Check firmware has enabled the feature specified in the respective bit position. */
-inline boolean_t
-pqisrc_is_firmware_feature_enabled(pqisrc_softstate_t *softs,
- struct pqi_conf_table_firmware_features *firmware_feature_list, uint16_t bit_position)
-{
- uint16_t byte_index;
- uint8_t *features_enabled_abs_addr;
-
- byte_index = (bit_position / BITS_PER_BYTE) +
- (firmware_feature_list->num_elements * 2);
-
- features_enabled_abs_addr = softs->fw_features_section_abs_addr +
- offsetof(struct pqi_conf_table_firmware_features,features_supported) + byte_index;
-
- return *features_enabled_abs_addr &
- (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
-}
-
-static void
-pqi_firmware_feature_status(struct pqisrc_softstate *softs,
- struct pqi_firmware_feature *firmware_feature)
-{
- switch(firmware_feature->feature_bit) {
- case PQI_FIRMWARE_FEATURE_OFA:
- break;
- case PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT:
- softs->timeout_in_passthrough = true;
- break;
- case PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT:
- softs->timeout_in_tmf = true;
- break;
- default:
- DBG_NOTE("Nothing to do \n");
- }
-}
-
-/* Firmware features supported by the driver */
-static struct
-pqi_firmware_feature pqi_firmware_features[] = {
- {
- .feature_name = "Support timeout for pass-through commands",
- .feature_bit = PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT,
- .feature_status = pqi_firmware_feature_status,
- },
- {
- .feature_name = "Support timeout for LUN Reset TMF",
- .feature_bit = PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT,
- .feature_status = pqi_firmware_feature_status,
- }
-};
-
-static void
-pqisrc_process_firmware_features(pqisrc_softstate_t *softs)
-{
- int rc;
- struct pqi_conf_table_firmware_features *firmware_feature_list;
- unsigned int i;
- unsigned int num_features_requested;
-
- firmware_feature_list = (struct pqi_conf_table_firmware_features*)
- softs->fw_features_section_abs_addr;
-
- /* Check features and request those supported by firmware and driver.*/
- for (i = 0, num_features_requested = 0;
- i < ARRAY_SIZE(pqi_firmware_features); i++) {
- /* Firmware support it ? */
- if (pqi_is_firmware_feature_supported(firmware_feature_list,
- pqi_firmware_features[i].feature_bit)) {
- pqi_request_firmware_feature(firmware_feature_list,
- pqi_firmware_features[i].feature_bit);
- pqi_firmware_features[i].supported = true;
- num_features_requested++;
- DBG_NOTE("%s supported by driver, requesting firmware to enable it\n",
- pqi_firmware_features[i].feature_name);
- } else {
- DBG_NOTE("%s supported by driver, but not by current firmware\n",
- pqi_firmware_features[i].feature_name);
- }
- }
- if (num_features_requested == 0)
- return;
-
- rc = pqisrc_set_host_requested_firmware_feature(softs, firmware_feature_list);
- if (rc) {
- DBG_ERR("Failed to update pqi config table\n");
- return;
- }
-
- for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
- if (pqi_is_firmware_feature_enabled(firmware_feature_list,
- softs->fw_features_section_abs_addr, pqi_firmware_features[i].feature_bit)) {
- pqi_firmware_features[i].enabled = true;
- DBG_NOTE("Firmware feature %s enabled \n",pqi_firmware_features[i].feature_name);
- if(pqi_firmware_features[i].feature_status)
- pqi_firmware_features[i].feature_status(softs, &(pqi_firmware_features[i]));
- }
- }
-}
-
-/*
- * Get the PQI configuration table parameters.
- * Currently using for heart-beat counter scratch-pad register.
- */
-int
-pqisrc_process_config_table(pqisrc_softstate_t *softs)
-{
- int ret = PQI_STATUS_FAILURE;
- uint32_t config_table_size;
- uint32_t section_off;
- uint8_t *config_table_abs_addr __unused;
- struct pqi_conf_table *conf_table;
- struct pqi_conf_table_section_header *section_hdr;
-
- config_table_size = softs->pqi_cap.conf_tab_sz;
-
- if (config_table_size < sizeof(*conf_table) ||
- config_table_size > PQI_CONF_TABLE_MAX_LEN) {
- DBG_ERR("Invalid PQI conf table length of %u\n",
- config_table_size);
- return ret;
- }
-
- conf_table = os_mem_alloc(softs, config_table_size);
- if (!conf_table) {
- DBG_ERR("Failed to allocate memory for PQI conf table\n");
- return ret;
- }
-
- if (config_table_size < sizeof(conf_table) ||
- config_table_size > PQI_CONF_TABLE_MAX_LEN) {
- DBG_ERR("Invalid PQI conf table length of %u\n",
- config_table_size);
- goto out;
- }
-
- config_table_abs_addr = (uint8_t *)(softs->pci_mem_base_vaddr +
- softs->pqi_cap.conf_tab_off);
-
- PCI_MEM_GET_BUF(softs, config_table_abs_addr,
- softs->pqi_cap.conf_tab_off,
- (uint8_t*)conf_table, config_table_size);
-
-
- if (memcmp(conf_table->sign, PQI_CONF_TABLE_SIGNATURE,
- sizeof(conf_table->sign)) != 0) {
- DBG_ERR("Invalid PQI config signature\n");
- goto out;
- }
-
- section_off = LE_32(conf_table->first_section_off);
-
- while (section_off) {
-
- if (section_off+ sizeof(*section_hdr) >= config_table_size) {
- DBG_INFO("Reached end of PQI config table. Breaking off.\n");
- break;
- }
-
- section_hdr = (struct pqi_conf_table_section_header *)((uint8_t *)conf_table + section_off);
-
- switch (LE_16(section_hdr->section_id)) {
- case PQI_CONF_TABLE_SECTION_GENERAL_INFO:
- case PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA:
- case PQI_CONF_TABLE_SECTION_DEBUG:
- break;
- case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES:
- softs->fw_features_section_off = softs->pqi_cap.conf_tab_off + section_off;
- softs->fw_features_section_abs_addr = softs->pci_mem_base_vaddr + softs->fw_features_section_off;
- pqisrc_process_firmware_features(softs);
- break;
- case PQI_CONF_TABLE_SECTION_HEARTBEAT:
- softs->heartbeat_counter_off = softs->pqi_cap.conf_tab_off +
- section_off +
- offsetof(struct pqi_conf_table_heartbeat,
- heartbeat_counter);
- softs->heartbeat_counter_abs_addr = (uint64_t *)(softs->pci_mem_base_vaddr +
- softs->heartbeat_counter_off);
- ret = PQI_STATUS_SUCCESS;
- break;
- default:
- DBG_INFO("unrecognized PQI config table section ID: 0x%x\n",
- LE_16(section_hdr->section_id));
- break;
- }
- section_off = LE_16(section_hdr->next_section_off);
- }
-out:
- os_mem_free(softs, (void *)conf_table,config_table_size);
- return ret;
-}
-
 /* Wait for PQI reset completion for the adapter */
int
pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs)
@@ -721,6 +450,7 @@
val |= SIS_PQI_RESET_QUIESCE;
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, LE_32(val));
+ OS_SLEEP(1000); /* 1 ms delay for PCI W/R ordering issue */
ret = pqisrc_sis_wait_for_db_bit_to_clear(softs, SIS_PQI_RESET_QUIESCE);
if (ret) {
DBG_ERR("failed with error %d during quiesce\n", ret);
@@ -734,6 +464,7 @@
PCI_MEM_PUT32(softs, &softs->pqi_reg->dev_reset, PQI_DEV_RESET,
LE_32(pqi_reset_reg.all_bits));
+ OS_SLEEP(1000); /* 1 ms delay for PCI W/R ordering issue */
ret = pqisrc_wait_for_pqi_reset_completion(softs);
if (ret) {
@@ -827,10 +558,9 @@
/* Create Operational queues */
ret = pqisrc_create_op_queues(softs);
if(ret) {
- DBG_ERR("Failed to create op queue\n");
- ret = PQI_STATUS_FAILURE;
- goto err_create_opq;
- }
+ DBG_ERR("Failed to create op queue\n");
+ goto err_create_opq;
+ }
softs->ctrl_online = true;
@@ -851,13 +581,14 @@
return PQI_STATUS_FAILURE;
}
+/* Force the controller back to SIS mode */
int
pqisrc_force_sis(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
if (SIS_IS_KERNEL_PANIC(softs)) {
- DBG_INIT("Controller FW is not running");
+ DBG_ERR("Controller FW is not running\n");
return PQI_STATUS_FAILURE;
}
@@ -888,18 +619,22 @@
return ret;
}
-static int
+/* 5 mins timeout for quiesce */
+#define PQI_QUIESCE_TIMEOUT 300000
+
+int
pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *softs)
{
+
int count = 0;
int ret = PQI_STATUS_SUCCESS;
- DBG_NOTE("softs->taglist.num_elem : %d",softs->taglist.num_elem);
+ DBG_NOTE("softs->taglist.num_elem : %u",softs->taglist.num_elem);
if (softs->taglist.num_elem == softs->max_outstanding_io)
return ret;
else {
- DBG_WARN("%d commands pending\n",
+ DBG_WARN("%u commands pending\n",
softs->max_outstanding_io - softs->taglist.num_elem);
while(1) {
@@ -928,7 +663,7 @@
return ret;
}
-static void
+void
pqisrc_complete_internal_cmds(pqisrc_softstate_t *softs)
{
@@ -938,7 +673,7 @@
for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
rcb = &softs->rcb[tag];
if(rcb->req_pending && is_internal_req(rcb)) {
- rcb->status = REQUEST_FAILED;
+ rcb->status = PQI_STATUS_TIMEOUT;
rcb->req_pending = false;
}
}
@@ -951,7 +686,7 @@
void
pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
{
- int i, ret;
+ int ret;
DBG_FUNC("IN\n");
@@ -975,25 +710,10 @@
softs->devlist_lockcreated = false;
}
- for (i = 0; i < softs->num_op_raid_ibq; i++) {
- /* OP RAID IB Q */
- if(softs->op_raid_ib_q[i].lockcreated==true){
- OS_UNINIT_PQILOCK(&softs->op_raid_ib_q[i].lock);
- softs->op_raid_ib_q[i].lockcreated = false;
- }
- /* OP AIO IB Q */
- if(softs->op_aio_ib_q[i].lockcreated==true){
- OS_UNINIT_PQILOCK(&softs->op_aio_ib_q[i].lock);
- softs->op_aio_ib_q[i].lockcreated = false;
- }
- }
-
- /* Free Op queues */
- os_dma_mem_free(softs, &softs->op_ibq_dma_mem);
- os_dma_mem_free(softs, &softs->op_obq_dma_mem);
- os_dma_mem_free(softs, &softs->event_q_dma_mem);
-
-
+ /* Free all queues */
+ pqisrc_destroy_op_ib_queues(softs);
+ pqisrc_destroy_op_ob_queues(softs);
+ pqisrc_destroy_event_queue(softs);
/* Free rcb */
pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
@@ -1001,13 +721,8 @@
/* Free request id lists */
pqisrc_destroy_taglist(softs,&softs->taglist);
- if(softs->admin_ib_queue.lockcreated==true) {
- OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
- softs->admin_ib_queue.lockcreated = false;
- }
-
/* Free Admin Queue */
- os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
+ pqisrc_destroy_admin_queue(softs);
/* Switch back to SIS mode */
if (pqisrc_force_sis(softs)) {
@@ -1017,6 +732,30 @@
DBG_FUNC("OUT\n");
}
+
+/*
+ * Function to do any sanity checks for OS macros
+ */
+void
+sanity_check_os_behavior(pqisrc_softstate_t *softs)
+{
+#ifdef OS_ATOMIC64_INC
+ OS_ATOMIC64_T atomic_test_var = 0;
+ OS_ATOMIC64_T atomic_ret = 0;
+
+ atomic_ret = OS_ATOMIC64_INC(&atomic_test_var);
+ ASSERT(atomic_ret == 1);
+
+ atomic_ret = OS_ATOMIC64_INC(&atomic_test_var);
+ ASSERT(atomic_ret == 2);
+
+ atomic_ret = OS_ATOMIC64_DEC(&atomic_test_var);
+ ASSERT(atomic_ret == 1);
+#else
+ DBG_INIT("OS needs to define/implement atomic macros\n");
+#endif
+}
+
/*
* Function to initialize the adapter settings.
*/
@@ -1024,25 +763,54 @@
pqisrc_init(pqisrc_softstate_t *softs)
{
int ret = 0;
- int i = 0, j = 0;
+ uint32_t ctrl_type;
DBG_FUNC("IN\n");
+ sanity_check_os_behavior(softs);
+
check_struct_sizes();
- /* Init the Sync interface */
- ret = pqisrc_sis_init(softs);
- if (ret) {
- DBG_ERR("SIS Init failed with error %d\n", ret);
+	/* Get verbose flags, defined in OS code XX_debug.h or similar */
+#ifdef DISABLE_ERR_RESP_VERBOSE
+ softs->err_resp_verbose = false;
+#else
+ softs->err_resp_verbose = true;
+#endif
+
+ /* prevent attachment of revA hardware. */
+ ctrl_type = PQI_GET_CTRL_TYPE(softs);
+ if (ctrl_type == PQI_CTRL_PRODUCT_ID_GEN2_REV_A) {
+ DBG_ERR("adapter at B.D.F=%u.%u.%u: unsupported RevA card.\n",
+ softs->bus_id, softs->device_id, softs->func_id);
+ ret = PQI_STATUS_FAILURE;
goto err_out;
}
+ /* Increment the global adapter ID and tie it to this BDF */
+#ifdef OS_ATOMIC64_INC
+ static OS_ATOMIC64_T g_adapter_cnt = 0;
+ softs->adapter_num = (uint8_t)OS_ATOMIC64_INC(&g_adapter_cnt);
+#else
+ static uint64_t g_adapter_cnt = 0;
+ softs->adapter_num = (uint8_t)++g_adapter_cnt;
+#endif
+ DBG_NOTE("Initializing adapter %u\n", (uint32_t)softs->adapter_num);
+
ret = os_create_semaphore("scan_lock", 1, &softs->scan_lock);
if(ret != PQI_STATUS_SUCCESS){
DBG_ERR(" Failed to initialize scan lock\n");
- goto err_scan_lock;
+ goto err_out;
}
+ /* Init the Sync interface */
+ ret = pqisrc_sis_init(softs);
+ if (ret) {
+ DBG_ERR("SIS Init failed with error %d\n", ret);
+ goto err_sis;
+ }
+
+
/* Init the PQI interface */
ret = pqisrc_pqi_init(softs);
if (ret) {
@@ -1058,25 +826,25 @@
}
/* Report event configuration */
- ret = pqisrc_report_event_config(softs);
- if(ret){
- DBG_ERR(" Failed to configure Report events\n");
+ ret = pqisrc_report_event_config(softs);
+ if(ret){
+ DBG_ERR(" Failed to configure Report events\n");
goto err_event;
}
/* Set event configuration*/
- ret = pqisrc_set_event_config(softs);
- if(ret){
- DBG_ERR(" Failed to configure Set events\n");
- goto err_event;
- }
+ ret = pqisrc_set_event_config(softs);
+ if(ret){
+ DBG_ERR(" Failed to configure Set events\n");
+ goto err_event;
+ }
 	/* Check for PQI spanning */
ret = pqisrc_get_ctrl_fw_version(softs);
- if(ret){
- DBG_ERR(" Failed to get ctrl fw version\n");
- goto err_fw_version;
- }
+ if(ret){
+ DBG_ERR(" Failed to get ctrl fw version\n");
+ goto err_fw_version;
+ }
/* update driver version in to FW */
ret = pqisrc_write_driver_version_to_host_wellness(softs);
@@ -1085,6 +853,12 @@
goto err_host_wellness;
}
+ /* Setup sense features */
+ ret = pqisrc_QuerySenseFeatures(softs);
+ if (ret) {
+ DBG_ERR("Failed to get sense features\n");
+ goto err_sense;
+ }
os_strlcpy(softs->devlist_lock_name, "devlist_lock", LOCKNAME_SIZE);
ret = os_init_spinlock(softs, &softs->devlist_lock, softs->devlist_lock_name);
@@ -1104,12 +878,8 @@
softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL;
- /* Init device list */
- for(i = 0; i < PQI_MAX_DEVICES; i++)
- for(j = 0; j < PQI_MAX_MULTILUN; j++)
- softs->device_list[i][j] = NULL;
-
- pqisrc_init_targetid_pool(softs);
+ memset(softs->dev_list, 0, sizeof(*softs->dev_list));
+ pqisrc_init_bitmap(softs);
DBG_FUNC("OUT\n");
return ret;
@@ -1124,11 +894,12 @@
err_event:
err_host_wellness:
err_intr:
+err_sense:
pqisrc_pqi_uninit(softs);
err_pqi:
- os_destroy_semaphore(&softs->scan_lock);
-err_scan_lock:
pqisrc_sis_uninit(softs);
+err_sis:
+ os_destroy_semaphore(&softs->scan_lock);
err_out:
DBG_FUNC("OUT failed\n");
return ret;
@@ -1162,16 +933,18 @@
memset(&request, 0, sizeof(request));
- rval = pqisrc_build_send_raid_request(softs, &request, flush_buff,
- sizeof(*flush_buff), SA_CACHE_FLUSH, 0,
- (uint8_t *)RAID_CTLR_LUNID, NULL);
+ request.data_direction = SOP_DATA_DIR_FROM_DEVICE;
+ request.cmd.bmic_cdb.op_code = BMIC_WRITE;
+ request.cmd.bmic_cdb.cmd = BMIC_CACHE_FLUSH;
+ request.cmd.bmic_cdb.xfer_len = BE_16(sizeof(*flush_buff));
+
+ rval = pqisrc_prepare_send_ctrlr_request(softs, &request, flush_buff, sizeof(*flush_buff));
+
if (rval) {
DBG_ERR("error in build send raid req ret=%d\n", rval);
}
- if (flush_buff)
- os_mem_free(softs, (void *)flush_buff,
- sizeof(pqisrc_bmic_flush_cache_t));
+ os_mem_free(softs, (void *)flush_buff, sizeof(pqisrc_bmic_flush_cache_t));
DBG_FUNC("OUT\n");
diff --git a/sys/dev/smartpqi/smartpqi_intr.c b/sys/dev/smartpqi/smartpqi_intr.c
--- a/sys/dev/smartpqi/smartpqi_intr.c
+++ b/sys/dev/smartpqi/smartpqi_intr.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,7 @@
#include "smartpqi_includes.h"
+
/*
* Function to get processor count
*/
@@ -34,6 +35,7 @@
{
DBG_FUNC("IN\n");
softs->num_cpus_online = mp_ncpus;
+ bsd_set_hint_adapter_cpu_config(softs);
DBG_FUNC("OUT\n");
return PQI_STATUS_SUCCESS;
@@ -86,10 +88,10 @@
softs->intr_count = 1;
}
- DBG_FUNC("OUT\n");
-
error = bsd_status_to_pqi_status(BSD_SUCCESS);
+ DBG_FUNC("OUT\n");
+
return error;
}
@@ -117,7 +119,7 @@
DBG_FUNC("IN\n");
- if (softs == NULL)
+ if (!softs)
return;
pqisrc_process_response_queue(softs, oq_id);
@@ -138,7 +140,7 @@
DBG_FUNC("IN\n");
- if (softs == NULL)
+ if (!softs)
return;
pqisrc_process_response_queue(softs, oq_id);
@@ -155,7 +157,7 @@
DBG_FUNC("IN\n");
- if (softs == NULL)
+ if (!softs)
return;
pqisrc_process_event_intr_src(softs, oq_id);
@@ -170,10 +172,12 @@
register_legacy_intr(pqisrc_softstate_t *softs)
{
int error = BSD_SUCCESS;
- device_t dev = softs->os_specific.pqi_dev;
+ device_t dev;
DBG_FUNC("IN\n");
+ dev = softs->os_specific.pqi_dev;
+
softs->os_specific.pqi_irq_rid[0] = 0;
softs->os_specific.pqi_irq[0] = bus_alloc_resource_any(dev, \
SYS_RES_IRQ, &softs->os_specific.pqi_irq_rid[0],
@@ -216,12 +220,13 @@
int i = 0;
device_t dev = softs->os_specific.pqi_dev;
int msix_count = softs->intr_count;
+ size_t msix_size = sizeof(pqi_intr_ctx_t) * msix_count;
DBG_FUNC("IN\n");
- softs->os_specific.msi_ctx = os_mem_alloc(softs, sizeof(pqi_intr_ctx_t) * msix_count);
+ softs->os_specific.msi_ctx = os_mem_alloc(softs, msix_size);
if (!softs->os_specific.msi_ctx) {
- DBG_ERR("Memory allocation failed\n");
+ DBG_ERR("Memory allocation failed, Requested memory:%lu bytes\n", (unsigned long)msix_size);
return ENXIO;
}
@@ -282,7 +287,7 @@
return error;
}
softs->os_specific.intr_registered[i] = TRUE;
- /* Add interrupt handlers*/
+ /* Add interrupt handlers*/
for (i = 1; i < msix_count; ++i) {
softs->os_specific.pqi_irq_rid[i] = i+1;
softs->os_specific.pqi_irq[i] = \
@@ -335,7 +340,7 @@
bsd_status = register_msix_intr(softs);
}
- if(bsd_status)
+ if (bsd_status)
DBG_WARN("interrupt registration is failed, error = %d\n", bsd_status);
pqi_status = bsd_status_to_pqi_status(bsd_status);
@@ -422,8 +427,8 @@
if (softs->os_specific.msi_enabled) {
pci_release_msi(dev);
softs->os_specific.msi_enabled = FALSE;
- }
-
+ }
+
DBG_FUNC("OUT\n");
return PQI_STATUS_SUCCESS;
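
register_msix_intr() above now sizes the per-vector context allocation once and reports the requested byte count on failure. A context array of this shape typically carries a softstate back-pointer plus the outbound queue id each vector services, roughly as sketched below (types hypothetical):

#include <stdlib.h>

typedef struct intr_ctx_sketch {
	void *softs;	/* back-pointer for the handler */
	int   oq_id;	/* outbound queue serviced by this vector */
} intr_ctx_sketch_t;

static intr_ctx_sketch_t *
alloc_intr_ctx(void *softs, int msix_count)
{
	size_t sz = sizeof(intr_ctx_sketch_t) * (size_t)msix_count;
	intr_ctx_sketch_t *ctx = calloc(1, sz);

	if (ctx == NULL)
		return (NULL);	/* caller logs the requested size */
	for (int i = 0; i < msix_count; i++) {
		ctx[i].softs = softs;
		ctx[i].oq_id = i;
	}
	return (ctx);
}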
diff --git a/sys/dev/smartpqi/smartpqi_ioctl.h b/sys/dev/smartpqi/smartpqi_ioctl.h
--- a/sys/dev/smartpqi/smartpqi_ioctl.h
+++ b/sys/dev/smartpqi/smartpqi_ioctl.h
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,7 +30,7 @@
/* IOCTL passthrough macros and structures */
#define SENSEINFOBYTES 32 /* note that this value may vary
- between host implementations */
+ between host implementations */
/* transfer direction */
#define PQIIOCTL_NONE 0x00
diff --git a/sys/dev/smartpqi/smartpqi_ioctl.c b/sys/dev/smartpqi/smartpqi_ioctl.c
--- a/sys/dev/smartpqi/smartpqi_ioctl.c
+++ b/sys/dev/smartpqi/smartpqi_ioctl.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -51,7 +51,7 @@
}
/*
- * Device open function for ioctl entry
+ * Device open function for ioctl entry
*/
static int
smartpqi_open(struct cdev *cdev, int flags, int devtype,
@@ -81,10 +81,10 @@
DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
- driver_info->major_version = PQISRC_OS_VERSION;
- driver_info->minor_version = PQISRC_FEATURE_VERSION;
- driver_info->release_version = PQISRC_PATCH_VERSION;
- driver_info->build_revision = PQISRC_BUILD_VERSION;
+ driver_info->major_version = PQISRC_DRIVER_MAJOR;
+ driver_info->minor_version = PQISRC_DRIVER_MINOR;
+ driver_info->release_version = PQISRC_DRIVER_RELEASE;
+ driver_info->build_revision = PQISRC_DRIVER_REVISION;
driver_info->max_targets = PQI_MAX_DEVICES - 1;
driver_info->max_io = softs->max_io_for_scsi_ml;
driver_info->max_transfer_length = softs->pqi_cap.max_transfer_size;
@@ -117,6 +117,7 @@
vendor = pci_get_vendor(dev);
device = pci_get_device(dev);
pci_info->chip_id = ((device << 16) & 0xffff0000) | vendor;
+
DBG_FUNC("OUT\n");
}
@@ -175,6 +176,7 @@
}
DBG_FUNC("OUT error = %d\n", bsd_status);
+
return bsd_status;
}
@@ -232,7 +234,7 @@
int
pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
{
- int ret = PQI_STATUS_SUCCESS;
+ int ret;
char *drv_buf = NULL;
uint32_t tag = 0;
IOCTL_Command_struct *iocommand = (IOCTL_Command_struct *)arg;
@@ -246,18 +248,18 @@
memset(&request, 0, sizeof(request));
memset(&error_info, 0, sizeof(error_info));
- DBG_FUNC("IN");
+ DBG_FUNC("IN\n");
if (pqisrc_ctrl_offline(softs))
return PQI_STATUS_FAILURE;
if (!arg)
- return (PQI_STATUS_FAILURE);
+ return PQI_STATUS_FAILURE;
if (iocommand->buf_size < 1 &&
iocommand->Request.Type.Direction != PQIIOCTL_NONE)
return PQI_STATUS_FAILURE;
- if (iocommand->Request.CDBLen > sizeof(request.cdb))
+ if (iocommand->Request.CDBLen > sizeof(request.cmd.cdb))
return PQI_STATUS_FAILURE;
switch (iocommand->Request.Type.Direction) {
@@ -272,26 +274,23 @@
if (iocommand->buf_size > 0) {
memset(&ioctl_dma_buf, 0, sizeof(struct dma_mem));
- ioctl_dma_buf.tag = "Ioctl_PassthruCmd_Buffer";
+ os_strlcpy(ioctl_dma_buf.tag, "Ioctl_PassthruCmd_Buffer", sizeof(ioctl_dma_buf.tag));
ioctl_dma_buf.size = iocommand->buf_size;
ioctl_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
/* allocate memory */
ret = os_dma_mem_alloc(softs, &ioctl_dma_buf);
if (ret) {
DBG_ERR("Failed to Allocate dma mem for Ioctl PassthruCmd Buffer : %d\n", ret);
- ret = PQI_STATUS_FAILURE;
goto out;
}
- DBG_INFO("ioctl_dma_buf.dma_addr = %p\n",(void*)ioctl_dma_buf.dma_addr);
- DBG_INFO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr);
+ DBG_IO("ioctl_dma_buf.dma_addr = %p\n",(void*)ioctl_dma_buf.dma_addr);
+ DBG_IO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr);
drv_buf = (char *)ioctl_dma_buf.virt_addr;
if (iocommand->Request.Type.Direction & PQIIOCTL_WRITE) {
- if ((ret = os_copy_from_user(softs, (void *)drv_buf,
- (void *)iocommand->buf,
- iocommand->buf_size, mode)) != 0) {
- ret = PQI_STATUS_FAILURE;
+ ret = os_copy_from_user(softs, (void *)drv_buf, (void *)iocommand->buf, iocommand->buf_size, mode);
+ if (ret != 0) {
goto free_mem;
}
}
@@ -302,7 +301,7 @@
PQI_REQUEST_HEADER_LENGTH;
memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes,
sizeof(request.lun_number));
- memcpy(request.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen);
+ memcpy(request.cmd.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen);
request.additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;
switch (iocommand->Request.Type.Direction) {
@@ -330,7 +329,6 @@
tag = pqisrc_get_tag(&softs->taglist);
if (INVALID_ELEM == tag) {
DBG_ERR("Tag not available\n");
- ret = PQI_STATUS_FAILURE;
goto free_mem;
}
request.request_id = tag;
@@ -352,8 +350,7 @@
goto err_out;
}
- ret = pqisrc_wait_on_condition(softs, rcb,
- PQISRC_PASSTHROUGH_CMD_TIMEOUT);
+ ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_PASSTHROUGH_CMD_TIMEOUT);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Passthru IOCTL cmd timed out !!\n");
goto err_out;
@@ -386,29 +383,29 @@
iocommand->error_info.SenseLen = sense_data_length;
}
- if (error_info.data_out_result ==
- PQI_RAID_DATA_IN_OUT_UNDERFLOW){
- rcb->status = REQUEST_SUCCESS;
+ if (error_info.data_out_result == PQI_RAID_DATA_IN_OUT_UNDERFLOW) {
+ rcb->status = PQI_STATUS_SUCCESS;
}
}
- if (rcb->status == REQUEST_SUCCESS && iocommand->buf_size > 0 &&
+ if (rcb->status == PQI_STATUS_SUCCESS && iocommand->buf_size > 0 &&
(iocommand->Request.Type.Direction & PQIIOCTL_READ)) {
- if ((ret = os_copy_to_user(softs, (void*)iocommand->buf,
- (void*)drv_buf, iocommand->buf_size, mode)) != 0) {
- DBG_ERR("Failed to copy the response\n");
- goto err_out;
+ ret = os_copy_to_user(softs, (void*)iocommand->buf, (void*)drv_buf, iocommand->buf_size, mode);
+ if (ret != 0) {
+ DBG_ERR("Failed to copy the response\n");
+ goto err_out;
}
}
os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist, request.request_id);
if (iocommand->buf_size > 0)
- os_dma_mem_free(softs,&ioctl_dma_buf);
+ os_dma_mem_free(softs,&ioctl_dma_buf);
DBG_FUNC("OUT\n");
- return ret;
+ return PQI_STATUS_SUCCESS;
+
err_out:
os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist, request.request_id);
diff --git a/sys/dev/smartpqi/smartpqi_main.c b/sys/dev/smartpqi/smartpqi_main.c
--- a/sys/dev/smartpqi/smartpqi_main.c
+++ b/sys/dev/smartpqi/smartpqi_main.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,7 +29,6 @@
*/
#include "smartpqi_includes.h"
-#include "smartpqi_prototypes.h"
CTASSERT(BSD_SUCCESS == PQI_STATUS_SUCCESS);
@@ -60,6 +59,7 @@
{0x9005, 0x028f, 0x103c, 0x652, PQI_HWIF_SRCV, "E208i-c SR Gen10"},
{0x9005, 0x028f, 0x103c, 0x654, PQI_HWIF_SRCV, "E208i-a SR Gen10"},
{0x9005, 0x028f, 0x103c, 0x655, PQI_HWIF_SRCV, "P408e-m SR Gen10"},
+ {0x9005, 0x028f, 0x9005, 0x659, PQI_HWIF_SRCV, "2100C8iOXS"},
/* (MSCC PM8221 8x12G based) */
{0x9005, 0x028f, 0x103c, 0x700, PQI_HWIF_SRCV, "P204i-c SR Gen10"},
@@ -67,7 +67,7 @@
{0x9005, 0x028f, 0x193d, 0x1104, PQI_HWIF_SRCV, "UN RAID P2404-Mf-4i-2GB"},
{0x9005, 0x028f, 0x193d, 0x1106, PQI_HWIF_SRCV, "UN RAID P2404-Mf-4i-1GB"},
{0x9005, 0x028f, 0x193d, 0x1108, PQI_HWIF_SRCV, "UN RAID P4408-Ma-8i-2GB"},
-
+ {0x9005, 0x028f, 0x193d, 0x1109, PQI_HWIF_SRCV, "UN RAID P4408-Mr-8i-2GB"},
/* (MSCC PM8204 8x12G based) */
{0x9005, 0x028f, 0x9005, 0x800, PQI_HWIF_SRCV, "SmartRAID 3154-8i"},
@@ -81,14 +81,15 @@
{0x9005, 0x028f, 0x152d, 0x8a22, PQI_HWIF_SRCV, "QS-8204-8i"},
{0x9005, 0x028f, 0x193d, 0xf460, PQI_HWIF_SRCV, "UN RAID P460-M4"},
{0x9005, 0x028f, 0x193d, 0xf461, PQI_HWIF_SRCV, "UN RAID P460-B4"},
- {0x9005, 0x028f, 0x1bd4, 0x004b, PQI_HWIF_SRCV, "INSPUR PM8204-2GB"},
- {0x9005, 0x028f, 0x1bd4, 0x004c, PQI_HWIF_SRCV, "INSPUR PM8204-4GB"},
+ {0x9005, 0x028f, 0x1bd4, 0x004b, PQI_HWIF_SRCV, "PM8204-2GB"},
+ {0x9005, 0x028f, 0x1bd4, 0x004c, PQI_HWIF_SRCV, "PM8204-4GB"},
{0x9005, 0x028f, 0x193d, 0x1105, PQI_HWIF_SRCV, "UN RAID P4408-Mf-8i-2GB"},
{0x9005, 0x028f, 0x193d, 0x1107, PQI_HWIF_SRCV, "UN RAID P4408-Mf-8i-4GB"},
{0x9005, 0x028f, 0x1d8d, 0x800, PQI_HWIF_SRCV, "Fiberhome SmartRAID AIS-8204-8i"},
{0x9005, 0x028f, 0x9005, 0x0808, PQI_HWIF_SRCV, "SmartRAID 3101E-4i"},
{0x9005, 0x028f, 0x9005, 0x0809, PQI_HWIF_SRCV, "SmartRAID 3102E-8i"},
{0x9005, 0x028f, 0x9005, 0x080a, PQI_HWIF_SRCV, "SmartRAID 3152-8i/N"},
+ {0x9005, 0x028f, 0x1cc4, 0x0101, PQI_HWIF_SRCV, "Ramaxel FBGF-RAD PM8204"},
/* (MSCC PM8222 8x12G based) */
{0x9005, 0x028f, 0x9005, 0x900, PQI_HWIF_SRCV, "SmartHBA 2100-8i"},
@@ -105,12 +106,13 @@
{0x9005, 0x028f, 0x193d, 0x8461, PQI_HWIF_SRCV, "UN HBA H460-B1"},
{0x9005, 0x028f, 0x193d, 0xc460, PQI_HWIF_SRCV, "UN RAID P460-M2"},
{0x9005, 0x028f, 0x193d, 0xc461, PQI_HWIF_SRCV, "UN RAID P460-B2"},
- {0x9005, 0x028f, 0x1bd4, 0x004a, PQI_HWIF_SRCV, "INSPUR PM8222-SHBA"},
+ {0x9005, 0x028f, 0x1bd4, 0x004a, PQI_HWIF_SRCV, "PM8222-SHBA"},
{0x9005, 0x028f, 0x13fe, 0x8312, PQI_HWIF_SRCV, "MIC-8312BridgeB"},
- {0x9005, 0x028f, 0x1bd4, 0x004f, PQI_HWIF_SRCV, "INSPUR PM8222-HBA"},
+ {0x9005, 0x028f, 0x1bd4, 0x004f, PQI_HWIF_SRCV, "PM8222-HBA"},
{0x9005, 0x028f, 0x1d8d, 0x908, PQI_HWIF_SRCV, "Fiberhome SmartHBA AIS-8222-8i"},
- {0x9005, 0x028f, 0x1bd4, 0x006C, PQI_HWIF_SRCV, "INSPUR RS0800M5E8i"},
- {0x9005, 0x028f, 0x1bd4, 0x006D, PQI_HWIF_SRCV, "INSPUR RS0800M5H8i"},
+ {0x9005, 0x028f, 0x1bd4, 0x006C, PQI_HWIF_SRCV, "RS0800M5E8i"},
+ {0x9005, 0x028f, 0x1bd4, 0x006D, PQI_HWIF_SRCV, "RS0800M5H8i"},
+ {0x9005, 0x028f, 0x1cc4, 0x0201, PQI_HWIF_SRCV, "Ramaxel FBGF-RAD PM8222"},
/* (SRCx MSCC FVB 24x12G based) */
{0x9005, 0x028f, 0x103c, 0x1001, PQI_HWIF_SRCV, "MSCC FVB"},
@@ -124,17 +126,26 @@
{0x9005, 0x028f, 0x9005, 0x1302, PQI_HWIF_SRCV, "SmartHBA 2100-8i8e"},
{0x9005, 0x028f, 0x9005, 0x1303, PQI_HWIF_SRCV, "SmartHBA 2100-24i"},
{0x9005, 0x028f, 0x105b, 0x1321, PQI_HWIF_SRCV, "8242-24i"},
- {0x9005, 0x028f, 0x1bd4, 0x0045, PQI_HWIF_SRCV, "INSPUR SMART-HBA 8242-24i"},
+ {0x9005, 0x028f, 0x1bd4, 0x0045, PQI_HWIF_SRCV, "SMART-HBA 8242-24i"},
+ {0x9005, 0x028f, 0x1bd4, 0x006B, PQI_HWIF_SRCV, "RS0800M5H24i"},
+ {0x9005, 0x028f, 0x1bd4, 0x0070, PQI_HWIF_SRCV, "RS0800M5E24i"},
/* (MSCC PM8236 16x12G based) */
{0x9005, 0x028f, 0x152d, 0x8a24, PQI_HWIF_SRCV, "QS-8236-16i"},
{0x9005, 0x028f, 0x9005, 0x1380, PQI_HWIF_SRCV, "SmartRAID 3154-16i"},
- {0x9005, 0x028f, 0x1bd4, 0x0046, PQI_HWIF_SRCV, "INSPUR RAID 8236-16i"},
+ {0x9005, 0x028f, 0x1bd4, 0x0046, PQI_HWIF_SRCV, "RAID 8236-16i"},
{0x9005, 0x028f, 0x1d8d, 0x806, PQI_HWIF_SRCV, "Fiberhome SmartRAID AIS-8236-16i"},
+ {0x9005, 0x028f, 0x1cf2, 0x0B27, PQI_HWIF_SRCV, "ZTE SmartROC3100 SDPSA/B-18i 4G"},
+ {0x9005, 0x028f, 0x1cf2, 0x0B45, PQI_HWIF_SRCV, "ZTE SmartROC3100 SDPSA/B_L-18i 2G"},
+ {0x9005, 0x028f, 0x1cf2, 0x5445, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM241-18i 2G"},
+ {0x9005, 0x028f, 0x1cf2, 0x5446, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM242-18i 4G"},
{0x9005, 0x028f, 0x1cf2, 0x5449, PQI_HWIF_SRCV, "ZTE SmartROC3100 RS241-18i 2G"},
{0x9005, 0x028f, 0x1cf2, 0x544A, PQI_HWIF_SRCV, "ZTE SmartROC3100 RS242-18i 4G"},
{0x9005, 0x028f, 0x1cf2, 0x544D, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM241B-18i 2G"},
{0x9005, 0x028f, 0x1cf2, 0x544E, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM242B-18i 4G"},
+ {0x9005, 0x028f, 0x1bd4, 0x006F, PQI_HWIF_SRCV, "RS0804M5R16i"},
+
/* (MSCC PM8237 24x12G based) */
{0x9005, 0x028f, 0x103c, 0x1100, PQI_HWIF_SRCV, "P816i-a SR Gen10"},
@@ -145,18 +156,24 @@
{0x9005, 0x028f, 0x9005, 0x1280, PQI_HWIF_SRCV, "HBA 1100-16i"},
{0x9005, 0x028f, 0x9005, 0x1281, PQI_HWIF_SRCV, "HBA 1100-16e"},
{0x9005, 0x028f, 0x105b, 0x1211, PQI_HWIF_SRCV, "8238-16i"},
- {0x9005, 0x028f, 0x1bd4, 0x0048, PQI_HWIF_SRCV, "INSPUR SMART-HBA 8238-16i"},
+ {0x9005, 0x028f, 0x1bd4, 0x0048, PQI_HWIF_SRCV, "SMART-HBA 8238-16i"},
{0x9005, 0x028f, 0x9005, 0x1282, PQI_HWIF_SRCV, "SmartHBA 2100-16i"},
{0x9005, 0x028f, 0x1d8d, 0x916, PQI_HWIF_SRCV, "Fiberhome SmartHBA AIS-8238-16i"},
{0x9005, 0x028f, 0x1458, 0x1000, PQI_HWIF_SRCV, "GIGABYTE SmartHBA CLN1832"},
+ {0x9005, 0x028f, 0x1cf2, 0x0B29, PQI_HWIF_SRCV, "ZTE SmartIOC2100 SDPSA/B_I-18i"},
+ {0x9005, 0x028f, 0x1cf2, 0x5447, PQI_HWIF_SRCV, "ZTE SmartIOC2100 RM243-18i"},
+ {0x9005, 0x028f, 0x1cf2, 0x544B, PQI_HWIF_SRCV, "ZTE SmartIOC2100 RS243-18i"},
{0x9005, 0x028f, 0x1cf2, 0x544F, PQI_HWIF_SRCV, "ZTE SmartIOC2100 RM243B-18i"},
+ {0x9005, 0x028f, 0x1bd4, 0x0071, PQI_HWIF_SRCV, "RS0800M5H16i"},
+ {0x9005, 0x028f, 0x1bd4, 0x0072, PQI_HWIF_SRCV, "RS0800M5E16i"},
/* (MSCC PM8240 24x12G based) */
{0x9005, 0x028f, 0x152d, 0x8a36, PQI_HWIF_SRCV, "QS-8240-24i"},
{0x9005, 0x028f, 0x9005, 0x1200, PQI_HWIF_SRCV, "SmartRAID 3154-24i"},
{0x9005, 0x028f, 0x9005, 0x1201, PQI_HWIF_SRCV, "SmartRAID 3154-8i16e"},
{0x9005, 0x028f, 0x9005, 0x1202, PQI_HWIF_SRCV, "SmartRAID 3154-8i8e"},
- {0x9005, 0x028f, 0x1bd4, 0x0047, PQI_HWIF_SRCV, "INSPUR RAID 8240-24i"},
+ {0x9005, 0x028f, 0x1bd4, 0x0047, PQI_HWIF_SRCV, "RAID 8240-24i"},
+ {0x9005, 0x028f, 0x1dfc, 0x3161, PQI_HWIF_SRCV, "NTCOM SAS3 RAID-24i"},
{0x9005, 0x028f, 0x1F0C, 0x3161, PQI_HWIF_SRCV, "NT RAID 3100-24i"},
/* Huawei ID's */
@@ -166,22 +183,127 @@
{0x9005, 0x028f, 0x19e5, 0xd22c, PQI_HWIF_SRCV, "SR455C-M 4G"},
{0x9005, 0x028f, 0x19e5, 0xd229, PQI_HWIF_SRCV, "SR155-M"},
{0x9005, 0x028f, 0x19e5, 0xd22b, PQI_HWIF_SRCV, "SR455C-ME 4G"},
+
+ /* (MSCC PM8252 8x12G based) */
+ {0x9005, 0x028f, 0x193d, 0x110b, PQI_HWIF_SRCV, "UN HBA H4508-Mf-8i"},
+ {0x9005, 0x028f, 0x1bd4, 0x0052, PQI_HWIF_SRCV, "MT0801M6E"},
+ {0x9005, 0x028f, 0x1bd4, 0x0054, PQI_HWIF_SRCV, "MT0800M6H"},
+ {0x9005, 0x028f, 0x1bd4, 0x0086, PQI_HWIF_SRCV, "RT0800M7E"},
+ {0x9005, 0x028f, 0x1bd4, 0x0087, PQI_HWIF_SRCV, "RT0800M7H"},
+ {0x9005, 0x028f, 0x1f51, 0x1001, PQI_HWIF_SRCV, "SmartHBA P6600-8i"},
+ {0x9005, 0x028f, 0x1f51, 0x1003, PQI_HWIF_SRCV, "SmartHBA P6600-8e"},
+ {0x9005, 0x028f, 0x9005, 0x1460, PQI_HWIF_SRCV, "HBA 1200"},
+ {0x9005, 0x028f, 0x9005, 0x1461, PQI_HWIF_SRCV, "SmartHBA 2200"},
+ {0x9005, 0x028f, 0x9005, 0x1462, PQI_HWIF_SRCV, "HBA 1200-8i"},
+
/* (MSCC PM8254 32x12G based) */
+ {0x9005, 0x028f, 0x1bd4, 0x0051, PQI_HWIF_SRCV, "MT0804M6R"},
+ {0x9005, 0x028f, 0x1bd4, 0x0053, PQI_HWIF_SRCV, "MT0808M6R"},
+ {0x9005, 0x028f, 0x1bd4, 0x0088, PQI_HWIF_SRCV, "RT0804M7R"},
+ {0x9005, 0x028f, 0x1bd4, 0x0089, PQI_HWIF_SRCV, "RT0808M7R"},
+ {0x9005, 0x028f, 0x1f51, 0x1002, PQI_HWIF_SRCV, "SmartRAID P7604-8i"},
+ {0x9005, 0x028f, 0x1f51, 0x1004, PQI_HWIF_SRCV, "SmartRAID P7604-8e"},
+ {0x9005, 0x028f, 0x9005, 0x14a0, PQI_HWIF_SRCV, "SmartRAID 3254-8i"},
+ {0x9005, 0x028f, 0x9005, 0x14a1, PQI_HWIF_SRCV, "SmartRAID 3204-8i"},
{0x9005, 0x028f, 0x9005, 0x14a2, PQI_HWIF_SRCV, "SmartRAID 3252-8i"},
{0x9005, 0x028f, 0x9005, 0x14a4, PQI_HWIF_SRCV, "SmartRAID 3254-8i /e"},
{0x9005, 0x028f, 0x9005, 0x14a5, PQI_HWIF_SRCV, "SmartRAID 3252-8i /e"},
{0x9005, 0x028f, 0x9005, 0x14a6, PQI_HWIF_SRCV, "SmartRAID 3204-8i /e"},
-/* (MSCC PM8265 16x12G based) */
+
+ /* (MSCC PM8262 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x14c0, PQI_HWIF_SRCV, "SmartHBA 2200-16i"},
+ {0x9005, 0x028f, 0x9005, 0x14c1, PQI_HWIF_SRCV, "HBA 1200-16i"},
+ {0x9005, 0x028f, 0x9005, 0x14c3, PQI_HWIF_SRCV, "HBA 1200-16e"},
+ {0x9005, 0x028f, 0x9005, 0x14c4, PQI_HWIF_SRCV, "HBA 1200-8e"},
+ {0x9005, 0x028f, 0x1f51, 0x1005, PQI_HWIF_SRCV, "SmartHBA P6600-16i"},
+ {0x9005, 0x028f, 0x1f51, 0x1007, PQI_HWIF_SRCV, "SmartHBA P6600-8i8e"},
+ {0x9005, 0x028f, 0x1f51, 0x1009, PQI_HWIF_SRCV, "SmartHBA P6600-16e"},
+ {0x9005, 0x028f, 0x1cf2, 0x54dc, PQI_HWIF_SRCV, "ZTE SmartIOC2200 RM346-16i"},
+ {0x9005, 0x028f, 0x1cf2, 0x0806, PQI_HWIF_SRCV, "ZTE SmartIOC2200 RS346-16i"},
+
+ /* (MSCC PM8264 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x14b0, PQI_HWIF_SRCV, "SmartRAID 3254-16i"},
+ {0x9005, 0x028f, 0x9005, 0x14b1, PQI_HWIF_SRCV, "SmartRAID 3258-16i"},
+ {0x9005, 0x028f, 0x1f51, 0x1006, PQI_HWIF_SRCV, "SmartRAID P7608-16i"},
+ {0x9005, 0x028f, 0x1f51, 0x1008, PQI_HWIF_SRCV, "SmartRAID P7608-8i8e"},
+ {0x9005, 0x028f, 0x1f51, 0x100a, PQI_HWIF_SRCV, "SmartRAID P7608-16e"},
+ {0x9005, 0x028f, 0x1cf2, 0x54da, PQI_HWIF_SRCV, "ZTE SmartROC3200 RM344-16i 4G"},
+ {0x9005, 0x028f, 0x1cf2, 0x54db, PQI_HWIF_SRCV, "ZTE SmartROC3200 RM345-16i 8G"},
+ {0x9005, 0x028f, 0x1cf2, 0x0804, PQI_HWIF_SRCV, "ZTE SmartROC3200 RS344-16i 4G"},
+ {0x9005, 0x028f, 0x1cf2, 0x0805, PQI_HWIF_SRCV, "ZTE SmartROC3200 RS345-16i 8G"},
+
+ /* (MSCC PM8265 16x12G based) */
+ {0x9005, 0x028f, 0x1590, 0x02dc, PQI_HWIF_SRCV, "SR416i-a Gen10+"},
+ {0x9005, 0x028f, 0x9005, 0x1470, PQI_HWIF_SRCV, "SmartRAID 3200"},
+ {0x9005, 0x028f, 0x9005, 0x1471, PQI_HWIF_SRCV, "SmartRAID 3254-16i /e"},
+ {0x9005, 0x028f, 0x9005, 0x1472, PQI_HWIF_SRCV, "SmartRAID 3258-16i /e"},
+ {0x9005, 0x028f, 0x9005, 0x1473, PQI_HWIF_SRCV, "SmartRAID 3284-16io /e/uC"},
{0x9005, 0x028f, 0x9005, 0x1474, PQI_HWIF_SRCV, "SmartRAID 3254-16io /e"},
-/* (MSCC PM8270 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x1475, PQI_HWIF_SRCV, "SmartRAID 3254-16e /e"},
+
+ /* (MSCC PM8266 16x12G based) */
+ {0x9005, 0x028f, 0x1014, 0x0718, PQI_HWIF_SRCV, "IBM 4-Port 24G SAS"},
+ {0x9005, 0x028f, 0x9005, 0x1490, PQI_HWIF_SRCV, "HBA 1200p Ultra"},
+ {0x9005, 0x028f, 0x9005, 0x1491, PQI_HWIF_SRCV, "SmartHBA 2200p Ultra"},
+ {0x9005, 0x028f, 0x9005, 0x1402, PQI_HWIF_SRCV, "HBA Ultra 1200P-16i"},
+ {0x9005, 0x028f, 0x9005, 0x1441, PQI_HWIF_SRCV, "HBA Ultra 1200P-32i"},
+
+ /* (MSCC PM8268 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x14d0, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-16i"},
+
+ /* (MSCC PM8269 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x1400, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-16i /e"},
+
+ /* (MSCC PM8270 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x1410, PQI_HWIF_SRCV, "HBA Ultra 1200P-16e"},
+ {0x9005, 0x028f, 0x9005, 0x1411, PQI_HWIF_SRCV, "HBA 1200 Ultra"},
+ {0x9005, 0x028f, 0x9005, 0x1412, PQI_HWIF_SRCV, "SmartHBA 2200 Ultra"},
{0x9005, 0x028f, 0x9005, 0x1463, PQI_HWIF_SRCV, "SmartHBA 2200-8io /e"},
{0x9005, 0x028f, 0x9005, 0x14c2, PQI_HWIF_SRCV, "SmartHBA 2200-16io /e"},
+
+ /* (MSCC PM8271 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x14e0, PQI_HWIF_SRCV, "SmartIOC PM8271"},
+
+ /* (MSCC PM8272 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x1420, PQI_HWIF_SRCV, "SmartRAID Ultra 3254-16e"},
+
+ /* (MSCC PM8273 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x1430, PQI_HWIF_SRCV, "SmartRAID Ultra 3254-16e /e"},
+
+ /* (MSCC PM8274 16x12G based) */
+ {0x9005, 0x028f, 0x1e93, 0x1000, PQI_HWIF_SRCV, "ByteHBA JGH43024-8"},
+ {0x9005, 0x028f, 0x1e93, 0x1001, PQI_HWIF_SRCV, "ByteHBA JGH43034-8"},
+ {0x9005, 0x028f, 0x1e93, 0x1005, PQI_HWIF_SRCV, "ByteHBA JGH43014-8"},
+
+ /* (MSCC PM8275 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x14f0, PQI_HWIF_SRCV, "SmartIOC PM8275"},
+
+ /* (MSCC PM8276 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x1480, PQI_HWIF_SRCV, "SmartRAID 3200 Ultra"},
+ {0x9005, 0x028f, 0x1e93, 0x1002, PQI_HWIF_SRCV, "ByteHBA JGH44014-8"},
+
+ /* (MSCC PM8278 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x1440, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-32i"},
+
/* (MSCC PM8279 32x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x1450, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-32i /e"},
+ {0x9005, 0x028f, 0x1590, 0x0294, PQI_HWIF_SRCV, "SR932i-p Gen10+"},
{0x9005, 0x028f, 0x1590, 0x0381, PQI_HWIF_SRCV, "SR932i-p Gen11"},
{0x9005, 0x028f, 0x1590, 0x0382, PQI_HWIF_SRCV, "SR308i-p Gen11"},
{0x9005, 0x028f, 0x1590, 0x0383, PQI_HWIF_SRCV, "SR308i-o Gen11"},
{0x9005, 0x028f, 0x1590, 0x02db, PQI_HWIF_SRCV, "SR416ie-m Gen11"},
{0x9005, 0x028f, 0x1590, 0x032e, PQI_HWIF_SRCV, "SR416i-o Gen11"},
+ {0x9005, 0x028f, 0x9005, 0x1452, PQI_HWIF_SRCV, "SmartRAID 3200p Ultra"},
+
+ /* (MSCC HBA/SMARTHBA/CFF SmartRAID - Lenovo 8X12G 16X12G based) */
+ {0x9005, 0x028f, 0x1d49, 0x0220, PQI_HWIF_SRCV, "4350-8i SAS/SATA HBA"},
+ {0x9005, 0x028f, 0x1d49, 0x0221, PQI_HWIF_SRCV, "4350-16i SAS/SATA HBA"},
+ {0x9005, 0x028f, 0x1d49, 0x0520, PQI_HWIF_SRCV, "5350-8i"},
+ {0x9005, 0x028f, 0x1d49, 0x0522, PQI_HWIF_SRCV, "5350-8i INTR"},
+ {0x9005, 0x028f, 0x1d49, 0x0620, PQI_HWIF_SRCV, "9350-8i 2GB Flash"},
+ {0x9005, 0x028f, 0x1d49, 0x0621, PQI_HWIF_SRCV, "9350-8i 2GB Flash INTR"},
+ {0x9005, 0x028f, 0x1d49, 0x0622, PQI_HWIF_SRCV, "9350-16i 4GB Flash"},
+ {0x9005, 0x028f, 0x1d49, 0x0623, PQI_HWIF_SRCV, "9350-16i 4GB Flash INTR"},
{0, 0, 0, 0, 0, 0}
};
@@ -195,11 +317,26 @@
/*
* Function to identify the installed adapter.
*/
-static struct
-pqi_ident *pqi_find_ident(device_t dev)
+static struct pqi_ident *
+pqi_find_ident(device_t dev)
{
struct pqi_ident *m;
u_int16_t vendid, devid, sub_vendid, sub_devid;
+	static long AllowWildcards = 0xffffffff;
+
+#ifdef DEVICE_HINT
+	int result;
+
+	if (AllowWildcards == 0xffffffff)
+	{
+		result = resource_long_value("smartpqi", 0, "allow_wildcards", &AllowWildcards);
+
+ /* the default case if the hint is not found is to allow wildcards */
+ if (result != DEVICE_HINT_SUCCESS) {
+ AllowWildcards = 1;
+ }
+ }
+
+#endif
vendid = pci_get_vendor(dev);
devid = pci_get_device(dev);
@@ -216,7 +353,16 @@
for (m = pqi_family_identifiers; m->vendor != 0; m++) {
if ((m->vendor == vendid) && (m->device == devid)) {
- return (m);
+ if (AllowWildcards != 0)
+ {
+ DBG_NOTE("Controller device ID matched using wildcards\n");
+ return (m);
+ }
+ else
+ {
+				DBG_NOTE("Controller not probed because device ID wildcards are disabled\n");
+ return (NULL);
+ }
}
}
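When the driver is built with DEVICE_HINT, the probe above can be steered from the loader. A minimal sketch, assuming the stock device.hints mechanism; the keyword and the hardcoded unit 0 follow the resource_long_value() call above:

	# /boot/device.hints
	hint.smartpqi.0.allow_wildcards="0"	# refuse wildcard device-ID matches
	# unset, or any nonzero value, allows wildcard matches (the default)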
@@ -253,6 +399,97 @@
}
+static void
+read_device_hint_resource(struct pqisrc_softstate *softs,
+	char *keyword, uint32_t *value)
+{
+	device_t dev = softs->os_specific.pqi_dev;
+	long hint_val = 0;
+
+	DBG_FUNC("IN\n");
+
+	/* Read into a local long: resource_long_value() writes a full
+	 * long, which would overrun the caller's uint32_t on 64-bit. */
+	if (resource_long_value("smartpqi", device_get_unit(dev), keyword, &hint_val) == DEVICE_HINT_SUCCESS) {
+		if (hint_val) {
+			/* resource set to 1 in the device hint file
+			 * disables the firmware feature. */
+			*value = 0;
+		}
+		else {
+			/* resource set to 0 in the device hint file
+			 * enables the firmware feature. */
+			*value = 1;
+		}
+	}
+	else {
+		/* Enabled by default */
+		*value = 1;
+	}
+
+	DBG_NOTE("SmartPQI device hint: %s, enabled = %u\n", keyword, *value);
+
+	DBG_FUNC("OUT\n");
+}
+
+static void
+read_device_hint_decimal_value(struct pqisrc_softstate *softs,
+	char *keyword, uint32_t *value)
+{
+	device_t dev = softs->os_specific.pqi_dev;
+	long hint_val = 0;
+
+	DBG_FUNC("IN\n");
+
+	/* As above, read into a long to match resource_long_value(). */
+	if (resource_long_value("smartpqi", device_get_unit(dev), keyword, &hint_val) == DEVICE_HINT_SUCCESS) {
+		/* Use the value read from the device.hints file. */
+		*value = (uint32_t)hint_val;
+	}
+	else {
+		/* Set to max; the driver determines the value later. */
+		*value = 0xFFFF;
+	}
+
+	DBG_FUNC("OUT\n");
+}
+
+static void
+smartpqi_read_all_device_hint_file_entries(struct pqisrc_softstate *softs)
+{
+ uint32_t value = 0;
+
+ DBG_FUNC("IN\n");
+
+ /* hint.smartpqi.0.stream_disable = "0" */
+ read_device_hint_resource(softs, STREAM_DETECTION, &value);
+ softs->hint.stream_status = value;
+
+ /* hint.smartpqi.0.sata_unique_wwn_disable = "0" */
+ read_device_hint_resource(softs, SATA_UNIQUE_WWN, &value);
+ softs->hint.sata_unique_wwn_status = value;
+
+ /* hint.smartpqi.0.aio_raid1_write_disable = "0" */
+ read_device_hint_resource(softs, AIO_RAID1_WRITE_BYPASS, &value);
+ softs->hint.aio_raid1_write_status = value;
+
+ /* hint.smartpqi.0.aio_raid5_write_disable = "0" */
+ read_device_hint_resource(softs, AIO_RAID5_WRITE_BYPASS, &value);
+ softs->hint.aio_raid5_write_status = value;
+
+ /* hint.smartpqi.0.aio_raid6_write_disable = "0" */
+ read_device_hint_resource(softs, AIO_RAID6_WRITE_BYPASS, &value);
+ softs->hint.aio_raid6_write_status = value;
+
+ /* hint.smartpqi.0.queue_depth = "0" */
+ read_device_hint_decimal_value(softs, ADAPTER_QUEUE_DEPTH, &value);
+ softs->hint.queue_depth = value;
+
+ /* hint.smartpqi.0.sg_count = "0" */
+ read_device_hint_decimal_value(softs, SCATTER_GATHER_COUNT, &value);
+ softs->hint.sg_segments = value;
+
+ /* hint.smartpqi.0.queue_count = "0" */
+ read_device_hint_decimal_value(softs, QUEUE_COUNT, &value);
+ softs->hint.cpu_count = value;
+
+	DBG_FUNC("OUT\n");
+}
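Collected in one place, the hints consumed above correspond to the following /boot/device.hints entries. A sketch only: the keyword strings are taken from the comments in the code, the numeric values are illustrative, and a value of "1" in any *_disable hint turns that feature off:

	hint.smartpqi.0.stream_disable="0"
	hint.smartpqi.0.sata_unique_wwn_disable="0"
	hint.smartpqi.0.aio_raid1_write_disable="0"
	hint.smartpqi.0.aio_raid5_write_disable="0"
	hint.smartpqi.0.aio_raid6_write_disable="0"
	hint.smartpqi.0.queue_depth="128"
	hint.smartpqi.0.sg_count="64"
	hint.smartpqi.0.queue_count="4"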
+
+
/*
* Allocate resources for our device, set up the bus interface.
* Initialize the PQI related functionality, scan devices, register sim to
@@ -261,7 +498,7 @@
static int
smartpqi_attach(device_t dev)
{
- struct pqisrc_softstate *softs = NULL;
+ struct pqisrc_softstate *softs;
struct pqi_ident *id = NULL;
int error = BSD_SUCCESS;
u_int32_t command = 0, i = 0;
@@ -368,6 +605,8 @@
softs->os_specific.sim_registered = FALSE;
softs->os_name = "FreeBSD ";
+ smartpqi_read_all_device_hint_file_entries(softs);
+
/* Initialize the PQI library */
error = pqisrc_init(softs);
if (error != PQI_STATUS_SUCCESS) {
@@ -379,17 +618,17 @@
error = BSD_SUCCESS;
}
- mtx_init(&softs->os_specific.cam_lock, "cam_lock", NULL, MTX_DEF);
- softs->os_specific.mtx_init = TRUE;
- mtx_init(&softs->os_specific.map_lock, "map_lock", NULL, MTX_DEF);
+ mtx_init(&softs->os_specific.cam_lock, "cam_lock", NULL, MTX_DEF);
+ softs->os_specific.mtx_init = TRUE;
+ mtx_init(&softs->os_specific.map_lock, "map_lock", NULL, MTX_DEF);
- callout_init(&softs->os_specific.wellness_periodic, 1);
- callout_init(&softs->os_specific.heartbeat_timeout_id, 1);
+ callout_init(&softs->os_specific.wellness_periodic, 1);
+ callout_init(&softs->os_specific.heartbeat_timeout_id, 1);
- /*
- * Create DMA tag for mapping buffers into controller-addressable space.
- */
- if (bus_dma_tag_create(softs->os_specific.pqi_parent_dmat,/* parent */
+ /*
+ * Create DMA tag for mapping buffers into controller-addressable space.
+ */
+ if (bus_dma_tag_create(softs->os_specific.pqi_parent_dmat,/* parent */
PAGE_SIZE, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
@@ -409,7 +648,7 @@
for( i = 1; i <= softs->pqi_cap.max_outstanding_io; i++, rcbp++ ) {
if ((error = bus_dmamap_create(softs->os_specific.pqi_buffer_dmat, 0, &rcbp->cm_datamap)) != 0) {
DBG_ERR("Cant create datamap for buf @"
- "rcbp = %p maxio = %d error = %d\n",
+ "rcbp = %p maxio = %u error = %d\n",
rcbp, softs->pqi_cap.max_outstanding_io, error);
goto dma_out;
}
@@ -425,6 +664,9 @@
error = ENXIO;
goto out;
}
+ else {
+ error = BSD_SUCCESS;
+ }
error = register_sim(softs, card_index);
if (error) {
@@ -443,6 +685,7 @@
card_index, error);
goto out;
}
+
goto out;
dma_out:
@@ -452,6 +695,7 @@
softs->os_specific.pqi_regs_res0);
out:
DBG_FUNC("OUT error = %d\n", error);
+
return(error);
}
@@ -479,6 +723,8 @@
if (rval != PQI_STATUS_SUCCESS) {
DBG_ERR("Unable to flush adapter cache! rval = %d\n", rval);
rval = EIO;
+ } else {
+ rval = BSD_SUCCESS;
}
}
@@ -558,10 +804,11 @@
}
DBG_FUNC("OUT\n");
-
+
return bsd_status;
}
+
/*
* PCI bus interface.
*/
diff --git a/sys/dev/smartpqi/smartpqi_mem.c b/sys/dev/smartpqi/smartpqi_mem.c
--- a/sys/dev/smartpqi/smartpqi_mem.c
+++ b/sys/dev/smartpqi/smartpqi_mem.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -168,9 +168,9 @@
void
*os_mem_alloc(pqisrc_softstate_t *softs, size_t size)
{
- void *addr = NULL;
+ void *addr;
- /* DBG_FUNC("IN\n"); */
+ /* DBG_FUNC("IN\n"); */
addr = malloc((unsigned long)size, M_SMARTPQI,
M_NOWAIT | M_ZERO);
@@ -184,7 +184,7 @@
* Mem resource deallocation wrapper function
*/
void
-os_mem_free(pqisrc_softstate_t *softs, char *addr, size_t size)
+os_mem_free(pqisrc_softstate_t *softs, void *addr, size_t size)
{
/* DBG_FUNC("IN\n"); */
@@ -205,6 +205,6 @@
if (softs->os_specific.pqi_regs_res0 != NULL)
bus_release_resource(softs->os_specific.pqi_dev,
SYS_RES_MEMORY,
- softs->os_specific.pqi_regs_rid0,
+ softs->os_specific.pqi_regs_rid0,
softs->os_specific.pqi_regs_res0);
}
diff --git a/sys/dev/smartpqi/smartpqi_misc.c b/sys/dev/smartpqi/smartpqi_misc.c
--- a/sys/dev/smartpqi/smartpqi_misc.c
+++ b/sys/dev/smartpqi/smartpqi_misc.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,13 +27,13 @@
#include "smartpqi_includes.h"
/*
- * Populate hostwellness time variables in bcd format from FreeBSD format
+ * Populate hostwellness time variables in bcd format from FreeBSD format.
*/
void
os_get_time(struct bmic_host_wellness_time *host_wellness_time)
{
struct timespec ts;
- struct clocktime ct;
+ struct clocktime ct = {0};
getnanotime(&ts);
clock_ts_to_ct(&ts, &ct);
@@ -111,8 +111,9 @@
os_init_spinlock(struct pqisrc_softstate *softs, struct mtx *lock,
char *lockname)
{
- mtx_init(lock, lockname, NULL, MTX_SPIN);
- return 0;
+ mtx_init(lock, lockname, NULL, MTX_SPIN);
+ return 0;
+
}
/*
@@ -180,3 +181,132 @@
else
return PQI_STATUS_FAILURE;
}
+
+/* Return true if the feature is disabled via device hints.
+ * Return false if the feature is enabled via device hints
+ * or if the hints do not control this feature.
+ */
+boolean_t
+check_device_hint_status(struct pqisrc_softstate *softs, unsigned int feature_bit)
+{
+ DBG_FUNC("IN\n");
+
+ switch(feature_bit) {
+ case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
+ if (!softs->hint.aio_raid1_write_status)
+ return true;
+ break;
+ case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
+ if (!softs->hint.aio_raid5_write_status)
+ return true;
+ break;
+ case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
+ if (!softs->hint.aio_raid6_write_status)
+ return true;
+ break;
+ case PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN:
+ if (!softs->hint.sata_unique_wwn_status)
+ return true;
+ break;
+ default:
+ return false;
+ }
+
+ DBG_FUNC("OUT\n");
+
+ return false;
+}
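A minimal sketch of the intended call pattern, with the surrounding enable logic assumed for illustration: before turning on a firmware feature, the driver can let the hint file veto it.

	if (check_device_hint_status(softs, PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS)) {
		/* hint file disabled RAID1 write bypass; leave the feature off */
		return;
	}
	/* otherwise proceed to enable the feature in firmware */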
+
+static void
+bsd_set_hint_adapter_queue_depth(struct pqisrc_softstate *softs)
+{
+ uint32_t queue_depth = softs->pqi_cap.max_outstanding_io;
+
+ DBG_FUNC("IN\n");
+
+ if ((!softs->hint.queue_depth) || (softs->hint.queue_depth >
+ softs->pqi_cap.max_outstanding_io)) {
+ /* Nothing to do here. Supported queue depth
+ * is already set by controller/driver */
+ }
+ else if (softs->hint.queue_depth < PQISRC_MIN_OUTSTANDING_REQ) {
+ /* Nothing to do here. Supported queue depth
+ * is already set by controller/driver */
+ }
+ else {
+ /* Set Device.Hint queue depth here */
+ softs->pqi_cap.max_outstanding_io =
+ softs->hint.queue_depth;
+ }
+
+ DBG_NOTE("Adapter queue depth before hint set = %u, Queue depth after hint set = %u\n",
+ queue_depth, softs->pqi_cap.max_outstanding_io);
+
+ DBG_FUNC("OUT\n");
+}
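A worked example of the clamping above; the controller limit and the minimum are assumed values for illustration:

	/* Assume pqi_cap.max_outstanding_io = 1024 and PQISRC_MIN_OUTSTANDING_REQ = 32:
	 *   hint.queue_depth = 64     -> max_outstanding_io becomes 64
	 *   hint.queue_depth = 8      -> below the minimum, controller value kept
	 *   hint.queue_depth = 0      -> hint treated as unset, controller value kept
	 *   hint.queue_depth = 0xFFFF -> the not-found default, above the limit, controller value kept
	 */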
+
+static void
+bsd_set_hint_scatter_gather_config(struct pqisrc_softstate *softs)
+{
+ uint32_t pqi_sg_segments = softs->pqi_cap.max_sg_elem;
+
+ DBG_FUNC("IN\n");
+
+	/* At least 16 SG segments are required for the hint to work
+	 * correctly; otherwise keep the count set by the driver/controller. */
+
+ if ((!softs->hint.sg_segments) || (softs->hint.sg_segments >
+ softs->pqi_cap.max_sg_elem)) {
+ /* Nothing to do here. Supported sg count
+ * is already set by controller/driver. */
+ }
+	else if (softs->hint.sg_segments < BSD_MIN_SG_SEGMENTS) {
+		/* Nothing to do here. Supported sg count
+		 * is already set by controller/driver. */
+	}
+ else {
+ /* Set Device.Hint sg count here */
+ softs->pqi_cap.max_sg_elem = softs->hint.sg_segments;
+ }
+
+ DBG_NOTE("SG segments before hint set = %u, SG segments after hint set = %u\n",
+ pqi_sg_segments, softs->pqi_cap.max_sg_elem);
+
+ DBG_FUNC("OUT\n");
+}
+
+void
+bsd_set_hint_adapter_cap(struct pqisrc_softstate *softs)
+{
+ DBG_FUNC("IN\n");
+
+ bsd_set_hint_adapter_queue_depth(softs);
+ bsd_set_hint_scatter_gather_config(softs);
+
+ DBG_FUNC("OUT\n");
+}
+
+void
+bsd_set_hint_adapter_cpu_config(struct pqisrc_softstate *softs)
+{
+ DBG_FUNC("IN\n");
+
+	/* The online cpu count decides the number of queues the driver
+	 * can create, as well as the msi interrupt count.
+	 * If the hint file sets the cpu count to zero, the driver falls
+	 * back to one queue and one legacy interrupt (the event queue is
+	 * shared with the operational IB queue).
+	 * See os_get_intr_config() for the interrupt assignment. */
+
+ if (softs->hint.cpu_count > softs->num_cpus_online) {
+ /* Nothing to do here. Supported cpu count
+ * already fetched from hardware */
+ }
+ else {
+ /* Set Device.Hint cpu count here */
+ softs->num_cpus_online = softs->hint.cpu_count;
+ }
+
+ DBG_FUNC("OUT\n");
+}
diff --git a/sys/dev/smartpqi/smartpqi_prototypes.h b/sys/dev/smartpqi/smartpqi_prototypes.h
--- a/sys/dev/smartpqi/smartpqi_prototypes.h
+++ b/sys/dev/smartpqi/smartpqi_prototypes.h
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,15 +29,19 @@
/* Function prototypes */
-/*pqi_init.c */
+/*smartpqi_init.c */
int pqisrc_init(pqisrc_softstate_t *);
void pqisrc_uninit(pqisrc_softstate_t *);
void pqisrc_pqi_uninit(pqisrc_softstate_t *);
int pqisrc_process_config_table(pqisrc_softstate_t *);
int pqisrc_flush_cache(pqisrc_softstate_t *, enum pqisrc_flush_cache_event_type);
int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *);
+int pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *);
+void pqisrc_complete_internal_cmds(pqisrc_softstate_t *);
+void sanity_check_os_behavior(pqisrc_softstate_t *);
-/* pqi_sis.c*/
+
+/* smartpqi_sis.c*/
int pqisrc_sis_init(pqisrc_softstate_t *);
void pqisrc_sis_uninit(pqisrc_softstate_t *);
int pqisrc_reenable_sis(pqisrc_softstate_t *);
@@ -50,17 +54,21 @@
void sis_disable_interrupt(pqisrc_softstate_t*);
-/* pqi_queue.c */
+/* smartpqi_queue.c */
int pqisrc_submit_admin_req(pqisrc_softstate_t *,
gen_adm_req_iu_t *, gen_adm_resp_iu_t *);
int pqisrc_create_admin_queue(pqisrc_softstate_t *);
int pqisrc_destroy_admin_queue(pqisrc_softstate_t *);
int pqisrc_create_op_queues(pqisrc_softstate_t *);
+int pqisrc_allocate_and_init_inbound_q(pqisrc_softstate_t *, ib_queue_t *,
+ char *);
+int pqisrc_allocate_and_init_outbound_q(pqisrc_softstate_t *, ob_queue_t *,
+ char *);
-/* pqi_cmd.c */
+/* smartpqi_cmd.c */
int pqisrc_submit_cmnd(pqisrc_softstate_t *,ib_queue_t *,void *);
-/* pqi_tag.c */
+/* smartpqi_tag.c */
#ifndef LOCKFREE_STACK
int pqisrc_init_taglist(pqisrc_softstate_t *,pqi_taglist_t *,uint32_t);
void pqisrc_destroy_taglist(pqisrc_softstate_t *,pqi_taglist_t *);
@@ -73,26 +81,37 @@
uint32_t pqisrc_get_tag(lockless_stack_t *);
#endif /* LOCKFREE_STACK */
-/* pqi_discovery.c */
+/* smartpqi_discovery.c */
void pqisrc_remove_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
+boolean_t pqisrc_add_softs_entry(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device,
+ uint8_t *scsi3addr);
int pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *);
int pqisrc_rescan_devices(pqisrc_softstate_t *);
int pqisrc_scan_devices(pqisrc_softstate_t *);
-void pqisrc_process_raid_path_io_response(pqisrc_softstate_t *, uint16_t, struct pqi_io_response *);
-void pqisrc_process_io_error_response(pqisrc_softstate_t *, int, uint16_t, struct pqi_io_response *);
void pqisrc_cleanup_devices(pqisrc_softstate_t *);
void pqisrc_device_mem_free(pqisrc_softstate_t *, pqi_scsi_dev_t *);
boolean_t pqisrc_is_external_raid_device(pqi_scsi_dev_t *device);
void pqisrc_free_device(pqisrc_softstate_t * softs,pqi_scsi_dev_t *device);
-void pqisrc_init_targetid_pool(pqisrc_softstate_t *softs);
-int pqisrc_alloc_tid(pqisrc_softstate_t *softs);
-void pqisrc_free_tid(pqisrc_softstate_t *softs, int);
+void pqisrc_init_bitmap(pqisrc_softstate_t *softs);
+void pqisrc_remove_target_bit(pqisrc_softstate_t *softs, int target);
+int pqisrc_find_avail_target(pqisrc_softstate_t *softs);
+int pqisrc_find_device_list_index(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device);
+int pqisrc_find_btl_list_index(pqisrc_softstate_t *softs,
+ int bus, int target, int lun);
+int pqisrc_delete_softs_entry(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device);
int pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd,
- reportlun_data_ext_t **buff, size_t *data_length);
+ reportlun_data_ext_t **buff, size_t *data_length);
int pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs,
uint8_t *scsi3addr, uint16_t vpd_page, uint8_t *buff, int buf_len);
+int pqisrc_simple_dma_alloc(pqisrc_softstate_t *, struct dma_mem *, size_t,
+ sgt_t *);
+int pqisrc_prepare_send_raid(pqisrc_softstate_t *, pqisrc_raid_req_t *,
+ void *, size_t , uint8_t *, raid_path_error_info_elem_t *);
+
-/* pqi_helper.c */
+/* smartpqi_helper.c */
boolean_t pqisrc_ctrl_offline(pqisrc_softstate_t *);
void pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *);
int pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb,
@@ -109,14 +128,13 @@
void pqisrc_ctrl_diagnostic_options(pqisrc_softstate_t *);
void pqisrc_wait_for_device_commands_to_complete(pqisrc_softstate_t *,
pqi_scsi_dev_t *);
+int pqisrc_QuerySenseFeatures(pqisrc_softstate_t *);
void check_device_pending_commands_to_complete(pqisrc_softstate_t *,
pqi_scsi_dev_t *);
uint32_t pqisrc_count_num_scsi_active_requests_on_dev(pqisrc_softstate_t *,
pqi_scsi_dev_t *);
-
-/* pqi_response.c */
-void pqisrc_signal_event(pqisrc_softstate_t *softs, rcb_t *rcb);
+/* smartpqi_response.c */
void pqisrc_process_internal_raid_response_success(pqisrc_softstate_t *,
rcb_t *);
void pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *,
@@ -129,39 +147,73 @@
void pqisrc_process_raid_response_error(pqisrc_softstate_t *,
rcb_t *, uint16_t);
void pqisrc_process_response_queue(pqisrc_softstate_t *, int);
-
-
-
-/* pqi_request.c */
-int pqisrc_build_send_vendor_request(pqisrc_softstate_t*,
- pqi_vendor_general_request_t *,
- raid_path_error_info_elem_t *);
+void pqisrc_show_aio_error_info(pqisrc_softstate_t *softs, rcb_t *rcb,
+ aio_path_error_info_elem_t *aio_err);
+void pqisrc_show_raid_error_info(pqisrc_softstate_t *softs, rcb_t *rcb,
+ raid_path_error_info_elem_t *aio_err);
+boolean_t suppress_innocuous_error_prints(pqisrc_softstate_t *softs,
+ rcb_t *rcb);
+uint8_t pqisrc_get_cmd_from_rcb(rcb_t *);
+boolean_t pqisrc_is_innocuous_error(pqisrc_softstate_t *, rcb_t *, void *);
+
+
+/* smartpqi_request.c */
+int pqisrc_build_send_vendor_request(pqisrc_softstate_t *softs,
+ struct pqi_vendor_general_request *request);
int pqisrc_build_send_io(pqisrc_softstate_t *,rcb_t *);
-
-
-int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t*);
-
-
+int pqisrc_build_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device, rcb_t *rcb);
int pqisrc_send_tmf(pqisrc_softstate_t *, pqi_scsi_dev_t *,
rcb_t *, rcb_t *, int);
int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs);
int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs);
+extern inline void pqisrc_aio_build_cdb(aio_req_locator_t *, uint32_t,
+ rcb_t *, uint8_t *);
+extern inline boolean_t pqisrc_aio_req_too_big(pqisrc_softstate_t *, pqi_scsi_dev_t *,
+ rcb_t *, aio_req_locator_t *, uint32_t);
+void pqisrc_build_aio_common(pqisrc_softstate_t *, pqi_aio_req_t *,
+ rcb_t *, uint32_t);
+void pqisrc_build_aio_R1_write(pqisrc_softstate_t *,
+ pqi_aio_raid1_write_req_t *, rcb_t *, uint32_t);
+void pqisrc_build_aio_R5or6_write(pqisrc_softstate_t *,
+ pqi_aio_raid5or6_write_req_t *, rcb_t *, uint32_t);
+void pqisrc_show_cdb(pqisrc_softstate_t *softs, char *msg, rcb_t *rcb, uint8_t *cdb);
void pqisrc_print_buffer(pqisrc_softstate_t *softs, char *msg, void *user_buf, uint32_t total_len, uint32_t flags);
+void pqisrc_show_rcb_details(pqisrc_softstate_t *softs, rcb_t *rcb, char *msg, void *err_info);
+void pqisrc_show_aio_io(pqisrc_softstate_t *, rcb_t *,
+ pqi_aio_req_t *, uint32_t);
+void pqisrc_show_aio_common(pqisrc_softstate_t *, rcb_t *, pqi_aio_req_t *);
+void pqisrc_show_aio_R1_write(pqisrc_softstate_t *, rcb_t *,
+ pqi_aio_raid1_write_req_t *);
+void pqisrc_show_aio_R5or6_write(pqisrc_softstate_t *, rcb_t *,
+ pqi_aio_raid5or6_write_req_t *);
+boolean_t pqisrc_cdb_is_write(uint8_t *);
+void print_this_counter(pqisrc_softstate_t *softs, io_counters_t *pcounter, char *msg);
+void print_all_counters(pqisrc_softstate_t *softs, uint32_t flags);
char *io_path_to_ascii(IO_PATH_T path);
+void int_to_scsilun(uint64_t, uint8_t *);
+boolean_t pqisrc_cdb_is_read(uint8_t *);
+void pqisrc_build_aio_io(pqisrc_softstate_t *, rcb_t *, pqi_aio_req_t *, uint32_t);
+uint8_t pqisrc_get_aio_data_direction(rcb_t *);
+uint8_t pqisrc_get_raid_data_direction(rcb_t *);
+void dump_tmf_details(pqisrc_softstate_t *, rcb_t *, char *);
+io_type_t get_io_type_from_cdb(uint8_t *);
+OS_ATOMIC64_T increment_this_counter(io_counters_t *, IO_PATH_T, io_type_t);
+boolean_t is_buffer_zero(void *, uint32_t);
+
+
-/* pqi_event.c*/
+
+/* smartpqi_event.c*/
int pqisrc_report_event_config(pqisrc_softstate_t *);
int pqisrc_set_event_config(pqisrc_softstate_t *);
int pqisrc_process_event_intr_src(pqisrc_softstate_t *,int);
void pqisrc_ack_all_events(void *arg);
void pqisrc_wait_for_rescan_complete(pqisrc_softstate_t *softs);
-boolean_t pqisrc_update_scsi_sense(const uint8_t *, int,
- struct sense_header_scsi *);
-int pqisrc_build_send_raid_request(pqisrc_softstate_t *, pqisrc_raid_req_t *,
- void *, size_t, uint8_t, uint16_t, uint8_t *,
- raid_path_error_info_elem_t *);
+int pqisrc_prepare_send_ctrlr_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t *request,
+ void *buff, size_t datasize);
int pqisrc_submit_management_req(pqisrc_softstate_t *,
pqi_event_config_request_t *);
@@ -185,102 +237,86 @@
int pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *);
int pqisrc_create_delete_adminq(pqisrc_softstate_t *, uint32_t);
void pqisrc_print_adminq_config(pqisrc_softstate_t *);
-int pqisrc_delete_op_queue(pqisrc_softstate_t *,
- uint32_t, boolean_t);
+int pqisrc_delete_op_queue(pqisrc_softstate_t *, uint32_t, boolean_t);
void pqisrc_destroy_event_queue(pqisrc_softstate_t *);
-
void pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *);
-
void pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *);
-
-int pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *,
- ib_queue_t *, uint32_t);
-int pqisrc_create_op_obq(pqisrc_softstate_t *,
- ob_queue_t *);
-int pqisrc_create_op_ibq(pqisrc_softstate_t *,
- ib_queue_t *);
+int pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *, ib_queue_t *,
+ uint32_t);
+int pqisrc_create_op_obq(pqisrc_softstate_t *, ob_queue_t *);
+int pqisrc_create_op_ibq(pqisrc_softstate_t *, ib_queue_t *);
int pqisrc_create_op_aio_ibq(pqisrc_softstate_t *, ib_queue_t *);
int pqisrc_create_op_raid_ibq(pqisrc_softstate_t *, ib_queue_t *);
int pqisrc_alloc_and_create_event_queue(pqisrc_softstate_t *);
int pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *);
int pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *);
int pqisrc_process_task_management_response(pqisrc_softstate_t *,
pqi_tmf_resp_t *);
-/*Device outstanding Io count*/
-uint64_t pqisrc_increment_device_active_io(pqisrc_softstate_t *,
- pqi_scsi_dev_t *);
-uint64_t pqisrc_decrement_device_active_io(pqisrc_softstate_t *,
- pqi_scsi_dev_t *);
-void pqisrc_init_device_active_io(pqisrc_softstate_t *,
- pqi_scsi_dev_t *);
-uint64_t pqisrc_read_device_active_io(pqisrc_softstate_t *,
- pqi_scsi_dev_t *);
-
-/* pqi_ioctl.c*/
-
-int
-pqisrc_passthru_ioctl(struct pqisrc_softstate *, void *, int);
-
-
+/* smartpqi_ioctl.c*/
+int pqisrc_passthru_ioctl(struct pqisrc_softstate *, void *, int);
/* Functions Prototypes */
-/* FreeBSD_mem.c */
+/* smartpqi_mem.c */
int os_dma_mem_alloc(pqisrc_softstate_t *,struct dma_mem *);
void os_dma_mem_free(pqisrc_softstate_t *,struct dma_mem *);
void *os_mem_alloc(pqisrc_softstate_t *,size_t);
-void os_mem_free(pqisrc_softstate_t *,char *,size_t);
+void os_mem_free(pqisrc_softstate_t *,void *,size_t);
void os_resource_free(pqisrc_softstate_t *);
int os_dma_setup(pqisrc_softstate_t *);
int os_dma_destroy(pqisrc_softstate_t *);
void os_update_dma_attributes(pqisrc_softstate_t *);
-/* FreeBSD intr.c */
+/* smartpqi_intr.c */
int os_get_intr_config(pqisrc_softstate_t *);
int os_setup_intr(pqisrc_softstate_t *);
int os_destroy_intr(pqisrc_softstate_t *);
int os_get_processor_config(pqisrc_softstate_t *);
void os_free_intr_config(pqisrc_softstate_t *);
-/* FreeBSD_ioctl.c */
+/* smartpqi_ioctl.c */
int os_copy_to_user(struct pqisrc_softstate *, void *,
void *, int, int);
int os_copy_from_user(struct pqisrc_softstate *, void *,
void *, int, int);
int create_char_dev(struct pqisrc_softstate *, int);
void destroy_char_dev(struct pqisrc_softstate *);
-
-/* FreeBSD_misc.c*/
+
+/* smartpqi_misc.c*/
int os_init_spinlock(struct pqisrc_softstate *, struct mtx *, char *);
void os_uninit_spinlock(struct mtx *);
int os_create_semaphore(const char *, int,struct sema *);
int os_destroy_semaphore(struct sema *);
void os_sema_lock(struct sema *);
void os_sema_unlock(struct sema *);
+void bsd_set_hint_adapter_cap(struct pqisrc_softstate *);
+void bsd_set_hint_adapter_cpu_config(struct pqisrc_softstate *);
int os_strlcpy(char *dst, char *src, int len);
void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *);
void os_stop_heartbeat_timer(pqisrc_softstate_t *);
void os_start_heartbeat_timer(void *);
-/* FreeBSD_cam.c */
+/* smartpqi_cam.c */
uint8_t os_get_task_attr(rcb_t *);
void smartpqi_target_rescan(struct pqisrc_softstate *);
void os_rescan_target(struct pqisrc_softstate *, pqi_scsi_dev_t *);
-/* FreeBSD_intr.c FreeBSD_main.c */
+/* smartpqi_intr.c smartpqi_main.c */
void pqisrc_event_worker(void *, int);
void os_add_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
-void os_remove_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
+void os_remove_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
void os_io_response_success(rcb_t *);
void os_aio_response_error(rcb_t *, aio_path_error_info_elem_t *);
+boolean_t check_device_hint_status(struct pqisrc_softstate *, unsigned int);
void smartpqi_adjust_queue_depth(struct cam_path *, uint32_t );
void os_raid_response_error(rcb_t *, raid_path_error_info_elem_t *);
void os_wellness_periodic(void *);
void os_reset_rcb( rcb_t *);
int register_sim(struct pqisrc_softstate *, int);
void deregister_sim(struct pqisrc_softstate *);
-int check_for_scsi_opcode(uint8_t *, boolean_t *, uint64_t *,
+int check_for_scsi_opcode(uint8_t *, boolean_t *, uint64_t *,
uint32_t *);
int register_legacy_intr(pqisrc_softstate_t *);
int register_msix_intr(pqisrc_softstate_t *);
@@ -292,5 +328,4 @@
/* Domain status conversion */
int bsd_status_to_pqi_status(int );
-
-#endif // _SMARTPQI_PROTOTYPES_H
+#endif
diff --git a/sys/dev/smartpqi/smartpqi_queue.c b/sys/dev/smartpqi/smartpqi_queue.c
--- a/sys/dev/smartpqi/smartpqi_queue.c
+++ b/sys/dev/smartpqi/smartpqi_queue.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,6 +28,7 @@
/*
* Submit an admin IU to the adapter.
+ * TODO: Admin commands are implemented using polling;
* Add interrupt support, if required
*/
int
@@ -120,10 +121,10 @@
softs->admin_ib_queue.elem_size = ((val & 0xFF0000) >> 16) * 16;
softs->admin_ob_queue.elem_size = ((val & 0xFF000000) >> 24) * 16;
- DBG_FUNC(" softs->admin_ib_queue.num_elem : %d\n",
- softs->admin_ib_queue.num_elem);
- DBG_FUNC(" softs->admin_ib_queue.elem_size : %d\n",
- softs->admin_ib_queue.elem_size);
+ DBG_INIT(" admin ib: num_elem=%u elem_size=%u\n",
+ softs->admin_ib_queue.num_elem, softs->admin_ib_queue.elem_size);
+ DBG_INIT(" admin ob: num_elem=%u elem_size=%u\n",
+ softs->admin_ob_queue.num_elem, softs->admin_ob_queue.elem_size);
}
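The decode above unpacks both admin element sizes, stored in 16-byte units, from a single register read. A worked example with an assumed register value:

	/* Assume val = 0x04040100:
	 *   admin_ib_queue.elem_size = ((val & 0xFF0000)   >> 16) * 16 = 4 * 16 = 64 bytes
	 *   admin_ob_queue.elem_size = ((val & 0xFF000000) >> 24) * 16 = 4 * 16 = 64 bytes
	 */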
/*
@@ -142,79 +143,136 @@
}
/*
- * Allocate DMA memory for admin queue and initialize.
+ * Allocate DMA memory for inbound queue and initialize.
*/
int
-pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *softs)
+pqisrc_allocate_and_init_inbound_q(pqisrc_softstate_t *softs, ib_queue_t *ib_q, char *tag)
{
+ struct dma_mem *dma_mem = &ib_q->alloc_dma;
uint32_t ib_array_size = 0;
- uint32_t ob_array_size = 0;
uint32_t alloc_size = 0;
char *virt_addr = NULL;
dma_addr_t dma_addr = 0;
int ret = PQI_STATUS_SUCCESS;
- ib_array_size = (softs->admin_ib_queue.num_elem *
- softs->admin_ib_queue.elem_size);
+ ib_array_size = ib_q->num_elem * ib_q->elem_size;
+ ASSERT(ib_array_size > 0);
- ob_array_size = (softs->admin_ob_queue.num_elem *
- softs->admin_ob_queue.elem_size);
+	alloc_size = ib_array_size + PQI_CI_PI_ALIGN + PQI_ADDR_ALIGN; /* room for the IB CI plus alignment slack */
- alloc_size = ib_array_size + ob_array_size +
- 2 * sizeof(uint32_t) + PQI_ADDR_ALIGN_MASK_64 + 1; /* for IB CI and OB PI */
- /* Allocate memory for Admin Q */
- softs->admin_queue_dma_mem.tag = "admin_queue";
- softs->admin_queue_dma_mem.size = alloc_size;
- softs->admin_queue_dma_mem.align = PQI_ADMINQ_ELEM_ARRAY_ALIGN;
- ret = os_dma_mem_alloc(softs, &softs->admin_queue_dma_mem);
+ /* Allocate memory for the Q */
+ memset(dma_mem, 0, sizeof(*dma_mem));
+ os_strlcpy(dma_mem->tag, tag, sizeof(dma_mem->tag));
+ dma_mem->size = alloc_size;
+ dma_mem->align = PQI_ADDR_ALIGN;
+ ret = os_dma_mem_alloc(softs, &ib_q->alloc_dma);
if (ret) {
- DBG_ERR("Failed to Allocate Admin Q ret : %d\n", ret);
+ DBG_ERR("Failed to Allocate Q tag=%s ret=%d\n", dma_mem->tag, ret);
goto err_out;
}
+ DBG_INIT("alloc tag=%s size=0x%x align=0x%x virt_addr=%p dma_addr=%p\n",
+ dma_mem->tag, dma_mem->size, dma_mem->align, dma_mem->virt_addr, (void*)dma_mem->dma_addr);
+
/* Setup the address */
- virt_addr = softs->admin_queue_dma_mem.virt_addr;
- dma_addr = softs->admin_queue_dma_mem.dma_addr;
+ virt_addr = dma_mem->virt_addr;
+ dma_addr = dma_mem->dma_addr;
+ ASSERT(!((uint64_t)virt_addr & PQI_ADDR_ALIGN_MASK));
+ ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK));
/* IB */
- softs->admin_ib_queue.q_id = 0;
- softs->admin_ib_queue.array_virt_addr = virt_addr;
- softs->admin_ib_queue.array_dma_addr = dma_addr;
- softs->admin_ib_queue.pi_local = 0;
- /* OB */
- softs->admin_ob_queue.q_id = 0;
- softs->admin_ob_queue.array_virt_addr = virt_addr + ib_array_size;
- softs->admin_ob_queue.array_dma_addr = dma_addr + ib_array_size;
- softs->admin_ob_queue.ci_local = 0;
+ ib_q->array_virt_addr = virt_addr;
+ ib_q->array_dma_addr = dma_addr;
+ ib_q->pi_local = 0;
+
+ /* update addr for the next user */
+ virt_addr += ib_array_size;
+ dma_addr += ib_array_size;
/* IB CI */
- softs->admin_ib_queue.ci_virt_addr =
- (uint32_t*)((uint8_t*)softs->admin_ob_queue.array_virt_addr
- + ob_array_size);
- softs->admin_ib_queue.ci_dma_addr =
- (dma_addr_t)((uint8_t*)softs->admin_ob_queue.array_dma_addr +
- ob_array_size);
+ ASSERT(!(dma_addr & PQI_CI_PI_ALIGN_MASK));
+ ib_q->ci_virt_addr = (uint32_t*)virt_addr;
+ ib_q->ci_dma_addr = dma_addr;
+
+ /* update addr for the next user */
+ virt_addr += PQI_CI_PI_ALIGN;
+
+ DBG_INIT("ib_q: virt_addr=%p, ci_dma_addr=%p elem=%u size=%u\n",
+ ib_q->array_virt_addr, (void*)ib_q->ci_dma_addr, ib_q->num_elem, ib_array_size);
+
+ /* Verify we aren't out of bounds from allocation */
+ ASSERT(virt_addr <= ((char*)dma_mem->virt_addr + alloc_size));
+
+ DBG_FUNC("OUT\n");
+ return ret;
+
+err_out:
+ DBG_FUNC("failed OUT\n");
+ return PQI_STATUS_FAILURE;
+}
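For reference, the single allocation built above lays out as follows; a sketch using only the macros visible in the code:

	/*
	 * base (PQI_ADDR_ALIGN-aligned)
	 * +------------------------------+
	 * | IU element array             |  num_elem * elem_size bytes
	 * +------------------------------+
	 * | IB CI host copy              |  PQI_CI_PI_ALIGN bytes
	 * +------------------------------+
	 * | alignment slack              |  up to PQI_ADDR_ALIGN bytes
	 * +------------------------------+
	 */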
+
+
+/*
+ * Allocate DMA memory for outbound queue and initialize.
+ */
+int
+pqisrc_allocate_and_init_outbound_q(pqisrc_softstate_t *softs, ob_queue_t *ob_q,
+ char *tag)
+{
+ struct dma_mem *dma_mem = &ob_q->alloc_dma;
+ uint32_t ob_array_size = 0;
+ uint32_t alloc_size = 0;
+ char *virt_addr = NULL;
+ dma_addr_t dma_addr = 0;
+ int ret = PQI_STATUS_SUCCESS;
+
+ ob_array_size = ob_q->num_elem * ob_q->elem_size;
+ ASSERT(ob_array_size > 0);
+
+ alloc_size = ob_array_size + PQI_CI_PI_ALIGN + PQI_ADDR_ALIGN; /* for OB PI */
+
+ /* Allocate memory for the Q */
+ memset(dma_mem, 0, sizeof(*dma_mem));
+ os_strlcpy(dma_mem->tag, tag, sizeof(dma_mem->tag));
+ dma_mem->size = alloc_size;
+ dma_mem->align = PQI_ADDR_ALIGN;
+ ret = os_dma_mem_alloc(softs, &ob_q->alloc_dma);
+ if (ret) {
+ DBG_ERR("Failed to Allocate Q tag=%s ret=%d\n", dma_mem->tag, ret);
+ goto err_out;
+ }
+
+ DBG_INIT("alloc tag=%s size=0x%x align=0x%x virt_addr=%p dma_addr=%p\n",
+ dma_mem->tag, dma_mem->size, dma_mem->align, dma_mem->virt_addr, (void*)dma_mem->dma_addr);
+
+ /* Setup the address */
+ virt_addr = dma_mem->virt_addr;
+ dma_addr = dma_mem->dma_addr;
+ ASSERT(!((uint64_t)virt_addr & PQI_ADDR_ALIGN_MASK));
+ ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK));
+
+ ob_q->array_virt_addr = virt_addr;
+ ob_q->array_dma_addr = dma_addr;
+ ob_q->ci_local = 0;
+
+ /* update addr for the next user */
+ virt_addr += ob_array_size;
+ dma_addr += ob_array_size;
/* OB PI */
- softs->admin_ob_queue.pi_virt_addr =
- (uint32_t*)((uint8_t*)(softs->admin_ib_queue.ci_virt_addr) +
- PQI_ADDR_ALIGN_MASK_64 + 1);
- softs->admin_ob_queue.pi_dma_addr =
- (dma_addr_t)((uint8_t*)(softs->admin_ib_queue.ci_dma_addr) +
- PQI_ADDR_ALIGN_MASK_64 + 1);
-
- DBG_INIT("softs->admin_ib_queue.ci_dma_addr : %p,softs->admin_ob_queue.pi_dma_addr :%p\n",
- (void*)softs->admin_ib_queue.ci_dma_addr, (void*)softs->admin_ob_queue.pi_dma_addr );
-
- /* Verify alignment */
- ASSERT(!(softs->admin_ib_queue.array_dma_addr &
- PQI_ADDR_ALIGN_MASK_64));
- ASSERT(!(softs->admin_ib_queue.ci_dma_addr &
- PQI_ADDR_ALIGN_MASK_64));
- ASSERT(!(softs->admin_ob_queue.array_dma_addr &
- PQI_ADDR_ALIGN_MASK_64));
- ASSERT(!(softs->admin_ob_queue.pi_dma_addr &
- PQI_ADDR_ALIGN_MASK_64));
+ ASSERT(!(dma_addr & PQI_CI_PI_ALIGN_MASK));
+ ob_q->pi_virt_addr = (uint32_t*)virt_addr;
+ ob_q->pi_dma_addr = dma_addr;
+
+	/* advance addr to the end for the bounds check below */
+ virt_addr += PQI_CI_PI_ALIGN;
+
+ DBG_INIT("ob_q: virt_addr=%p, pi_dma_addr=%p elem=%u size=%u\n",
+ ob_q->array_virt_addr, (void*)ob_q->pi_dma_addr, ob_q->num_elem, ob_array_size);
+
+ /* Verify we aren't out of bounds from allocation */
+ ASSERT(virt_addr <= ((char*)dma_mem->virt_addr + alloc_size));
DBG_FUNC("OUT\n");
return ret;
@@ -224,6 +282,36 @@
return PQI_STATUS_FAILURE;
}
+/*
+ * Allocate DMA memory for admin queue and initialize.
+ */
+int
+pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *softs)
+{
+ int ret;
+ ib_queue_t *admin_ib_q = &softs->admin_ib_queue;
+ ob_queue_t *admin_ob_q = &softs->admin_ob_queue;
+
+ ret = pqisrc_allocate_and_init_inbound_q(softs, admin_ib_q, "admin_queue");
+ if (!ret) {
+ admin_ib_q->q_id = PQI_ADMIN_IB_QUEUE_ID;
+ ret = pqisrc_allocate_and_init_outbound_q(softs, admin_ob_q, "admin_queue");
+	if (!ret)
+			admin_ob_q->q_id = PQI_ADMIN_OB_QUEUE_ID;
+		else {
+			if (softs->admin_ib_queue.lockcreated == true) {
+ OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
+ softs->admin_ib_queue.lockcreated = false;
+ }
+ if (softs->admin_ib_queue.alloc_dma.virt_addr)
+ os_dma_mem_free(softs, &softs->admin_ib_queue.alloc_dma);
+ }
+ }
+ else
+ DBG_ERR("Failed to create Admin Queue pair\n");
+
+ return ret;
+}
+
/*
* Subroutine used to create (or) delete the admin queue requested.
*/
@@ -264,17 +352,17 @@
(void*)softs->admin_ib_queue.array_dma_addr);
DBG_INFO(" softs->admin_ib_queue.array_virt_addr : %p\n",
(void*)softs->admin_ib_queue.array_virt_addr);
- DBG_INFO(" softs->admin_ib_queue.num_elem : %d\n",
+ DBG_INFO(" softs->admin_ib_queue.num_elem : %u\n",
softs->admin_ib_queue.num_elem);
- DBG_INFO(" softs->admin_ib_queue.elem_size : %d\n",
+ DBG_INFO(" softs->admin_ib_queue.elem_size : %u\n",
softs->admin_ib_queue.elem_size);
DBG_INFO(" softs->admin_ob_queue.array_dma_addr : %p\n",
(void*)softs->admin_ob_queue.array_dma_addr);
DBG_INFO(" softs->admin_ob_queue.array_virt_addr : %p\n",
(void*)softs->admin_ob_queue.array_virt_addr);
- DBG_INFO(" softs->admin_ob_queue.num_elem : %d\n",
+ DBG_INFO(" softs->admin_ob_queue.num_elem : %u\n",
softs->admin_ob_queue.num_elem);
- DBG_INFO(" softs->admin_ob_queue.elem_size : %d\n",
+ DBG_INFO(" softs->admin_ob_queue.elem_size : %u\n",
softs->admin_ob_queue.elem_size);
DBG_INFO(" softs->admin_ib_queue.pi_register_abs : %p\n",
(void*)softs->admin_ib_queue.pi_register_abs);
@@ -289,6 +377,7 @@
pqisrc_create_admin_queue(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
+/* struct pqi_dev_adminq_cap *pqi_cap; */
uint32_t admin_q_param = 0;
DBG_FUNC("IN\n");
@@ -371,8 +460,11 @@
return ret;
err_lock:
+#if 0
+ pqisrc_create_delete_adminq(softs, PQI_ADMIN_QUEUE_CONF_FUNC_DEL_Q_PAIR);
+#endif
err_q_create:
- os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
+ pqisrc_destroy_admin_queue(softs);
err_out:
DBG_FUNC("failed OUT\n");
return ret;
@@ -425,13 +517,14 @@
int ret = PQI_STATUS_SUCCESS;
ret = pqisrc_delete_op_queue(softs, softs->event_q.q_id, false);
if (ret) {
- DBG_ERR("Failed to Delete Event Q %d\n", softs->event_q.q_id);
+ DBG_ERR("Failed to Delete Event Q %u\n", softs->event_q.q_id);
}
softs->event_q.created = false;
}
/* Free the memory */
- os_dma_mem_free(softs, &softs->event_q_dma_mem);
+ if (softs->event_q.alloc_dma.virt_addr)
+ os_dma_mem_free(softs, &softs->event_q.alloc_dma);
DBG_FUNC("OUT\n");
}
@@ -444,44 +537,42 @@
{
int ret = PQI_STATUS_SUCCESS;
ib_queue_t *op_ib_q = NULL;
+ uint32_t total_op_ibq = softs->num_op_raid_ibq;
int i;
DBG_FUNC("IN\n");
- for (i = 0; i < softs->num_op_raid_ibq; i++) {
- /* OP RAID IB Q */
+ for (i = 0; i < total_op_ibq; i++) {
+ int repeat = 0;
+ /* RAID first */
op_ib_q = &softs->op_raid_ib_q[i];
+release_queue:
if (op_ib_q->created == true) {
- ret = pqisrc_delete_op_queue(softs, op_ib_q->q_id, true);
+ ret = pqisrc_delete_op_queue(softs, op_ib_q->q_id,
+ true);
if (ret) {
- DBG_ERR("Failed to Delete Raid IB Q %d\n",op_ib_q->q_id);
+ DBG_ERR("Failed to Delete IB Q %u\n",
+ op_ib_q->q_id);
}
op_ib_q->created = false;
}
- if(op_ib_q->lockcreated==true){
- OS_UNINIT_PQILOCK(&op_ib_q->lock);
- op_ib_q->lockcreated = false;
- }
-
- /* OP AIO IB Q */
- op_ib_q = &softs->op_aio_ib_q[i];
- if (op_ib_q->created == true) {
- ret = pqisrc_delete_op_queue(softs, op_ib_q->q_id, true);
- if (ret) {
- DBG_ERR("Failed to Delete AIO IB Q %d\n",op_ib_q->q_id);
- }
- op_ib_q->created = false;
+ if (op_ib_q->lockcreated == true) {
+ OS_UNINIT_PQILOCK(&op_ib_q->lock);
+ op_ib_q->lockcreated = false;
}
- if(op_ib_q->lockcreated==true){
- OS_UNINIT_PQILOCK(&op_ib_q->lock);
- op_ib_q->lockcreated = false;
- }
+ /* Free the memory */
+ if (op_ib_q->alloc_dma.virt_addr)
+ os_dma_mem_free(softs, &op_ib_q->alloc_dma);
+
+ if (repeat < 1) {
+ repeat++;
+ op_ib_q = &softs->op_aio_ib_q[i];
+ goto release_queue;
+ }
}
- /* Free the memory */
- os_dma_mem_free(softs, &softs->op_ibq_dma_mem);
DBG_FUNC("OUT\n");
}
@@ -493,23 +584,27 @@
{
int ret = PQI_STATUS_SUCCESS;
int i;
+ ob_queue_t *op_ob_q = NULL;
DBG_FUNC("IN\n");
for (i = 0; i < softs->num_op_obq; i++) {
- ob_queue_t *op_ob_q = NULL;
op_ob_q = &softs->op_ob_q[i];
+
if (op_ob_q->created == true) {
ret = pqisrc_delete_op_queue(softs, op_ob_q->q_id, false);
if (ret) {
- DBG_ERR("Failed to Delete OB Q %d\n",op_ob_q->q_id);
+ DBG_ERR("Failed to Delete OB Q %u\n",op_ob_q->q_id);
}
op_ob_q->created = false;
}
+
+ /* Free the memory */
+ if (op_ob_q->alloc_dma.virt_addr)
+ os_dma_mem_free(softs, &op_ob_q->alloc_dma);
}
/* Free the memory */
- os_dma_mem_free(softs, &softs->op_obq_dma_mem);
DBG_FUNC("OUT\n");
}
@@ -522,11 +617,22 @@
int ret = PQI_STATUS_SUCCESS;
DBG_FUNC("IN\n");
+
+	if (softs->admin_ib_queue.lockcreated == true) {
+ OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
+ softs->admin_ib_queue.lockcreated = false;
+ }
+
#if 0
ret = pqisrc_create_delete_adminq(softs,
PQI_ADMIN_QUEUE_CONF_FUNC_DEL_Q_PAIR);
#endif
- os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
+
+ if (softs->admin_ib_queue.alloc_dma.virt_addr)
+ os_dma_mem_free(softs, &softs->admin_ib_queue.alloc_dma);
+
+ if (softs->admin_ob_queue.alloc_dma.virt_addr)
+ os_dma_mem_free(softs, &softs->admin_ob_queue.alloc_dma);
DBG_FUNC("OUT\n");
return ret;
@@ -689,15 +795,8 @@
pqisrc_alloc_and_create_event_queue(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
- uint32_t alloc_size = 0;
uint32_t num_elem;
- char *virt_addr = NULL;
- dma_addr_t dma_addr = 0;
- uint64_t event_q_pi_dma_start_offset = 0;
- uint32_t event_q_pi_virt_start_offset = 0;
- char *event_q_pi_virt_start_addr = NULL;
- ob_queue_t *event_q = NULL;
-
+ ob_queue_t *event_q = &softs->event_q;
DBG_FUNC("IN\n");
@@ -710,47 +809,26 @@
* for queue size calculation.
*/
#ifdef SHARE_EVENT_QUEUE_FOR_IO
- num_elem = MIN(softs->num_elem_per_op_obq, PQISRC_NUM_EVENT_Q_ELEM);
+ num_elem = MIN(softs->num_elem_per_op_obq, PQISRC_MAX_EVENT_QUEUE_ELEM_NUM);
#else
- num_elem = PQISRC_NUM_EVENT_Q_ELEM;
+ num_elem = PQISRC_MAX_EVENT_QUEUE_ELEM_NUM;
#endif
- alloc_size = num_elem * PQISRC_EVENT_Q_ELEM_SIZE;
- event_q_pi_dma_start_offset = alloc_size;
- event_q_pi_virt_start_offset = alloc_size;
- alloc_size += sizeof(uint32_t); /*For IBQ CI*/
+ event_q->num_elem = num_elem;
+ event_q->elem_size = PQISRC_EVENT_Q_ELEM_SIZE_BYTES;
+
+ ret = pqisrc_allocate_and_init_outbound_q(softs, event_q, "event_queue");
- /* Allocate memory for event queues */
- softs->event_q_dma_mem.tag = "event_queue";
- softs->event_q_dma_mem.size = alloc_size;
- softs->event_q_dma_mem.align = PQI_OPQ_ELEM_ARRAY_ALIGN;
- ret = os_dma_mem_alloc(softs, &softs->event_q_dma_mem);
if (ret) {
- DBG_ERR("Failed to Allocate Event Q ret : %d\n"
- , ret);
+ DBG_ERR("Failed to Allocate EventQ\n");
goto err_out;
}
-
- /* Set up the address */
- virt_addr = softs->event_q_dma_mem.virt_addr;
- dma_addr = softs->event_q_dma_mem.dma_addr;
- event_q_pi_dma_start_offset += dma_addr;
- event_q_pi_virt_start_addr = virt_addr + event_q_pi_virt_start_offset;
-
- event_q = &softs->event_q;
- ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
- FILL_QUEUE_ARRAY_ADDR(event_q,virt_addr,dma_addr);
event_q->q_id = PQI_OP_EVENT_QUEUE_ID;
- event_q->num_elem = num_elem;
- event_q->elem_size = PQISRC_EVENT_Q_ELEM_SIZE;
- event_q->pi_dma_addr = event_q_pi_dma_start_offset;
- event_q->pi_virt_addr = (uint32_t *)event_q_pi_virt_start_addr;
event_q->intr_msg_num = 0; /* vector zero for event */
- ASSERT(!(event_q->pi_dma_addr & PQI_ADDR_ALIGN_MASK_4));
ret = pqisrc_create_op_obq(softs,event_q);
if (ret) {
- DBG_ERR("Failed to Create EventQ %d\n",event_q->q_id);
+ DBG_ERR("Failed to Create EventQ %u\n",event_q->q_id);
goto err_out_create;
}
event_q->created = true;
@@ -772,115 +850,62 @@
pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
- uint32_t alloc_size = 0;
- char *virt_addr = NULL;
- dma_addr_t dma_addr = 0;
- uint32_t ibq_size = 0;
- uint64_t ib_ci_dma_start_offset = 0;
- char *ib_ci_virt_start_addr = NULL;
- uint32_t ib_ci_virt_start_offset = 0;
- uint32_t ibq_id = PQI_MIN_OP_IB_QUEUE_ID;
ib_queue_t *op_ib_q = NULL;
- uint32_t num_op_ibq = softs->num_op_raid_ibq +
- softs->num_op_aio_ibq;
+ uint32_t ibq_id = PQI_MIN_OP_IB_QUEUE_ID;
+ uint32_t total_op_ibq = softs->num_op_raid_ibq + softs->num_op_aio_ibq;
int i = 0;
+ char *string = NULL;
DBG_FUNC("IN\n");
- /* Calculate memory requirements */
- ibq_size = softs->num_elem_per_op_ibq * softs->ibq_elem_size;
- alloc_size = num_op_ibq * ibq_size;
- /* CI indexes starts after Queue element array */
- ib_ci_dma_start_offset = alloc_size;
- ib_ci_virt_start_offset = alloc_size;
- alloc_size += num_op_ibq * sizeof(uint32_t); /*For IBQ CI*/
-
- /* Allocate memory for IB queues */
- softs->op_ibq_dma_mem.tag = "op_ib_queue";
- softs->op_ibq_dma_mem.size = alloc_size;
- softs->op_ibq_dma_mem.align = PQI_OPQ_ELEM_ARRAY_ALIGN;
- ret = os_dma_mem_alloc(softs, &softs->op_ibq_dma_mem);
- if (ret) {
- DBG_ERR("Failed to Allocate Operational IBQ memory ret : %d\n",
- ret);
- goto err_out;
- }
-
- /* Set up the address */
- virt_addr = softs->op_ibq_dma_mem.virt_addr;
- dma_addr = softs->op_ibq_dma_mem.dma_addr;
- ib_ci_dma_start_offset += dma_addr;
- ib_ci_virt_start_addr = virt_addr + ib_ci_virt_start_offset;
-
ASSERT(softs->num_op_raid_ibq == softs->num_op_aio_ibq);
- for (i = 0; i < softs->num_op_raid_ibq; i++) {
- /* OP RAID IB Q */
- op_ib_q = &softs->op_raid_ib_q[i];
- ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
- FILL_QUEUE_ARRAY_ADDR(op_ib_q,virt_addr,dma_addr);
- op_ib_q->q_id = ibq_id++;
+ for (i = 0; i < total_op_ibq; i++) {
- snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "raid_ibqlock%d", i);
- ret = OS_INIT_PQILOCK(softs, &op_ib_q->lock, op_ib_q->lockname);
- if(ret){
- /* TODO: error handling */
- DBG_ERR("raid_ibqlock %d init failed\n", i);
- op_ib_q->lockcreated = false;
- goto err_lock;
+		/* Alternate between the RAID and AIO IB queues */
+ if (i % 2 == 0)
+ {
+ op_ib_q = &softs->op_raid_ib_q[i/2];
+ string = "raid";
+ }
+ else
+ {
+ op_ib_q = &softs->op_aio_ib_q[i/2];
+ string = "aio";
}
- op_ib_q->lockcreated = true;
+ /* Allocate memory for IB queues */
op_ib_q->num_elem = softs->num_elem_per_op_ibq;
- op_ib_q->elem_size = softs->ibq_elem_size;
- op_ib_q->ci_dma_addr = ib_ci_dma_start_offset +
- (2 * i * sizeof(uint32_t));
- op_ib_q->ci_virt_addr = (uint32_t*)(ib_ci_virt_start_addr +
- (2 * i * sizeof(uint32_t)));
- ASSERT(!(op_ib_q->ci_dma_addr & PQI_ADDR_ALIGN_MASK_4));
-
- ret = pqisrc_create_op_raid_ibq(softs, op_ib_q);
+ op_ib_q->elem_size = softs->max_ibq_elem_size;
+
+ ret = pqisrc_allocate_and_init_inbound_q(softs, op_ib_q, "op_ib_queue");
if (ret) {
- DBG_ERR("[ %s ] Failed to Create OP Raid IBQ %d\n",
- __func__, op_ib_q->q_id);
- goto err_out_create;
+ DBG_ERR("Failed to Allocate Operational IBQ memory ret : %d\n",
+ ret);
+ goto err_out;
}
- op_ib_q->created = true;
-
- /* OP AIO IB Q */
- virt_addr += ibq_size;
- dma_addr += ibq_size;
- op_ib_q = &softs->op_aio_ib_q[i];
- ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
- FILL_QUEUE_ARRAY_ADDR(op_ib_q,virt_addr,dma_addr);
op_ib_q->q_id = ibq_id++;
- snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "aio_ibqlock%d", i);
+
+ snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "%s_ibqlock_%d", string, i);
ret = OS_INIT_PQILOCK(softs, &op_ib_q->lock, op_ib_q->lockname);
- if(ret){
- /* TODO: error handling */
- DBG_ERR("aio_ibqlock %d init failed\n", i);
- op_ib_q->lockcreated = false;
- goto err_lock;
- }
- op_ib_q->lockcreated = true;
+ if(ret){
+ /* TODO: error handling */
+ DBG_ERR("%s %d init failed\n", string, i);
+ op_ib_q->lockcreated = false;
+ goto err_lock;
+ }
+ op_ib_q->lockcreated = true;
- op_ib_q->num_elem = softs->num_elem_per_op_ibq;
- op_ib_q->elem_size = softs->ibq_elem_size;
- op_ib_q->ci_dma_addr = ib_ci_dma_start_offset +
- (((2 * i) + 1) * sizeof(uint32_t));
- op_ib_q->ci_virt_addr = (uint32_t*)(ib_ci_virt_start_addr +
- (((2 * i) + 1) * sizeof(uint32_t)));
- ASSERT(!(op_ib_q->ci_dma_addr & PQI_ADDR_ALIGN_MASK_4));
-
- ret = pqisrc_create_op_aio_ibq(softs, op_ib_q);
+ if (i % 2 == 0)
+ ret = pqisrc_create_op_raid_ibq(softs, op_ib_q);
+ else
+ ret = pqisrc_create_op_aio_ibq(softs, op_ib_q);
if (ret) {
- DBG_ERR("Failed to Create OP AIO IBQ %d\n",op_ib_q->q_id);
+ DBG_ERR("Failed to Create OP IBQ type=%s id=%u\n",
+ string, op_ib_q->q_id);
goto err_out_create;
}
op_ib_q->created = true;
-
- virt_addr += ibq_size;
- dma_addr += ibq_size;
}
DBG_FUNC("OUT\n");
@@ -888,8 +913,8 @@
err_lock:
err_out_create:
- pqisrc_destroy_op_ib_queues(softs);
err_out:
+ pqisrc_destroy_op_ib_queues(softs);
DBG_FUNC("OUT failed %d\n", ret);
return PQI_STATUS_FAILURE;
}
@@ -901,16 +926,8 @@
pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
- uint32_t alloc_size = 0;
- char *virt_addr = NULL;
- dma_addr_t dma_addr = 0;
- uint32_t obq_size = 0;
- uint64_t ob_pi_dma_start_offset = 0;
- uint32_t ob_pi_virt_start_offset = 0;
- char *ob_pi_virt_start_addr = NULL;
uint32_t obq_id = PQI_MIN_OP_OB_QUEUE_ID;
ob_queue_t *op_ob_q = NULL;
- uint32_t num_op_obq = softs->num_op_obq;
int i = 0;
DBG_FUNC("IN\n");
@@ -923,65 +940,41 @@
*/
ALIGN_BOUNDARY(softs->num_elem_per_op_obq, 4);
- obq_size = softs->num_elem_per_op_obq * softs->obq_elem_size;
- alloc_size += num_op_obq * obq_size;
- /* PI indexes starts after Queue element array */
- ob_pi_dma_start_offset = alloc_size;
- ob_pi_virt_start_offset = alloc_size;
- alloc_size += num_op_obq * sizeof(uint32_t); /*For OBQ PI*/
-
- /* Allocate memory for OB queues */
- softs->op_obq_dma_mem.tag = "op_ob_queue";
- softs->op_obq_dma_mem.size = alloc_size;
- softs->op_obq_dma_mem.align = PQI_OPQ_ELEM_ARRAY_ALIGN;
- ret = os_dma_mem_alloc(softs, &softs->op_obq_dma_mem);
- if (ret) {
- DBG_ERR("Failed to Allocate Operational OBQ memory ret : %d\n",
- ret);
- goto err_out;
- }
- /* Set up the address */
- virt_addr = softs->op_obq_dma_mem.virt_addr;
- dma_addr = softs->op_obq_dma_mem.dma_addr;
- ob_pi_dma_start_offset += dma_addr;
- ob_pi_virt_start_addr = virt_addr + ob_pi_virt_start_offset;
-
- DBG_INFO("softs->num_op_obq %d\n",softs->num_op_obq);
+ DBG_INIT("softs->num_op_obq %u max_obq_elem_size=%u\n",softs->num_op_obq, softs->max_obq_elem_size);
for (i = 0; i < softs->num_op_obq; i++) {
op_ob_q = &softs->op_ob_q[i];
- ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
- FILL_QUEUE_ARRAY_ADDR(op_ob_q,virt_addr,dma_addr);
+
+ /* Allocate memory for OB queues */
+ op_ob_q->num_elem = softs->num_elem_per_op_obq;
+ op_ob_q->elem_size = PQISRC_OP_OBQ_ELEM_SIZE_BYTES;
+ ret = pqisrc_allocate_and_init_outbound_q(softs, op_ob_q, "op_ob_queue");
+ if (ret) {
+ DBG_ERR("Failed to Allocate Operational OBQ memory ret : %d\n",
+ ret);
+ goto err_out;
+ }
op_ob_q->q_id = obq_id++;
if(softs->share_opq_and_eventq == true)
op_ob_q->intr_msg_num = i;
else
op_ob_q->intr_msg_num = i + 1; /* msg num zero for event */
- op_ob_q->num_elem = softs->num_elem_per_op_obq;
- op_ob_q->elem_size = softs->obq_elem_size;
- op_ob_q->pi_dma_addr = ob_pi_dma_start_offset +
- (i * sizeof(uint32_t));
- op_ob_q->pi_virt_addr = (uint32_t*)(ob_pi_virt_start_addr +
- (i * sizeof(uint32_t)));
- ASSERT(!(op_ob_q->pi_dma_addr & PQI_ADDR_ALIGN_MASK_4));
-
- ret = pqisrc_create_op_obq(softs,op_ob_q);
+
+ ret = pqisrc_create_op_obq(softs, op_ob_q);
if (ret) {
- DBG_ERR("Failed to Create OP OBQ %d\n",op_ob_q->q_id);
+ DBG_ERR("Failed to Create OP OBQ %u\n",op_ob_q->q_id);
goto err_out_create;
}
op_ob_q->created = true;
- virt_addr += obq_size;
- dma_addr += obq_size;
}
DBG_FUNC("OUT\n");
return ret;
err_out_create:
- pqisrc_destroy_op_ob_queues(softs);
err_out:
+ pqisrc_destroy_op_ob_queues(softs);
DBG_FUNC("OUT failed %d\n", ret);
return PQI_STATUS_FAILURE;
}
diff --git a/sys/dev/smartpqi/smartpqi_request.c b/sys/dev/smartpqi/smartpqi_request.c
--- a/sys/dev/smartpqi/smartpqi_request.c
+++ b/sys/dev/smartpqi/smartpqi_request.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,37 +26,37 @@
#include "smartpqi_includes.h"
-/*
- * Attempt to perform offload RAID mapping for a logical volume I/O.
- */
-
-#define HPSA_RAID_0 0
-#define HPSA_RAID_4 1
-#define HPSA_RAID_1 2 /* also used for RAID 10 */
-#define HPSA_RAID_5 3 /* also used for RAID 50 */
-#define HPSA_RAID_51 4
-#define HPSA_RAID_6 5 /* also used for RAID 60 */
-#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
-#define HPSA_RAID_MAX HPSA_RAID_ADM
-#define HPSA_RAID_UNKNOWN 0xff
+/* Change this if you need to debug why AIO is not being used */
+#define DBG_AIO DBG_IO
#define SG_FLAG_LAST 0x40000000
#define SG_FLAG_CHAIN 0x80000000
+/* Local Prototypes */
+static void pqisrc_increment_io_counters(pqisrc_softstate_t *softs, rcb_t *rcb);
+static int fill_lba_for_scsi_rw(pqisrc_softstate_t *softs, uint8_t *cdb, aio_req_locator_t *l);
+
+
/* Subroutine to find out embedded sgl count in IU */
static inline uint32_t
-pqisrc_embedded_sgl_count(uint32_t elem_alloted)
+pqisrc_embedded_sgl_count(uint32_t elem_alloted, uint8_t iu_type)
{
- uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU;
- DBG_FUNC(" IN ");
+ uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU_DEFAULT;
+
+ DBG_FUNC("IN\n");
+
+ if (iu_type == PQI_IU_TYPE_RAID5_WRITE_BYPASS_REQUEST ||
+ iu_type == PQI_IU_TYPE_RAID6_WRITE_BYPASS_REQUEST)
+ embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU_RAID56_AIO;
+
/**
calculate embedded sgl count using num_elem_alloted for IO
**/
if(elem_alloted - 1)
embedded_sgl_count += ((elem_alloted - 1) * MAX_EMBEDDED_SG_IN_IU);
- DBG_IO("embedded_sgl_count :%d\n",embedded_sgl_count);
+ /* DBG_IO("embedded_sgl_count :%d\n", embedded_sgl_count); */
- DBG_FUNC(" OUT ");
+ DBG_FUNC("OUT\n");
return embedded_sgl_count;
@@ -68,7 +68,7 @@
{
uint32_t contiguous_free_elem = 0;
- DBG_FUNC(" IN ");
+ DBG_FUNC("IN\n");
if(pi >= ci) {
contiguous_free_elem = (elem_in_q - pi);
@@ -78,33 +78,41 @@
contiguous_free_elem = (ci - pi - 1);
}
- DBG_FUNC(" OUT ");
+ DBG_FUNC("OUT\n");
return contiguous_free_elem;
}
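
A worked example of the ring arithmetic above. The `ci - pi - 1` branch reflects the one-slot-open convention (a full ring keeps one element unused so it is distinguishable from an empty one); the `ci == 0` adjustment below is an assumption consistent with that convention, since those lines are elided from this hunk:

	#include <assert.h>
	#include <stdint.h>

	static uint32_t
	contig_free(uint32_t pi, uint32_t ci, uint32_t elem_in_q)
	{
		if (pi >= ci) {
			uint32_t n = elem_in_q - pi;
			return (ci == 0) ? n - 1 : n;   /* assumed ci == 0 case */
		}
		return (ci - pi - 1);
	}

	int
	main(void)
	{
		assert(contig_free(30, 5, 32) == 2);    /* tail slots 30..31 */
		assert(contig_free(3, 10, 32) == 6);    /* slots 3..8; slot 9 stays open */
		assert(contig_free(0, 0, 32) == 31);    /* empty ring, one slot reserved */
		return (0);
	}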
/* Subroutine to find out num of elements need for the request */
static uint32_t
-pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count)
+pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count,
+ pqi_scsi_dev_t *devp, boolean_t is_write, IO_PATH_T io_path)
{
uint32_t num_sg;
uint32_t num_elem_required = 1;
- DBG_FUNC(" IN ");
- DBG_IO("SGL_Count :%d",SG_Count);
+ uint32_t sg_in_first_iu = MAX_EMBEDDED_SG_IN_FIRST_IU_DEFAULT;
+
+ DBG_FUNC("IN\n");
+ DBG_IO("SGL_Count :%u\n",SG_Count);
+
+ if ((devp->raid_level == SA_RAID_5 || devp->raid_level == SA_RAID_6)
+ && is_write && (io_path == AIO_PATH))
+ sg_in_first_iu = MAX_EMBEDDED_SG_IN_FIRST_IU_RAID56_AIO;
/********
If SG_Count greater than max sg per IU i.e 4 or 68
(4 is with out spanning or 68 is with spanning) chaining is required.
- OR, If SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU then,
+ OR, If SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU_* then,
on these two cases one element is enough.
********/
- if(SG_Count > softs->max_sg_per_iu || SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU)
+ if(SG_Count > softs->max_sg_per_spanning_cmd ||
+ SG_Count <= sg_in_first_iu)
return num_elem_required;
/*
SGL Count Other Than First IU
*/
- num_sg = SG_Count - MAX_EMBEDDED_SG_IN_FIRST_IU;
+ num_sg = SG_Count - sg_in_first_iu;
num_elem_required += PQISRC_DIV_ROUND_UP(num_sg, MAX_EMBEDDED_SG_IN_IU);
- DBG_FUNC(" OUT ");
+ DBG_FUNC("OUT\n");
return num_elem_required;
}
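
To make the element math concrete, here is the arithmetic under the 4-and-68 figures mentioned in the comment above (4 descriptors embedded in the first element, 4 per spanned element, so 4 + 16*4 = 68 when spanning; these numeric values are inferred from that comment, not confirmed constants):

	/*
	 * SG_Count = 3   -> fits in the first IU                -> 1 element
	 * SG_Count = 10  -> 1 + DIV_ROUND_UP(10 - 4, 4) = 3     -> 3 elements
	 * SG_Count = 100 -> exceeds the spanning limit (68)     -> 1 element;
	 *                   the SGL is chained out-of-line instead
	 */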
@@ -119,19 +127,21 @@
sgt_t *sg_chain = NULL;
boolean_t partial = false;
- DBG_FUNC(" IN ");
+ DBG_FUNC("IN\n");
- DBG_IO("SGL_Count :%d",num_sg);
+ /* DBG_IO("SGL_Count :%d",num_sg); */
if (0 == num_sg) {
goto out;
}
- if (num_sg <= pqisrc_embedded_sgl_count(num_elem_alloted)) {
+ if (num_sg <= pqisrc_embedded_sgl_count(num_elem_alloted,
+ iu_hdr->iu_type)) {
+
for (i = 0; i < num_sg; i++, sgt++) {
- sgt->addr= OS_GET_IO_SG_ADDR(rcb,i);
- sgt->len= OS_GET_IO_SG_LEN(rcb,i);
- sgt->flags= 0;
- }
+ sgt->addr= OS_GET_IO_SG_ADDR(rcb,i);
+ sgt->len= OS_GET_IO_SG_LEN(rcb,i);
+ sgt->flags= 0;
+ }
sg_array[num_sg - 1].flags = SG_FLAG_LAST;
} else {
@@ -157,17 +167,44 @@
}
out:
iu_hdr->iu_length = num_sg * sizeof(sgt_t);
- DBG_FUNC(" OUT ");
+ DBG_FUNC("OUT\n");
return partial;
}
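
For reference, a sketch of the two cases the builder above distinguishes. Descriptor fields follow the diff; the chain-side names (`sg_chain`, `sg_chain_dma`) are hypothetical stand-ins for the out-of-line list carried by the rcb:

	if (num_sg <= pqisrc_embedded_sgl_count(num_elem_alloted, iu_hdr->iu_type)) {
		/* embed every descriptor directly in the IU */
		for (i = 0; i < num_sg; i++) {
			sgt[i].addr  = OS_GET_IO_SG_ADDR(rcb, i);
			sgt[i].len   = OS_GET_IO_SG_LEN(rcb, i);
			sgt[i].flags = 0;
		}
		sgt[num_sg - 1].flags = SG_FLAG_LAST;
	} else {
		/* fill the external list, terminate it, and point one
		 * in-IU descriptor at it with the chain flag set */
		for (i = 0; i < num_sg; i++) {
			sg_chain[i].addr  = OS_GET_IO_SG_ADDR(rcb, i);
			sg_chain[i].len   = OS_GET_IO_SG_LEN(rcb, i);
			sg_chain[i].flags = 0;
		}
		sg_chain[num_sg - 1].flags = SG_FLAG_LAST;
		sgt[0].addr  = sg_chain_dma;            /* hypothetical name */
		sgt[0].len   = num_sg * sizeof(sgt_t);
		sgt[0].flags = SG_FLAG_CHAIN;
		partial = true;                         /* assumption */
	}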
+#if 0
+static inline void
+pqisrc_show_raid_req(pqisrc_softstate_t *softs, pqisrc_raid_req_t *raid_req)
+{
+ DBG_IO("%30s: 0x%x\n", "raid_req->header.iu_type",
+ raid_req->header.iu_type);
+	DBG_IO("%30s: 0x%x\n", "raid_req->response_queue_id",
+		raid_req->response_queue_id);
+ DBG_IO("%30s: 0x%x\n", "raid_req->request_id",
+ raid_req->request_id);
+ DBG_IO("%30s: 0x%x\n", "raid_req->buffer_length",
+ raid_req->buffer_length);
+ DBG_IO("%30s: 0x%x\n", "raid_req->task_attribute",
+ raid_req->task_attribute);
+ DBG_IO("%30s: 0x%llx\n", "raid_req->lun_number",
+ *((long long unsigned int*)raid_req->lun_number));
+ DBG_IO("%30s: 0x%x\n", "raid_req->error_index",
+ raid_req->error_index);
+ DBG_IO("%30s: 0x%p\n", "raid_req->sg_descriptors[0].addr",
+ (void *)raid_req->sg_descriptors[0].addr);
+ DBG_IO("%30s: 0x%x\n", "raid_req->sg_descriptors[0].len",
+ raid_req->sg_descriptors[0].len);
+ DBG_IO("%30s: 0x%x\n", "raid_req->sg_descriptors[0].flags",
+ raid_req->sg_descriptors[0].flags);
+}
+#endif
+
/*Subroutine used to Build the RAID request */
static void
pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
pqisrc_raid_req_t *raid_req, uint32_t num_elem_alloted)
{
- DBG_FUNC(" IN ");
+ DBG_FUNC("IN\n");
raid_req->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
raid_req->header.comp_feature = 0;
@@ -185,15 +222,16 @@
raid_req->fence = 0;
raid_req->error_index = raid_req->request_id;
raid_req->reserved2 = 0;
- raid_req->task_attribute = OS_GET_TASK_ATTR(rcb);
- raid_req->command_priority = 0;
+ raid_req->task_attribute = OS_GET_TASK_ATTR(rcb);
+ raid_req->command_priority = 0;
raid_req->reserved3 = 0;
raid_req->reserved4 = 0;
raid_req->reserved5 = 0;
+ raid_req->ml_device_lun_number = (uint8_t)rcb->cm_ccb->ccb_h.target_lun;
/* As cdb and additional_cdb_bytes are contiguous,
update them in a single statement */
- memcpy(raid_req->cdb, rcb->cdbp, rcb->cmdlen);
+ memcpy(raid_req->cmd.cdb, rcb->cdbp, rcb->cmdlen);
#if 0
DBG_IO("CDB :");
for(i = 0; i < rcb->cmdlen ; i++)
@@ -235,32 +273,70 @@
offsetof(pqisrc_raid_req_t, sg_descriptors) - sizeof(iu_header_t);
#if 0
- DBG_IO("raid_req->header.iu_type : 0x%x", raid_req->header.iu_type);
- DBG_IO("raid_req->response_queue_id :%d\n"raid_req->response_queue_id);
- DBG_IO("raid_req->request_id : 0x%x", raid_req->request_id);
- DBG_IO("raid_req->buffer_length : 0x%x", raid_req->buffer_length);
- DBG_IO("raid_req->task_attribute : 0x%x", raid_req->task_attribute);
- DBG_IO("raid_req->lun_number : 0x%x", raid_req->lun_number);
- DBG_IO("raid_req->error_index : 0x%x", raid_req->error_index);
- DBG_IO("raid_req->sg_descriptors[0].addr : %p", (void*)raid_req->sg_descriptors[0].addr);
- DBG_IO("raid_req->sg_descriptors[0].len : 0x%x", raid_req->sg_descriptors[0].len);
- DBG_IO("raid_req->sg_descriptors[0].flags : 0%x", raid_req->sg_descriptors[0].flags);
+ pqisrc_show_raid_req(softs, raid_req);
#endif
rcb->success_cmp_callback = pqisrc_process_io_response_success;
rcb->error_cmp_callback = pqisrc_process_raid_response_error;
rcb->resp_qid = raid_req->response_queue_id;
- DBG_FUNC(" OUT ");
+ DBG_FUNC("OUT\n");
}
-/*Subroutine used to Build the AIO request */
-static void
-pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
- pqi_aio_req_t *aio_req, uint32_t num_elem_alloted)
+/* We will need to expand this to handle different types of
+ * aio request structures.
+ */
+#if 0
+static inline void
+pqisrc_show_aio_req(pqisrc_softstate_t *softs, pqi_aio_req_t *aio_req)
{
- DBG_FUNC(" IN ");
+ DBG_IO("%30s: 0x%x\n", "aio_req->header.iu_type",
+ aio_req->header.iu_type);
+ DBG_IO("%30s: 0x%x\n", "aio_req->resp_qid",
+ aio_req->response_queue_id);
+ DBG_IO("%30s: 0x%x\n", "aio_req->req_id",
+ aio_req->req_id);
+ DBG_IO("%30s: 0x%x\n", "aio_req->nexus",
+ aio_req->nexus);
+ DBG_IO("%30s: 0x%x\n", "aio_req->buf_len",
+ aio_req->buf_len);
+ DBG_IO("%30s: 0x%x\n", "aio_req->cmd_flags.data_dir",
+ aio_req->cmd_flags.data_dir);
+ DBG_IO("%30s: 0x%x\n", "aio_req->attr_prio.task_attr",
+ aio_req->attr_prio.task_attr);
+ DBG_IO("%30s: 0x%x\n", "aio_req->err_idx",
+ aio_req->err_idx);
+ DBG_IO("%30s: 0x%x\n", "aio_req->num_sg",
+ aio_req->num_sg);
+ DBG_IO("%30s: 0x%p\n", "aio_req->sg_desc[0].addr",
+ (void *)aio_req->sg_desc[0].addr);
+ DBG_IO("%30s: 0x%x\n", "aio_req->sg_desc[0].len",
+ aio_req->sg_desc[0].len);
+ DBG_IO("%30s: 0x%x\n", "aio_req->sg_desc[0].flags",
+ aio_req->sg_desc[0].flags);
+}
+#endif
+
+void
+int_to_scsilun(uint64_t lun, uint8_t *scsi_lun)
+{
+ int i;
+
+ memset(scsi_lun, 0, sizeof(lun));
+ for (i = 0; i < sizeof(lun); i += 2) {
+ scsi_lun[i] = (lun >> 8) & 0xFF;
+ scsi_lun[i+1] = lun & 0xFF;
+ lun = lun >> 16;
+ }
+}
+
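
To make the byte layout concrete, two sample encodings produced by `int_to_scsilun()` above (each 16-bit LUN level is stored big-endian, level 0 first; hypothetical usage, not driver code):

	uint8_t buf[8];

	int_to_scsilun(5, buf);
	/* buf = { 0x00, 0x05, 0, 0, 0, 0, 0, 0 } */

	int_to_scsilun(0x00010002, buf);
	/* buf = { 0x00, 0x02, 0x00, 0x01, 0, 0, 0, 0 }
	 * low 16 bits -> level 0, next 16 bits -> level 1 */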
+/*Subroutine used to populate AIO IUs. */
+void
+pqisrc_build_aio_common(pqisrc_softstate_t *softs, pqi_aio_req_t *aio_req,
+ rcb_t *rcb, uint32_t num_elem_alloted)
+{
+ DBG_FUNC("IN\n");
aio_req->header.iu_type = PQI_IU_TYPE_AIO_PATH_IO_REQUEST;
aio_req->header.comp_feature = 0;
aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
@@ -271,80 +347,648 @@
aio_req->res1[1] = 0;
aio_req->nexus = rcb->ioaccel_handle;
aio_req->buf_len = GET_SCSI_BUFFLEN(rcb);
- aio_req->data_dir = rcb->data_dir;
- aio_req->mem_type = 0;
- aio_req->fence = 0;
- aio_req->res2 = 0;
- aio_req->task_attr = OS_GET_TASK_ATTR(rcb);
- aio_req->cmd_prio = 0;
- aio_req->res3 = 0;
+ aio_req->cmd_flags.data_dir = rcb->data_dir;
+ aio_req->cmd_flags.mem_type = 0;
+ aio_req->cmd_flags.fence = 0;
+ aio_req->cmd_flags.res2 = 0;
+ aio_req->attr_prio.task_attr = OS_GET_TASK_ATTR(rcb);
+ aio_req->attr_prio.cmd_prio = 0;
+ aio_req->attr_prio.res3 = 0;
aio_req->err_idx = aio_req->req_id;
aio_req->cdb_len = rcb->cmdlen;
- if(rcb->cmdlen > sizeof(aio_req->cdb))
+ if (rcb->cmdlen > sizeof(aio_req->cdb))
rcb->cmdlen = sizeof(aio_req->cdb);
memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen);
+ memset(aio_req->res4, 0, sizeof(aio_req->res4));
+
+ uint64_t lun = rcb->cm_ccb->ccb_h.target_lun;
+ if (lun && (rcb->dvp->is_multi_lun)) {
+ int_to_scsilun(lun, aio_req->lun);
+ }
+ else {
+ memset(aio_req->lun, 0, sizeof(aio_req->lun));
+ }
+
+ /* handle encryption fields */
+ if (rcb->encrypt_enable == true) {
+ aio_req->cmd_flags.encrypt_enable = true;
+ aio_req->encrypt_key_index =
+ LE_16(rcb->enc_info.data_enc_key_index);
+ aio_req->encrypt_twk_low =
+ LE_32(rcb->enc_info.encrypt_tweak_lower);
+ aio_req->encrypt_twk_high =
+ LE_32(rcb->enc_info.encrypt_tweak_upper);
+ } else {
+ aio_req->cmd_flags.encrypt_enable = 0;
+ aio_req->encrypt_key_index = 0;
+ aio_req->encrypt_twk_high = 0;
+ aio_req->encrypt_twk_low = 0;
+ }
+ /* Frame SGL Descriptor */
+ aio_req->cmd_flags.partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
+ &aio_req->header, num_elem_alloted);
+
+ aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t);
+
+ /* DBG_INFO("aio_req->num_sg :%d\n", aio_req->num_sg); */
+
+ aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) -
+ sizeof(iu_header_t);
+ /* set completion and error handlers. */
+ rcb->success_cmp_callback = pqisrc_process_io_response_success;
+ rcb->error_cmp_callback = pqisrc_process_aio_response_error;
+ rcb->resp_qid = aio_req->response_queue_id;
+ DBG_FUNC("OUT\n");
+
+}
+/*Subroutine used to show standard AIO IU fields */
+void
+pqisrc_show_aio_common(pqisrc_softstate_t *softs, rcb_t *rcb,
+ pqi_aio_req_t *aio_req)
+{
+#ifdef DEBUG_AIO
+	DBG_INFO("AIO IU Content, tag# 0x%08x\n", rcb->tag);
+ DBG_INFO("%15s: 0x%x\n", "iu_type", aio_req->header.iu_type);
+ DBG_INFO("%15s: 0x%x\n", "comp_feat", aio_req->header.comp_feature);
+ DBG_INFO("%15s: 0x%x\n", "length", aio_req->header.iu_length);
+ DBG_INFO("%15s: 0x%x\n", "resp_qid", aio_req->response_queue_id);
+ DBG_INFO("%15s: 0x%x\n", "req_id", aio_req->req_id);
+ DBG_INFO("%15s: 0x%x\n", "nexus", aio_req->nexus);
+ DBG_INFO("%15s: 0x%x\n", "buf_len", aio_req->buf_len);
+ DBG_INFO("%15s:\n", "cmd_flags");
+ DBG_INFO("%15s: 0x%x\n", "data_dir", aio_req->cmd_flags.data_dir);
+ DBG_INFO("%15s: 0x%x\n", "partial", aio_req->cmd_flags.partial);
+ DBG_INFO("%15s: 0x%x\n", "mem_type", aio_req->cmd_flags.mem_type);
+ DBG_INFO("%15s: 0x%x\n", "fence", aio_req->cmd_flags.fence);
+ DBG_INFO("%15s: 0x%x\n", "encryption",
+ aio_req->cmd_flags.encrypt_enable);
+ DBG_INFO("%15s:\n", "attr_prio");
+ DBG_INFO("%15s: 0x%x\n", "task_attr", aio_req->attr_prio.task_attr);
+ DBG_INFO("%15s: 0x%x\n", "cmd_prio", aio_req->attr_prio.cmd_prio);
+ DBG_INFO("%15s: 0x%x\n", "dek_index", aio_req->encrypt_key_index);
+ DBG_INFO("%15s: 0x%x\n", "tweak_lower", aio_req->encrypt_twk_low);
+ DBG_INFO("%15s: 0x%x\n", "tweak_upper", aio_req->encrypt_twk_high);
+ pqisrc_show_cdb(softs, "AIOC", rcb, aio_req->cdb);
+ DBG_INFO("%15s: 0x%x\n", "err_idx", aio_req->err_idx);
+ DBG_INFO("%15s: 0x%x\n", "num_sg", aio_req->num_sg);
+ DBG_INFO("%15s: 0x%x\n", "cdb_len", aio_req->cdb_len);
#if 0
- DBG_IO("CDB : \n");
- for(int i = 0; i < rcb->cmdlen ; i++)
- DBG_IO(" 0x%x \n",aio_req->cdb[i]);
+ DBG_INFO("%15s: 0x%x\n", "lun", aio_req->lun);
+ DBG_INFO("%15s: 0x%p\n", "sg_desc[0].addr",
+ (void *)aio_req->sg_desc[0].addr);
+ DBG_INFO("%15s: 0x%x\n", "sg_desc[0].len",
+ aio_req->sg_desc[0].len);
+ DBG_INFO("%15s: 0x%x\n", "sg_desc[0].flags",
+ aio_req->sg_desc[0].flags);
#endif
- memset(aio_req->lun,0,sizeof(aio_req->lun));
- memset(aio_req->res4,0,sizeof(aio_req->res4));
-
- if(rcb->encrypt_enable == true) {
- aio_req->encrypt_enable = true;
- aio_req->encrypt_key_index = LE_16(rcb->enc_info.data_enc_key_index);
- aio_req->encrypt_twk_low = LE_32(rcb->enc_info.encrypt_tweak_lower);
- aio_req->encrypt_twk_high = LE_32(rcb->enc_info.encrypt_tweak_upper);
+#endif /* DEBUG_AIO */
+}
+
+/*Subroutine used to populate AIO RAID 1 write bypass IU. */
+void
+pqisrc_build_aio_R1_write(pqisrc_softstate_t *softs,
+ pqi_aio_raid1_write_req_t *aio_req, rcb_t *rcb,
+ uint32_t num_elem_alloted)
+{
+ DBG_FUNC("IN\n");
+ if (!rcb->dvp) {
+		DBG_WARN("%s: DEBUG: dev ptr is null\n", __func__);
+ return;
+ }
+ if (!rcb->dvp->raid_map) {
+		DBG_WARN("%s: DEBUG: raid_map is null\n", __func__);
+ return;
+ }
+
+ aio_req->header.iu_type = PQI_IU_TYPE_RAID1_WRITE_BYPASS_REQUEST;
+ aio_req->header.comp_feature = 0;
+ aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
+ aio_req->work_area[0] = 0;
+ aio_req->work_area[1] = 0;
+ aio_req->req_id = rcb->tag;
+ aio_req->volume_id = (LE_32(rcb->dvp->scsi3addr[0]) & 0x3FFF);
+ aio_req->nexus_1 = rcb->it_nexus[0];
+ aio_req->nexus_2 = rcb->it_nexus[1];
+ aio_req->nexus_3 = rcb->it_nexus[2];
+ aio_req->buf_len = GET_SCSI_BUFFLEN(rcb);
+ aio_req->cmd_flags.data_dir = rcb->data_dir;
+ aio_req->cmd_flags.mem_type = 0;
+ aio_req->cmd_flags.fence = 0;
+ aio_req->cmd_flags.res2 = 0;
+ aio_req->attr_prio.task_attr = OS_GET_TASK_ATTR(rcb);
+ aio_req->attr_prio.cmd_prio = 0;
+ aio_req->attr_prio.res3 = 0;
+ if(rcb->cmdlen > sizeof(aio_req->cdb))
+ rcb->cmdlen = sizeof(aio_req->cdb);
+ memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen);
+ aio_req->err_idx = aio_req->req_id;
+ aio_req->cdb_len = rcb->cmdlen;
+ aio_req->num_drives = LE_16(rcb->dvp->raid_map->layout_map_count);
+
+ /* handle encryption fields */
+ if (rcb->encrypt_enable == true) {
+ aio_req->cmd_flags.encrypt_enable = true;
+ aio_req->encrypt_key_index =
+ LE_16(rcb->enc_info.data_enc_key_index);
+ aio_req->encrypt_twk_low =
+ LE_32(rcb->enc_info.encrypt_tweak_lower);
+ aio_req->encrypt_twk_high =
+ LE_32(rcb->enc_info.encrypt_tweak_upper);
} else {
- aio_req->encrypt_enable = 0;
+ aio_req->cmd_flags.encrypt_enable = 0;
aio_req->encrypt_key_index = 0;
aio_req->encrypt_twk_high = 0;
aio_req->encrypt_twk_low = 0;
}
-
/* Frame SGL Descriptor */
- aio_req->partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
- &aio_req->header, num_elem_alloted);
+ aio_req->cmd_flags.partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
+ &aio_req->header, num_elem_alloted);
aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t);
- DBG_INFO("aio_req->num_sg :%d",aio_req->num_sg);
+ /* DBG_INFO("aio_req->num_sg :%d\n", aio_req->num_sg); */
- aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) -
+ aio_req->header.iu_length += offsetof(pqi_aio_raid1_write_req_t, sg_desc) -
sizeof(iu_header_t);
+
+ /* set completion and error handlers. */
+ rcb->success_cmp_callback = pqisrc_process_io_response_success;
+ rcb->error_cmp_callback = pqisrc_process_aio_response_error;
+ rcb->resp_qid = aio_req->response_queue_id;
+ DBG_FUNC("OUT\n");
+
+}
+
+/*Subroutine used to show AIO RAID1 Write bypass IU fields */
+void
+pqisrc_show_aio_R1_write(pqisrc_softstate_t *softs, rcb_t *rcb,
+ pqi_aio_raid1_write_req_t *aio_req)
+{
+
+#ifdef DEBUG_AIO
+	DBG_INFO("AIO RAID1 Write IU Content, tag# 0x%08x\n", rcb->tag);
+ DBG_INFO("%15s: 0x%x\n", "iu_type", aio_req->header.iu_type);
+ DBG_INFO("%15s: 0x%x\n", "comp_feat", aio_req->header.comp_feature);
+ DBG_INFO("%15s: 0x%x\n", "length", aio_req->header.iu_length);
+ DBG_INFO("%15s: 0x%x\n", "resp_qid", aio_req->response_queue_id);
+ DBG_INFO("%15s: 0x%x\n", "req_id", aio_req->req_id);
+ DBG_INFO("%15s: 0x%x\n", "volume_id", aio_req->volume_id);
+ DBG_INFO("%15s: 0x%x\n", "nexus_1", aio_req->nexus_1);
+ DBG_INFO("%15s: 0x%x\n", "nexus_2", aio_req->nexus_2);
+ DBG_INFO("%15s: 0x%x\n", "nexus_3", aio_req->nexus_3);
+ DBG_INFO("%15s: 0x%x\n", "buf_len", aio_req->buf_len);
+ DBG_INFO("%15s:\n", "cmd_flags");
+ DBG_INFO("%15s: 0x%x\n", "data_dir", aio_req->cmd_flags.data_dir);
+ DBG_INFO("%15s: 0x%x\n", "partial", aio_req->cmd_flags.partial);
+ DBG_INFO("%15s: 0x%x\n", "mem_type", aio_req->cmd_flags.mem_type);
+ DBG_INFO("%15s: 0x%x\n", "fence", aio_req->cmd_flags.fence);
+ DBG_INFO("%15s: 0x%x\n", "encryption",
+ aio_req->cmd_flags.encrypt_enable);
+ DBG_INFO("%15s:\n", "attr_prio");
+ DBG_INFO("%15s: 0x%x\n", "task_attr", aio_req->attr_prio.task_attr);
+ DBG_INFO("%15s: 0x%x\n", "cmd_prio", aio_req->attr_prio.cmd_prio);
+ DBG_INFO("%15s: 0x%x\n", "dek_index", aio_req->encrypt_key_index);
+ pqisrc_show_cdb(softs, "AIOR1W", rcb, aio_req->cdb);
+ DBG_INFO("%15s: 0x%x\n", "err_idx", aio_req->err_idx);
+ DBG_INFO("%15s: 0x%x\n", "num_sg", aio_req->num_sg);
+ DBG_INFO("%15s: 0x%x\n", "cdb_len", aio_req->cdb_len);
+ DBG_INFO("%15s: 0x%x\n", "num_drives", aio_req->num_drives);
+ DBG_INFO("%15s: 0x%x\n", "tweak_lower", aio_req->encrypt_twk_low);
+ DBG_INFO("%15s: 0x%x\n", "tweak_upper", aio_req->encrypt_twk_high);
#if 0
- DBG_IO("aio_req->header.iu_type : 0x%x \n",aio_req->header.iu_type);
- DBG_IO("aio_req->resp_qid :0x%x",aio_req->resp_qid);
- DBG_IO("aio_req->req_id : 0x%x \n",aio_req->req_id);
- DBG_IO("aio_req->nexus : 0x%x \n",aio_req->nexus);
- DBG_IO("aio_req->buf_len : 0x%x \n",aio_req->buf_len);
- DBG_IO("aio_req->data_dir : 0x%x \n",aio_req->data_dir);
- DBG_IO("aio_req->task_attr : 0x%x \n",aio_req->task_attr);
- DBG_IO("aio_req->err_idx : 0x%x \n",aio_req->err_idx);
- DBG_IO("aio_req->num_sg :%d",aio_req->num_sg);
- DBG_IO("aio_req->sg_desc[0].addr : %p \n", (void*)aio_req->sg_desc[0].addr);
- DBG_IO("aio_req->sg_desc[0].len : 0%x \n", aio_req->sg_desc[0].len);
- DBG_IO("aio_req->sg_desc[0].flags : 0%x \n", aio_req->sg_desc[0].flags);
+ DBG_INFO("%15s: 0x%p\n", "sg_desc[0].addr",
+ (void *)aio_req->sg_desc[0].addr);
+ DBG_INFO("%15s: 0x%x\n", "sg_desc[0].len",
+ aio_req->sg_desc[0].len);
+ DBG_INFO("%15s: 0x%x\n", "sg_desc[0].flags",
+ aio_req->sg_desc[0].flags);
#endif
+#endif /* DEBUG_AIO */
+}
+/*Subroutine used to populate AIO Raid5 or 6 write bypass IU */
+void
+pqisrc_build_aio_R5or6_write(pqisrc_softstate_t *softs,
+ pqi_aio_raid5or6_write_req_t *aio_req, rcb_t *rcb,
+ uint32_t num_elem_alloted)
+{
+	uint32_t index;
+	unsigned num_data_disks;
+	unsigned num_metadata_disks;
+	unsigned total_disks;
+	DBG_FUNC("IN\n");
+ num_data_disks = LE_16(rcb->dvp->raid_map->data_disks_per_row);
+ num_metadata_disks = LE_16(rcb->dvp->raid_map->metadata_disks_per_row);
+ total_disks = num_data_disks + num_metadata_disks;
+
+ index = PQISRC_DIV_ROUND_UP(rcb->raid_map_index + 1, total_disks);
+ index *= total_disks;
+ index -= num_metadata_disks;
+
+ switch (rcb->dvp->raid_level) {
+ case SA_RAID_5:
+ aio_req->header.iu_type =
+ PQI_IU_TYPE_RAID5_WRITE_BYPASS_REQUEST;
+ break;
+ case SA_RAID_6:
+ aio_req->header.iu_type =
+ PQI_IU_TYPE_RAID6_WRITE_BYPASS_REQUEST;
+ break;
+ default:
+ DBG_ERR("WRONG RAID TYPE FOR FUNCTION\n");
+ }
+ aio_req->header.comp_feature = 0;
+ aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
+ aio_req->work_area[0] = 0;
+ aio_req->work_area[1] = 0;
+ aio_req->req_id = rcb->tag;
+ aio_req->volume_id = (LE_32(rcb->dvp->scsi3addr[0]) & 0x3FFF);
+ aio_req->data_it_nexus = rcb->dvp->raid_map->dev_data[rcb->raid_map_index].ioaccel_handle;
+ aio_req->p_parity_it_nexus =
+ rcb->dvp->raid_map->dev_data[index].ioaccel_handle;
+ if (aio_req->header.iu_type ==
+ PQI_IU_TYPE_RAID6_WRITE_BYPASS_REQUEST) {
+ aio_req->q_parity_it_nexus =
+ rcb->dvp->raid_map->dev_data[index + 1].ioaccel_handle;
+ }
+ aio_req->xor_multiplier =
+ rcb->dvp->raid_map->dev_data[rcb->raid_map_index].xor_mult[1];
+ aio_req->row = rcb->row_num;
+ /*aio_req->reserved = rcb->row_num * rcb->blocks_per_row +
+ rcb->dvp->raid_map->disk_starting_blk;*/
+ aio_req->buf_len = GET_SCSI_BUFFLEN(rcb);
+ aio_req->cmd_flags.data_dir = rcb->data_dir;
+ aio_req->cmd_flags.mem_type = 0;
+ aio_req->cmd_flags.fence = 0;
+ aio_req->cmd_flags.res2 = 0;
+ aio_req->attr_prio.task_attr = OS_GET_TASK_ATTR(rcb);
+ aio_req->attr_prio.cmd_prio = 0;
+ aio_req->attr_prio.res3 = 0;
+ if (rcb->cmdlen > sizeof(aio_req->cdb))
+ rcb->cmdlen = sizeof(aio_req->cdb);
+ memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen);
+ aio_req->err_idx = aio_req->req_id;
+ aio_req->cdb_len = rcb->cmdlen;
+#if 0
+ /* Stubbed out for later */
+ aio_req->header.iu_type = iu_type;
+ aio_req->data_it_nexus = ;
+ aio_req->p_parity_it_nexus = ;
+ aio_req->q_parity_it_nexus = ;
+ aio_req->row = ;
+ aio_req->stripe_lba = ;
+#endif
+ /* handle encryption fields */
+ if (rcb->encrypt_enable == true) {
+ aio_req->cmd_flags.encrypt_enable = true;
+ aio_req->encrypt_key_index =
+ LE_16(rcb->enc_info.data_enc_key_index);
+ aio_req->encrypt_twk_low =
+ LE_32(rcb->enc_info.encrypt_tweak_lower);
+ aio_req->encrypt_twk_high =
+ LE_32(rcb->enc_info.encrypt_tweak_upper);
+ } else {
+ aio_req->cmd_flags.encrypt_enable = 0;
+ aio_req->encrypt_key_index = 0;
+ aio_req->encrypt_twk_high = 0;
+ aio_req->encrypt_twk_low = 0;
+ }
+ /* Frame SGL Descriptor */
+ aio_req->cmd_flags.partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
+ &aio_req->header, num_elem_alloted);
+
+ aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t);
+
+ /* DBG_INFO("aio_req->num_sg :%d\n", aio_req->num_sg); */
+
+ aio_req->header.iu_length += offsetof(pqi_aio_raid5or6_write_req_t, sg_desc) -
+ sizeof(iu_header_t);
+ /* set completion and error handlers. */
rcb->success_cmp_callback = pqisrc_process_io_response_success;
rcb->error_cmp_callback = pqisrc_process_aio_response_error;
rcb->resp_qid = aio_req->response_queue_id;
+ DBG_FUNC("OUT\n");
+
+}
+
+/*Subroutine used to show AIO RAID5/6 Write bypass IU fields */
+void
+pqisrc_show_aio_R5or6_write(pqisrc_softstate_t *softs, rcb_t *rcb,
+ pqi_aio_raid5or6_write_req_t *aio_req)
+{
+#ifdef DEBUG_AIO
+ DBG_INFO("AIO RAID5or6 Write IU Content, tag# 0x%08x\n", rcb->tag);
+ DBG_INFO("%15s: 0x%x\n", "iu_type", aio_req->header.iu_type);
+ DBG_INFO("%15s: 0x%x\n", "comp_feat", aio_req->header.comp_feature);
+ DBG_INFO("%15s: 0x%x\n", "length", aio_req->header.iu_length);
+ DBG_INFO("%15s: 0x%x\n", "resp_qid", aio_req->response_queue_id);
+ DBG_INFO("%15s: 0x%x\n", "req_id", aio_req->req_id);
+ DBG_INFO("%15s: 0x%x\n", "volume_id", aio_req->volume_id);
+ DBG_INFO("%15s: 0x%x\n", "data_it_nexus",
+ aio_req->data_it_nexus);
+ DBG_INFO("%15s: 0x%x\n", "p_parity_it_nexus",
+ aio_req->p_parity_it_nexus);
+ DBG_INFO("%15s: 0x%x\n", "q_parity_it_nexus",
+ aio_req->q_parity_it_nexus);
+ DBG_INFO("%15s: 0x%x\n", "buf_len", aio_req->buf_len);
+ DBG_INFO("%15s:\n", "cmd_flags");
+ DBG_INFO("%15s: 0x%x\n", "data_dir", aio_req->cmd_flags.data_dir);
+ DBG_INFO("%15s: 0x%x\n", "partial", aio_req->cmd_flags.partial);
+ DBG_INFO("%15s: 0x%x\n", "mem_type", aio_req->cmd_flags.mem_type);
+ DBG_INFO("%15s: 0x%x\n", "fence", aio_req->cmd_flags.fence);
+ DBG_INFO("%15s: 0x%x\n", "encryption",
+ aio_req->cmd_flags.encrypt_enable);
+ DBG_INFO("%15s:\n", "attr_prio");
+ DBG_INFO("%15s: 0x%x\n", "task_attr", aio_req->attr_prio.task_attr);
+ DBG_INFO("%15s: 0x%x\n", "cmd_prio", aio_req->attr_prio.cmd_prio);
+ DBG_INFO("%15s: 0x%x\n", "dek_index", aio_req->encrypt_key_index);
+ pqisrc_show_cdb(softs, "AIOR56W", rcb, aio_req->cdb);
+ DBG_INFO("%15s: 0x%x\n", "err_idx", aio_req->err_idx);
+ DBG_INFO("%15s: 0x%x\n", "num_sg", aio_req->num_sg);
+ DBG_INFO("%15s: 0x%x\n", "cdb_len", aio_req->cdb_len);
+ DBG_INFO("%15s: 0x%x\n", "tweak_lower", aio_req->encrypt_twk_low);
+ DBG_INFO("%15s: 0x%x\n", "tweak_upper", aio_req->encrypt_twk_high);
+ DBG_INFO("%15s: 0x%lx\n", "row", aio_req->row);
+#if 0
+ DBG_INFO("%15s: 0x%lx\n", "stripe_lba", aio_req->stripe_lba);
+ DBG_INFO("%15s: 0x%p\n", "sg_desc[0].addr",
+ (void *)aio_req->sg_desc[0].addr);
+ DBG_INFO("%15s: 0x%x\n", "sg_desc[0].len",
+ aio_req->sg_desc[0].len);
+ DBG_INFO("%15s: 0x%x\n", "sg_desc[0].flags",
+ aio_req->sg_desc[0].flags);
+#endif
+#endif /* DEBUG_AIO */
+}
- DBG_FUNC(" OUT ");
+/* Is the cdb a read command? */
+boolean_t
+pqisrc_cdb_is_read(uint8_t *cdb)
+{
+ if (cdb[0] == SCMD_READ_6 || cdb[0] == SCMD_READ_10 ||
+ cdb[0] == SCMD_READ_12 || cdb[0] == SCMD_READ_16)
+ return true;
+ return false;
+}
+
+/* Is the cdb a write command? */
+boolean_t
+pqisrc_cdb_is_write(uint8_t *cdb)
+{
+ if (cdb == NULL)
+ return false;
+ if (cdb[0] == SCMD_WRITE_6 || cdb[0] == SCMD_WRITE_10 ||
+ cdb[0] == SCMD_WRITE_12 || cdb[0] == SCMD_WRITE_16)
+ return true;
+ return false;
}
-/*Function used to build and send RAID/AIO */
+/*Subroutine used to show the AIO request */
+void
+pqisrc_show_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
+ pqi_aio_req_t *aio_req, uint32_t num_elem_alloted)
+{
+ boolean_t is_write;
+ DBG_FUNC("IN\n");
+
+ is_write = pqisrc_cdb_is_write(rcb->cdbp);
+
+ if (!is_write) {
+ pqisrc_show_aio_common(softs, rcb, aio_req);
+ goto out;
+ }
+
+ switch (rcb->dvp->raid_level) {
+ case SA_RAID_0:
+ pqisrc_show_aio_common(softs, rcb, aio_req);
+ break;
+ case SA_RAID_1:
+ case SA_RAID_ADM:
+ pqisrc_show_aio_R1_write(softs, rcb,
+ (pqi_aio_raid1_write_req_t *)aio_req);
+ break;
+ case SA_RAID_5:
+ case SA_RAID_6:
+ pqisrc_show_aio_R5or6_write(softs, rcb,
+ (pqi_aio_raid5or6_write_req_t *)aio_req);
+ break;
+ }
+
+out:
+ DBG_FUNC("OUT\n");
+
+}
+
+
+void
+pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
+ pqi_aio_req_t *aio_req, uint32_t num_elem_alloted)
+{
+ boolean_t is_write;
+ DBG_FUNC("IN\n");
+
+ is_write = pqisrc_cdb_is_write(rcb->cdbp);
+
+ if (is_write) {
+ switch (rcb->dvp->raid_level) {
+ case SA_RAID_0:
+ pqisrc_build_aio_common(softs, aio_req,
+ rcb, num_elem_alloted);
+ break;
+ case SA_RAID_1:
+ case SA_RAID_ADM:
+ pqisrc_build_aio_R1_write(softs,
+ (pqi_aio_raid1_write_req_t *)aio_req,
+ rcb, num_elem_alloted);
+
+ break;
+ case SA_RAID_5:
+ case SA_RAID_6:
+ pqisrc_build_aio_R5or6_write(softs,
+ (pqi_aio_raid5or6_write_req_t *)aio_req,
+ rcb, num_elem_alloted);
+ break;
+ }
+ } else {
+ pqisrc_build_aio_common(softs, aio_req, rcb, num_elem_alloted);
+ }
+
+ pqisrc_show_aio_io(softs, rcb, aio_req, num_elem_alloted);
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Return true from this function to prevent AIO from handling this request.
+ * True is returned if the request is determined to be part of a stream, or
+ * if the controller does not handle AIO at the appropriate RAID level.
+ */
+static boolean_t
+pqisrc_is_parity_write_stream(pqisrc_softstate_t *softs, rcb_t *rcb)
+{
+ os_ticks_t oldest_ticks;
+ uint8_t lru_index;
+ int i;
+ int rc;
+ pqi_scsi_dev_t *device;
+ struct pqi_stream_data *pqi_stream_data;
+ aio_req_locator_t loc;
+
+ DBG_FUNC("IN\n");
+
+	rc = fill_lba_for_scsi_rw(softs, rcb->cdbp, &loc);
+ if (rc != PQI_STATUS_SUCCESS) {
+ return false;
+ }
+
+ /* check writes only */
+ if (!pqisrc_cdb_is_write(rcb->cdbp)) {
+ return false;
+ }
+
+ if (!softs->enable_stream_detection) {
+ return false;
+ }
+
+ device = rcb->dvp;
+ if (!device) {
+ return false;
+ }
+
+ /*
+ * check for R5/R6 streams.
+ */
+ if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6) {
+ return false;
+ }
+
+ /*
+	 * If the controller does not support AIO R{5,6} writes, send
+	 * these requests down the non-AIO path.
+ */
+ if ((device->raid_level == SA_RAID_5 && !softs->aio_raid5_write_bypass) ||
+ (device->raid_level == SA_RAID_6 && !softs->aio_raid6_write_bypass)) {
+ return true;
+ }
+
+ lru_index = 0;
+ oldest_ticks = INT_MAX;
+ for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
+ pqi_stream_data = &device->stream_data[i];
+ /*
+		 * Check whether this request is adjacent to, or falls
+		 * within, the previously tracked request.
+ */
+ if ((pqi_stream_data->next_lba &&
+ loc.block.first >= pqi_stream_data->next_lba) &&
+ loc.block.first <= pqi_stream_data->next_lba +
+ loc.block.cnt) {
+ pqi_stream_data->next_lba = loc.block.first +
+ loc.block.cnt;
+ pqi_stream_data->last_accessed = TICKS;
+ return true;
+ }
+
+ /* unused entry */
+ if (pqi_stream_data->last_accessed == 0) {
+ lru_index = i;
+ break;
+ }
+
+ /* Find entry with oldest last accessed time */
+ if (pqi_stream_data->last_accessed <= oldest_ticks) {
+ oldest_ticks = pqi_stream_data->last_accessed;
+ lru_index = i;
+ }
+ }
+
+ /*
+ * Set LRU entry
+ */
+ pqi_stream_data = &device->stream_data[lru_index];
+ pqi_stream_data->last_accessed = TICKS;
+ pqi_stream_data->next_lba = loc.block.first + loc.block.cnt;
+
+ DBG_FUNC("OUT\n");
+
+ return false;
+}
+
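
A concrete trace of the heuristic above, with hypothetical LBAs:

	/*
	 * write #1: first = 1000, cnt = 8
	 *   no tracked stream matches, so an LRU slot records
	 *   next_lba = 1008 and the function returns false
	 *   (the write may still be sent via AIO).
	 *
	 * write #2: first = 1008, cnt = 8
	 *   1008 >= next_lba (1008) and 1008 <= next_lba + cnt (1016),
	 *   so the write is treated as part of a stream: next_lba
	 *   advances to 1016 and the function returns true, steering
	 *   the request down the RAID path.
	 */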
+/**
+ Determine if a request is eligible for AIO. Build/map
+ the request if using the AIO path to a RAID volume.
+
+ Returns the path that should be used for this request.
+*/
+static IO_PATH_T
+determine_io_path_build_bypass(pqisrc_softstate_t *softs,rcb_t *rcb)
+{
+ IO_PATH_T io_path = AIO_PATH;
+ pqi_scsi_dev_t *devp = rcb->dvp;
+ int ret = PQI_STATUS_FAILURE;
+
+	/* Default to using the host CDB directly (will be used if targeting the
+	   RAID path or HBA mode) */
+ rcb->cdbp = OS_GET_CDBP(rcb);
+
+ if(!rcb->aio_retry) {
+
+ /** IO for Physical Drive, Send in AIO PATH **/
+ if(IS_AIO_PATH(devp)) {
+ rcb->ioaccel_handle = devp->ioaccel_handle;
+ return io_path;
+ }
+
+ /** IO for RAID Volume, ByPass IO, Send in AIO PATH unless part of stream **/
+ if (devp->offload_enabled && !pqisrc_is_parity_write_stream(softs, rcb)) {
+ ret = pqisrc_build_scsi_cmd_raidbypass(softs, devp, rcb);
+ }
+
+ if (PQI_STATUS_FAILURE == ret) {
+ io_path = RAID_PATH;
+ } else {
+ ASSERT(rcb->cdbp == rcb->bypass_cdb);
+ }
+ } else {
+ /* Retrying failed AIO IO */
+ io_path = RAID_PATH;
+ }
+
+ return io_path;
+}
+
+uint8_t
+pqisrc_get_aio_data_direction(rcb_t *rcb)
+{
+ switch (rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) {
+ case CAM_DIR_IN: return SOP_DATA_DIR_FROM_DEVICE;
+ case CAM_DIR_OUT: return SOP_DATA_DIR_TO_DEVICE;
+ case CAM_DIR_NONE: return SOP_DATA_DIR_NONE;
+ default: return SOP_DATA_DIR_UNKNOWN;
+ }
+}
+
+uint8_t
+pqisrc_get_raid_data_direction(rcb_t *rcb)
+{
+ switch (rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) {
+ case CAM_DIR_IN: return SOP_DATA_DIR_TO_DEVICE;
+ case CAM_DIR_OUT: return SOP_DATA_DIR_FROM_DEVICE;
+ case CAM_DIR_NONE: return SOP_DATA_DIR_NONE;
+ default: return SOP_DATA_DIR_UNKNOWN;
+ }
+}
+
+/* Function used to build and send RAID/AIO */
int
pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
{
ib_queue_t *ib_q_array = softs->op_aio_ib_q;
ib_queue_t *ib_q = NULL;
char *ib_iu = NULL;
- IO_PATH_T io_path = AIO_PATH;
+ IO_PATH_T io_path;
uint32_t TraverseCount = 0;
int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb);
int qindex = first_qindex;
@@ -352,43 +996,32 @@
uint32_t num_elem_needed;
uint32_t num_elem_alloted = 0;
pqi_scsi_dev_t *devp = rcb->dvp;
- uint8_t raidbypass_cdb[16];
+ boolean_t is_write;
- DBG_FUNC(" IN ");
+ DBG_FUNC("IN\n");
- if(!rcb->aio_retry) {
- rcb->cdbp = OS_GET_CDBP(rcb);
- if(IS_AIO_PATH(devp)) {
- /** IO for Physical Drive **/
- /** Send in AIO PATH**/
- rcb->ioaccel_handle = devp->ioaccel_handle;
- } else {
- int ret = PQI_STATUS_FAILURE;
- /** IO for RAID Volume **/
- if (devp->offload_enabled) {
- /** ByPass IO ,Send in AIO PATH **/
- ret = pqisrc_send_scsi_cmd_raidbypass(softs,
- devp, rcb, raidbypass_cdb);
- }
- if (PQI_STATUS_FAILURE == ret) {
- /** Send in RAID PATH **/
- io_path = RAID_PATH;
- num_op_ib_q = softs->num_op_raid_ibq;
- ib_q_array = softs->op_raid_ib_q;
- } else {
- rcb->cdbp = raidbypass_cdb;
- }
- }
- } else {
- /* Retrying failed AIO IO */
- io_path = RAID_PATH;
- rcb->cdbp = OS_GET_CDBP(rcb);
+	/* Note: this will determine if the request is eligible for AIO */
+ io_path = determine_io_path_build_bypass(softs, rcb);
+
+	if (io_path == RAID_PATH) {
+ /* Update direction for RAID path */
+ rcb->data_dir = pqisrc_get_raid_data_direction(rcb);
num_op_ib_q = softs->num_op_raid_ibq;
ib_q_array = softs->op_raid_ib_q;
}
+ else {
+ rcb->data_dir = pqisrc_get_aio_data_direction(rcb);
+ if (rcb->data_dir == SOP_DATA_DIR_UNKNOWN) {
+ DBG_ERR("Unknown Direction\n");
+ }
+ }
- num_elem_needed = pqisrc_num_elem_needed(softs, OS_GET_IO_SG_COUNT(rcb));
- DBG_IO("num_elem_needed :%d",num_elem_needed);
+ is_write = pqisrc_cdb_is_write(rcb->cdbp);
+ /* coverity[unchecked_value] */
+ num_elem_needed = pqisrc_num_elem_needed(softs,
+ OS_GET_IO_SG_COUNT(rcb), devp, is_write, io_path);
+	DBG_IO("num_elem_needed :%u\n",num_elem_needed);
do {
uint32_t num_elem_available;
@@ -397,7 +1030,7 @@
num_elem_available = pqisrc_contiguous_free_elem(ib_q->pi_local,
*(ib_q->ci_virt_addr), ib_q->num_elem);
- DBG_IO("num_elem_avialable :%d\n",num_elem_available);
+	DBG_IO("num_elem_available :%u\n",num_elem_available);
if(num_elem_available >= num_elem_needed) {
num_elem_alloted = num_elem_needed;
break;
@@ -414,7 +1047,7 @@
}
}while(TraverseCount < 2);
- DBG_IO("num_elem_alloted :%d",num_elem_alloted);
+	DBG_IO("num_elem_alloted :%u\n",num_elem_alloted);
if (num_elem_alloted == 0) {
DBG_WARN("OUT: IB Queues were full\n");
return PQI_STATUS_QFULL;
@@ -426,12 +1059,12 @@
ib_iu = ib_q->array_virt_addr + (ib_q->pi_local * ib_q->elem_size);
if(io_path == AIO_PATH) {
- /** Build AIO structure **/
- pqisrc_build_aio_io(softs, rcb, (pqi_aio_req_t*)ib_iu,
- num_elem_alloted);
+ /* Fill in the AIO IU per request and raid type */
+ pqisrc_build_aio_io(softs, rcb, (pqi_aio_req_t *)ib_iu,
+ num_elem_alloted);
} else {
/** Build RAID structure **/
- pqisrc_build_raid_io(softs, rcb, (pqisrc_raid_req_t*)ib_iu,
+ pqisrc_build_raid_io(softs, rcb, (pqisrc_raid_req_t *)ib_iu,
num_elem_alloted);
}
@@ -439,81 +1072,595 @@
rcb->req_q = ib_q;
rcb->path = io_path;
+ pqisrc_increment_io_counters(softs, rcb);
+
/* Update the local PI */
ib_q->pi_local = (ib_q->pi_local + num_elem_alloted) % ib_q->num_elem;
- DBG_INFO("ib_q->pi_local : %x\n", ib_q->pi_local);
- DBG_INFO("*ib_q->ci_virt_addr: %x\n",*(ib_q->ci_virt_addr));
+ DBG_IO("ib_q->pi_local : %x\n", ib_q->pi_local);
+ DBG_IO("*ib_q->ci_virt_addr: %x\n",*(ib_q->ci_virt_addr));
/* Inform the fw about the new IU */
PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local);
- PQI_UNLOCK(&ib_q->lock);
- DBG_FUNC(" OUT ");
- return PQI_STATUS_SUCCESS;
+ PQI_UNLOCK(&ib_q->lock);
+ DBG_FUNC("OUT\n");
+ return PQI_STATUS_SUCCESS;
+}
+
+/* Subroutine used to set encryption info as part of RAID bypass IO*/
+static inline void
+pqisrc_set_enc_info(struct pqi_enc_info *enc_info,
+ struct raid_map *raid_map, uint64_t first_block)
+{
+ uint32_t volume_blk_size;
+
+ /*
+ * Set the encryption tweak values based on logical block address.
+ * If the block size is 512, the tweak value is equal to the LBA.
+ * For other block sizes, tweak value is (LBA * block size) / 512.
+ */
+ volume_blk_size = GET_LE32((uint8_t *)&raid_map->volume_blk_size);
+ if (volume_blk_size != 512)
+ first_block = (first_block * volume_blk_size) / 512;
+
+ enc_info->data_enc_key_index =
+ GET_LE16((uint8_t *)&raid_map->data_encryption_key_index);
+ enc_info->encrypt_tweak_upper = ((uint32_t)(((first_block) >> 16) >> 16));
+ enc_info->encrypt_tweak_lower = ((uint32_t)(first_block));
+}
+
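
A worked instance of the tweak computation above, assuming a hypothetical 4096-byte volume block size:

	uint64_t first_block = 0x12345678ULL;   /* logical block address */
	uint32_t volume_blk_size = 4096;        /* hypothetical */

	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;  /* x8 here */

	/* first_block is now 0x91A2B3C0, so:
	 *   encrypt_tweak_lower = 0x91A2B3C0
	 *   encrypt_tweak_upper = 0x00000000
	 */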
+
+/*
+ * Attempt to perform offload RAID mapping for a logical volume I/O.
+ */
+
+#define HPSA_RAID_0 0
+#define HPSA_RAID_4 1
+#define HPSA_RAID_1 2 /* also used for RAID 10 */
+#define HPSA_RAID_5 3 /* also used for RAID 50 */
+#define HPSA_RAID_51 4
+#define HPSA_RAID_6 5 /* also used for RAID 60 */
+#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
+#define HPSA_RAID_MAX HPSA_RAID_ADM
+#define HPSA_RAID_UNKNOWN 0xff
+
+/* Subroutine used to parse the scsi opcode and build the CDB for RAID bypass*/
+static int
+fill_lba_for_scsi_rw(pqisrc_softstate_t *softs, uint8_t *cdb, aio_req_locator_t *l)
+{
+
+ if (!l) {
+		DBG_INFO("No locator ptr: AIO ineligible\n");
+ return PQI_STATUS_FAILURE;
+ }
+
+ if (cdb == NULL)
+ return PQI_STATUS_FAILURE;
+
+ switch (cdb[0]) {
+ case SCMD_WRITE_6:
+ l->is_write = true;
+ /* coverity[fallthrough] */
+ case SCMD_READ_6:
+ l->block.first = (uint64_t)(((cdb[1] & 0x1F) << 16) |
+ (cdb[2] << 8) | cdb[3]);
+ l->block.cnt = (uint32_t)cdb[4];
+ if (l->block.cnt == 0)
+			l->block.cnt = 256; /* blkcnt 0 means 256 */
+ break;
+ case SCMD_WRITE_10:
+ l->is_write = true;
+ /* coverity[fallthrough] */
+ case SCMD_READ_10:
+ l->block.first = (uint64_t)GET_BE32(&cdb[2]);
+ l->block.cnt = (uint32_t)GET_BE16(&cdb[7]);
+ break;
+ case SCMD_WRITE_12:
+ l->is_write = true;
+ /* coverity[fallthrough] */
+ case SCMD_READ_12:
+ l->block.first = (uint64_t)GET_BE32(&cdb[2]);
+ l->block.cnt = GET_BE32(&cdb[6]);
+ break;
+ case SCMD_WRITE_16:
+ l->is_write = true;
+ /* coverity[fallthrough] */
+ case SCMD_READ_16:
+ l->block.first = GET_BE64(&cdb[2]);
+ l->block.cnt = GET_BE32(&cdb[10]);
+ break;
+ default:
+ /* Process via normal I/O path. */
+		DBG_AIO("NOT read or write 6/10/12/16: AIO ineligible\n");
+ return PQI_STATUS_FAILURE;
+ }
+ return PQI_STATUS_SUCCESS;
+}
+
+
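
A usage sketch for the parser above, decoding a hypothetical READ(10) CDB (LBA 0x1000, 16 blocks). Note the locator must be zero-initialized, since `is_write` is only ever set, never cleared:

	uint8_t cdb[16] = {
		SCMD_READ_10, 0x00,
		0x00, 0x00, 0x10, 0x00,   /* LBA 0x1000, big-endian */
		0x00,
		0x00, 0x10,               /* 16 blocks, big-endian  */
		0x00,
	};
	aio_req_locator_t loc = { 0 };

	if (fill_lba_for_scsi_rw(softs, cdb, &loc) == PQI_STATUS_SUCCESS) {
		/* loc.block.first == 0x1000, loc.block.cnt == 16,
		 * loc.is_write == false */
	}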
+/* determine whether writes to certain types of RAID are supported. */
+inline boolean_t
+pqisrc_is_supported_write(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device)
+{
+
+ DBG_FUNC("IN\n");
+
+ /* Raid0 was always supported */
+ if (device->raid_level == SA_RAID_0)
+ return true;
+
+	/* Module params for individual advanced AIO write features may be
+	 * on; those apply to ALL controllers, but some controllers do not
+	 * support advanced AIO writes.
+ */
+ if (!softs->adv_aio_capable)
+ return false;
+
+ /* if the raid write bypass feature is turned on,
+ * then the write is supported.
+ */
+ switch (device->raid_level) {
+ case SA_RAID_1:
+ case SA_RAID_ADM:
+ if (softs->aio_raid1_write_bypass)
+ return true;
+ break;
+ case SA_RAID_5:
+ if (softs->aio_raid5_write_bypass)
+ return true;
+ break;
+ case SA_RAID_6:
+ if (softs->aio_raid6_write_bypass)
+ return true;
+ }
+
+ /* otherwise, it must be an unsupported write. */
+ DBG_IO("AIO ineligible: write not supported for raid type\n");
+ DBG_FUNC("OUT\n");
+ return false;
+
+}
+
+/* check for zero-byte transfers, invalid blocks, and wraparound */
+static inline boolean_t
+pqisrc_is_invalid_block(pqisrc_softstate_t *softs, aio_req_locator_t *l)
+{
+ DBG_FUNC("IN\n");
+
+ if (l->block.cnt == 0) {
+ DBG_AIO("AIO ineligible: blk_cnt=0\n");
+ DBG_FUNC("OUT\n");
+ return true;
+ }
+
+ if (l->block.last < l->block.first ||
+ l->block.last >=
+ GET_LE64((uint8_t *)&l->raid_map->volume_blk_cnt)) {
+		DBG_AIO("AIO ineligible: last block < first or past volume end\n");
+ DBG_FUNC("OUT\n");
+ return true;
+ }
+
+ DBG_FUNC("OUT\n");
+ return false;
+}
+
+/* Compute various attributes of request's location */
+static inline boolean_t
+pqisrc_calc_disk_params(pqisrc_softstate_t *softs, aio_req_locator_t *l, rcb_t *rcb)
+{
+ DBG_FUNC("IN\n");
+
+ /* grab #disks, strip size, and layout map count from raid map */
+ l->row.data_disks =
+ GET_LE16((uint8_t *)&l->raid_map->data_disks_per_row);
+ l->strip_sz =
+ GET_LE16((uint8_t *)(&l->raid_map->strip_size));
+ l->map.layout_map_count =
+ GET_LE16((uint8_t *)(&l->raid_map->layout_map_count));
+
+ /* Calculate stripe information for the request. */
+ l->row.blks_per_row = l->row.data_disks * l->strip_sz;
+ if (!l->row.blks_per_row || !l->strip_sz) {
+		DBG_AIO("AIO ineligible: zero strip size or row length\n");
+ DBG_FUNC("OUT\n");
+ return false;
+ }
+ /* use __udivdi3 ? */
+ rcb->blocks_per_row = l->row.blks_per_row;
+ l->row.first = l->block.first / l->row.blks_per_row;
+ rcb->row_num = l->row.first;
+ l->row.last = l->block.last / l->row.blks_per_row;
+ l->row.offset_first = (uint32_t)(l->block.first -
+ (l->row.first * l->row.blks_per_row));
+ l->row.offset_last = (uint32_t)(l->block.last -
+ (l->row.last * l->row.blks_per_row));
+ l->col.first = l->row.offset_first / l->strip_sz;
+ l->col.last = l->row.offset_last / l->strip_sz;
+
+ DBG_FUNC("OUT\n");
+ return true;
+}
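
A worked example of the geometry above, for a hypothetical layout with 3 data disks and a 128-block strip (blks_per_row = 384):

	/*
	 * first_block = 1000:
	 *   row.first        = 1000 / 384      = 2
	 *   row.offset_first = 1000 - 2 * 384  = 232
	 *   col.first        = 232 / 128       = 1
	 *
	 * i.e. the I/O starts in row 2, data column 1, 104 blocks
	 * into that strip.
	 */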
+
+/* Not AIO-eligible if it isn't a single row/column. */
+static inline boolean_t
+pqisrc_is_single_row_column(pqisrc_softstate_t *softs, aio_req_locator_t *l)
+{
+ boolean_t ret = true;
+ DBG_FUNC("IN\n");
+
+ if (l->row.first != l->row.last || l->col.first != l->col.last) {
+ DBG_AIO("AIO ineligible\n");
+ ret = false;
+ }
+ DBG_FUNC("OUT\n");
+ return ret;
+}
+
+/* Figure out total disks per row, the map row, and the map index. */
+static inline boolean_t
+pqisrc_set_map_row_and_idx(pqisrc_softstate_t *softs, aio_req_locator_t *l, rcb_t *rcb)
+{
+ if (!l->row.data_disks) {
+ DBG_INFO("AIO ineligible: no data disks?\n");
+ return false;
+ }
+
+ l->row.total_disks = l->row.data_disks +
+ LE_16(l->raid_map->metadata_disks_per_row);
+
+ l->map.row = ((uint32_t)(l->row.first >>
+ l->raid_map->parity_rotation_shift)) %
+ GET_LE16((uint8_t *)(&l->raid_map->row_cnt));
+
+ l->map.idx = (l->map.row * l->row.total_disks) + l->col.first;
+ rcb->raid_map_index = l->map.idx;
+ rcb->raid_map_row = l->map.row;
+
+ return true;
+}
+
+/* Select the next read mirror for a RAID 1/10/ADM volume */
+static inline void
+pqisrc_set_read_mirror(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device, aio_req_locator_t *l)
+{
+ /* Avoid direct use of device->offload_to_mirror within this
+ * function since multiple threads might simultaneously
+ * increment it beyond the range of device->layout_map_count -1.
+ */
+
+ int mirror = device->offload_to_mirror[l->map.idx];
+ int next_mirror = mirror + 1;
+
+ if (next_mirror >= l->map.layout_map_count)
+ next_mirror = 0;
+
+ device->offload_to_mirror[l->map.idx] = next_mirror;
+ l->map.idx += mirror * l->row.data_disks;
+}
+
+/* collect ioaccel handles for mirrors of given location. */
+static inline boolean_t
+pqisrc_set_write_mirrors(
+ pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device,
+ aio_req_locator_t *l,
+ rcb_t *rcb)
+{
+ uint32_t mirror = 0;
+ uint32_t index;
+
+ if (l->map.layout_map_count > PQISRC_MAX_SUPPORTED_MIRRORS)
+ return false;
+
+ do {
+ index = l->map.idx + (l->row.data_disks * mirror);
+ rcb->it_nexus[mirror] =
+ l->raid_map->dev_data[index].ioaccel_handle;
+ mirror++;
+ } while (mirror != l->map.layout_map_count);
+
+ return true;
+}
+
+/* Make sure first and last block are in the same R5/R6 RAID group. */
+static inline boolean_t
+pqisrc_is_r5or6_single_group(pqisrc_softstate_t *softs, aio_req_locator_t *l)
+{
+ boolean_t ret = true;
+
+ DBG_FUNC("IN\n");
+ l->r5or6.row.blks_per_row = l->strip_sz * l->row.data_disks;
+ l->stripesz = l->r5or6.row.blks_per_row * l->map.layout_map_count;
+ l->group.first = (l->block.first % l->stripesz) /
+ l->r5or6.row.blks_per_row;
+ l->group.last = (l->block.last % l->stripesz) /
+ l->r5or6.row.blks_per_row;
+
+ if (l->group.first != l->group.last) {
+		DBG_AIO("AIO ineligible: spans R5/R6 groups\n");
+ ret = false;
+ }
+
+ DBG_FUNC("OUT\n");
+ ASSERT(ret == true);
+ return ret;
+}
+/* Make sure R5 or R6 request doesn't span rows. */
+static inline boolean_t
+pqisrc_is_r5or6_single_row(pqisrc_softstate_t *softs, aio_req_locator_t *l)
+{
+ boolean_t ret = true;
+
+ DBG_FUNC("IN\n");
+
+ /* figure row nums containing first & last block */
+ l->row.first = l->r5or6.row.first =
+ l->block.first / l->stripesz;
+ l->r5or6.row.last = l->block.last / l->stripesz;
+
+ if (l->r5or6.row.first != l->r5or6.row.last) {
+		DBG_AIO("AIO ineligible: spans R5/R6 rows\n");
+ ret = false;
+ }
+
+ DBG_FUNC("OUT\n");
+ ASSERT(ret == true);
+ return ret;
+}
+
+/* Make sure R5 or R6 request doesn't span columns. */
+static inline boolean_t
+pqisrc_is_r5or6_single_column(pqisrc_softstate_t *softs, aio_req_locator_t *l)
+{
+ boolean_t ret = true;
+
+ /* Find the columns of the first and last block */
+ l->row.offset_first = l->r5or6.row.offset_first =
+ (uint32_t)((l->block.first % l->stripesz) %
+ l->r5or6.row.blks_per_row);
+ l->r5or6.row.offset_last =
+ (uint32_t)((l->block.last % l->stripesz) %
+ l->r5or6.row.blks_per_row);
+
+ l->col.first = l->r5or6.row.offset_first / l->strip_sz;
+ l->r5or6.col.first = l->col.first;
+ l->r5or6.col.last = l->r5or6.row.offset_last / l->strip_sz;
+
+ if (l->r5or6.col.first != l->r5or6.col.last) {
+		DBG_AIO("AIO ineligible: spans R5/R6 columns\n");
+ ret = false;
+ }
+
+ ASSERT(ret == true);
+ return ret;
+}
+
+
+/* Set the map row and index for a R5 or R6 AIO request */
+static inline void
+pqisrc_set_r5or6_row_and_index(aio_req_locator_t *l,
+ rcb_t *rcb)
+{
+ l->map.row = ((uint32_t)
+ (l->row.first >> l->raid_map->parity_rotation_shift)) %
+ GET_LE16((uint8_t *)(&l->raid_map->row_cnt));
+
+ l->map.idx = (l->group.first *
+ (GET_LE16((uint8_t *)(&l->raid_map->row_cnt))
+ * l->row.total_disks))
+ + (l->map.row * l->row.total_disks)
+ + l->col.first;
+
+ rcb->raid_map_index = l->map.idx;
+ rcb->raid_map_row = l->map.row;
+}
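+
+/*
+ * Worked example (editor's note, hypothetical geometry): with
+ * row_cnt = 4, row.total_disks = 5, group.first = 1, map.row = 2 and
+ * col.first = 3, the flat index is 1*(4*5) + 2*5 + 3 = 33; the maps for
+ * the RAID 50/60 groups are laid out back to back in dev_data[].
+ */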
+
+/* calculate physical disk block for aio request */
+static inline boolean_t
+pqisrc_calc_aio_block(aio_req_locator_t *l)
+{
+ boolean_t ret = true;
+
+ l->block.disk_block =
+ GET_LE64((uint8_t *) (&l->raid_map->disk_starting_blk))
+ + (l->row.first * l->strip_sz)
+ + ((uint64_t)(l->row.offset_first) - (uint64_t)(l->col.first) * l->strip_sz);
+
+	/* TODO: any values worth checking here? If not, convert the return type to void. */
+ return ret;
+}
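+
+/*
+ * Editor's note: the last term above is just the offset within the
+ * strip; e.g. with strip_sz = 128, row.offset_first = 300 and
+ * col.first = 2 it contributes 300 - 2*128 = 44 blocks.
+ */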
+
+/* Handle differing logical/physical block sizes. */
+static inline uint32_t
+pqisrc_handle_blk_size_diffs(aio_req_locator_t *l)
+{
+ uint32_t disk_blk_cnt;
+ disk_blk_cnt = l->block.cnt;
+
+ if (l->raid_map->phys_blk_shift) {
+ l->block.disk_block <<= l->raid_map->phys_blk_shift;
+ disk_blk_cnt <<= l->raid_map->phys_blk_shift;
+ }
+ return disk_blk_cnt;
+}
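+
+/*
+ * Worked example (editor's note): a volume exposing 4 KiB logical blocks
+ * on 512-byte physical drives carries phys_blk_shift = 3, so a request
+ * for 8 logical blocks at disk block 16 is rescaled to physical block
+ * 16 << 3 = 128 with a count of 8 << 3 = 64.
+ */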
+
+/* Make sure AIO request doesn't exceed the max that AIO device can
+ * handle based on dev type, Raid level, and encryption status.
+ * TODO: make limits dynamic when this becomes possible.
+ */
+inline boolean_t
+pqisrc_aio_req_too_big(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device, rcb_t *rcb,
+ aio_req_locator_t *l, uint32_t disk_blk_cnt)
+{
+ boolean_t ret = false;
+ uint32_t dev_max;
+ uint32_t size = disk_blk_cnt * device->raid_map->volume_blk_size;
+ dev_max = size;
+
+ /* filter for nvme crypto */
+ if (device->is_nvme && rcb->encrypt_enable) {
+ if (softs->max_aio_rw_xfer_crypto_nvme != 0) {
+ dev_max = MIN(dev_max,softs->max_aio_rw_xfer_crypto_nvme);
+ }
+ }
+
+ /* filter for RAID 5/6/50/60 */
+ if (!device->is_physical_device &&
+ (device->raid_level == SA_RAID_5 ||
+ device->raid_level == SA_RAID_51 ||
+ device->raid_level == SA_RAID_6)) {
+ if (softs->max_aio_write_raid5_6 != 0) {
+ dev_max = MIN(dev_max,softs->max_aio_write_raid5_6);
+ }
+ }
+
+ /* filter for RAID ADM */
+ if (!device->is_physical_device &&
+ (device->raid_level == SA_RAID_ADM) &&
+ (softs->max_aio_write_raid1_10_3drv != 0)) {
+ dev_max = MIN(dev_max,
+ softs->max_aio_write_raid1_10_3drv);
+ }
+
+ /* filter for RAID 1/10 */
+ if (!device->is_physical_device &&
+ (device->raid_level == SA_RAID_1) &&
+ (softs->max_aio_write_raid1_10_2drv != 0)) {
+ dev_max = MIN(dev_max,
+ softs->max_aio_write_raid1_10_2drv);
+ }
+
+
+ if (size > dev_max) {
+ DBG_AIO("AIO ineligible: size=%u, max=%u", size, dev_max);
+ ret = true;
+ }
+
+ return ret;
}
-/* Subroutine used to set encryption info as part of RAID bypass IO*/
+
+#ifdef DEBUG_RAID_MAP
static inline void
-pqisrc_set_enc_info(struct pqi_enc_info *enc_info,
- struct raid_map *raid_map, uint64_t first_block)
+pqisrc_aio_show_raid_map(pqisrc_softstate_t *softs, struct raid_map *m)
{
- uint32_t volume_blk_size;
+ int i;
- /*
- * Set the encryption tweak values based on logical block address.
- * If the block size is 512, the tweak value is equal to the LBA.
- * For other block sizes, tweak value is (LBA * block size) / 512.
- */
- volume_blk_size = GET_LE32((uint8_t *)&raid_map->volume_blk_size);
- if (volume_blk_size != 512)
- first_block = (first_block * volume_blk_size) / 512;
+ if (!m) {
+ DBG_WARN("No RAID MAP!\n");
+ return;
+ }
+ DBG_INFO("======= Raid Map ================\n");
+ DBG_INFO("%-25s: 0x%x\n", "StructureSize", m->structure_size);
+ DBG_INFO("%-25s: 0x%x\n", "LogicalBlockSize", m->volume_blk_size);
+ DBG_INFO("%-25s: 0x%lx\n", "LogicalBlockCount", m->volume_blk_cnt);
+ DBG_INFO("%-25s: 0x%x\n", "PhysicalBlockShift", m->phys_blk_shift);
+ DBG_INFO("%-25s: 0x%x\n", "ParityRotationShift",
+ m->parity_rotation_shift);
+ DBG_INFO("%-25s: 0x%x\n", "StripSize", m->strip_size);
+ DBG_INFO("%-25s: 0x%lx\n", "DiskStartingBlock", m->disk_starting_blk);
+ DBG_INFO("%-25s: 0x%lx\n", "DiskBlockCount", m->disk_blk_cnt);
+ DBG_INFO("%-25s: 0x%x\n", "DataDisksPerRow", m->data_disks_per_row);
+ DBG_INFO("%-25s: 0x%x\n", "MetdataDisksPerRow",
+ m->metadata_disks_per_row);
+ DBG_INFO("%-25s: 0x%x\n", "RowCount", m->row_cnt);
+ DBG_INFO("%-25s: 0x%x\n", "LayoutMapCnt", m->layout_map_count);
+ DBG_INFO("%-25s: 0x%x\n", "fEncryption", m->flags);
+ DBG_INFO("%-25s: 0x%x\n", "DEK", m->data_encryption_key_index);
+ for (i = 0; i < RAID_MAP_MAX_ENTRIES; i++) {
+ if (m->dev_data[i].ioaccel_handle == 0)
+ break;
+ DBG_INFO("%-25s: %d: 0x%04x\n", "ioaccel_handle, disk",
+ i, m->dev_data[i].ioaccel_handle);
+ }
+}
+#endif /* DEBUG_RAID_MAP */
- enc_info->data_enc_key_index =
- GET_LE16((uint8_t *)&raid_map->data_encryption_key_index);
- enc_info->encrypt_tweak_upper = ((uint32_t)(((first_block) >> 16) >> 16));
- enc_info->encrypt_tweak_lower = ((uint32_t)(first_block));
+static inline void
+pqisrc_aio_show_locator_info(pqisrc_softstate_t *softs,
+ aio_req_locator_t *l, uint32_t disk_blk_cnt, rcb_t *rcb)
+{
+#ifdef DEBUG_AIO_LOCATOR
+ pqisrc_aio_show_raid_map(softs, l->raid_map);
+
+ DBG_INFO("======= AIO Locator Content, tag#0x%08x =====\n", rcb->tag);
+ DBG_INFO("%-25s: 0x%lx\n", "block.first", l->block.first);
+ DBG_INFO("%-25s: 0x%lx\n", "block.last", l->block.last);
+ DBG_INFO("%-25s: 0x%x\n", "block.cnt", l->block.cnt);
+ DBG_INFO("%-25s: 0x%lx\n", "block.disk_block", l->block.disk_block);
+ DBG_INFO("%-25s: 0x%x\n", "row.blks_per_row", l->row.blks_per_row);
+ DBG_INFO("%-25s: 0x%lx\n", "row.first", l->row.first);
+ DBG_INFO("%-25s: 0x%lx\n", "row.last", l->row.last);
+ DBG_INFO("%-25s: 0x%x\n", "row.offset_first", l->row.offset_first);
+ DBG_INFO("%-25s: 0x%x\n", "row.offset_last", l->row.offset_last);
+ DBG_INFO("%-25s: 0x%x\n", "row.data_disks", l->row.data_disks);
+ DBG_INFO("%-25s: 0x%x\n", "row.total_disks", l->row.total_disks);
+ DBG_INFO("%-25s: 0x%x\n", "col.first", l->col.first);
+ DBG_INFO("%-25s: 0x%x\n", "col.last", l->col.last);
+
+ if (l->raid_level == SA_RAID_5 || l->raid_level == SA_RAID_6) {
+ DBG_INFO("%-25s: 0x%x\n", "r5or6.row.blks_per_row",
+ l->r5or6.row.blks_per_row);
+ DBG_INFO("%-25s: 0x%lx\n", "r5or6.row.first", l->r5or6.row.first);
+ DBG_INFO("%-25s: 0x%lx\n", "r5or6.row.last", l->r5or6.row.last);
+ DBG_INFO("%-25s: 0x%x\n", "r5or6.row.offset_first",
+ l->r5or6.row.offset_first);
+ DBG_INFO("%-25s: 0x%x\n", "r5or6.row.offset_last",
+ l->r5or6.row.offset_last);
+ DBG_INFO("%-25s: 0x%x\n", "r5or6.row.data_disks",
+ l->r5or6.row.data_disks);
+ DBG_INFO("%-25s: 0x%x\n", "r5or6.row.total_disks",
+ l->r5or6.row.total_disks);
+ DBG_INFO("%-25s: 0x%x\n", "r5or6.col.first", l->r5or6.col.first);
+ DBG_INFO("%-25s: 0x%x\n", "r5or6.col.last", l->r5or6.col.last);
+ }
+ DBG_INFO("%-25s: 0x%x\n", "map.row", l->map.row);
+ DBG_INFO("%-25s: 0x%x\n", "map.idx", l->map.idx);
+ DBG_INFO("%-25s: 0x%x\n", "map.layout_map_count",
+ l->map.layout_map_count);
+ DBG_INFO("%-25s: 0x%x\n", "group.first", l->group.first);
+ DBG_INFO("%-25s: 0x%x\n", "group.last", l->group.last);
+ DBG_INFO("%-25s: 0x%x\n", "group.cur", l->group.cur);
+ DBG_INFO("%-25s: %d\n", "is_write", l->is_write);
+ DBG_INFO("%-25s: 0x%x\n", "stripesz", l->stripesz);
+ DBG_INFO("%-25s: 0x%x\n", "strip_sz", l->strip_sz);
+ DBG_INFO("%-25s: %d\n", "offload_to_mirror", l->offload_to_mirror);
+ DBG_INFO("%-25s: %d\n", "raid_level", l->raid_level);
+
+#endif /* DEBUG_AIO_LOCATOR */
}
-/* Subroutine used to parse the scsi opcode and build the CDB for RAID bypass*/
-int
-check_for_scsi_opcode(uint8_t *cdb, boolean_t *is_write, uint64_t *fst_blk,
- uint32_t *blk_cnt)
+/* build the aio cdb */
+inline void
+pqisrc_aio_build_cdb(aio_req_locator_t *l,
+ uint32_t disk_blk_cnt, rcb_t *rcb, uint8_t *cdb)
{
+ uint8_t cdb_length;
- switch (cdb[0]) {
- case SCMD_WRITE_6:
- *is_write = true;
- case SCMD_READ_6:
- *fst_blk = (uint64_t)(((cdb[1] & 0x1F) << 16) |
- (cdb[2] << 8) | cdb[3]);
- *blk_cnt = (uint32_t)cdb[4];
- if (*blk_cnt == 0)
- *blk_cnt = 256;
- break;
- case SCMD_WRITE_10:
- *is_write = true;
- case SCMD_READ_10:
- *fst_blk = (uint64_t)GET_BE32(&cdb[2]);
- *blk_cnt = (uint32_t)GET_BE16(&cdb[7]);
- break;
- case SCMD_WRITE_12:
- *is_write = true;
- case SCMD_READ_12:
- *fst_blk = (uint64_t)GET_BE32(&cdb[2]);
- *blk_cnt = GET_BE32(&cdb[6]);
- break;
- case SCMD_WRITE_16:
- *is_write = true;
- case SCMD_READ_16:
- *fst_blk = GET_BE64(&cdb[2]);
- *blk_cnt = GET_BE32(&cdb[10]);
- break;
- default:
- /* Process via normal I/O path. */
- return PQI_STATUS_FAILURE;
+ if (l->block.disk_block > 0xffffffff) {
+ cdb[0] = l->is_write ? SCMD_WRITE_16 : SCMD_READ_16;
+ cdb[1] = 0;
+ PUT_BE64(l->block.disk_block, &cdb[2]);
+ PUT_BE32(disk_blk_cnt, &cdb[10]);
+ cdb[15] = 0;
+ cdb_length = 16;
+ } else {
+ cdb[0] = l->is_write ? SCMD_WRITE_10 : SCMD_READ_10;
+ cdb[1] = 0;
+ PUT_BE32(l->block.disk_block, &cdb[2]);
+ cdb[6] = 0;
+ PUT_BE16(disk_blk_cnt, &cdb[7]);
+ cdb[9] = 0;
+ cdb_length = 10;
}
- return PQI_STATUS_SUCCESS;
+
+ rcb->cmdlen = cdb_length;
+
}
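+
+/*
+ * Editor's note: READ(10)/WRITE(10) carries a 32-bit LBA and a 16-bit
+ * count, so any disk_block above 0xffffffff (i.e. past 2 TiB with
+ * 512-byte blocks) must use the 16-byte form; everything else fits the
+ * shorter CDB.
+ */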
/* print any arbitrary buffer of length total_len */
@@ -532,6 +1679,8 @@
if (user_buf == NULL)
return;
+ memset(line_buf, 0, LINE_BUF_LEN);
+
/* Print index columns */
if (flags & PRINT_FLAG_HDR_COLUMN)
{
@@ -539,9 +1688,9 @@
{
line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02d ", ii);
if ((line_len + 4) >= LINE_BUF_LEN)
- break;
+ break;
}
- DBG_NOTE("%15.15s:[ %s ]\n", "header", line_buf);
+ DBG_INFO("%15.15s:[ %s ]\n", "header", line_buf);
}
/* Print index columns */
@@ -556,10 +1705,64 @@
buf_consumed++;
if (buf_consumed >= total_len || (line_len + 4) >= LINE_BUF_LEN)
- break;
+ break;
}
- DBG_NOTE("%15.15s:[ %s ]\n", msg, line_buf);
+ DBG_INFO("%15.15s:[ %s ]\n", msg, line_buf);
+ }
+}
+
+/* print CDB with column header */
+void
+pqisrc_show_cdb(pqisrc_softstate_t *softs, char *msg, rcb_t *rcb, uint8_t *cdb)
+{
+ /* Print the CDB contents */
+ pqisrc_print_buffer(softs, msg, cdb, rcb->cmdlen, PRINT_FLAG_HDR_COLUMN);
+}
+
+void
+pqisrc_show_rcb_details(pqisrc_softstate_t *softs, rcb_t *rcb, char *msg, void *err_info)
+{
+ pqi_scsi_dev_t *devp;
+
+ if (rcb == NULL || rcb->dvp == NULL)
+ {
+ DBG_ERR("Invalid rcb or dev ptr! rcb=%p\n", rcb);
+ return;
+ }
+
+ devp = rcb->dvp;
+
+ /* print the host and mapped CDB */
+ DBG_INFO("\n");
+ DBG_INFO("----- Start Dump: %s -----\n", msg);
+ pqisrc_print_buffer(softs, "host cdb", OS_GET_CDBP(rcb), rcb->cmdlen, PRINT_FLAG_HDR_COLUMN);
+ if (OS_GET_CDBP(rcb) != rcb->cdbp)
+ pqisrc_print_buffer(softs, "aio mapped cdb", rcb->cdbp, rcb->cmdlen, 0);
+
+ DBG_INFO("tag=0x%x dir=%u host_timeout=%ums\n", rcb->tag,
+ rcb->data_dir, (uint32_t)rcb->host_timeout_ms);
+
+ DBG_INFO("BTL: %d:%d:%d addr=0x%x\n", devp->bus, devp->target,
+ devp->lun, GET_LE32(devp->scsi3addr));
+
+ if (rcb->path == AIO_PATH)
+ {
+ DBG_INFO("handle=0x%x\n", rcb->ioaccel_handle);
+ DBG_INFO("row=%u blk/row=%u index=%u map_row=%u\n",
+ rcb->row_num, rcb->blocks_per_row, rcb->raid_map_index, rcb->raid_map_row);
+
+ if (err_info)
+ pqisrc_show_aio_error_info(softs, rcb, err_info);
+ }
+
+ else /* RAID path */
+ {
+ if (err_info)
+ pqisrc_show_raid_error_info(softs, rcb, err_info);
}
+
+
+ DBG_INFO("----- Done -----\n\n");
}
@@ -567,238 +1770,116 @@
* Function used to build and send RAID bypass request to the adapter
*/
int
-pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t *cdb)
+pqisrc_build_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device, rcb_t *rcb)
{
- struct raid_map *raid_map;
- boolean_t is_write = false;
- uint32_t map_idx;
- uint64_t fst_blk, lst_blk;
- uint32_t blk_cnt, blks_per_row;
- uint64_t fst_row, lst_row;
- uint32_t fst_row_offset, lst_row_offset;
- uint32_t fst_col, lst_col;
- uint32_t r5or6_blks_per_row;
- uint64_t r5or6_fst_row, r5or6_lst_row;
- uint32_t r5or6_fst_row_offset, r5or6_lst_row_offset;
- uint32_t r5or6_fst_col, r5or6_lst_col;
- uint16_t data_disks_per_row, total_disks_per_row;
- uint16_t layout_map_count;
- uint32_t stripesz;
- uint16_t strip_sz;
- uint32_t fst_grp, lst_grp, cur_grp;
- uint32_t map_row;
- uint64_t disk_block;
uint32_t disk_blk_cnt;
- uint8_t cdb_length;
- int offload_to_mirror;
- int i;
- DBG_FUNC(" IN \n");
- DBG_IO("!!!!!\n");
+ struct aio_req_locator loc;
+ struct aio_req_locator *l = &loc;
+ int rc;
+ memset(l, 0, sizeof(*l));
- /* Check for eligible opcode, get LBA and block count. */
- memcpy(cdb, OS_GET_CDBP(rcb), rcb->cmdlen);
+ DBG_FUNC("IN\n");
- for(i = 0; i < rcb->cmdlen ; i++)
- DBG_IO(" CDB [ %d ] : %x\n",i,cdb[i]);
- if(check_for_scsi_opcode(cdb, &is_write,
- &fst_blk, &blk_cnt) == PQI_STATUS_FAILURE)
- return PQI_STATUS_FAILURE;
- /* Check for write to non-RAID-0. */
- if (is_write && device->raid_level != SA_RAID_0)
+ if (device == NULL) {
+ DBG_INFO("device is NULL\n");
return PQI_STATUS_FAILURE;
-
- if(blk_cnt == 0)
+ }
+ if (device->raid_map == NULL) {
+ DBG_INFO("tag=0x%x BTL: %d:%d:%d Raid map is NULL\n",
+ rcb->tag, device->bus, device->target, device->lun);
return PQI_STATUS_FAILURE;
+ }
- lst_blk = fst_blk + blk_cnt - 1;
- raid_map = device->raid_map;
-
- /* Check for invalid block or wraparound. */
- if (lst_blk >= GET_LE64((uint8_t *)&raid_map->volume_blk_cnt) ||
- lst_blk < fst_blk)
+ /* Check for eligible op, get LBA and block count. */
+ rc = fill_lba_for_scsi_rw(softs, OS_GET_CDBP(rcb), l);
+ if (rc == PQI_STATUS_FAILURE)
return PQI_STATUS_FAILURE;
- data_disks_per_row = GET_LE16((uint8_t *)&raid_map->data_disks_per_row);
- strip_sz = GET_LE16((uint8_t *)(&raid_map->strip_size));
- layout_map_count = GET_LE16((uint8_t *)(&raid_map->layout_map_count));
-
- /* Calculate stripe information for the request. */
- blks_per_row = data_disks_per_row * strip_sz;
- if (!blks_per_row)
- return PQI_STATUS_FAILURE; /*Send the IO in raid path itself, not AIO or raidbypass*/
-
- /* use __udivdi3 ? */
- fst_row = fst_blk / blks_per_row;
- lst_row = lst_blk / blks_per_row;
- fst_row_offset = (uint32_t)(fst_blk - (fst_row * blks_per_row));
- lst_row_offset = (uint32_t)(lst_blk - (lst_row * blks_per_row));
- fst_col = fst_row_offset / strip_sz;
- lst_col = lst_row_offset / strip_sz;
-
- /* If this isn't a single row/column then give to the controller. */
- if (fst_row != lst_row || fst_col != lst_col)
+ if (l->is_write && !pqisrc_is_supported_write(softs, device))
return PQI_STATUS_FAILURE;
- /* Proceeding with driver mapping. */
- total_disks_per_row = data_disks_per_row +
- GET_LE16((uint8_t *)(&raid_map->metadata_disks_per_row));
- map_row = ((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) %
- GET_LE16((uint8_t *)(&raid_map->row_cnt));
- map_idx = (map_row * total_disks_per_row) + fst_col;
-
- /* RAID 1 */
- if (device->raid_level == SA_RAID_1) {
- if (device->offload_to_mirror)
- map_idx += data_disks_per_row;
- device->offload_to_mirror = !device->offload_to_mirror;
- } else if (device->raid_level == SA_RAID_ADM) {
- /* RAID ADM */
- /*
- * Handles N-way mirrors (R1-ADM) and R10 with # of drives
- * divisible by 3.
- */
- offload_to_mirror = device->offload_to_mirror;
- if (offload_to_mirror == 0) {
- /* use physical disk in the first mirrored group. */
- map_idx %= data_disks_per_row;
- } else {
- do {
- /*
- * Determine mirror group that map_idx
- * indicates.
- */
- cur_grp = map_idx / data_disks_per_row;
-
- if (offload_to_mirror != cur_grp) {
- if (cur_grp <
- layout_map_count - 1) {
- /*
- * Select raid index from
- * next group.
- */
- map_idx += data_disks_per_row;
- cur_grp++;
- } else {
- /*
- * Select raid index from first
- * group.
- */
- map_idx %= data_disks_per_row;
- cur_grp = 0;
- }
- }
- } while (offload_to_mirror != cur_grp);
- }
-
- /* Set mirror group to use next time. */
- offload_to_mirror =
- (offload_to_mirror >= layout_map_count - 1) ?
- 0 : offload_to_mirror + 1;
- if(offload_to_mirror >= layout_map_count)
- return PQI_STATUS_FAILURE;
+ l->raid_map = device->raid_map;
+ l->block.last = l->block.first + l->block.cnt - 1;
+ l->raid_level = device->raid_level;
- device->offload_to_mirror = offload_to_mirror;
- /*
- * Avoid direct use of device->offload_to_mirror within this
- * function since multiple threads might simultaneously
- * increment it beyond the range of device->layout_map_count -1.
- */
- } else if ((device->raid_level == SA_RAID_5 ||
- device->raid_level == SA_RAID_6) && layout_map_count > 1) {
- /* RAID 50/60 */
- /* Verify first and last block are in same RAID group */
- r5or6_blks_per_row = strip_sz * data_disks_per_row;
- stripesz = r5or6_blks_per_row * layout_map_count;
+ if (pqisrc_is_invalid_block(softs, l))
+ return PQI_STATUS_FAILURE;
- fst_grp = (fst_blk % stripesz) / r5or6_blks_per_row;
- lst_grp = (lst_blk % stripesz) / r5or6_blks_per_row;
+ if (!pqisrc_calc_disk_params(softs, l, rcb))
+ return PQI_STATUS_FAILURE;
- if (fst_grp != lst_grp)
- return PQI_STATUS_FAILURE;
+ if (!pqisrc_is_single_row_column(softs, l))
+ return PQI_STATUS_FAILURE;
- /* Verify request is in a single row of RAID 5/6 */
- fst_row = r5or6_fst_row =
- fst_blk / stripesz;
- r5or6_lst_row = lst_blk / stripesz;
+ if (!pqisrc_set_map_row_and_idx(softs, l, rcb))
+ return PQI_STATUS_FAILURE;
- if (r5or6_fst_row != r5or6_lst_row)
- return PQI_STATUS_FAILURE;
+ /* Proceeding with driver mapping. */
- /* Verify request is in a single column */
- fst_row_offset = r5or6_fst_row_offset =
- (uint32_t)((fst_blk % stripesz) %
- r5or6_blks_per_row);
- r5or6_lst_row_offset =
- (uint32_t)((lst_blk % stripesz) %
- r5or6_blks_per_row);
+ switch (device->raid_level) {
+ case SA_RAID_1:
+ case SA_RAID_ADM:
+ if (l->is_write) {
+ if (!pqisrc_set_write_mirrors(softs, device, l, rcb))
+ return PQI_STATUS_FAILURE;
+ } else
+ pqisrc_set_read_mirror(softs, device, l);
+ break;
+ case SA_RAID_5:
+ case SA_RAID_6:
+ if (l->map.layout_map_count > 1 || l->is_write) {
- fst_col = r5or6_fst_row_offset / strip_sz;
- r5or6_fst_col = fst_col;
- r5or6_lst_col = r5or6_lst_row_offset / strip_sz;
+ if (!pqisrc_is_r5or6_single_group(softs, l))
+ return PQI_STATUS_FAILURE;
- if (r5or6_fst_col != r5or6_lst_col)
- return PQI_STATUS_FAILURE;
+ if (!pqisrc_is_r5or6_single_row(softs, l))
+ return PQI_STATUS_FAILURE;
- /* Request is eligible */
- map_row =
- ((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) %
- GET_LE16((uint8_t *)(&raid_map->row_cnt));
+ if (!pqisrc_is_r5or6_single_column(softs, l))
+ return PQI_STATUS_FAILURE;
- map_idx = (fst_grp *
- (GET_LE16((uint8_t *)(&raid_map->row_cnt)) *
- total_disks_per_row)) +
- (map_row * total_disks_per_row) + fst_col;
+ pqisrc_set_r5or6_row_and_index(l, rcb);
+ }
+ break;
}
- rcb->ioaccel_handle = raid_map->dev_data[map_idx].ioaccel_handle;
- disk_block = GET_LE64((uint8_t *)(&raid_map->disk_starting_blk)) +
- fst_row * strip_sz +
- (fst_row_offset - fst_col * strip_sz);
- disk_blk_cnt = blk_cnt;
-
- /* Handle differing logical/physical block sizes. */
- if (raid_map->phys_blk_shift) {
- disk_block <<= raid_map->phys_blk_shift;
- disk_blk_cnt <<= raid_map->phys_blk_shift;
+ if (l->map.idx >= RAID_MAP_MAX_ENTRIES) {
+ DBG_INFO("AIO ineligible: index exceeds max map entries");
+ return PQI_STATUS_FAILURE;
}
- if (disk_blk_cnt > 0xffff)
+ rcb->ioaccel_handle =
+ l->raid_map->dev_data[l->map.idx].ioaccel_handle;
+
+ if (!pqisrc_calc_aio_block(l))
return PQI_STATUS_FAILURE;
- /* Build the new CDB for the physical disk I/O. */
- if (disk_block > 0xffffffff) {
- cdb[0] = is_write ? SCMD_WRITE_16 : SCMD_READ_16;
- cdb[1] = 0;
- PUT_BE64(disk_block, &cdb[2]);
- PUT_BE32(disk_blk_cnt, &cdb[10]);
- cdb[14] = 0;
- cdb[15] = 0;
- cdb_length = 16;
- } else {
- cdb[0] = is_write ? SCMD_WRITE_10 : SCMD_READ_10;
- cdb[1] = 0;
- PUT_BE32(disk_block, &cdb[2]);
- cdb[6] = 0;
- PUT_BE16(disk_blk_cnt, &cdb[7]);
- cdb[9] = 0;
- cdb_length = 10;
- }
+ disk_blk_cnt = pqisrc_handle_blk_size_diffs(l);
+
- if (GET_LE16((uint8_t *)(&raid_map->flags)) &
+ /* Set encryption flag if needed. */
+ rcb->encrypt_enable = false;
+ if (GET_LE16((uint8_t *)(&l->raid_map->flags)) &
RAID_MAP_ENCRYPTION_ENABLED) {
- pqisrc_set_enc_info(&rcb->enc_info, raid_map,
- fst_blk);
+ pqisrc_set_enc_info(&rcb->enc_info, l->raid_map,
+ l->block.first);
rcb->encrypt_enable = true;
- } else {
- rcb->encrypt_enable = false;
}
- rcb->cmdlen = cdb_length;
+ if (pqisrc_aio_req_too_big(softs, device, rcb, l, disk_blk_cnt))
+ return PQI_STATUS_FAILURE;
+
+ /* set the cdb ptr to the local bypass cdb */
+ rcb->cdbp = &rcb->bypass_cdb[0];
+
+ /* Build the new CDB for the physical disk I/O. */
+ pqisrc_aio_build_cdb(l, disk_blk_cnt, rcb, rcb->cdbp);
+ pqisrc_aio_show_locator_info(softs, l, disk_blk_cnt, rcb);
- DBG_FUNC("OUT");
+ DBG_FUNC("OUT\n");
return PQI_STATUS_SUCCESS;
}
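+
+/*
+ * Editor's note: the bypass builder above is a straight funnel -- decode
+ * the CDB, validate the geometry (single row/column, single R5/R6 group),
+ * resolve the map index to an ioaccel handle, rescale for the physical
+ * block size, then emit a fresh CDB against the member drive. Any failed
+ * check returns PQI_STATUS_FAILURE and the I/O falls back to the RAID
+ * path.
+ */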
@@ -806,6 +1887,7 @@
/* Function used to submit an AIO TMF to the adapter
* DEVICE_RESET is not supported.
*/
+
static int
pqisrc_send_aio_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
@@ -813,32 +1895,46 @@
int rval = PQI_STATUS_SUCCESS;
pqi_aio_tmf_req_t tmf_req;
ib_queue_t *op_ib_q = NULL;
+ boolean_t is_write;
memset(&tmf_req, 0, sizeof(pqi_aio_tmf_req_t));
- DBG_FUNC("IN");
+ DBG_FUNC("IN\n");
tmf_req.header.iu_type = PQI_REQUEST_IU_AIO_TASK_MANAGEMENT;
tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
tmf_req.req_id = rcb->tag;
tmf_req.error_idx = rcb->tag;
tmf_req.nexus = devp->ioaccel_handle;
- //memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
+ /* memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun)); */
tmf_req.tmf = tmf_type;
tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
op_ib_q = &softs->op_aio_ib_q[0];
+ is_write = pqisrc_cdb_is_write(rcb->cdbp);
+
+ uint64_t lun = rcb->cm_ccb->ccb_h.target_lun;
+ if (lun && (rcb->dvp->is_multi_lun)) {
+ int_to_scsilun(lun, tmf_req.lun);
+ }
+ else {
+ memset(tmf_req.lun, 0, sizeof(tmf_req.lun));
+ }
if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
tmf_req.req_id_to_manage = rcb_to_manage->tag;
tmf_req.nexus = rcb_to_manage->ioaccel_handle;
}
- DBG_INFO("tmf_req.header.iu_type : %x tmf_req.req_id_to_manage :%d \n",tmf_req.header.iu_type,tmf_req.req_id_to_manage);
- DBG_INFO("tmf_req.req_id : %d tmf_req.nexus : %x tmf_req.tmf %x QID : %d\n",tmf_req.req_id,tmf_req.nexus,tmf_req.tmf,op_ib_q->q_id);
+ if (devp->raid_level == SA_RAID_1 ||
+ devp->raid_level == SA_RAID_5 ||
+ devp->raid_level == SA_RAID_6) {
+ if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK && is_write)
+ tmf_req.header.iu_type = PQI_REQUEST_IU_AIO_BYPASS_TASK_MGMT;
+ }
DBG_WARN("aio tmf: iu_type=0x%x req_id_to_manage=0x%x\n",
tmf_req.header.iu_type, tmf_req.req_id_to_manage);
- DBG_WARN("aio tmf: req_id=0x%x nexus=0x%x tmf=0x%x QID=%d\n",
+ DBG_WARN("aio tmf: req_id=0x%x nexus=0x%x tmf=0x%x QID=%u\n",
tmf_req.req_id, tmf_req.nexus, tmf_req.tmf, op_ib_q->q_id);
rcb->path = AIO_PATH;
@@ -858,13 +1954,13 @@
rcb->status = rval;
}
- if (rcb->status != REQUEST_SUCCESS) {
+ if (rcb->status != PQI_STATUS_SUCCESS) {
DBG_ERR_BTL(devp, "Task Management failed tmf_type:%d "
"stat:0x%x\n", tmf_type, rcb->status);
rval = PQI_STATUS_FAILURE;
}
- DBG_FUNC("OUT");
+ DBG_FUNC("OUT\n");
return rval;
}
@@ -879,13 +1975,15 @@
memset(&tmf_req, 0, sizeof(pqi_raid_tmf_req_t));
- DBG_FUNC("IN");
+ DBG_FUNC("IN\n");
tmf_req.header.iu_type = PQI_REQUEST_IU_RAID_TASK_MANAGEMENT;
tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
tmf_req.req_id = rcb->tag;
memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
+ tmf_req.ml_device_lun_number = (uint8_t)rcb->cm_ccb->ccb_h.target_lun;
+
tmf_req.tmf = tmf_type;
tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
@@ -897,7 +1995,7 @@
if (softs->timeout_in_tmf &&
tmf_type == SOP_TASK_MANAGEMENT_LUN_RESET) {
- /* OS_TMF_TIMEOUT_SEC - 1 to accommodate driver processing */
+		/* OS_TMF_TIMEOUT_SEC - 1 to accommodate driver processing */
tmf_req.timeout_in_sec = OS_TMF_TIMEOUT_SEC - 1;
/* if OS tmf timeout is 0, set minimum value for timeout */
if (!tmf_req.timeout_in_sec)
@@ -905,6 +2003,10 @@
}
op_ib_q = &softs->op_raid_ib_q[0];
+
+ DBG_WARN("raid tmf: iu_type=0x%x req_id_to_manage=%d\n",
+ tmf_req.header.iu_type, tmf_req.req_id_to_manage);
+
rcb->path = RAID_PATH;
rcb->req_pending = true;
/* Timedout tmf response goes here */
@@ -922,25 +2024,50 @@
rcb->status = rval;
}
- if (rcb->status != REQUEST_SUCCESS) {
+ if (rcb->status != PQI_STATUS_SUCCESS) {
DBG_NOTE("Task Management failed tmf_type:%d "
"stat:0x%x\n", tmf_type, rcb->status);
rval = PQI_STATUS_FAILURE;
}
- DBG_FUNC("OUT");
+ DBG_FUNC("OUT\n");
return rval;
}
+void
+dump_tmf_details(pqisrc_softstate_t *softs, rcb_t *rcb, char *msg)
+{
+ uint32_t qid = rcb->req_q ? rcb->req_q->q_id : -1;
+
+ DBG_INFO("%s: pending=%d path=%d tag=0x%x=%u qid=%u timeout=%ums\n",
+ msg, rcb->req_pending, rcb->path, rcb->tag,
+ rcb->tag, qid, (uint32_t)rcb->host_timeout_ms);
+}
+
int
pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
{
int ret = PQI_STATUS_SUCCESS;
- DBG_FUNC("IN");
+ DBG_FUNC("IN\n");
+
+ DBG_WARN("sending TMF. io outstanding=%u\n",
+ softs->max_outstanding_io - softs->taglist.num_elem);
+
+ rcb->is_abort_cmd_from_host = true;
rcb->softs = softs;
+	/* No target rcb for general-purpose TMFs like LUN RESET; only ABORT_TASK has one. */
+ if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK)
+ {
+ rcb_to_manage->host_wants_to_abort_this = true;
+ dump_tmf_details(softs, rcb_to_manage, "rcb_to_manage");
+ }
+
+
+ dump_tmf_details(softs, rcb, "rcb");
+
if(!devp->is_physical_device) {
if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
if(rcb_to_manage->path == AIO_PATH) {
@@ -962,93 +2089,236 @@
ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
}
- DBG_FUNC("IN");
+ DBG_FUNC("OUT\n");
return ret;
}
-/*
- * Function used to build and send the vendor general request
- * Used for configuring PQI feature bits between firmware and driver
- */
-int
-pqisrc_build_send_vendor_request(
- pqisrc_softstate_t *softs,
- pqi_vendor_general_request_t *request,
- raid_path_error_info_elem_t *error_info)
+/* return index into the global (softs) counters based on raid level */
+static counter_types_t
+get_counter_index(rcb_t *rcb)
{
- int ret = PQI_STATUS_SUCCESS;
- ib_queue_t *op_ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
- ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
+ if (IS_AIO_PATH(rcb->dvp))
+ return HBA_COUNTER;
+
+ switch (rcb->dvp->raid_level) {
+ case SA_RAID_0: return RAID0_COUNTER;
+ case SA_RAID_1:
+ case SA_RAID_ADM: return RAID1_COUNTER;
+ case SA_RAID_5: return RAID5_COUNTER;
+ case SA_RAID_6: return RAID6_COUNTER;
+ case SA_RAID_UNKNOWN:
+ default:
+ {
+ static boolean_t asserted = false;
+ if (!asserted)
+ {
+ asserted = true;
+ ASSERT(rcb->path == RAID_PATH);
+ ASSERT(0);
+ }
+ return UNKNOWN_COUNTER;
+ }
+ }
+}
- rcb_t *rcb = NULL;
+/* return the counter type as ASCII-string */
+static char *
+counter_type_to_raid_ascii(counter_types_t type)
+{
+ switch (type)
+ {
+ case UNKNOWN_COUNTER: return "Unknown";
+ case HBA_COUNTER: return "HbaPath";
+ case RAID0_COUNTER: return "Raid0";
+ case RAID1_COUNTER: return "Raid1";
+ case RAID5_COUNTER: return "Raid5";
+ case RAID6_COUNTER: return "Raid6";
+ default: return "Unsupported";
+ }
+}
- uint16_t request_id = 0;
+/* return the path as ASCII-string */
+char *
+io_path_to_ascii(IO_PATH_T path)
+{
+ switch (path)
+ {
+ case AIO_PATH: return "Aio";
+ case RAID_PATH: return "Raid";
+ default: return "Unknown";
+ }
+}
- /* Get the tag */
- request_id = pqisrc_get_tag(&softs->taglist);
- if (INVALID_ELEM == request_id) {
- DBG_ERR("Tag not available\n");
- ret = PQI_STATUS_FAILURE;
- goto err_notag;
+/* return the io type as ASCII-string */
+static char *
+io_type_to_ascii(io_type_t io_type)
+{
+ switch (io_type)
+ {
+ case UNKNOWN_IO_TYPE: return "Unknown";
+ case READ_IO_TYPE: return "Read";
+ case WRITE_IO_TYPE: return "Write";
+ case NON_RW_IO_TYPE: return "NonRW";
+ default: return "Unsupported";
}
+}
+
- ((pqi_vendor_general_request_t *)request)->request_id = request_id;
- ((pqi_vendor_general_request_t *)request)->response_queue_id = ob_q->q_id;
+/* return the io type based on cdb */
+io_type_t
+get_io_type_from_cdb(uint8_t *cdb)
+{
+ if (cdb == NULL)
+ return UNKNOWN_IO_TYPE;
- rcb = &softs->rcb[request_id];
+ else if (pqisrc_cdb_is_read(cdb))
+ return READ_IO_TYPE;
- rcb->req_pending = true;
- rcb->tag = request_id;
+ else if (pqisrc_cdb_is_write(cdb))
+ return WRITE_IO_TYPE;
- ret = pqisrc_submit_cmnd(softs, op_ib_q, request);
+ return NON_RW_IO_TYPE;
+}
- if (ret != PQI_STATUS_SUCCESS) {
- DBG_ERR("Unable to submit command\n");
- goto err_out;
- }
+/* increment this counter based on path and read/write */
+OS_ATOMIC64_T
+increment_this_counter(io_counters_t *pcounter, IO_PATH_T path, io_type_t io_type)
+{
+ OS_ATOMIC64_T ret_val;
- ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
- if (ret != PQI_STATUS_SUCCESS) {
- DBG_ERR("Management request timed out!\n");
- goto err_out;
+ if (path == AIO_PATH)
+ {
+ if (io_type == READ_IO_TYPE)
+ ret_val = OS_ATOMIC64_INC(&pcounter->aio_read_cnt);
+ else if (io_type == WRITE_IO_TYPE)
+ ret_val = OS_ATOMIC64_INC(&pcounter->aio_write_cnt);
+ else
+ ret_val = OS_ATOMIC64_INC(&pcounter->aio_non_read_write);
+ }
+ else
+ {
+ if (io_type == READ_IO_TYPE)
+ ret_val = OS_ATOMIC64_INC(&pcounter->raid_read_cnt);
+ else if (io_type == WRITE_IO_TYPE)
+ ret_val = OS_ATOMIC64_INC(&pcounter->raid_write_cnt);
+ else
+ ret_val = OS_ATOMIC64_INC(&pcounter->raid_non_read_write);
}
- ret = rcb->status;
- if (ret) {
- ret = PQI_STATUS_FAILURE;
- if(error_info) {
- // TODO: config table err handling.
- }
- } else {
- if(error_info) {
- ret = PQI_STATUS_SUCCESS;
- memset(error_info, 0, sizeof(*error_info));
- }
+ return ret_val;
+}
+
+/* increment appropriate counter(s) anytime we post a new request */
+static void
+pqisrc_increment_io_counters(pqisrc_softstate_t *softs, rcb_t *rcb)
+{
+ io_type_t io_type = get_io_type_from_cdb(rcb->cdbp);
+ counter_types_t type_index = get_counter_index(rcb);
+ io_counters_t *pcounter = &softs->counters[type_index];
+ OS_ATOMIC64_T ret_val;
+
+ ret_val = increment_this_counter(pcounter, rcb->path, io_type);
+
+#if 1 /* leave this enabled while we gain confidence for each io path */
+ if (ret_val == 1)
+ {
+ char *raid_type = counter_type_to_raid_ascii(type_index);
+ char *path = io_path_to_ascii(rcb->path);
+ char *io_ascii = io_type_to_ascii(io_type);
+
+ DBG_INFO("Got first path/type hit. "
+ "Path=%s RaidType=%s IoType=%s\n",
+ path, raid_type, io_ascii);
}
+#endif
- os_reset_rcb(rcb);
- pqisrc_put_tag(&softs->taglist, ((pqi_vendor_general_request_t *)request)->request_id);
- DBG_FUNC("OUT\n");
- return ret;
+ /* @todo future: may want to make a per-dev counter */
+}
-err_out:
- DBG_ERR("Vender general request submission failed.\n");
- os_reset_rcb(rcb);
- pqisrc_put_tag(&softs->taglist, ((pqi_vendor_general_request_t *)request)->request_id);
-err_notag:
- DBG_FUNC("FAILED \n");
- return ret;
+/* public routine to print a particular counter with header msg */
+void
+print_this_counter(pqisrc_softstate_t *softs, io_counters_t *pcounter, char *msg)
+{
+ io_counters_t counter;
+ uint32_t percent_reads;
+ uint32_t percent_aio;
+
+ if (!softs->log_io_counters)
+ return;
+
+ /* Use a cached copy so percentages are based on the data that is printed */
+ memcpy(&counter, pcounter, sizeof(counter));
+
+ DBG_NOTE("Counter: %s (ptr=%p)\n", msg, pcounter);
+
+ percent_reads = CALC_PERCENT_VS(counter.aio_read_cnt + counter.raid_read_cnt,
+ counter.aio_write_cnt + counter.raid_write_cnt);
+
+ percent_aio = CALC_PERCENT_VS(counter.aio_read_cnt + counter.aio_write_cnt,
+ counter.raid_read_cnt + counter.raid_write_cnt);
+
+ DBG_NOTE(" R/W Percentages: Reads=%3u%% AIO=%3u%%\n", percent_reads, percent_aio);
+
+ /* Print the Read counts */
+ percent_aio = CALC_PERCENT_VS(counter.aio_read_cnt, counter.raid_read_cnt);
+ DBG_NOTE(" Reads : AIO=%8u(%3u%%) RAID=%8u\n",
+ (uint32_t)counter.aio_read_cnt, percent_aio, (uint32_t)counter.raid_read_cnt);
+
+ /* Print the Write counts */
+ percent_aio = CALC_PERCENT_VS(counter.aio_write_cnt, counter.raid_write_cnt);
+ DBG_NOTE(" Writes: AIO=%8u(%3u%%) RAID=%8u\n",
+ (uint32_t)counter.aio_write_cnt, percent_aio, (uint32_t)counter.raid_write_cnt);
+
+ /* Print the Non-Rw counts */
+ percent_aio = CALC_PERCENT_VS(counter.aio_non_read_write, counter.raid_non_read_write);
+ DBG_NOTE(" Non-RW: AIO=%8u(%3u%%) RAID=%8u\n",
+ (uint32_t)counter.aio_non_read_write, percent_aio, (uint32_t)counter.raid_non_read_write);
}
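+
+/*
+ * Editor's note: assuming CALC_PERCENT_VS(a, b) evaluates to the integer
+ * percentage 100*a/(a + b), a snapshot with 300 AIO reads and 100 RAID
+ * reads would print "Reads : AIO=     300( 75%) RAID=     100".
+ */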
-/* return the path as ASCII-string */
-char *
-io_path_to_ascii(IO_PATH_T path)
+/* return true if buffer is all zeroes */
+boolean_t
+is_buffer_zero(void *buffer, uint32_t size)
{
- switch (path)
+ char *buf = buffer;
+ DWORD ii;
+
+ if (buffer == NULL || size == 0)
+ return false;
+
+ for (ii = 0; ii < size; ii++)
{
- case AIO_PATH: return "Aio";
- case RAID_PATH: return "Raid";
- default: return "Unknown";
+ if (buf[ii] != 0x00)
+ return false;
+ }
+ return true;
+}
+
+/* public routine to print all global counter types */
+void
+print_all_counters(pqisrc_softstate_t *softs, uint32_t flags)
+{
+ int ii;
+ io_counters_t *pcounter;
+ char *raid_type;
+
+ for (ii = 0; ii < MAX_IO_COUNTER; ii++)
+ {
+ pcounter = &softs->counters[ii];
+ raid_type = counter_type_to_raid_ascii(ii);
+
+ if ((flags & COUNTER_FLAG_ONLY_NON_ZERO) &&
+ is_buffer_zero(pcounter, sizeof(*pcounter)))
+ {
+ continue;
+ }
+
+ print_this_counter(softs, pcounter, raid_type);
+ }
+
+ if (flags & COUNTER_FLAG_CLEAR_COUNTS)
+ {
+ DBG_NOTE("Clearing all counters\n");
+ memset(softs->counters, 0, sizeof(softs->counters));
}
}
diff --git a/sys/dev/smartpqi/smartpqi_response.c b/sys/dev/smartpqi/smartpqi_response.c
--- a/sys/dev/smartpqi/smartpqi_response.c
+++ b/sys/dev/smartpqi/smartpqi_response.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,12 +32,29 @@
void
pqisrc_process_internal_raid_response_success(pqisrc_softstate_t *softs,rcb_t *rcb)
{
- DBG_FUNC("IN");
+ DBG_FUNC("IN\n");
- rcb->status = REQUEST_SUCCESS;
+ rcb->status = PQI_STATUS_SUCCESS;
rcb->req_pending = false;
- DBG_FUNC("OUT");
+ DBG_FUNC("OUT\n");
+}
+
+/* Safely determine whether a CDB is available and, if so, return the SCSI
+   opcode, or the BMIC command if a BMIC opcode is detected */
+uint8_t
+pqisrc_get_cmd_from_rcb(rcb_t *rcb)
+{
+ uint8_t opcode = 0xFF;
+
+ if (rcb && rcb->cdbp)
+ {
+ opcode = rcb->cdbp[0];
+ if (IS_BMIC_OPCODE(opcode))
+ return rcb->cdbp[6];
+ }
+
+ return opcode;
}
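+
+/*
+ * Editor's note: when IS_BMIC_OPCODE() matches, the SCSI opcode is only
+ * the BMIC wrapper and the command of interest sits in CDB byte 6, which
+ * is why cdbp[6] is returned for logging in that case.
+ */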
/*
@@ -49,33 +66,40 @@
{
raid_path_error_info_elem_t error_info;
- DBG_FUNC("IN");
+ DBG_FUNC("IN\n");
rcb->error_info = (char *) (softs->err_buf_dma_mem.virt_addr) +
(err_idx * PQI_ERROR_BUFFER_ELEMENT_LENGTH);
memcpy(&error_info, rcb->error_info, sizeof(error_info));
- DBG_INFO("error_status 0x%x data_in_result 0x%x data_out_result 0x%x\n",
- error_info.status, error_info.data_in_result, error_info.data_out_result);
-
- rcb->status = REQUEST_FAILED;
+ rcb->status = PQI_STATUS_TIMEOUT;
switch (error_info.data_out_result) {
case PQI_RAID_DATA_IN_OUT_GOOD:
if (error_info.status == PQI_RAID_DATA_IN_OUT_GOOD)
- rcb->status = REQUEST_SUCCESS;
+ rcb->status = PQI_STATUS_SUCCESS;
break;
case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
if (error_info.status == PQI_RAID_DATA_IN_OUT_GOOD ||
error_info.status == PQI_RAID_STATUS_CHECK_CONDITION)
- rcb->status = REQUEST_SUCCESS;
+ rcb->status = PQI_STATUS_SUCCESS;
break;
+ default:
+ DBG_WARN("error_status 0x%x data_in_result 0x%x data_out_result 0x%x cmd rcb tag 0x%x\n",
+ error_info.status, error_info.data_in_result, error_info.data_out_result, rcb->tag);
+ }
+
+ if (rcb->status != PQI_STATUS_SUCCESS)
+ {
+ DBG_INFO("error_status=0x%x data_in=0x%x data_out=0x%x detail=0x%x\n",
+ error_info.status, error_info.data_in_result, error_info.data_out_result,
+ pqisrc_get_cmd_from_rcb(rcb));
}
rcb->req_pending = false;
- DBG_FUNC("OUT");
+ DBG_FUNC("OUT\n");
}
/*
@@ -84,11 +108,11 @@
void
pqisrc_process_io_response_success(pqisrc_softstate_t *softs, rcb_t *rcb)
{
- DBG_FUNC("IN");
+ DBG_FUNC("IN\n");
os_io_response_success(rcb);
- DBG_FUNC("OUT");
+ DBG_FUNC("OUT\n");
}
static void
@@ -120,6 +144,23 @@
}
}
+/* Suppress common errors unless verbose debug flag is on */
+boolean_t
+suppress_innocuous_error_prints(pqisrc_softstate_t *softs, rcb_t *rcb)
+{
+ uint8_t opcode = rcb->cdbp ? rcb->cdbp[0] : 0xFF;
+
+ if ((opcode == SCSI_INQUIRY || /* 0x12 */
+ opcode == SCSI_MODE_SENSE || /* 0x1a */
+ opcode == SCSI_REPORT_LUNS || /* 0xa0 */
+ opcode == SCSI_LOG_SENSE || /* 0x4d */
+ opcode == SCSI_ATA_PASSTHRU16) /* 0x85 */
+ && (softs->err_resp_verbose == false))
+ return true;
+
+ return false;
+}
+
static void
pqisrc_show_sense_data_simple(pqisrc_softstate_t *softs, rcb_t *rcb, sense_data_u_t *sense_data)
{
@@ -136,6 +177,9 @@
void
pqisrc_show_sense_data_full(pqisrc_softstate_t *softs, rcb_t *rcb, sense_data_u_t *sense_data)
{
+ if (suppress_innocuous_error_prints(softs, rcb))
+ return;
+
pqisrc_print_buffer(softs, "sense data", sense_data, 32, 0);
pqisrc_show_sense_data_simple(softs, rcb, sense_data);
@@ -144,6 +188,105 @@
}
+/* dumps the aio error info and sense data then breaks down the output */
+void
+pqisrc_show_aio_error_info(pqisrc_softstate_t *softs, rcb_t *rcb, aio_path_error_info_elem_t *aio_err)
+{
+ DBG_NOTE("\n");
+ DBG_NOTE("aio err: status=0x%x serv_resp=0x%x data_pres=0x%x data_len=0x%x\n",
+ aio_err->status, aio_err->service_resp, aio_err->data_pres, aio_err->data_len);
+
+ pqisrc_print_buffer(softs, "aio err info", aio_err,
+ offsetof(aio_path_error_info_elem_t, data), PRINT_FLAG_HDR_COLUMN);
+
+ pqisrc_show_sense_data_full(softs, rcb, &aio_err->sense_data);
+}
+
+
+/* dumps the raid error info and sense data then breaks down the output */
+void
+pqisrc_show_raid_error_info(pqisrc_softstate_t *softs, rcb_t *rcb, raid_path_error_info_elem_t *raid_err)
+{
+ DBG_NOTE("\n");
+ DBG_NOTE("raid err: data_in=0x%x out=0x%x status=0x%x sense_len=0x%x resp_len=0x%x\n",
+		raid_err->data_in_result, raid_err->data_out_result,
+ raid_err->status, raid_err->sense_data_len, raid_err->resp_data_len);
+
+ pqisrc_print_buffer(softs, "raid err info", raid_err,
+ offsetof(raid_path_error_info_elem_t, data), PRINT_FLAG_HDR_COLUMN);
+
+ pqisrc_show_sense_data_full(softs, rcb, &raid_err->sense_data);
+}
+
+/* return true if this an innocuous error */
+boolean_t
+pqisrc_is_innocuous_error(pqisrc_softstate_t *softs, rcb_t *rcb, void *err_info)
+{
+ uint8_t opcode = rcb->cdbp ? rcb->cdbp[0] : 0xFF;
+
+	/* These SCSI cmds frequently cause "underrun" and other minor "error"
+	   conditions while determining log page length, support, etc. */
+ if (opcode != SCSI_INQUIRY && /* 0x12 */
+ opcode != SCSI_MODE_SENSE && /* 0x1a */
+ opcode != SCSI_REPORT_LUNS && /* 0xa0 */
+ opcode != SCSI_LOG_SENSE && /* 0x4d */
+ opcode != SCSI_ATA_PASSTHRU16) /* 0x85 */
+ {
+ return false;
+ }
+
+ /* treat all cmds above as innocuous unless verbose flag is set. */
+ if (softs->err_resp_verbose == false)
+ return true;
+
+ if (rcb->path == AIO_PATH)
+ {
+ aio_path_error_info_elem_t *aio_err = err_info;
+ uint8_t key, asc, ascq;
+
+ /* Byte[0]=Status=0x51, Byte[1]=service_resp=0x01 */
+ if (aio_err->status == PQI_AIO_STATUS_UNDERRUN &&
+ aio_err->service_resp == PQI_AIO_SERV_RESPONSE_FAILURE)
+ {
+ return true;
+ }
+
+ /* get the key info so we can apply more filters... */
+ pqisrc_extract_sense_data(&aio_err->sense_data, &key, &asc, &ascq);
+
+		/* Seeing a lot of "invalid field in CDB" errors for REPORT LUNS on the
+		   AIO path. Example CDB = a0 00 11 00 00 00 00 00 20 08 00 00.
+		   So filter out the full dump info for now. TODO: should REPORT LUNS
+		   just be sent to the raid path? */
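+		/* sense key 5 = ILLEGAL REQUEST, asc 0x24 = INVALID FIELD IN CDB */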
+ if (opcode == SCSI_REPORT_LUNS &&
+ key == 5 && asc == 0x24)
+ {
+ pqisrc_show_sense_data_simple(softs, rcb, &aio_err->sense_data);
+ return true;
+ }
+
+ /* may want to return true here eventually? */
+ }
+ else
+ {
+ raid_path_error_info_elem_t *raid_err = err_info;
+
+ /* Byte[1]=data_out=0x01 */
+ if (raid_err->data_out_result == PQI_RAID_DATA_IN_OUT_UNDERFLOW)
+ return true;
+
+		/* We get these a lot: leave a tiny breadcrumb about the error,
+		   but don't do the full spew about it */
+ if (raid_err->status == PQI_AIO_STATUS_CHECK_CONDITION)
+ {
+ pqisrc_show_sense_data_simple(softs, rcb, &raid_err->sense_data);
+ return true;
+ }
+ }
+
+ return false;
+}
+
/*
* Process the error info for AIO in the case of failure.
*/
@@ -153,20 +296,30 @@
{
aio_path_error_info_elem_t *err_info = NULL;
- DBG_FUNC("IN");
+ DBG_FUNC("IN\n");
+
+ ASSERT(rcb->path == AIO_PATH);
err_info = (aio_path_error_info_elem_t*)
softs->err_buf_dma_mem.virt_addr +
err_idx;
if(err_info == NULL) {
- DBG_ERR("err_info structure is NULL err_idx :%x", err_idx);
+ DBG_ERR("err_info structure is NULL err_idx :%x\n", err_idx);
return;
}
+ /* filter out certain underrun/success "errors" from printing */
+ if (!pqisrc_is_innocuous_error(softs, rcb, err_info)) {
+
+ if (softs->err_resp_verbose == true)
+ pqisrc_show_rcb_details(softs, rcb,
+ "aio error", err_info);
+ }
+
os_aio_response_error(rcb, err_info);
- DBG_FUNC("OUT");
+ DBG_FUNC("OUT\n");
}
/*
@@ -178,20 +331,31 @@
{
raid_path_error_info_elem_t *err_info = NULL;
- DBG_FUNC("IN");
+ DBG_FUNC("IN\n");
+
+ ASSERT(rcb->path == RAID_PATH);
err_info = (raid_path_error_info_elem_t*)
softs->err_buf_dma_mem.virt_addr +
err_idx;
if(err_info == NULL) {
- DBG_ERR("err_info structure is NULL err_idx :%x", err_idx);
+ DBG_ERR("err_info structure is NULL err_idx :%x\n", err_idx);
return;
}
+ /* filter out certain underrun/success "errors" from printing */
+ if (!pqisrc_is_innocuous_error(softs, rcb, err_info)) {
+
+ if( softs->err_resp_verbose == true )
+ pqisrc_show_rcb_details(softs, rcb,
+ "raid error", err_info);
+
+ }
+
os_raid_response_error(rcb, err_info);
- DBG_FUNC("OUT");
+ DBG_FUNC("OUT\n");
}
/*
@@ -201,7 +365,7 @@
pqisrc_process_task_management_response(pqisrc_softstate_t *softs,
pqi_tmf_resp_t *tmf_resp)
{
- int ret = REQUEST_SUCCESS;
+ int ret = PQI_STATUS_SUCCESS;
uint32_t tag = (uint32_t)tmf_resp->req_id;
rcb_t *rcb = &softs->rcb[tag];
@@ -212,18 +376,19 @@
switch (tmf_resp->resp_code) {
case SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE:
case SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED:
- ret = REQUEST_SUCCESS;
+ ret = PQI_STATUS_SUCCESS;
break;
default:
- DBG_WARN("TMF Failed, Response code : 0x%x\n", tmf_resp->resp_code);
- ret = REQUEST_FAILED;
+ DBG_ERR("Tag #0x%08x TMF Failed, Response code : 0x%x\n",
+ rcb->tag, tmf_resp->resp_code);
+ ret = PQI_STATUS_TIMEOUT;
break;
}
rcb->status = ret;
rcb->req_pending = false;
- DBG_FUNC("OUT");
+ DBG_FUNC("OUT\n");
return ret;
}
@@ -231,7 +396,7 @@
pqisrc_process_vendor_general_response(pqi_vendor_general_response_t *response)
{
- int ret = REQUEST_SUCCESS;
+ int ret = PQI_STATUS_SUCCESS;
switch(response->status) {
case PQI_VENDOR_RESPONSE_IU_SUCCESS:
@@ -239,7 +404,7 @@
case PQI_VENDOR_RESPONSE_IU_UNSUCCESS:
case PQI_VENDOR_RESPONSE_IU_INVALID_PARAM:
case PQI_VENDOR_RESPONSE_IU_INSUFF_RESRC:
- ret = REQUEST_FAILED;
+ ret = PQI_STATUS_TIMEOUT;
break;
}
@@ -256,21 +421,22 @@
ob_queue_t *ob_q;
struct pqi_io_response *response;
uint32_t oq_pi, oq_ci;
- pqi_scsi_dev_t *dvp = NULL;
+ pqi_scsi_dev_t *dvp = NULL;
+
- DBG_FUNC("IN");
+ DBG_FUNC("IN\n");
ob_q = &softs->op_ob_q[oq_id - 1]; /* zero for event Q */
oq_ci = ob_q->ci_local;
oq_pi = *(ob_q->pi_virt_addr);
- DBG_INFO("ci : %d pi : %d qid : %d\n", oq_ci, oq_pi, ob_q->q_id);
+ DBG_IO("ci : %u pi : %u qid : %u\n", oq_ci, oq_pi, ob_q->q_id);
while (1) {
+ boolean_t os_scsi_cmd = false;
rcb_t *rcb = NULL;
uint32_t tag = 0;
uint32_t offset;
- boolean_t os_scsi_cmd = false;
if (oq_pi == oq_ci)
break;
@@ -282,7 +448,7 @@
rcb = &softs->rcb[tag];
/* Make sure we are processing a valid response. */
if ((rcb->tag != tag) || (rcb->req_pending == false)) {
- DBG_ERR("No such request pending with tag : %x", tag);
+ DBG_ERR("No such request pending with tag : %x rcb->tag : %x", tag, rcb->tag);
oq_ci = (oq_ci + 1) % ob_q->num_elem;
break;
}
@@ -291,13 +457,21 @@
* pqisrc_wait_on_condition(softs,rcb,timeout).
*/
if (rcb->timedout) {
- DBG_WARN("timed out request completing from firmware, driver already completed it with failure , free the tag %d\n", tag);
+ DBG_WARN("timed out request completing from firmware, driver already completed it with failure , free the tag 0x%x\n", tag);
oq_ci = (oq_ci + 1) % ob_q->num_elem;
os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist, tag);
break;
}
+ if (rcb->host_wants_to_abort_this)
+ {
+ DBG_INFO("cmd that was aborted came back. tag=%u\n", rcb->tag);
+ }
+ if (rcb->is_abort_cmd_from_host)
+ {
+ DBG_INFO("abort cmd came back. tag=%u\n", rcb->tag);
+ }
if (IS_OS_SCSICMD(rcb)) {
dvp = rcb->dvp;
if (dvp)
@@ -306,8 +480,7 @@
DBG_WARN("Received IO completion for the Null device!!!\n");
}
-
- DBG_INFO("response.header.iu_type : %x \n", response->header.iu_type);
+ DBG_IO("response.header.iu_type : %x \n", response->header.iu_type);
switch (response->header.iu_type) {
case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
@@ -315,7 +488,6 @@
rcb->success_cmp_callback(softs, rcb);
if (os_scsi_cmd)
pqisrc_decrement_device_active_io(softs, dvp);
-
break;
case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
@@ -345,6 +517,6 @@
ob_q->ci_local = oq_ci;
PCI_MEM_PUT32(softs, ob_q->ci_register_abs,
- ob_q->ci_register_offset, ob_q->ci_local );
- DBG_FUNC("OUT");
+ ob_q->ci_register_offset, ob_q->ci_local );
+ DBG_FUNC("OUT\n");
}
diff --git a/sys/dev/smartpqi/smartpqi_sis.c b/sys/dev/smartpqi/smartpqi_sis.c
--- a/sys/dev/smartpqi/smartpqi_sis.c
+++ b/sys/dev/smartpqi/smartpqi_sis.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -39,6 +39,7 @@
db_reg &= ~SIS_ENABLE_MSIX;
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, db_reg);
+ OS_SLEEP(1000); /* 1 ms delay for PCI W/R ordering issue */
DBG_FUNC("OUT\n");
}
@@ -55,6 +56,7 @@
db_reg |= SIS_ENABLE_INTX;
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, db_reg);
+ OS_SLEEP(1000); /* 1 ms delay for PCI W/R ordering issue */
if (pqisrc_sis_wait_for_db_bit_to_clear(softs,SIS_ENABLE_INTX)
!= PQI_STATUS_SUCCESS) {
DBG_ERR("Failed to wait for enable intx db bit to clear\n");
@@ -74,6 +76,7 @@
db_reg &= ~SIS_ENABLE_INTX;
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, db_reg);
+ OS_SLEEP(1000); /* 1 ms delay for PCI W/R ordering issue */
DBG_FUNC("OUT\n");
}
@@ -90,7 +93,7 @@
break;
case INTR_TYPE_MSI:
case INTR_TYPE_MSIX:
- sis_disable_msix(softs);
+ sis_disable_msix(softs);
break;
default:
DBG_ERR("Inerrupt mode none!\n");
@@ -124,6 +127,7 @@
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, LE_32(REENABLE_SIS));
+ OS_SLEEP(1000); /* 1 ms delay for PCI W/R ordering issue */
COND_WAIT(((PCI_MEM_GET32(softs, &softs->ioa_reg->ioa_to_host_db, LEGACY_SIS_ODBR_R) &
REENABLE_SIS) == 0), timeout)
@@ -284,6 +288,21 @@
softs->pqi_cap.max_sg_elem = mb[1];
softs->pqi_cap.max_transfer_size = mb[2];
softs->pqi_cap.max_outstanding_io = mb[3];
+ if (softs->pqi_cap.max_outstanding_io >
+ PQISRC_MAX_OUTSTANDING_REQ) {
+ DBG_WARN("Controller-supported max outstanding "
+ "commands %u reduced to %d to align with "
+ "driver-supported max.\n",
+ softs->pqi_cap.max_outstanding_io,
+ PQISRC_MAX_OUTSTANDING_REQ);
+ softs->pqi_cap.max_outstanding_io =
+ PQISRC_MAX_OUTSTANDING_REQ;
+ }
+
+#ifdef DEVICE_HINT
+ bsd_set_hint_adapter_cap(softs);
+#endif
+
softs->pqi_cap.conf_tab_off = mb[4];
softs->pqi_cap.conf_tab_sz = mb[5];
@@ -295,6 +314,11 @@
softs->pqi_cap.max_transfer_size);
DBG_INIT("max_outstanding_io = %x\n",
softs->pqi_cap.max_outstanding_io);
+ /* DBG_INIT("config_table_offset = %x\n",
+ softs->pqi_cap.conf_tab_off);
+ DBG_INIT("config_table_size = %x\n",
+ softs->pqi_cap.conf_tab_sz);
+ */
}
DBG_FUNC("OUT\n");
@@ -318,7 +342,7 @@
memset(&init_struct_mem, 0, sizeof(struct dma_mem));
init_struct_mem.size = sizeof(struct init_base_struct);
init_struct_mem.align = PQISRC_INIT_STRUCT_DMA_ALIGN;
- init_struct_mem.tag = "init_struct";
+ os_strlcpy(init_struct_mem.tag, "init_struct", sizeof(init_struct_mem.tag));
ret = os_dma_mem_alloc(softs, &init_struct_mem);
if (ret) {
DBG_ERR("Failed to Allocate error buffer ret : %d\n",
@@ -337,7 +361,7 @@
/* Allocate error buffer */
softs->err_buf_dma_mem.align = PQISRC_ERR_BUF_DMA_ALIGN;
- softs->err_buf_dma_mem.tag = "error_buffer";
+ os_strlcpy(softs->err_buf_dma_mem.tag, "error_buffer", sizeof(softs->err_buf_dma_mem.tag));
ret = os_dma_mem_alloc(softs, &softs->err_buf_dma_mem);
if (ret) {
DBG_ERR("Failed to Allocate error buffer ret : %d\n",
@@ -422,7 +446,7 @@
if (ext_prop & SIS_SUPPORT_PQI_RESET_QUIESCE)
softs->pqi_reset_quiesce_allowed = true;
- /* Send GET_COMM_PREFERRED_SETTINGS (26h) */
+	/* Send GET_COMM_PREFERRED_SETTINGS (26h). TODO: is this required? */
ret = pqisrc_get_preferred_settings(softs);
if (ret) {
DBG_ERR("Failed to get adapter pref settings\n");
diff --git a/sys/dev/smartpqi/smartpqi_structures.h b/sys/dev/smartpqi/smartpqi_structures.h
--- a/sys/dev/smartpqi/smartpqi_structures.h
+++ b/sys/dev/smartpqi/smartpqi_structures.h
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,6 +27,9 @@
#ifndef _PQI_STRUCTURES_H
#define _PQI_STRUCTURES_H
+
+#include "smartpqi_defines.h"
+
struct bmic_host_wellness_driver_version {
uint8_t start_tag[4];
uint8_t driver_version_tag[2];
@@ -36,6 +39,7 @@
}OS_ATTRIBUTE_PACKED;
+
struct bmic_host_wellness_time {
uint8_t start_tag[4];
uint8_t time_tag[2];
@@ -53,6 +57,7 @@
}OS_ATTRIBUTE_PACKED;
+
/* As per PQI Spec pqi-2r00a , 6.2.2. */
/* device capability register , for admin q table 24 */
@@ -187,15 +192,17 @@
/* Memory descriptor for DMA memory allocation */
typedef struct dma_mem {
void *virt_addr;
- dma_addr_t dma_addr;
+ dma_addr_t dma_addr;
uint32_t size;
uint32_t align;
- char *tag;
- bus_dma_tag_t dma_tag;
- bus_dmamap_t dma_map;
+ char tag[32];
+ bus_dma_tag_t dma_tag;
+ bus_dmamap_t dma_map;
}dma_mem_t;
-/* Lock should be 8 byte aligned */
+/* Lock should be 8 byte aligned.
+   TODO: does the alignment need to apply to the lock alone?
+*/
#ifndef LOCKFREE_STACK
@@ -433,27 +440,6 @@
uint32_t additional_event_id;
};
-typedef struct pqi_vendor_general_request {
- iu_header_t header;
- uint16_t response_queue_id;
- uint8_t work_area[2];
- uint16_t request_id;
- uint16_t function_code;
- union {
- struct {
- uint16_t first_section;
- uint16_t last_section;
- uint8_t reserved1[48];
- } OS_ATTRIBUTE_PACKED config_table_update;
-
- struct {
- uint64_t buffer_address;
- uint32_t buffer_length;
- uint8_t reserved2[40];
- } OS_ATTRIBUTE_PACKED ofa_memory_allocation;
- } data;
-} OS_ATTRIBUTE_PACKED pqi_vendor_general_request_t;
-
typedef struct pqi_vendor_general_response {
iu_header_t header;
uint16_t reserved1;
@@ -474,29 +460,30 @@
} OS_ATTRIBUTE_PACKED op_q_params;
+
/* "Fixed Format Sense Data" (0x70 or 0x71) (Table 45 in SPC5) */
typedef struct sense_data_fixed {
- uint8_t response_code : 7; // Byte 0, 0x70 or 0x71
- uint8_t valid : 1; // Byte 0, bit 7
- uint8_t byte_1; // Byte 1
- uint8_t sense_key : 4; // Byte 2, bit 0-3 (Key)
- uint8_t byte_2_other : 4; // Byte 2, bit 4-7
- uint32_t information; // Byte 3-6, big-endian like block # in CDB
- uint8_t addtnl_length; // Byte 7
- uint8_t cmd_specific[4]; // Byte 8-11
- uint8_t sense_code; // Byte 12 (ASC)
- uint8_t sense_qual; // Byte 13 (ASCQ)
- uint8_t fru_code; // Byte 14
- uint8_t sense_key_specific[3]; // Byte 15-17
- uint8_t addtnl_sense[1]; // Byte 18+
+ uint8_t response_code : 7; /* Byte 0, 0x70 or 0x71 */
+ uint8_t valid : 1; /* Byte 0, bit 7 */
+ uint8_t byte_1; /* Byte 1 */
+ uint8_t sense_key : 4; /* Byte 2, bit 0-3 (Key) */
+ uint8_t byte_2_other : 4; /* Byte 2, bit 4-7 */
+ uint32_t information; /* Byte 3-6, big-endian like block # in CDB */
+ uint8_t addtnl_length; /* Byte 7 */
+ uint8_t cmd_specific[4]; /* Byte 8-11 */
+ uint8_t sense_code; /* Byte 12 (ASC) */
+ uint8_t sense_qual; /* Byte 13 (ASCQ) */
+ uint8_t fru_code; /* Byte 14 */
+ uint8_t sense_key_specific[3]; /* Byte 15-17 */
+ uint8_t addtnl_sense[1]; /* Byte 18+ */
} OS_ATTRIBUTE_PACKED sense_data_fixed_t;
/* Generic Sense Data Descriptor (Table 29 in SPC5) */
typedef struct descriptor_entry
{
- uint8_t desc_type; // Byte 9/0
- uint8_t desc_type_length; // Byte 10/1
+ uint8_t desc_type; /* Byte 9/0 */
+ uint8_t desc_type_length; /* Byte 10/1 */
union
{
/* Sense data descriptor specific */
@@ -504,10 +491,10 @@
/* Information (Type 0) (Table 31 is SPC5) */
struct {
- uint8_t byte_2_rsvd : 7; // Byte 11/2
- uint8_t valid : 1; // Byte 11/2, bit 7
- uint8_t byte_3; // Byte 12/3
- uint8_t information[8]; // Byte 13-20/4-11
+ uint8_t byte_2_rsvd : 7; /* Byte 11/2 */
+ uint8_t valid : 1; /* Byte 11/2, bit 7 */
+ uint8_t byte_3; /* Byte 12/3 */
+ uint8_t information[8]; /* Byte 13-20/4-11 */
} OS_ATTRIBUTE_PACKED type_0;
}u;
@@ -515,15 +502,15 @@
/* "Descriptor Format Sense Data" (0x72 or 0x73) (Table 28 in SPC5) */
typedef struct sense_data_descriptor {
- uint8_t response_code : 7; // Byte 0, 0x72 or 0x73
- uint8_t byte_0_rsvd: 1; // Byte 0, bit 7
- uint8_t sense_key : 4; // Byte 1, bit 0-3 (Key)
- uint8_t byte_1_other : 4; // Byte 1, bit 4-7
- uint8_t sense_code; // Byte 2 (ASC)
- uint8_t sense_qual; // Byte 3 (ASCQ)
- uint8_t byte4_6[3]; // Byte 4-6
- uint8_t more_length; // Byte 7
- descriptor_entry_t descriptor_list; // Bytes 8+
+ uint8_t response_code : 7; /* Byte 0, 0x72 or 0x73 */
+ uint8_t byte_0_rsvd: 1; /* Byte 0, bit 7 */
+ uint8_t sense_key : 4; /* Byte 1, bit 0-3 (Key) */
+ uint8_t byte_1_other : 4; /* Byte 1, bit 4-7 */
+ uint8_t sense_code; /* Byte 2 (ASC) */
+ uint8_t sense_qual; /* Byte 3 (ASCQ) */
+ uint8_t byte4_6[3]; /* Byte 4-6 */
+ uint8_t more_length; /* Byte 7 */
+ descriptor_entry_t descriptor_list; /* Bytes 8+ */
} OS_ATTRIBUTE_PACKED sense_data_descriptor_t;
@@ -535,20 +522,18 @@
} sense_data_u_t;
-
-
/* Driver will use this structure to interpret the error
info element returned from a failed requests */
typedef struct raid_path_error_info_elem {
- uint8_t data_in_result; /* !< Byte 0. See SOP spec Table 77. */
- uint8_t data_out_result; /* !< Byte 1. See SOP spec Table 78. */
- uint8_t reserved[3]; /* !< Bytes 2-4. */
- uint8_t status; /* !< Byte 5. See SAM-5 specification "Status" codes Table 40. Defined in Storport.h */
- uint16_t status_qual; /* !< Bytes 6-7. See SAM-5 specification Table 43. */
- uint16_t sense_data_len; /* !< Bytes 8-9. See SOP specification table 79. */
- uint16_t resp_data_len; /* !< Bytes 10-11. See SOP specification table 79. */
- uint32_t data_in_transferred; /* !< Bytes 12-15. If "dada_in_result = 0x01 (DATA_IN BUFFER UNDERFLOW)", Indicates the number of contiguous bytes starting with offset zero in Data-In buffer else Ignored. */
- uint32_t data_out_transferred; /* !< Bytes 16-19. If "data_out_result = 0x01 (DATA_OUT BUFFER UNDERFLOW)", Indicates the number of contiguous bytes starting with offset zero in Data-Out buffer else Ignored. */
+ uint8_t data_in_result; /* !< Byte 0. See SOP spec Table 77. */
+ uint8_t data_out_result; /* !< Byte 1. See SOP spec Table 78. */
+ uint8_t reserved[3]; /* !< Bytes 2-4. */
+ uint8_t status; /* !< Byte 5. See SAM-5 specification "Status" codes Table 40.*/
+ uint16_t status_qual; /* !< Bytes 6-7. See SAM-5 specification Table 43. */
+ uint16_t sense_data_len; /* !< Bytes 8-9. See SOP specification table 79. */
+ uint16_t resp_data_len; /* !< Bytes 10-11. See SOP specification table 79. */
+	uint32_t data_in_transferred; /* !< Bytes 12-15. If "data_in_result = 0x01 (DATA_IN BUFFER UNDERFLOW)", indicates the number of contiguous bytes starting with offset zero in the Data-In buffer; else ignored. */
+	uint32_t data_out_transferred;/* !< Bytes 16-19. If "data_out_result = 0x01 (DATA_OUT BUFFER UNDERFLOW)", indicates the number of contiguous bytes starting with offset zero in the Data-Out buffer; else ignored. */
union
{
sense_data_u_t sense_data;
@@ -560,22 +545,26 @@
typedef enum error_data_present
{
- DATA_PRESENT_NO_DATA = 0, /* !< No data present in Data buffer. */
- DATA_PRESENT_RESPONSE_DATA = 1, /* !< Response data is present in Data buffer. */
- DATA_PRESENT_SENSE_DATA = 2 /* !< Sense data is present in Data buffer. */
+ DATA_PRESENT_NO_DATA = 0, /* !< No data present in Data buffer. */
+ DATA_PRESENT_RESPONSE_DATA = 1, /* !< Response data is present in Data buffer. */
+ DATA_PRESENT_SENSE_DATA = 2 /* !< Sense data is present in Data buffer. */
} error_data_present_t;
typedef struct aio_path_error_info_elem
{
- uint8_t status; /* !< Byte 0. See SAM-5 specification "SCSI Status" codes Table 40. Defined in Storport.h */
- uint8_t service_resp; /* !< Byte 1. SCSI Service Response. */
- uint8_t data_pres; /* !< Byte 2. Bits [7:2] reserved. Bits [1:0] - 0=No data, 1=Response data, 2=Sense data. */
- uint8_t reserved1; /* !< Byte 3. Reserved. */
- uint32_t resd_count; /* !< Bytes 4-7. The residual data length in bytes. Need the original transfer size and if Status is OverRun or UnderRun. */
- uint16_t data_len; /* !< Bytes 8-9. The amount of Sense data or Response data returned in Response/Sense Data buffer. */
- uint16_t reserved2; /* !< Bytes 10. Reserved. */
- uint8_t data[256]; /* !< Bytes 11-267. Response data buffer or Sense data buffer but not both. */
- uint8_t padding[8]; /* !< Bytes 268-275. Padding to make AIO_PATH_ERROR_INFO_ELEMENT = RAID_PATH_ERROR_INFO_ELEMENT */
+ uint8_t status; /* !< Byte 0. See SAM-5 specification "SCSI Status" codes Table 40.*/
+ uint8_t service_resp; /* !< Byte 1. SCSI Service Response. */
+ uint8_t data_pres; /* !< Byte 2. Bits [7:2] reserved. Bits [1:0] - 0=No data, 1=Response data, 2=Sense data. */
+ uint8_t reserved1; /* !< Byte 3. Reserved. */
+ uint32_t resd_count; /* !< Bytes 4-7. The residual data length in bytes. Need the original transfer size and if Status is OverRun or UnderRun. */
+ uint16_t data_len; /* !< Bytes 8-9. The amount of Sense data or Response data returned in Response/Sense Data buffer. */
+ uint16_t reserved2; /* !< Bytes 10-11. Reserved. */
+ union
+ {
+		sense_data_u_t sense_data;	/* !< Bytes 12+. Structured view of the sense data (overlays data[]). */
+ uint8_t data[256]; /* !< Bytes 12-267. Response data buffer or Sense data buffer but not both. */
+ };
+ uint8_t padding[8]; /* !< Bytes 268-275. Padding to make AIO_PATH_ERROR_INFO_ELEMENT = RAID_PATH_ERROR_INFO_ELEMENT */
}OS_ATTRIBUTE_PACKED aio_path_error_info_elem_t;
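The padding comment above encodes an ABI requirement: the AIO and RAID error-info elements must be the same size so either can occupy an error-buffer slot. A compile-time check of that invariant could look like the following sketch (C11 _Static_assert; illustrative, not part of the patch):

/* Illustrative only: both error-info element types must occupy the same
 * number of bytes, per the padding comment above. */
_Static_assert(sizeof(aio_path_error_info_elem_t) ==
               sizeof(raid_path_error_info_elem_t),
               "error info elements must be the same size");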
struct init_base_struct {
@@ -603,6 +592,7 @@
boolean_t lockcreated;
char lockname[LOCKNAME_SIZE];
OS_PQILOCK_T lock OS_ATTRIBUTE_ALIGNED(8);
+ struct dma_mem alloc_dma;
}ib_queue_t;
typedef struct ob_queue {
@@ -618,6 +608,7 @@
uint32_t *pi_virt_addr;
dma_addr_t pi_dma_addr;
boolean_t created;
+ struct dma_mem alloc_dma;
}ob_queue_t;
typedef struct pqisrc_sg_desc{
@@ -670,23 +661,33 @@
/* IO path */
-typedef struct pqi_aio_req {
- iu_header_t header;
- uint16_t response_queue_id;
- uint8_t work_area[2];
- uint16_t req_id;
- uint8_t res1[2];
- uint32_t nexus;
- uint32_t buf_len;
+typedef struct iu_cmd_flags
+{
uint8_t data_dir : 2;
uint8_t partial : 1;
uint8_t mem_type : 1;
uint8_t fence : 1;
uint8_t encrypt_enable : 1;
uint8_t res2 : 2;
+}OS_ATTRIBUTE_PACKED iu_cmd_flags_t;
+
+typedef struct iu_attr_prio
+{
uint8_t task_attr : 3;
uint8_t cmd_prio : 4;
uint8_t res3 : 1;
+}OS_ATTRIBUTE_PACKED iu_attr_prio_t;
+
+typedef struct pqi_aio_req {
+ iu_header_t header;
+ uint16_t response_queue_id;
+ uint8_t work_area[2];
+ uint16_t req_id;
+ uint8_t res1[2];
+ uint32_t nexus;
+ uint32_t buf_len;
+ iu_cmd_flags_t cmd_flags;
+ iu_attr_prio_t attr_prio;
uint16_t encrypt_key_index;
uint32_t encrypt_twk_low;
uint32_t encrypt_twk_high;
@@ -699,6 +700,55 @@
sgt_t sg_desc[4];
}OS_ATTRIBUTE_PACKED pqi_aio_req_t;
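Factoring the flag bits out of pqi_aio_req into iu_cmd_flags_t and iu_attr_prio_t only preserves the wire layout if each bit-field group still packs into a single byte. A compile-time sanity check, using the patch's typedefs (illustrative, not part of the diff):

/* Illustrative only: each factored-out bit-field group must still occupy
 * exactly one byte so the request layout is unchanged. */
_Static_assert(sizeof(iu_cmd_flags_t) == 1, "cmd flags must be one byte");
_Static_assert(sizeof(iu_attr_prio_t) == 1, "attr/prio must be one byte");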
+typedef struct pqi_aio_raid1_write_req {
+ iu_header_t header;
+ uint16_t response_queue_id;
+ uint8_t work_area[2];
+ uint16_t req_id;
+ uint16_t volume_id; /* ID of raid volume */
+ uint32_t nexus_1; /* 1st drive in RAID 1 */
+ uint32_t nexus_2; /* 2nd drive in RAID 1 */
+ uint32_t nexus_3; /* 3rd drive in RAID 1 */
+ uint32_t buf_len;
+ iu_cmd_flags_t cmd_flags;
+ iu_attr_prio_t attr_prio;
+ uint16_t encrypt_key_index;
+ uint8_t cdb[16];
+ uint16_t err_idx;
+ uint8_t num_sg;
+ uint8_t cdb_len;
+ uint8_t num_drives; /* drives in raid1 (2 or 3) */
+ uint8_t reserved_bytes[3];
+ uint32_t encrypt_twk_low;
+ uint32_t encrypt_twk_high;
+ sgt_t sg_desc[4];
+}OS_ATTRIBUTE_PACKED pqi_aio_raid1_write_req_t;
+
+typedef struct pqi_aio_raid5or6_write_req {
+ iu_header_t header;
+ uint16_t response_queue_id;
+ uint8_t work_area[2];
+ uint16_t req_id;
+ uint16_t volume_id; /* ID of raid volume */
+ uint32_t data_it_nexus; /* IT nexus of data drive */
+	uint32_t p_parity_it_nexus;/* IT nexus of P parity disk */
+	uint32_t q_parity_it_nexus;/* IT nexus of Q parity disk (R6) */
+ uint32_t buf_len;
+ iu_cmd_flags_t cmd_flags;
+ iu_attr_prio_t attr_prio;
+ uint16_t encrypt_key_index;
+ uint8_t cdb[16];
+ uint16_t err_idx;
+ uint8_t num_sg;
+ uint8_t cdb_len;
+ uint8_t xor_multiplier; /* for generating RAID 6 Q parity */
+ uint8_t reserved[3];
+ uint32_t encrypt_twk_low;
+ uint32_t encrypt_twk_high;
+ uint64_t row; /* logical lba / blocks per row */
+	uint8_t reserved2[8];	/* changed to reserved, used to be stripe_lba */
+ sgt_t sg_desc[3]; /* only 3 entries for R5/6 */
+}OS_ATTRIBUTE_PACKED pqi_aio_raid5or6_write_req_t;
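The row field's comment states the computation outright: the row number is the logical LBA divided by the map's blocks per row. A worked example with made-up values (purely illustrative, not driver code):

#include <stdint.h>

static void row_math_example(void)
{
	uint64_t lba = 1000000;       /* hypothetical first LBA of the request */
	uint32_t blks_per_row = 1024; /* hypothetical value from the RAID map */

	uint64_t row = lba / blks_per_row;    /* 976 */
	uint64_t offset = lba % blks_per_row; /* 576: position within the row */

	(void)row; (void)offset;
}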
typedef struct pqisrc_raid_request {
iu_header_t header;
@@ -722,28 +772,43 @@
uint8_t reserved4 : 2;
uint8_t additional_cdb_bytes_usage : 3;
uint8_t reserved5 : 3;
- uint8_t cdb[16];
- uint8_t reserved[12];
+ union
+ {
+ uint8_t cdb[16];
+ struct
+ {
+ uint8_t op_code; /* Byte 0. SCSI opcode (0x26 or 0x27) */
+ uint8_t lun_lower; /* Byte 1 */
+			uint32_t detail;	/* Bytes 2-5 */
+ uint8_t cmd; /* Byte 6. Vendor specific op code. */
+			uint16_t xfer_len;	/* Bytes 7-8 */
+ uint8_t lun_upper; /* Byte 9 */
+ uint8_t unused[6]; /* Bytes 10-15. */
+ }OS_ATTRIBUTE_PACKED bmic_cdb;
+ }OS_ATTRIBUTE_PACKED cmd;
+ uint8_t reserved[11];
+ uint8_t ml_device_lun_number;
uint32_t timeout_in_sec;
sgt_t sg_descriptors[4];
-} OS_ATTRIBUTE_PACKED pqisrc_raid_req_t;
+}OS_ATTRIBUTE_PACKED pqisrc_raid_req_t;
typedef struct pqi_raid_tmf_req {
- iu_header_t header;
- uint16_t resp_qid;
- uint8_t work_area[2];
- uint16_t req_id;
- uint16_t nexus;
- uint8_t res1[2];
- uint16_t timeout_in_sec;
- uint8_t lun[8];
- uint16_t protocol_spec;
- uint16_t obq_id_to_manage;
- uint16_t req_id_to_manage;
- uint8_t tmf;
- uint8_t res2 : 7;
- uint8_t fence : 1;
+ iu_header_t header;
+ uint16_t resp_qid;
+ uint8_t work_area[2];
+ uint16_t req_id;
+ uint16_t nexus;
+ uint8_t res1[1];
+ uint8_t ml_device_lun_number;
+ uint16_t timeout_in_sec;
+ uint8_t lun[8];
+ uint16_t protocol_spec;
+ uint16_t obq_id_to_manage;
+ uint16_t req_id_to_manage;
+ uint8_t tmf;
+ uint8_t res2 : 7;
+ uint8_t fence : 1;
} OS_ATTRIBUTE_PACKED pqi_raid_tmf_req_t;
typedef struct pqi_aio_tmf_req {
@@ -788,6 +853,12 @@
uint32_t encrypt_tweak_upper;
};
+typedef uint32_t os_ticks_t;
+
+struct pqi_stream_data {
+ uint64_t next_lba;
+ os_ticks_t last_accessed;
+};
typedef struct pqi_scsi_device {
device_type_t devtype; /* as reported by INQUIRY command */
@@ -810,13 +881,13 @@
uint8_t device_gone : 1;
uint8_t new_device : 1;
uint8_t volume_offline : 1;
- uint8_t scsi_rescan : 1;
+ uint8_t is_nvme : 1;
+ uint8_t scsi_rescan : 1;
uint8_t vendor[8]; /* bytes 8-15 of inquiry data */
uint8_t model[16]; /* bytes 16-31 of inquiry data */
uint64_t sas_address;
uint8_t raid_level;
uint16_t queue_depth; /* max. queue_depth for this device */
- uint16_t advertised_queue_depth;
uint32_t ioaccel_handle;
uint8_t volume_status;
uint8_t active_path_index;
@@ -827,7 +898,7 @@
int offload_config; /* I/O accel RAID offload configured */
int offload_enabled; /* I/O accel RAID offload enabled */
int offload_enabled_pending;
- int offload_to_mirror; /* Send next I/O accelerator RAID
+ int *offload_to_mirror; /* Send next I/O accelerator RAID
offload request to mirror drive. */
struct raid_map *raid_map; /* I/O accelerator RAID map */
@@ -838,33 +909,13 @@
boolean_t path_destroyed;
boolean_t firmware_queue_depth_set;
OS_ATOMIC64_T active_requests;
-}pqi_scsi_dev_t;
-
-typedef struct pqisrc_softstate pqisrc_softstate_t;
-typedef struct pqi_firmware_feature pqi_firmware_feature_t;
-typedef void (*feature_status_fn)(pqisrc_softstate_t *softs,
- pqi_firmware_feature_t *firmware_feature);
-
-struct pqi_firmware_feature {
- char *feature_name;
- unsigned int feature_bit;
- boolean_t supported;
- boolean_t enabled;
- feature_status_fn feature_status;
-};
-
-struct pqi_conf_table_firmware_features {
- struct pqi_conf_table_section_header header;
- uint16_t num_elements;
- uint8_t features_supported[];
-};
+ struct pqisrc_softstate *softs;
+ boolean_t schedule_rescan;
+ boolean_t in_remove;
+ struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
+ boolean_t is_multi_lun;
-struct pqi_conf_table_section_info {
- struct pqisrc_softstate *softs;
- void *section;
- uint32_t section_offset;
- void *section_addr;
-};
+}pqi_scsi_dev_t;
struct sense_header_scsi { /* See SPC-3 section 4.5 */
uint8_t response_code; /* permit: 0x0, 0x70, 0x71, 0x72, 0x73 */
@@ -877,8 +928,6 @@
uint8_t additional_length; /* always 0 for fixed sense format */
}OS_ATTRIBUTE_PACKED;
-
-
typedef struct report_lun_header {
uint32_t list_length;
uint8_t extended_response;
@@ -924,7 +973,7 @@
typedef struct reportlun_queue_depth_data {
reportlun_header_t header;
- reportlun_queue_depth_entry_t lun_entries[1]; /* lun list with Queue Depth values for each lun */
+ reportlun_queue_depth_entry_t lun_entries[1]; /* lun list with Queue Depth values for each lun */
}OS_ATTRIBUTE_PACKED reportlun_queue_depth_data_t;
typedef struct raidmap_data {
@@ -958,6 +1007,59 @@
raidmap_data_t dev_data[RAID_MAP_MAX_ENTRIES];
}OS_ATTRIBUTE_PACKED pqisrc_raid_map_t;
+typedef struct aio_row {
+ uint32_t blks_per_row; /* blocks per row */
+ uint64_t first; /* first row */
+ uint64_t last; /* last row */
+ uint32_t offset_first; /* offset in first row */
+ uint32_t offset_last; /* offset in last row */
+ uint16_t data_disks; /* number of data disks per row */
+ uint16_t total_disks; /* data + parity disks per row. */
+}OS_ATTRIBUTE_PACKED pqisrc_aio_row_t;
+
+typedef struct aio_column {
+ uint32_t first; /* 1st column of req */
+ uint32_t last; /* last column of req */
+}OS_ATTRIBUTE_PACKED pqisrc_aio_column_t;
+
+typedef struct aio_block {
+ uint64_t first; /* 1st block number of req */
+ uint64_t last; /* last block number of req */
+ uint32_t cnt; /* total blocks in req */
+ uint64_t disk_block; /* block number of phys disk */
+}OS_ATTRIBUTE_PACKED pqisrc_aio_block_t;
+
+typedef struct aio_r5or6_loc {
+ struct aio_row row; /* row information */
+ struct aio_column col; /* column information */
+}OS_ATTRIBUTE_PACKED pqisrc_aio_r5or6_loc_t;
+
+typedef struct aio_map {
+ uint32_t row;
+ uint32_t idx; /* index into array of handles */
+ uint16_t layout_map_count;
+}OS_ATTRIBUTE_PACKED pqisrc_aio_map_t;
+
+typedef struct aio_disk_group {
+ uint32_t first; /* first group */
+ uint32_t last; /* last group */
+ uint32_t cur; /* current group */
+}OS_ATTRIBUTE_PACKED pqisrc_aio_disk_group_t;
+
+typedef struct aio_req_locator {
+ uint8_t raid_level;
+ struct raid_map *raid_map; /* relevant raid map */
+ struct aio_block block; /* block range and count */
+ struct aio_row row; /* row range and offset info */
+ struct aio_column col; /* first/last column info */
+ struct aio_r5or6_loc r5or6; /* Raid 5/6-specific bits */
+ struct aio_map map; /* map row, count, and index */
+ struct aio_disk_group group; /* first, last, and curr group */
+ boolean_t is_write;
+ uint32_t stripesz;
+ uint16_t strip_sz;
+ int offload_to_mirror;
+}OS_ATTRIBUTE_PACKED aio_req_locator_t;
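aio_req_locator_t ties these pieces together: given a request's block range and the map's blocks-per-row, the row range and per-row offsets in aio_row follow by division and remainder. A minimal sketch under that reading (the helper name is invented, not a driver function):

#include <stdint.h>

/* Minimal sketch: derive the row range of a request from its block range.
 * 'fill_row_range' is an invented name for illustration only. */
static void fill_row_range(uint64_t first_blk, uint64_t last_blk,
    uint32_t blks_per_row, struct aio_row *row)
{
	row->blks_per_row = blks_per_row;
	row->first = first_blk / blks_per_row;
	row->last = last_blk / blks_per_row;
	row->offset_first = first_blk % blks_per_row;
	row->offset_last = last_blk % blks_per_row;
}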
typedef struct bmic_ident_ctrl {
uint8_t conf_ld_count;
@@ -1042,6 +1144,55 @@
uint8_t padding[112];
}OS_ATTRIBUTE_PACKED bmic_ident_physdev_t;
+typedef struct bmic_sense_feature {
+ uint8_t opcode;
+ uint8_t reserved1[1];
+ uint8_t page;
+ uint8_t sub_page;
+ uint8_t reserved2[2];
+ uint8_t cmd;
+ uint16_t transfer_length;
+ uint8_t reserved3[7];
+}OS_ATTRIBUTE_PACKED bmic_sense_feature_t;
+
+typedef struct bmic_sense_feature_buffer_header {
+ uint8_t page;
+ uint8_t sub_page;
+ uint16_t buffer_length;
+} OS_ATTRIBUTE_PACKED bmic_sense_feature_buffer_header_t;
+
+typedef struct bmic_sense_feature_page_header {
+ uint8_t page;
+ uint8_t sub_page;
+ uint16_t total_length; /** Total length of the page.
+					 * The length is the same whether the request buffer is too short or not.
+ * When printing out the page, only print the buffer length. */
+} OS_ATTRIBUTE_PACKED bmic_sense_feature_page_header_t;
+
+typedef struct bmic_sense_feature_page_io {
+ struct bmic_sense_feature_page_header header;
+ uint8_t flags1;
+} OS_ATTRIBUTE_PACKED bmic_sense_feature_page_io_t;
+
+typedef struct bmic_sense_feature_page_io_aio_subpage {
+ struct bmic_sense_feature_page_header header;
+ uint8_t fw_aio_read_support;
+ uint8_t driver_aio_read_support;
+ uint8_t fw_aio_write_support;
+ uint8_t driver_aio_write_support;
+ uint16_t max_aio_rw_xfer_crypto_sas_sata; /* in kb */
+ uint16_t max_aio_rw_xfer_crypto_nvme; /* in kb */
+ uint16_t max_aio_write_raid5_6; /* in kb */
+ uint16_t max_aio_write_raid1_10_2drv; /* in kb */
+ uint16_t max_aio_write_raid1_10_3drv; /* in kb */
+} OS_ATTRIBUTE_PACKED bmic_sense_feature_page_io_aio_subpage_t;
+
+typedef struct bmic_sense_feature_aio_buffer {
+ struct bmic_sense_feature_buffer_header header;
+ struct bmic_sense_feature_page_io_aio_subpage aio_subpage;
+} OS_ATTRIBUTE_PACKED bmic_sense_feature_aio_buffer_t;
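One plausible way a consumer could gate on the returned length before trusting the AIO subpage fields, assuming buffer_length counts the bytes that follow the buffer header (that semantic is an assumption here, not stated by the patch):

/* Sketch, assuming buffer_length covers the bytes after the buffer header;
 * treat the subpage fields as valid only if fully returned. */
static int aio_subpage_complete(const bmic_sense_feature_aio_buffer_t *b)
{
	return b->header.buffer_length >=
	    sizeof(struct bmic_sense_feature_page_io_aio_subpage);
}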
+
+
typedef struct pqisrc_bmic_flush_cache {
uint8_t disable_cache;
uint8_t power_action;
@@ -1067,7 +1218,7 @@
typedef struct request_container_block {
void *req;
void *error_info;
- REQUEST_STATUS_T status;
+ int status;
uint32_t tag;
sgt_t *sg_chain_virt;
dma_addr_t sg_chain_dma;
@@ -1076,34 +1227,71 @@
struct pqisrc_softstate *softs;
success_callback success_cmp_callback;
error_callback error_cmp_callback;
- uint8_t *cdbp;
+ uint8_t *cdbp; /* points to either the bypass_cdb below or original host cdb */
+ uint8_t bypass_cdb[16]; /* bypass cmds will use this cdb memory */
int cmdlen;
uint32_t bcount; /* buffer size in byte */
uint32_t ioaccel_handle;
boolean_t encrypt_enable;
struct pqi_enc_info enc_info;
+ uint32_t row_num;
+ uint32_t blocks_per_row;
+ uint32_t raid_map_index;
+ uint32_t raid_map_row;
ib_queue_t *req_q;
- int path;
+ IO_PATH_T path;
int resp_qid;
boolean_t req_pending;
+ uint32_t it_nexus[PQISRC_MAX_SUPPORTED_MIRRORS];
boolean_t timedout;
int tm_req;
int aio_retry;
+ boolean_t is_abort_cmd_from_host; /* true if this is a TMF abort */
+ boolean_t host_wants_to_abort_this; /* set to true to ID the request targeted by TMF */
+ uint64_t submit_time_user_secs; /* host submit time in user seconds */
+ uint64_t host_timeout_ms; /* original host timeout value in msec */
int cm_flags;
void *cm_data; /* pointer to data in kernel space */
bus_dmamap_t cm_datamap;
uint32_t nseg;
union ccb *cm_ccb;
sgt_t *sgt; /* sg table */
-
}rcb_t;
-typedef struct tid_pool {
- int tid[PQI_MAX_PHYSICALS];
- int index;
-}tid_pool_t;
+typedef struct bit_map {
+ boolean_t bit_vector[MAX_TARGET_BIT];
+}bit_map_t;
-struct pqisrc_softstate {
+typedef enum _io_type
+{
+ UNKNOWN_IO_TYPE, /* IO Type is TBD or cannot be determined */
+ NON_RW_IO_TYPE, /* IO Type is non-Read/Write opcode (could separate BMIC, etc. if we wanted) */
+ READ_IO_TYPE, /* IO Type is SCSI Read */
+ WRITE_IO_TYPE, /* IO Type is SCSI Write */
+} io_type_t;
+
+typedef enum _counter_types
+{
+ UNKNOWN_COUNTER,
+ HBA_COUNTER,
+ RAID0_COUNTER,
+ RAID1_COUNTER,
+ RAID5_COUNTER,
+ RAID6_COUNTER,
+ MAX_IO_COUNTER,
+} counter_types_t;
+
+typedef struct _io_counters
+{
+ OS_ATOMIC64_T raid_read_cnt;
+ OS_ATOMIC64_T raid_write_cnt;
+ OS_ATOMIC64_T aio_read_cnt;
+ OS_ATOMIC64_T aio_write_cnt;
+ OS_ATOMIC64_T raid_non_read_write;
+ OS_ATOMIC64_T aio_non_read_write;
+} io_counters_t;
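counters[] in the softstate below is indexed by counter_types_t, so per-I/O accounting needs a mapping from a device's RAID level to a bucket. One hypothetical mapping (the driver's actual RAID-level constants may be symbolic and differ from these literals):

/* Hypothetical: map a numeric RAID level to a counter bucket. The real
 * driver may use SA_RAID_*-style constants with different values. */
static counter_types_t counter_for_raid_level(uint8_t raid_level)
{
	switch (raid_level) {
	case 0: return RAID0_COUNTER;
	case 1: return RAID1_COUNTER;
	case 5: return RAID5_COUNTER;
	case 6: return RAID6_COUNTER;
	default: return HBA_COUNTER;
	}
}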
+
+typedef struct pqisrc_softstate {
OS_SPECIFIC_T os_specific;
struct ioa_registers *ioa_reg;
struct pqi_registers *pqi_reg;
@@ -1120,11 +1308,7 @@
uint16_t subsysid; /* sub system id */
controller_state_t ctlr_state;
struct dma_mem err_buf_dma_mem;
- struct dma_mem admin_queue_dma_mem;
- struct dma_mem op_ibq_dma_mem;
- struct dma_mem op_obq_dma_mem;
- struct dma_mem event_q_dma_mem;
- struct dma_mem sg_dma_desc[PQISRC_MAX_OUTSTANDING_REQ];
+ struct dma_mem sg_dma_desc[PQISRC_MAX_OUTSTANDING_REQ + 1];
ib_queue_t admin_ib_queue;
ob_queue_t admin_ob_queue;
ob_queue_t event_q;
@@ -1138,12 +1322,14 @@
uint32_t num_op_obq;
uint32_t num_elem_per_op_ibq;
uint32_t num_elem_per_op_obq;
- uint32_t ibq_elem_size;
- uint32_t obq_elem_size;
+ uint32_t max_ibq_elem_size;
+ uint32_t max_obq_elem_size;
pqi_dev_cap_t pqi_dev_cap;
uint16_t max_ib_iu_length_per_fw;
- uint16_t max_ib_iu_length;
- unsigned max_sg_per_iu;
+ uint16_t max_ib_iu_length; /* should be 1152 */
+ uint16_t max_spanning_elems; /* should be 9 spanning elements */
+ unsigned max_sg_per_single_iu_element; /* should be 8 */
+ unsigned max_sg_per_spanning_cmd; /* should be 68, 67 with AIO writes */
uint8_t ib_spanning_supported : 1;
uint8_t ob_spanning_supported : 1;
pqi_event_config_t event_config;
@@ -1151,6 +1337,7 @@
int intr_type;
int intr_count;
int num_cpus_online;
+ int num_devs;
boolean_t share_opq_and_eventq;
rcb_t *rcb;
#ifndef LOCKFREE_STACK
@@ -1162,27 +1349,117 @@
OS_LOCK_T devlist_lock OS_ATTRIBUTE_ALIGNED(8);
char devlist_lock_name[LOCKNAME_SIZE];
pqi_scsi_dev_t *device_list[PQI_MAX_DEVICES][PQI_MAX_MULTILUN];
+ pqi_scsi_dev_t *dev_list[PQI_MAX_DEVICES];
OS_SEMA_LOCK_T scan_lock;
uint8_t lun_count[PQI_MAX_DEVICES];
uint64_t target_sas_addr[PQI_MAX_EXT_TARGETS];
+ uint64_t phys_list_pos;
uint64_t prev_heartbeat_count;
uint64_t *heartbeat_counter_abs_addr;
uint64_t heartbeat_counter_off;
- uint8_t *fw_features_section_abs_addr;
- uint64_t fw_features_section_off;
uint32_t bus_id;
uint32_t device_id;
uint32_t func_id;
+ uint8_t adapter_num; /* globally unique adapter number */
char *os_name;
boolean_t ctrl_online;
uint8_t pqi_reset_quiesce_allowed : 1;
boolean_t ctrl_in_pqi_mode;
- tid_pool_t tid_pool;
+ bit_map_t bit_map;
uint32_t adapterQDepth;
uint32_t dma_mem_consumed;
+ boolean_t adv_aio_capable;
+ boolean_t aio_raid1_write_bypass;
+ boolean_t aio_raid5_write_bypass;
+ boolean_t aio_raid6_write_bypass;
+ boolean_t enable_stream_detection;
+ uint16_t max_aio_write_raid5_6; /* bytes */
+ uint16_t max_aio_write_raid1_10_2drv; /* bytes */
+ uint16_t max_aio_write_raid1_10_3drv; /* bytes */
+ uint16_t max_aio_rw_xfer_crypto_nvme; /* bytes */
+ uint16_t max_aio_rw_xfer_crypto_sas_sata; /* bytes */
+ io_counters_t counters[MAX_IO_COUNTER];
+ boolean_t log_io_counters;
+ boolean_t ld_rescan;
+
+#ifdef PQI_NEED_RESCAN_TIMER_FOR_RBOD_HOTPLUG
+ reportlun_data_ext_t *log_dev_list;
+ size_t log_dev_data_length;
+ uint32_t num_ptraid_targets;
+#endif
boolean_t timeout_in_passthrough;
boolean_t timeout_in_tmf;
-};
+ boolean_t sata_unique_wwn;
+ boolean_t page83id_in_rpl;
+ boolean_t err_resp_verbose;
+
+#ifdef DEVICE_HINT
+ device_hint hint;
+#endif
+
+}pqisrc_softstate_t;
+
+struct pqi_config_table {
+ uint8_t signature[8]; /* "CFGTABLE" */
+ uint32_t first_section_offset; /* offset in bytes from the base */
+ /* address of this table to the */
+ /* first section */
+}OS_ATTRIBUTE_PACKED;
+
+struct pqi_config_table_section_header {
+ uint16_t section_id; /* as defined by the */
+ /* PQI_CONFIG_TABLE_SECTION_* */
+ /* manifest constants above */
+ uint16_t next_section_offset; /* offset in bytes from base */
+ /* address of the table of the */
+ /* next section or 0 if last entry */
+}OS_ATTRIBUTE_PACKED;
+
+struct pqi_config_table_general_info {
+ struct pqi_config_table_section_header header;
+ uint32_t section_length; /* size of this section in bytes */
+ /* including the section header */
+ uint32_t max_outstanding_requests; /* max. outstanding */
+ /* commands supported by */
+ /* the controller */
+ uint32_t max_sg_size; /* max. transfer size of a single */
+ /* command */
+ uint32_t max_sg_per_request; /* max. number of scatter-gather */
+ /* entries supported in a single */
+ /* command */
+}OS_ATTRIBUTE_PACKED;
+
+struct pqi_config_table_firmware_features {
+ struct pqi_config_table_section_header header;
+ uint16_t num_elements;
+ uint8_t features_supported[];
+/* u8 features_requested_by_host[]; */
+/* u8 features_enabled[]; */
+/* The 2 fields below are only valid if the MAX_KNOWN_FEATURE bit is set. */
+/* uint16_t firmware_max_known_feature; */
+/* uint16_t host_max_known_feature; */
+}OS_ATTRIBUTE_PACKED;
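Per the field comments above, first_section_offset and next_section_offset are both relative to the table's base address, with 0 terminating the chain. A sketch of the resulting section walk (illustrative, not driver code):

/* Sketch of a section walk using the offsets documented above; 'base' is
 * the mapped address of the config table. */
static void walk_config_table(uint8_t *base)
{
	struct pqi_config_table *tbl = (struct pqi_config_table *)base;
	uint32_t off = tbl->first_section_offset;

	while (off != 0) {
		struct pqi_config_table_section_header *sec =
		    (struct pqi_config_table_section_header *)(base + off);
		/* dispatch on sec->section_id here */
		off = sec->next_section_offset;
	}
}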
+
+typedef struct pqi_vendor_general_request {
+ iu_header_t header; /* bytes 0-3 */
+ uint16_t response_id; /* bytes 4-5 */
+ uint16_t work; /* bytes 6-7 */
+ uint16_t request_id;
+ uint16_t function_code;
+ union {
+ struct {
+ uint16_t first_section;
+ uint16_t last_section;
+ uint8_t reserved[48];
+ } OS_ATTRIBUTE_PACKED config_table_update;
+
+ struct {
+ uint64_t buffer_address;
+ uint32_t buffer_length;
+ uint8_t reserved[40];
+ } OS_ATTRIBUTE_PACKED ofa_memory_allocation;
+ } data;
+}OS_ATTRIBUTE_PACKED pqi_vendor_general_request_t;
typedef struct vpd_logical_volume_status {
uint8_t peripheral_info;
diff --git a/sys/dev/smartpqi/smartpqi_tag.c b/sys/dev/smartpqi/smartpqi_tag.c
--- a/sys/dev/smartpqi/smartpqi_tag.c
+++ b/sys/dev/smartpqi/smartpqi_tag.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -36,7 +36,7 @@
{
OS_ACQUIRE_SPINLOCK(&(taglist->lock));
- DBG_FUNC("IN\n");
+/* DBG_FUNC("IN\n");*/
ASSERT(taglist->num_elem < taglist->max_elem);
@@ -48,7 +48,7 @@
OS_RELEASE_SPINLOCK(&taglist->lock);
- DBG_FUNC("OUT\n");
+/* DBG_FUNC("OUT\n");*/
}
/*
@@ -101,14 +101,14 @@
goto err_out;
}
- os_strlcpy(taglist->lockname, "tag_lock", LOCKNAME_SIZE);
- ret = os_init_spinlock(softs, &taglist->lock, taglist->lockname);
- if(ret){
- DBG_ERR("tag lock initialization failed\n");
- taglist->lockcreated=false;
- goto err_lock;
+ os_strlcpy(taglist->lockname, "tag_lock", LOCKNAME_SIZE);
+ ret = os_init_spinlock(softs, &taglist->lock, taglist->lockname);
+ if(ret){
+ DBG_ERR("tag lock initialization failed\n");
+ taglist->lockcreated=false;
+ goto err_lock;
}
- taglist->lockcreated = true;
+ taglist->lockcreated = true;
/* indices 1 to max_elem are considered as valid tags */
for (i=1; i <= max_elem; i++) {
@@ -120,8 +120,8 @@
return ret;
err_lock:
- os_mem_free(softs, (char *)taglist->elem_array,
- (taglist->max_elem * sizeof(uint32_t)));
+ os_mem_free(softs, (char *)taglist->elem_array,
+ (taglist->max_elem * sizeof(uint32_t)));
taglist->elem_array = NULL;
err_out:
DBG_FUNC("OUT failed\n");
@@ -139,10 +139,10 @@
(taglist->max_elem * sizeof(uint32_t)));
taglist->elem_array = NULL;
- if(taglist->lockcreated==true){
- os_uninit_spinlock(&taglist->lock);
- taglist->lockcreated = false;
- }
+ if(taglist->lockcreated==true){
+ os_uninit_spinlock(&taglist->lock);
+ taglist->lockcreated = false;
+ }
DBG_FUNC("OUT\n");
}
@@ -215,7 +215,7 @@
union head_list cur_head, new_head;
DBG_FUNC("IN\n");
- DBG_INFO("push tag :%d\n",index);
+ DBG_INFO("push tag :%u\n",index);
if (index >= stack->max_elem) {
ASSERT(false);
@@ -264,7 +264,7 @@
stack->next_index_array[cur_head.top.index] = 0;
stack->num_elem--;
- DBG_INFO("pop tag: %d\n",cur_head.top.index);
+ DBG_INFO("pop tag: %u\n",cur_head.top.index);
DBG_FUNC("OUT\n");
return cur_head.top.index; /*tag*/
}
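The init loop above starts at 1 because, per its comment, indices 1 to max_elem are the valid tags; the pop path writing 0 into next_index_array suggests 0 serves as the empty sentinel. A toy model of that 1-based convention (not the driver's lock-free implementation):

#include <stdint.h>

#define MAX_ELEM 8

/* Toy 1-based tag pool: 0 means "no tag"; valid tags are 1..MAX_ELEM. */
static uint32_t pool[MAX_ELEM];
static int top;

static void tag_pool_init(void)
{
	for (uint32_t t = 1; t <= MAX_ELEM; t++)
		pool[top++] = t;
}

static uint32_t tag_get(void)
{
	return (top > 0) ? pool[--top] : 0; /* 0 signals exhaustion */
}

static void tag_put(uint32_t t)
{
	if (t != 0 && top < MAX_ELEM)
		pool[top++] = t;
}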
diff --git a/sys/modules/smartpqi/Makefile b/sys/modules/smartpqi/Makefile
--- a/sys/modules/smartpqi/Makefile
+++ b/sys/modules/smartpqi/Makefile
@@ -4,7 +4,7 @@
.PATH: ${SRCTOP}/sys/dev/${KMOD}
-SRCS=smartpqi_mem.c smartpqi_intr.c smartpqi_main.c smartpqi_cam.c smartpqi_ioctl.c smartpqi_misc.c smartpqi_sis.c smartpqi_init.c smartpqi_queue.c smartpqi_tag.c smartpqi_cmd.c smartpqi_request.c smartpqi_response.c smartpqi_event.c smartpqi_helper.c smartpqi_discovery.c
+SRCS=smartpqi_mem.c smartpqi_intr.c smartpqi_main.c smartpqi_cam.c smartpqi_ioctl.c smartpqi_misc.c smartpqi_sis.c smartpqi_init.c smartpqi_queue.c smartpqi_tag.c smartpqi_cmd.c smartpqi_request.c smartpqi_response.c smartpqi_event.c smartpqi_helper.c smartpqi_discovery.c smartpqi_features.c
SRCS+= device_if.h bus_if.h pci_if.h opt_scsi.h opt_cam.h
