D24428.diff
diff --git a/sys/dev/smartpqi/smartpqi_cam.c b/sys/dev/smartpqi/smartpqi_cam.c
--- a/sys/dev/smartpqi/smartpqi_cam.c
+++ b/sys/dev/smartpqi/smartpqi_cam.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,11 +33,15 @@
/*
* Set cam sim properties of the smartpqi adapter.
*/
-static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
+static void
+update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
{
pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
cam_sim_softc(sim);
+
+ device_t dev = softs->os_specific.pqi_dev;
+
DBG_FUNC("IN\n");
cpi->version_num = 1;
@@ -50,9 +53,9 @@
cpi->max_target = 1088;
cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE;
cpi->initiator_id = 255;
- strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
- strncpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
- strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
+ strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
+ strlcpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
+ strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
cpi->unit_number = cam_sim_unit(sim);
cpi->bus_id = cam_sim_bus(sim);
cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */
@@ -61,6 +64,11 @@
cpi->transport = XPORT_SPI;
cpi->transport_version = 2;
cpi->ccb_h.status = CAM_REQ_CMP;
+ cpi->hba_vendor = pci_get_vendor(dev);
+ cpi->hba_device = pci_get_device(dev);
+ cpi->hba_subvendor = pci_get_subvendor(dev);
+ cpi->hba_subdevice = pci_get_subdevice(dev);
+
DBG_FUNC("OUT\n");
}
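
The hunk above swaps strncpy() for strlcpy() when filling the CAM inquiry strings. A minimal standalone illustration of the difference, using hypothetical buffers (not driver code):

	#include <string.h>

	void
	copy_example(void)
	{
		char dst[8];

		/* strncpy() copies at most 8 bytes and does NOT NUL-terminate
		 * when the source is longer than the destination. */
		strncpy(dst, "Microsemi", sizeof(dst));	/* dst = "Microsem", unterminated */

		/* strlcpy() always NUL-terminates and returns strlen(src),
		 * so truncation can be detected. */
		size_t n = strlcpy(dst, "Microsemi", sizeof(dst)); /* dst = "Microse", n = 9 */
		(void)n;
	}
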
@@ -68,7 +76,8 @@
/*
* Get transport settings of the smartpqi adapter
*/
-static void get_transport_settings(struct pqisrc_softstate *softs,
+static void
+get_transport_settings(struct pqisrc_softstate *softs,
struct ccb_trans_settings *cts)
{
struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
@@ -94,8 +103,10 @@
/*
* Add the target to CAM layer and rescan, when a new device is found
*/
-void os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) {
- union ccb *ccb;
+void
+os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+ union ccb *ccb;
DBG_FUNC("IN\n");
@@ -121,8 +132,9 @@
/*
* Remove the device from CAM layer when deleted or hot removed
*/
-void os_remove_device(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device) {
+void
+os_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
struct cam_path *tmppath;
DBG_FUNC("IN\n");
@@ -136,6 +148,7 @@
}
xpt_async(AC_LOST_DEVICE, tmppath, NULL);
xpt_free_path(tmppath);
+ softs->device_list[device->target][device->lun] = NULL;
pqisrc_free_device(softs, device);
}
@@ -146,7 +159,8 @@
/*
* Function to release the frozen simq
*/
-static void pqi_release_camq( rcb_t *rcb )
+static void
+pqi_release_camq(rcb_t *rcb)
{
pqisrc_softstate_t *softs;
struct ccb_scsiio *csio;
@@ -167,18 +181,12 @@
DBG_FUNC("OUT\n");
}
-/*
- * Function to dma-unmap the completed request
- */
-static void pqi_unmap_request(void *arg)
+static void
+pqi_synch_request(rcb_t *rcb)
{
- pqisrc_softstate_t *softs;
- rcb_t *rcb;
-
- DBG_IO("IN rcb = %p\n", arg);
+ pqisrc_softstate_t *softs = rcb->softs;
- rcb = (rcb_t *)arg;
- softs = rcb->softs;
+ DBG_IO("IN rcb = %p\n", rcb);
if (!(rcb->cm_flags & PQI_CMD_MAPPED))
return;
@@ -199,9 +207,21 @@
if(rcb->sgt && rcb->nseg)
os_mem_free(rcb->softs, (void*)rcb->sgt,
- rcb->nseg*sizeof(sgt_t));
+ rcb->nseg*sizeof(sgt_t));
- pqisrc_put_tag(&softs->taglist, rcb->tag);
+ DBG_IO("OUT\n");
+}
+
+/*
+ * Function to dma-unmap the completed request
+ */
+static inline void
+pqi_unmap_request(rcb_t *rcb)
+{
+ DBG_IO("IN rcb = %p\n", rcb);
+
+ pqi_synch_request(rcb);
+ pqisrc_put_tag(&rcb->softs->taglist, rcb->tag);
DBG_IO("OUT\n");
}
@@ -218,9 +238,12 @@
DBG_FUNC("IN\n");
+ if (pqisrc_ctrl_offline(softs))
+ return;
+
cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
(uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;
- if(cdb[0] == INQUIRY &&
+ if(cdb[0] == INQUIRY &&
(cdb[1] & SI_EVPD) == 0 &&
(csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
csio->dxfer_len >= SHORT_INQUIRY_LENGTH) {
@@ -231,49 +254,87 @@
/* Let the disks be probed and dealt with via CAM. Only for LD
let it fall through and inquiry be tweaked */
- if( !device || !pqisrc_is_logical_device(device) ||
- (device->devtype != DISK_DEVICE) ||
+ if (!device || !pqisrc_is_logical_device(device) ||
+ (device->devtype != DISK_DEVICE) ||
pqisrc_is_external_raid_device(device)) {
return;
}
- strncpy(inq->vendor, "MSCC",
- SID_VENDOR_SIZE);
- strncpy(inq->product,
- pqisrc_raidlevel_to_string(device->raid_level),
- SID_PRODUCT_SIZE);
+ strncpy(inq->vendor, device->vendor,
+ SID_VENDOR_SIZE);
+ strncpy(inq->product,
+ pqisrc_raidlevel_to_string(device->raid_level),
+ SID_PRODUCT_SIZE);
strncpy(inq->revision, device->volume_offline?"OFF":"OK",
- SID_REVISION_SIZE);
+ SID_REVISION_SIZE);
}
DBG_FUNC("OUT\n");
}
+static void
+pqi_complete_scsi_io(struct ccb_scsiio *csio, rcb_t *rcb)
+{
+ uint32_t release_tag;
+ pqisrc_softstate_t *softs = rcb->softs;
+
+ DBG_IO("IN scsi io = %p\n", csio);
+
+ pqi_synch_request(rcb);
+ smartpqi_fix_ld_inquiry(rcb->softs, csio);
+ pqi_release_camq(rcb);
+ release_tag = rcb->tag;
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist, release_tag);
+ xpt_done((union ccb *)csio);
+
+ DBG_FUNC("OUT\n");
+}
+
/*
* Handle completion of a command - pass results back through the CCB
*/
void
os_io_response_success(rcb_t *rcb)
{
- struct ccb_scsiio *csio;
+ struct ccb_scsiio *csio;
DBG_IO("IN rcb = %p\n", rcb);
- if (rcb == NULL)
+ if (rcb == NULL)
panic("rcb is null");
csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
-
- if (csio == NULL)
+
+ if (csio == NULL)
panic("csio is null");
rcb->status = REQUEST_SUCCESS;
csio->ccb_h.status = CAM_REQ_CMP;
- smartpqi_fix_ld_inquiry(rcb->softs, csio);
- pqi_release_camq(rcb);
- pqi_unmap_request(rcb);
- xpt_done((union ccb *)csio);
+ pqi_complete_scsi_io(csio, rcb);
+
+ DBG_IO("OUT\n");
+}
+
+static void
+copy_sense_data_to_csio(struct ccb_scsiio *csio,
+ uint8_t *sense_data, uint16_t sense_data_len)
+{
+ DBG_IO("IN csio = %p\n", csio);
+
+ memset(&csio->sense_data, 0, csio->sense_len);
+
+ sense_data_len = (sense_data_len > csio->sense_len) ?
+ csio->sense_len : sense_data_len;
+
+ if (sense_data)
+ memcpy(&csio->sense_data, sense_data, sense_data_len);
+
+ if (csio->sense_len > sense_data_len)
+ csio->sense_resid = csio->sense_len - sense_data_len;
+ else
+ csio->sense_resid = 0;
DBG_IO("OUT\n");
}
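
The new copy_sense_data_to_csio() helper centralizes the clamp-and-residual arithmetic that was previously duplicated in the RAID and AIO error paths. A standalone sketch of that arithmetic, with hypothetical names and lengths:

	#include <stdint.h>

	static uint16_t
	clamp_and_resid(uint16_t avail, uint16_t bufsz, uint16_t *resid)
	{
		/* Copy no more sense bytes than the CCB buffer can hold. */
		uint16_t copied = (avail > bufsz) ? bufsz : avail;

		/* Residual = buffer space left unfilled. */
		*resid = (bufsz > copied) ? bufsz - copied : 0;
		return copied;
	}

	/* e.g. 18 bytes of sense into a 32-byte buffer: copied = 18, resid = 14 */
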
@@ -281,7 +342,8 @@
/*
* Error response handling for raid IO
*/
-void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
+void
+os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
{
struct ccb_scsiio *csio;
pqisrc_softstate_t *softs;
@@ -295,10 +357,16 @@
softs = rcb->softs;
- ASSERT(err_info != NULL);
- csio->scsi_status = err_info->status;
csio->ccb_h.status = CAM_REQ_CMP_ERR;
+ if (!err_info || !rcb->dvp) {
+ DBG_ERR("couldn't be accessed! error info = %p, rcb->dvp = %p\n",
+ err_info, rcb->dvp);
+ goto error_out;
+ }
+
+ csio->scsi_status = err_info->status;
+
if (csio->ccb_h.func_code == XPT_SCSI_IO) {
/*
* Handle specific SCSI status values.
@@ -306,30 +374,18 @@
switch(csio->scsi_status) {
case PQI_RAID_STATUS_QUEUE_FULL:
csio->ccb_h.status = CAM_REQ_CMP;
- DBG_ERR("Queue Full error");
+ DBG_ERR("Queue Full error\n");
break;
/* check condition, sense data included */
case PQI_RAID_STATUS_CHECK_CONDITION:
{
- uint16_t sense_data_len =
- LE_16(err_info->sense_data_len);
- uint8_t *sense_data = NULL;
- if (sense_data_len)
- sense_data = err_info->data;
- memset(&csio->sense_data, 0, csio->sense_len);
- sense_data_len = (sense_data_len >
- csio->sense_len) ?
- csio->sense_len :
- sense_data_len;
- if (sense_data)
- memcpy(&csio->sense_data, sense_data,
- sense_data_len);
- if (csio->sense_len > sense_data_len)
- csio->sense_resid = csio->sense_len
- - sense_data_len;
- else
- csio->sense_resid = 0;
- csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
+ uint16_t sense_data_len =
+ LE_16(err_info->sense_data_len);
+ uint8_t *sense_data = NULL;
+ if (sense_data_len)
+ sense_data = err_info->data;
+ copy_sense_data_to_csio(csio, sense_data, sense_data_len);
+ csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
| CAM_AUTOSNS_VALID
| CAM_REQ_CMP_ERR;
@@ -338,44 +394,36 @@
case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
{
- uint32_t resid = 0;
- resid = rcb->bcount-err_info->data_out_transferred;
- csio->resid = resid;
- csio->ccb_h.status = CAM_REQ_CMP;
- break;
+ uint32_t resid = 0;
+ resid = rcb->bcount-err_info->data_out_transferred;
+ csio->resid = resid;
+ csio->ccb_h.status = CAM_REQ_CMP;
}
+ break;
default:
csio->ccb_h.status = CAM_REQ_CMP;
break;
}
}
- if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
- softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
- if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
- xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
- else
- csio->ccb_h.status |= CAM_RELEASE_SIMQ;
- }
-
- pqi_unmap_request(rcb);
- xpt_done((union ccb *)csio);
+error_out:
+ pqi_complete_scsi_io(csio, rcb);
DBG_IO("OUT\n");
}
-
/*
* Error response handling for aio.
*/
-void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
+void
+os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
{
struct ccb_scsiio *csio;
pqisrc_softstate_t *softs;
DBG_IO("IN\n");
- if (rcb == NULL)
+ if (rcb == NULL)
panic("rcb is null");
rcb->status = REQUEST_SUCCESS;
@@ -385,6 +433,13 @@
softs = rcb->softs;
+ if (!err_info || !rcb->dvp) {
+ csio->ccb_h.status = CAM_REQ_CMP_ERR;
+ DBG_ERR("couldn't be accessed! error info = %p, rcb->dvp = %p\n",
+ err_info, rcb->dvp);
+ goto error_out;
+ }
+
switch (err_info->service_resp) {
case PQI_AIO_SERV_RESPONSE_COMPLETE:
csio->ccb_h.status = err_info->status;
@@ -405,6 +460,14 @@
break;
case PQI_AIO_STATUS_AIO_PATH_DISABLED:
DBG_WARN_BTL(rcb->dvp,"AIO Path Disabled\n");
+ /* A timed-out TMF response ends up here */
+ if (rcb->tm_req) {
+ rcb->req_pending = false;
+ rcb->status = REQUEST_SUCCESS;
+ DBG_ERR("AIO Disabled for TMF\n");
+ return;
+ }
+ rcb->dvp->aio_enabled = false;
rcb->dvp->offload_enabled = false;
csio->ccb_h.status |= CAM_REQUEUE_REQ;
break;
@@ -420,18 +483,24 @@
break;
case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
- csio->ccb_h.status = CAM_REQ_CMP;
- break;
+ DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
+ (err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_COMPLETE) ? "COMPLETE" : "SUCCEEDED");
+ rcb->status = REQUEST_SUCCESS;
+ rcb->req_pending = false;
+ return;
case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
- DBG_WARN_BTL(rcb->dvp,"TMF rejected/Incorrect Lun\n");
- csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
- break;
+ DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
+ (err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_REJECTED) ? "REJECTED" : "INCORRECT LUN");
+ rcb->status = REQUEST_FAILED;
+ rcb->req_pending = false;
+ return;
default:
DBG_WARN_BTL(rcb->dvp,"Scsi Status Error\n");
csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
break;
}
+
if(err_info->data_pres == DATA_PRESENT_SENSE_DATA ) {
csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION;
uint8_t *sense_data = NULL;
@@ -440,56 +509,52 @@
sense_data = err_info->data;
DBG_ERR_BTL(rcb->dvp, "SCSI_STATUS_CHECK_COND sense size %u\n",
sense_data_len);
- memset(&csio->sense_data, 0, csio->sense_len);
- if (sense_data)
- memcpy(&csio->sense_data, sense_data, ((sense_data_len >
- csio->sense_len) ? csio->sense_len : sense_data_len));
- if (csio->sense_len > sense_data_len)
- csio->sense_resid = csio->sense_len - sense_data_len;
- else
- csio->sense_resid = 0;
+ copy_sense_data_to_csio(csio, sense_data, sense_data_len);
csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
}
- smartpqi_fix_ld_inquiry(softs, csio);
- pqi_release_camq(rcb);
- pqi_unmap_request(rcb);
- xpt_done((union ccb *)csio);
+error_out:
+ pqi_complete_scsi_io(csio, rcb);
DBG_IO("OUT\n");
}
+static void
+pqi_freeze_ccb(union ccb *ccb)
+{
+ if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
+ ccb->ccb_h.status |= CAM_DEV_QFRZN;
+ xpt_freeze_devq(ccb->ccb_h.path, 1);
+ }
+}
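
pqi_freeze_ccb() follows the standard CAM convention: xpt_freeze_devq() freezes the device queue once, and CAM_DEV_QFRZN in the CCB status records that the consumer must thaw it. A sketch of the matching release side as it typically appears in a peripheral driver's completion handler (generic CAM usage, not part of this patch):

	/* On completion, thaw the device queue if this CCB froze it. */
	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(ccb->ccb_h.path,
		    /*relsim_flags*/0, /*openings*/0, /*arg*/0,
		    /*getcount_only*/0);
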
+
/*
* Command-mapping helper function - populate this command's s/g table.
*/
static void
pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
- pqisrc_softstate_t *softs;
- rcb_t *rcb;
-
- rcb = (rcb_t *)arg;
- softs = rcb->softs;
+ rcb_t *rcb = (rcb_t *)arg;
+ pqisrc_softstate_t *softs = rcb->softs;
+ union ccb *ccb;
- if( error || nseg > softs->pqi_cap.max_sg_elem )
- {
- xpt_freeze_simq(softs->os_specific.sim, 1);
- rcb->cm_ccb->ccb_h.status |= (CAM_REQUEUE_REQ|
- CAM_RELEASE_SIMQ);
- DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n",
+ if (error || nseg > softs->pqi_cap.max_sg_elem) {
+ DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n",
error, nseg, softs->pqi_cap.max_sg_elem);
- pqi_unmap_request(rcb);
- xpt_done((union ccb *)rcb->cm_ccb);
- return;
+ goto error_io;
+ }
+
+ rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t));
+
+ if (!rcb->sgt) {
+ DBG_ERR_BTL(rcb->dvp, "os_mem_alloc() failed; nseg = %d\n", nseg);
+ goto error_io;
}
- rcb->sgt = os_mem_alloc(softs, nseg * sizeof(rcb_t));
rcb->nseg = nseg;
- if (rcb->sgt != NULL) {
- for (int i = 0; i < nseg; i++) {
- rcb->sgt[i].addr = segs[i].ds_addr;
- rcb->sgt[i].len = segs[i].ds_len;
- rcb->sgt[i].flags = 0;
- }
+ for (int i = 0; i < nseg; i++) {
+ rcb->sgt[i].addr = segs[i].ds_addr;
+ rcb->sgt[i].len = segs[i].ds_len;
+ rcb->sgt[i].flags = 0;
}
if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
@@ -506,39 +571,46 @@
if (error) {
rcb->req_pending = false;
- xpt_freeze_simq(softs->os_specific.sim, 1);
- rcb->cm_ccb->ccb_h.status |= (CAM_REQUEUE_REQ
- |CAM_RELEASE_SIMQ);
DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error);
- pqi_unmap_request(rcb);
- xpt_done((union ccb *)rcb->cm_ccb);
+ } else {
+ /* The IO was successfully submitted to the device. */
return;
}
+
+error_io:
+ ccb = rcb->cm_ccb;
+ ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+ pqi_freeze_ccb(ccb);
+ pqi_unmap_request(rcb);
+ xpt_done(ccb);
+ return;
}
/*
- * Function to dma-map the request buffer
+ * Function to dma-map the request buffer
*/
-static int pqi_map_request( rcb_t *rcb )
+static int
+pqi_map_request(rcb_t *rcb)
{
pqisrc_softstate_t *softs = rcb->softs;
- int error = PQI_STATUS_SUCCESS;
+ int bsd_status = BSD_SUCCESS;
union ccb *ccb = rcb->cm_ccb;
DBG_FUNC("IN\n");
/* check that mapping is necessary */
if (rcb->cm_flags & PQI_CMD_MAPPED)
- return(0);
+ return BSD_SUCCESS;
+
rcb->cm_flags |= PQI_CMD_MAPPED;
if (rcb->bcount) {
- error = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
+ bsd_status = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
- if (error != 0){
- DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed = %d count = %d\n",
- error, rcb->bcount);
- return error;
+ if (bsd_status != BSD_SUCCESS && bsd_status != EINPROGRESS) {
+ DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed, return status = %d transfer length = %d\n",
+ bsd_status, rcb->bcount);
+ return bsd_status;
}
} else {
/*
@@ -549,19 +621,21 @@
/* Call IO functions depending on pd or ld */
rcb->status = REQUEST_PENDING;
- error = pqisrc_build_send_io(softs, rcb);
-
+ if (pqisrc_build_send_io(softs, rcb) != PQI_STATUS_SUCCESS) {
+ bsd_status = EIO;
+ }
}
- DBG_FUNC("OUT error = %d\n", error);
+ DBG_FUNC("OUT error = %d\n", bsd_status);
- return error;
+ return bsd_status;
}
/*
* Function to clear the request control block
*/
-void os_reset_rcb( rcb_t *rcb )
+void
+os_reset_rcb(rcb_t *rcb)
{
rcb->error_info = NULL;
rcb->req = NULL;
@@ -572,7 +646,7 @@
rcb->softs = NULL;
rcb->cm_flags = 0;
rcb->cm_data = NULL;
- rcb->bcount = 0;
+ rcb->bcount = 0;
rcb->nseg = 0;
rcb->sgt = NULL;
rcb->cm_ccb = NULL;
@@ -580,12 +654,14 @@
rcb->ioaccel_handle = 0;
rcb->resp_qid = 0;
rcb->req_pending = false;
+ rcb->tm_req = false;
}
/*
* Callback function for the lun rescan
*/
-static void smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
+static void
+smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
{
xpt_free_path(ccb->ccb_h.path);
xpt_free_ccb(ccb);
@@ -595,16 +671,22 @@
/*
* Function to rescan the lun
*/
-static void smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
+static void
+smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
int lun)
{
- union ccb *ccb = NULL;
- cam_status status = 0;
- struct cam_path *path = NULL;
+ union ccb *ccb = NULL;
+ cam_status status = 0;
+ struct cam_path *path = NULL;
DBG_FUNC("IN\n");
ccb = xpt_alloc_ccb_nowait();
+ if (ccb == NULL) {
+ DBG_ERR("Unable to alloc ccb for lun rescan\n");
+ return;
+ }
+
status = xpt_create_path(&path, NULL,
cam_sim_path(softs->os_specific.sim), target, lun);
if (status != CAM_REQ_CMP) {
@@ -628,7 +710,8 @@
/*
* Function to rescan the lun under each target
*/
-void smartpqi_target_rescan(struct pqisrc_softstate *softs)
+void
+smartpqi_target_rescan(struct pqisrc_softstate *softs)
{
int target = 0, lun = 0;
@@ -648,7 +731,8 @@
/*
* Set the mode of tagged command queueing for the current task.
*/
-uint8_t os_get_task_attr(rcb_t *rcb)
+uint8_t
+os_get_task_attr(rcb_t *rcb)
{
union ccb *ccb = rcb->cm_ccb;
uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
@@ -671,19 +755,24 @@
/*
* Complete all outstanding commands
*/
-void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
+void
+os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
{
int tag = 0;
+ pqi_scsi_dev_t *dvp = NULL;
DBG_FUNC("IN\n");
- for (tag = 1; tag < softs->max_outstanding_io; tag++) {
+ for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
rcb_t *prcb = &softs->rcb[tag];
+ dvp = prcb->dvp;
if(prcb->req_pending && prcb->cm_ccb ) {
prcb->req_pending = false;
prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP;
- xpt_done((union ccb *)prcb->cm_ccb);
- prcb->cm_ccb = NULL;
+ pqi_complete_scsi_io(&prcb->cm_ccb->csio, prcb);
+ if (dvp)
+ pqisrc_decrement_device_active_io(softs, dvp);
+
}
}
@@ -693,21 +782,22 @@
/*
* IO handling functionality entry point
*/
-static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
+static int
+pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
{
rcb_t *rcb;
uint32_t tag, no_transfer = 0;
pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
cam_sim_softc(sim);
- int32_t error = PQI_STATUS_FAILURE;
+ int32_t error;
pqi_scsi_dev_t *dvp;
DBG_FUNC("IN\n");
-
- if( softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL ) {
+
+ if (softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL) {
ccb->ccb_h.status = CAM_DEV_NOT_THERE;
DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id);
- return PQI_STATUS_FAILURE;
+ return ENXIO;
}
dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
@@ -716,40 +806,40 @@
ccb->ccb_h.status = CAM_SCSI_BUS_RESET
| CAM_BUSY | CAM_REQ_INPROG;
DBG_WARN("Device = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id);
- return error;
+ return ENXIO;
}
/* Check device state */
if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) {
ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP;
DBG_WARN("Device = %d GONE/OFFLINE\n", ccb->ccb_h.target_id);
- return error;
+ return ENXIO;
}
/* Check device reset */
- if (dvp->reset_in_progress) {
+ if (DEVICE_RESET(dvp)) {
ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY;
DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id);
- return error;
+ return EBUSY;
}
if (dvp->expose_device == false) {
ccb->ccb_h.status = CAM_DEV_NOT_THERE;
DBG_INFO("Device = %d not exposed\n", ccb->ccb_h.target_id);
- return error;
+ return ENXIO;
}
tag = pqisrc_get_tag(&softs->taglist);
- if( tag == INVALID_ELEM ) {
+ if (tag == INVALID_ELEM) {
DBG_ERR("Get Tag failed\n");
xpt_freeze_simq(softs->os_specific.sim, 1);
softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ);
- return PQI_STATUS_FAILURE;
+ return EIO;
}
DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist);
rcb = &softs->rcb[tag];
- os_reset_rcb( rcb );
+ os_reset_rcb(rcb);
rcb->tag = tag;
rcb->softs = softs;
rcb->cmdlen = ccb->csio.cdb_len;
@@ -786,57 +876,72 @@
* if we ever learn a transport layer other than simple, may fail
* if the adapter rejects the command).
*/
- if ((error = pqi_map_request(rcb)) != 0) {
- rcb->req_pending = false;
+ if ((error = pqi_map_request(rcb)) != BSD_SUCCESS) {
xpt_freeze_simq(softs->os_specific.sim, 1);
- ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
if (error == EINPROGRESS) {
- DBG_WARN("In Progress on %d\n", ccb->ccb_h.target_id);
- error = 0;
+ /* Release simq in the completion */
+ softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
+ error = BSD_SUCCESS;
} else {
- ccb->ccb_h.status |= CAM_REQUEUE_REQ;
+ rcb->req_pending = false;
+ ccb->ccb_h.status |= CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
DBG_WARN("Requeue req error = %d target = %d\n", error,
ccb->ccb_h.target_id);
pqi_unmap_request(rcb);
+ error = EIO;
}
}
DBG_FUNC("OUT error = %d\n", error);
+
return error;
}
+static inline int
+pqi_tmf_status_to_bsd_tmf_status(int pqi_status, rcb_t *rcb)
+{
+ if (PQI_STATUS_SUCCESS == pqi_status &&
+ REQUEST_SUCCESS == rcb->status)
+ return BSD_SUCCESS;
+ else
+ return EIO;
+}
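
pqi_tmf_status_to_bsd_tmf_status() folds the two-level result of a task-management request into a single errno-style value: the submission must return PQI_STATUS_SUCCESS and the completed request must carry REQUEST_SUCCESS. The call pattern, as used by the abort/reset paths later in this patch:

	/* Send the TMF, then collapse transport + request status to BSD status. */
	rval = pqisrc_send_tmf(softs, devp, rcb, NULL,
	    SOP_TASK_MANAGEMENT_LUN_RESET);
	rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb); /* BSD_SUCCESS or EIO */
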
+
/*
* Abort a task, task management functionality
*/
static int
pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb)
{
- rcb_t *rcb = ccb->ccb_h.sim_priv.entries[0].ptr;
- uint32_t abort_tag = rcb->tag;
- uint32_t tag = 0;
- int rval = PQI_STATUS_SUCCESS;
- uint16_t qid;
-
- DBG_FUNC("IN\n");
+ struct ccb_hdr *ccb_h = &ccb->ccb_h;
+ rcb_t *rcb = NULL;
+ rcb_t *prcb = ccb->ccb_h.sim_priv.entries[0].ptr;
+ uint32_t tag;
+ int rval;
- qid = (uint16_t)rcb->resp_qid;
+ DBG_FUNC("IN\n");
tag = pqisrc_get_tag(&softs->taglist);
rcb = &softs->rcb[tag];
rcb->tag = tag;
- rcb->resp_qid = qid;
- rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, abort_tag,
+ if (!rcb->dvp) {
+ DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
+ rval = ENXIO;
+ goto error_tmf;
+ }
+
+ rcb->tm_req = true;
+
+ rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, prcb,
SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK);
- if (PQI_STATUS_SUCCESS == rval) {
- rval = rcb->status;
- if (REQUEST_SUCCESS == rval) {
- ccb->ccb_h.status = CAM_REQ_ABORTED;
- }
- }
- pqisrc_put_tag(&softs->taglist, abort_tag);
- pqisrc_put_tag(&softs->taglist,rcb->tag);
+ if ((rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb)) == BSD_SUCCESS)
+ ccb->ccb_h.status = CAM_REQ_ABORTED;
+
+error_tmf:
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist, tag);
DBG_FUNC("OUT rval = %d\n", rval);
@@ -849,9 +954,10 @@
static int
pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
{
+ struct ccb_hdr *ccb_h = &ccb->ccb_h;
rcb_t *rcb = NULL;
- uint32_t tag = 0;
- int rval = PQI_STATUS_SUCCESS;
+ uint32_t tag;
+ int rval;
DBG_FUNC("IN\n");
@@ -859,14 +965,22 @@
rcb = &softs->rcb[tag];
rcb->tag = tag;
- rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, 0,
+ if (!rcb->dvp) {
+ DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
+ rval = ENXIO;
+ goto error_tmf;
+ }
+
+ rcb->tm_req = true;
+
+ rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, NULL,
SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET);
- if (rval == PQI_STATUS_SUCCESS) {
- rval = rcb->status;
- }
+ rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb);
- pqisrc_put_tag(&softs->taglist,rcb->tag);
+error_tmf:
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist, tag);
DBG_FUNC("OUT rval = %d\n", rval);
@@ -879,16 +993,17 @@
static int
pqisrc_target_reset( pqisrc_softstate_t *softs, union ccb *ccb)
{
+ struct ccb_hdr *ccb_h = &ccb->ccb_h;
pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
rcb_t *rcb = NULL;
- uint32_t tag = 0;
- int rval = PQI_STATUS_SUCCESS;
+ uint32_t tag;
+ int rval;
DBG_FUNC("IN\n");
if (devp == NULL) {
- DBG_ERR("bad target t%d\n", ccb->ccb_h.target_id);
- return (-1);
+ DBG_ERR("bad target %d, tmf type : 0x%x\n", ccb_h->target_id, ccb_h->func_code);
+ return ENXIO;
}
tag = pqisrc_get_tag(&softs->taglist);
@@ -896,24 +1011,29 @@
rcb->tag = tag;
devp->reset_in_progress = true;
- rval = pqisrc_send_tmf(softs, devp, rcb, 0,
+
+ rcb->tm_req = true;
+
+ rval = pqisrc_send_tmf(softs, devp, rcb, NULL,
SOP_TASK_MANAGEMENT_LUN_RESET);
- if (PQI_STATUS_SUCCESS == rval) {
- rval = rcb->status;
- }
+
+ rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb);
devp->reset_in_progress = false;
- pqisrc_put_tag(&softs->taglist,rcb->tag);
+
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist, tag);
DBG_FUNC("OUT rval = %d\n", rval);
- return ((rval == REQUEST_SUCCESS) ?
- PQI_STATUS_SUCCESS : PQI_STATUS_FAILURE);
+ return rval;
+
}
/*
* cam entry point of the smartpqi module.
*/
-static void smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
+static void
+smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
struct pqisrc_softstate *softs = cam_sim_softc(sim);
struct ccb_hdr *ccb_h = &ccb->ccb_h;
@@ -934,7 +1054,7 @@
ccg = &ccb->ccg;
if (ccg->block_size == 0) {
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
- ccb->ccb_h.status = CAM_REQ_INVALID;
+ ccb->ccb_h.status |= CAM_REQ_INVALID;
break;
}
cam_calc_geometry(ccg, /* extended */ 1);
@@ -1000,7 +1120,8 @@
* Function to poll the response, when interrupts are unavailable
* This also serves supporting crash dump.
*/
-static void smartpqi_poll(struct cam_sim *sim)
+static void
+smartpqi_poll(struct cam_sim *sim)
{
struct pqisrc_softstate *softs = cam_sim_softc(sim);
int i;
@@ -1012,7 +1133,8 @@
/*
* Function to adjust the queue depth of a device
*/
-void smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
+void
+smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
{
struct ccb_relsim crs;
@@ -1056,6 +1178,10 @@
if (t_id <= (PQI_CTLR_INDEX - 1)) {
if (softs != NULL) {
pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun];
+ if (dvp == NULL) {
+ DBG_ERR("Target is null, target id=%d\n", t_id);
+ break;
+ }
smartpqi_adjust_queue_depth(path,
dvp->queue_depth);
}
@@ -1072,9 +1198,9 @@
/*
* Function to register sim with CAM layer for smartpqi driver
*/
-int register_sim(struct pqisrc_softstate *softs, int card_index)
+int
+register_sim(struct pqisrc_softstate *softs, int card_index)
{
- int error = 0;
int max_transactions;
union ccb *ccb = NULL;
cam_status status = 0;
@@ -1088,7 +1214,7 @@
if (softs->os_specific.devq == NULL) {
DBG_ERR("cam_simq_alloc failed txns = %d\n",
max_transactions);
- return PQI_STATUS_FAILURE;
+ return ENOMEM;
}
sim = cam_sim_alloc(smartpqi_cam_action, \
@@ -1099,7 +1225,7 @@
DBG_ERR("cam_sim_alloc failed txns = %d\n",
max_transactions);
cam_simq_free(softs->os_specific.devq);
- return PQI_STATUS_FAILURE;
+ return ENOMEM;
}
softs->os_specific.sim = sim;
@@ -1110,14 +1236,14 @@
cam_sim_free(softs->os_specific.sim, FALSE);
cam_simq_free(softs->os_specific.devq);
mtx_unlock(&softs->os_specific.cam_lock);
- return PQI_STATUS_FAILURE;
+ return ENXIO;
}
softs->os_specific.sim_registered = TRUE;
ccb = xpt_alloc_ccb_nowait();
if (ccb == NULL) {
DBG_ERR("xpt_create_path failed\n");
- return PQI_STATUS_FAILURE;
+ return ENXIO;
}
if (xpt_create_path(&ccb->ccb_h.path, NULL,
@@ -1129,7 +1255,7 @@
xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
cam_sim_free(softs->os_specific.sim, TRUE);
mtx_unlock(&softs->os_specific.cam_lock);
- return PQI_STATUS_FAILURE;
+ return ENXIO;
}
/*
* Callback to set the queue depth per target which is
@@ -1149,13 +1275,15 @@
mtx_unlock(&softs->os_specific.cam_lock);
DBG_INFO("OUT\n");
- return error;
+
+ return BSD_SUCCESS;
}
/*
* Function to deregister smartpqi sim from cam layer
*/
-void deregister_sim(struct pqisrc_softstate *softs)
+void
+deregister_sim(struct pqisrc_softstate *softs)
{
struct ccb_setasync csa;
@@ -1174,15 +1302,14 @@
xpt_action((union ccb *)&csa);
xpt_free_path(softs->os_specific.path);
- xpt_release_simq(softs->os_specific.sim, 0);
-
- xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
- softs->os_specific.sim_registered = FALSE;
-
if (softs->os_specific.sim) {
+ xpt_release_simq(softs->os_specific.sim, 0);
+ xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
+ softs->os_specific.sim_registered = FALSE;
cam_sim_free(softs->os_specific.sim, FALSE);
softs->os_specific.sim = NULL;
}
+
if (softs->os_specific.mtx_init) {
mtx_unlock(&softs->os_specific.cam_lock);
}
@@ -1198,3 +1325,27 @@
DBG_FUNC("OUT\n");
}
+
+void
+os_rescan_target(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+ struct cam_path *tmppath;
+
+ DBG_FUNC("IN\n");
+
+ if(softs->os_specific.sim_registered) {
+ if (xpt_create_path(&tmppath, NULL,
+ cam_sim_path(softs->os_specific.sim),
+ device->target, device->lun) != CAM_REQ_CMP) {
+ DBG_ERR("unable to create path for async event!!! Bus: %d Target: %d Lun: %d\n",
+ device->bus, device->target, device->lun);
+ return;
+ }
+ xpt_async(AC_INQ_CHANGED, tmppath, NULL);
+ xpt_free_path(tmppath);
+ }
+
+ device->scsi_rescan = false;
+
+ DBG_FUNC("OUT\n");
+}
diff --git a/sys/dev/smartpqi/smartpqi_cmd.c b/sys/dev/smartpqi/smartpqi_cmd.c
--- a/sys/dev/smartpqi/smartpqi_cmd.c
+++ b/sys/dev/smartpqi/smartpqi_cmd.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,8 +31,8 @@
* Function to submit the request to the adapter.
*/
-int pqisrc_submit_cmnd(pqisrc_softstate_t *softs,
- ib_queue_t *ib_q, void *req)
+int
+pqisrc_submit_cmnd(pqisrc_softstate_t *softs, ib_queue_t *ib_q, void *req)
{
char *slot = NULL;
uint32_t offset;
@@ -43,11 +42,11 @@
DBG_FUNC("IN\n");
PQI_LOCK(&ib_q->lock);
-
+
/* Check queue full */
if ((ib_q->pi_local + 1) % ib_q->num_elem == *(ib_q->ci_virt_addr)) {
DBG_WARN("OUT Q full\n");
- PQI_UNLOCK(&ib_q->lock);
+ PQI_UNLOCK(&ib_q->lock);
return PQI_STATUS_QFULL;
}
@@ -70,7 +69,7 @@
/* Inform the fw about the new IU */
PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local);
- PQI_UNLOCK(&ib_q->lock);
+ PQI_UNLOCK(&ib_q->lock);
DBG_FUNC("OUT\n");
return PQI_STATUS_SUCCESS;
}
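
The queue-full test in pqisrc_submit_cmnd() above is the classic one-slot-open ring check: the queue is full when advancing the producer index would collide with the consumer index. A minimal standalone sketch (hypothetical names, not driver code):

	#include <stdbool.h>
	#include <stdint.h>

	static bool
	ring_full(uint32_t pi, uint32_t ci, uint32_t num_elem)
	{
		/* One slot is kept empty so pi == ci unambiguously means "empty". */
		return ((pi + 1) % num_elem) == ci;
	}
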
diff --git a/sys/dev/smartpqi/smartpqi_defines.h b/sys/dev/smartpqi/smartpqi_defines.h
--- a/sys/dev/smartpqi/smartpqi_defines.h
+++ b/sys/dev/smartpqi/smartpqi_defines.h
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,25 +28,34 @@
#ifndef _PQI_DEFINES_H
#define _PQI_DEFINES_H
-#define PQI_STATUS_FAILURE -1
-#define PQI_STATUS_TIMEOUT -2
-#define PQI_STATUS_QFULL -3
-#define PQI_STATUS_SUCCESS 0
+#define PQI_STATUS_FAILURE -1
+#define PQI_STATUS_TIMEOUT -2
+#define PQI_STATUS_QFULL -3
+#define PQI_STATUS_SUCCESS 0
-#define PQISRC_CMD_TIMEOUT_CNT 1200000 /* 500usec * 1200000 = 5 min */
-#define PQI_CMND_COMPLETE_TMO 1000 /* in millisecond */
+/* Maximum timeout for internal command completion */
+#define TIMEOUT_INFINITE ((uint32_t) (-1))
+#define PQISRC_CMD_TIMEOUT TIMEOUT_INFINITE
+#define PQISRC_PASSTHROUGH_CMD_TIMEOUT PQISRC_CMD_TIMEOUT
+/* Delay in milliseconds */
+#define PQISRC_TMF_TIMEOUT (OS_TMF_TIMEOUT_SEC * 1000)
+/* Delay in microseconds */
+#define PQISRC_PENDING_IO_TIMEOUT_USEC 30000000 /* 30 seconds */
-#define INVALID_ELEM 0xffff
+/* Set to zero to disable atomic operations on the device active-IO counter */
+#define PQISRC_DEVICE_IO_COUNTER 1
+
+#define INVALID_ELEM 0xffff
#ifndef MIN
-#define MIN(a,b) ((a) < (b) ? (a) : (b))
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif
#ifndef MAX
-#define MAX(a,b) ((a) > (b) ? (a) : (b))
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
#endif
-#define PQISRC_ROUNDUP(x, y) (((x) + (y) - 1) / (y) * (y))
-#define PQISRC_DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
+#define PQISRC_ROUNDUP(x, y) (((x) + (y) - 1) / (y) * (y))
+#define PQISRC_DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
#define ALIGN_BOUNDARY(a, n) { \
if (a % n) \
@@ -77,7 +85,7 @@
} \
} \
}
-
+
#define FILL_QUEUE_ARRAY_ADDR(q,virt,dma) { \
q->array_virt_addr = virt; \
q->array_dma_addr = dma; \
@@ -87,7 +95,7 @@
#define false 0
enum INTR_TYPE {
- LOCK_INTR,
+ LOCK_INTR,
LOCK_SLEEP
};
@@ -111,7 +119,6 @@
REQUEST_PENDING = -1,
REQUEST_FAILED = -2,
}REQUEST_STATUS_T;
-
typedef enum IO_PATH {
AIO_PATH,
RAID_PATH
@@ -174,10 +181,10 @@
#define PQI_CTRL_KERNEL_PANIC 0x100
#define SIS_CTL_TO_HOST_DB_DISABLE_ALL 0xFFFFFFFF
-#define SIS_CTL_TO_HOST_DB_CLEAR 0x00001000
+#define SIS_CTL_TO_HOST_DB_CLEAR 0x00001000
#define SIS_CMD_SUBMIT 0x00000200 /* Bit 9 */
#define SIS_CMD_COMPLETE 0x00001000 /* Bit 12 */
-#define SIS_CMD_STATUS_SUCCESS 0x1
+#define SIS_CMD_STATUS_SUCCESS 0x1
/* PQI specific */
@@ -189,18 +196,18 @@
-#define PQI_MIN_OP_IB_QUEUE_ID 1
-#define PQI_OP_EVENT_QUEUE_ID 1
-#define PQI_MIN_OP_OB_QUEUE_ID 2
+#define PQI_MIN_OP_IB_QUEUE_ID 1
+#define PQI_OP_EVENT_QUEUE_ID 1
+#define PQI_MIN_OP_OB_QUEUE_ID 2
#define PQISRC_MAX_SUPPORTED_OP_IB_Q 128
#define PQISRC_MAX_SUPPORTED_OP_RAID_IB_Q (PQISRC_MAX_SUPPORTED_OP_IB_Q / 2)
#define PQISRC_MAX_SUPPORTED_OP_AIO_IB_Q (PQISRC_MAX_SUPPORTED_OP_RAID_IB_Q)
-#define PQISRC_MAX_OP_IB_QUEUE_ELEM_NUM (PQISRC_MAX_OUTSTANDING_REQ / PQISRC_MAX_SUPPORTED_OP_IB_Q)
-#define PQISRC_MAX_OP_OB_QUEUE_ELEM_NUM PQISRC_MAX_OUTSTANDING_REQ
+#define PQISRC_MAX_OP_IB_QUEUE_ELEM_NUM (PQISRC_MAX_OUTSTANDING_REQ / PQISRC_MAX_SUPPORTED_OP_IB_Q)
+#define PQISRC_MAX_OP_OB_QUEUE_ELEM_NUM PQISRC_MAX_OUTSTANDING_REQ
#define PQISRC_MIN_OP_OB_QUEUE_ELEM_NUM 2
#define PQISRC_MAX_SUPPORTED_OP_OB_Q 64
-#define PQISRC_OP_MAX_IBQ_ELEM_SIZE 8 /* 8 * 16 = 128 bytes */
+#define PQISRC_OP_MAX_IBQ_ELEM_SIZE 8 /* 8 * 16 = 128 bytes */
#define PQISRC_OP_MIN_IBQ_ELEM_SIZE 2 /* 2 * 16 = 32 bytes */
#define PQISRC_OP_OBQ_ELEM_SIZE 1 /* 16 bytes */
#define PQISRC_ADMIN_IBQ_ELEM_SIZE 2 /* 2 * 16 = 32 bytes */
@@ -209,7 +216,7 @@
#define PQISRC_SGL_SUPPORTED_BIT_MASK 0
#define PQISRC_NUM_EVENT_Q_ELEM 32
-#define PQISRC_EVENT_Q_ELEM_SIZE 32
+#define PQISRC_EVENT_Q_ELEM_SIZE 32
/* PQI Registers state status */
@@ -228,19 +235,19 @@
};
/* PQI device performing internal initialization (e.g., POST). */
-#define PQI_DEV_STATE_POWER_ON_AND_RESET 0x0
+#define PQI_DEV_STATE_POWER_ON_AND_RESET 0x0
/* Upon entry to this state PQI device initialization begins. */
-#define PQI_DEV_STATE_PQI_STATUS_AVAILABLE 0x1
+#define PQI_DEV_STATE_PQI_STATUS_AVAILABLE 0x1
/* PQI device Standard registers are available to the driver. */
-#define PQI_DEV_STATE_ALL_REGISTERS_READY 0x2
+#define PQI_DEV_STATE_ALL_REGISTERS_READY 0x2
/* PQI device is initialized and ready to process any PCI transactions. */
-#define PQI_DEV_STATE_ADMIN_QUEUE_PAIR_READY 0x3
+#define PQI_DEV_STATE_ADMIN_QUEUE_PAIR_READY 0x3
/* The PQI Device Error register indicates the error. */
-#define PQI_DEV_STATE_ERROR 0x4
+#define PQI_DEV_STATE_ERROR 0x4
#define PQI_DEV_STATE_AT_INIT ( PQI_DEV_STATE_PQI_STATUS_AVAILABLE | \
PQI_DEV_STATE_ALL_REGISTERS_READY | \
- PQI_DEV_STATE_ADMIN_QUEUE_PAIR_READY )
+ PQI_DEV_STATE_ADMIN_QUEUE_PAIR_READY )
#define PQISRC_PQI_DEVICE_SIGNATURE "PQI DREG"
#define PQI_ADMINQ_ELEM_ARRAY_ALIGN 64
@@ -264,17 +271,38 @@
#define PQI_CONF_TABLE_SIGNATURE "CFGTABLE"
/* PQI configuration table section IDs */
+#define PQI_CONF_TABLE_ALL_SECTIONS (-1)
#define PQI_CONF_TABLE_SECTION_GENERAL_INFO 0
#define PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES 1
#define PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA 2
#define PQI_CONF_TABLE_SECTION_DEBUG 3
#define PQI_CONF_TABLE_SECTION_HEARTBEAT 4
-#define CTRLR_HEARTBEAT_CNT(softs) LE_64(PCI_MEM_GET64(softs, softs->heartbeat_counter_abs_addr, softs->heartbeat_counter_off))
-#define PQI_NEW_HEARTBEAT_MECHANISM(softs) 1
+
+#define PQI_FIRMWARE_FEATURE_OFA 0
+#define PQI_FIRMWARE_FEATURE_SMP 1
+#define PQI_FIRMWARE_FEATURE_MAX_KNOWN 2
+#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_0 3
+#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_1_10 4
+#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_5_50 5
+#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_6_60 6
+#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_0 7
+#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_1_10 8
+#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_5_50 9
+#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_6_60 10
+#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11
+#define PQI_FIRMWARE_FEATURE_SATA_WWN_FOR_DEV_UNIQUE_ID 12
+#define PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT 13
+#define PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT 14
+#define PQI_FIRMWARE_FEATURE_MAXIMUM 14
+
+#define CTRLR_HEARTBEAT_CNT(softs) \
+ LE_64(PCI_MEM_GET64(softs, softs->heartbeat_counter_abs_addr, softs->heartbeat_counter_off))
+#define PQI_HEARTBEAT_TIMEOUT_SEC (10) /* 10 sec interval */
+#define PQI_HOST_WELLNESS_TIMEOUT_SEC (24*3600)
/* pqi-2r00a table 36 */
-#define PQI_ADMIN_QUEUE_MSIX_DISABLE (0x80000000)
+#define PQI_ADMIN_QUEUE_MSIX_DISABLE (0x80000000)
#define PQI_ADMIN_QUEUE_MSIX_ENABLE (0 << 31)
#define PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR 0x01
@@ -294,13 +322,15 @@
#define PQI_IU_TYPE_GENERAL_ADMIN_RESPONSE 0xe0
/* PQI / Vendor specific IU */
-#define PQI_FUNCTION_REPORT_DEV_CAP 0x00
-#define PQI_REQUEST_IU_TASK_MANAGEMENT 0x13
+#define PQI_FUNCTION_REPORT_DEV_CAP 0x00
+#define PQI_REQUEST_IU_RAID_TASK_MANAGEMENT 0x13
#define PQI_IU_TYPE_RAID_PATH_IO_REQUEST 0x14
#define PQI_IU_TYPE_AIO_PATH_IO_REQUEST 0x15
+#define PQI_REQUEST_IU_AIO_TASK_MANAGEMENT 0x16
#define PQI_REQUEST_IU_GENERAL_ADMIN 0x60
#define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG 0x72
#define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG 0x73
+#define PQI_REQUEST_IU_VENDOR_GENERAL 0x75
#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81
#define PQI_RESPONSE_IU_TASK_MANAGEMENT 0x93
#define PQI_RESPONSE_IU_GENERAL_ADMIN 0xe0
@@ -311,6 +341,7 @@
#define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR 0xf3
#define PQI_RESPONSE_IU_AIO_PATH_IS_OFF 0xf4
#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6
+#define PQI_RESPONSE_IU_VENDOR_GENERAL 0xf7
#define PQI_REQUEST_HEADER_LENGTH 4
#define PQI_FUNCTION_CREATE_OPERATIONAL_IQ 0x10
#define PQI_FUNCTION_CREATE_OPERATIONAL_OQ 0x11
@@ -319,7 +350,14 @@
#define PQI_FUNCTION_CHANGE_OPERATIONAL_IQ_PROP 0x14
#define PQI_CHANGE_OP_IQ_PROP_ASSIGN_AIO 1
-#define PQI_DEFAULT_IB_QUEUE 0
+#define PQI_DEFAULT_IB_QUEUE 0
+#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0
+
+#define PQI_VENDOR_RESPONSE_IU_SUCCESS 0
+#define PQI_VENDOR_RESPONSE_IU_UNSUCCESS 1
+#define PQI_VENDOR_RESPONSE_IU_INVALID_PARAM 2
+#define PQI_VENDOR_RESPONSE_IU_INSUFF_RESRC 3
+
/* Interface macros */
#define GET_FW_STATUS(softs) \
@@ -337,20 +375,22 @@
#define PQI_SAVE_CTRL_MODE(softs, mode) \
PCI_MEM_PUT32(softs, &softs->ioa_reg->scratchpad0, LEGACY_SIS_SCR0, mode)
-#define PQISRC_MAX_TARGETID 1024
-#define PQISRC_MAX_TARGETLUN 64
+#define PQISRC_MAX_TARGETID 1024
+#define PQISRC_MAX_TARGETLUN 64
/* Vendor specific IU Type for Event config Cmds */
-#define PQI_REQUEST_IU_REPORT_EVENT_CONFIG 0x72
-#define PQI_REQUEST_IU_SET_EVENT_CONFIG 0x73
-#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6
-#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81
-#define PQI_MANAGEMENT_CMD_RESP_TIMEOUT 3000
-#define PQISRC_EVENT_ACK_RESP_TIMEOUT 1000
+#define PQI_REQUEST_IU_REPORT_EVENT_CONFIG 0x72
+#define PQI_REQUEST_IU_SET_EVENT_CONFIG 0x73
+#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6
+
+#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81
+#define PQI_MANAGEMENT_CMD_RESP_TIMEOUT 3000
+#define PQISRC_EVENT_ACK_RESP_TIMEOUT 1000
/* Supported Event types by controller */
-#define PQI_NUM_SUPPORTED_EVENTS 7
+
+#define PQI_NUM_SUPPORTED_EVENTS 6
#define PQI_EVENT_TYPE_HOTPLUG 0x1
#define PQI_EVENT_TYPE_HARDWARE 0x2
@@ -358,18 +398,15 @@
#define PQI_EVENT_TYPE_LOGICAL_DEVICE 0x5
#define PQI_EVENT_TYPE_AIO_STATE_CHANGE 0xfd
#define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE 0xfe
-#define PQI_EVENT_TYPE_HEARTBEAT 0xff
/* for indexing into the pending_events[] field of struct pqisrc_softstate */
-#define PQI_EVENT_HEARTBEAT 0
-#define PQI_EVENT_HOTPLUG 1
-#define PQI_EVENT_HARDWARE 2
-#define PQI_EVENT_PHYSICAL_DEVICE 3
-#define PQI_EVENT_LOGICAL_DEVICE 4
-#define PQI_EVENT_AIO_STATE_CHANGE 5
-#define PQI_EVENT_AIO_CONFIG_CHANGE 6
+#define PQI_EVENT_HOTPLUG 0
+#define PQI_EVENT_HARDWARE 1
+#define PQI_EVENT_PHYSICAL_DEVICE 2
+#define PQI_EVENT_LOGICAL_DEVICE 3
+#define PQI_EVENT_AIO_STATE_CHANGE 4
+#define PQI_EVENT_AIO_CONFIG_CHANGE 5
-#define PQI_MAX_HEARTBEAT_REQUESTS 5
/* Device flags */
@@ -381,30 +418,32 @@
#define SG_FLAG_LAST 0x40000000
#define SG_FLAG_CHAIN 0x80000000
-#define IN_PQI_RESET(softs) (softs->ctlr_state & PQI_BUS_RESET)
-#define DEV_GONE(dev) (!dev || (dev->invalid == true))
-#define IS_AIO_PATH(dev) (dev->aio_enabled)
-#define IS_RAID_PATH(dev) (!dev->aio_enabled)
+#define IN_PQI_RESET(softs) (softs->ctlr_state & PQI_BUS_RESET)
+#define DEV_GONE(dev) (!dev || (dev->invalid == true))
+#define IS_AIO_PATH(dev) (dev->aio_enabled)
+#define IS_RAID_PATH(dev) (!dev->aio_enabled)
+
+#define DEVICE_RESET(dvp) (dvp->reset_in_progress)
/* SOP data direction flags */
-#define SOP_DATA_DIR_NONE 0x00
-#define SOP_DATA_DIR_FROM_DEVICE 0x01
-#define SOP_DATA_DIR_TO_DEVICE 0x02
-#define SOP_DATA_DIR_BIDIRECTIONAL 0x03
-#define SOP_PARTIAL_DATA_BUFFER 0x04
+#define SOP_DATA_DIR_NONE 0x00
+#define SOP_DATA_DIR_FROM_DEVICE 0x01
+#define SOP_DATA_DIR_TO_DEVICE 0x02
+#define SOP_DATA_DIR_BIDIRECTIONAL 0x03
+#define SOP_PARTIAL_DATA_BUFFER 0x04
-#define PQISRC_DMA_VALID (1 << 0)
-#define PQISRC_CMD_NO_INTR (1 << 1)
+#define PQISRC_DMA_VALID (1 << 0)
+#define PQISRC_CMD_NO_INTR (1 << 1)
#define SOP_TASK_ATTRIBUTE_SIMPLE 0
#define SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE 1
#define SOP_TASK_ATTRIBUTE_ORDERED 2
#define SOP_TASK_ATTRIBUTE_ACA 4
-#define SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE 0x0
-#define SOP_TASK_MANAGEMENT_FUNCTION_REJECTED 0x4
+#define SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE 0x0
+#define SOP_TASK_MANAGEMENT_FUNCTION_REJECTED 0x4
#define SOP_TASK_MANAGEMENT_FUNCTION_FAILED 0x5
-#define SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED 0x8
+#define SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED 0x8
#define SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK 0x01
#define SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET 0x02
#define SOP_TASK_MANAGEMENT_LUN_RESET 0x8
@@ -446,46 +485,63 @@
#define PQI_AIO_SERV_RESPONSE_TMF_REJECTED 4
#define PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN 5
-#define PQI_TMF_WAIT_DELAY 10000000 /* 10 seconds */
-
-#define PQI_RAID_STATUS_GOOD PQI_AIO_STATUS_GOOD
-#define PQI_RAID_STATUS_CHECK_CONDITION PQI_AIO_STATUS_CHECK_CONDITION
-#define PQI_RAID_STATUS_CONDITION_MET PQI_AIO_STATUS_CONDITION_MET
-#define PQI_RAID_STATUS_DEVICE_BUSY PQI_AIO_STATUS_DEVICE_BUSY
-#define PQI_RAID_STATUS_INT_GOOD PQI_AIO_STATUS_INT_GOOD
-#define PQI_RAID_STATUS_INT_COND_MET PQI_AIO_STATUS_INT_COND_MET
-#define PQI_RAID_STATUS_RESERV_CONFLICT PQI_AIO_STATUS_RESERV_CONFLICT
-#define PQI_RAID_STATUS_CMD_TERMINATED PQI_AIO_STATUS_CMD_TERMINATED
-#define PQI_RAID_STATUS_QUEUE_FULL PQI_AIO_STATUS_QUEUE_FULL
-#define PQI_RAID_STATUS_TASK_ABORTED PQI_AIO_STATUS_TASK_ABORTED
-#define PQI_RAID_STATUS_UNDERRUN PQI_AIO_STATUS_UNDERRUN
-#define PQI_RAID_STATUS_OVERRUN PQI_AIO_STATUS_OVERRUN
+#define PQI_TMF_WAIT_DELAY 10000000 /* 10 seconds */
+
+#define PQI_RAID_STATUS_GOOD PQI_AIO_STATUS_GOOD
+#define PQI_RAID_STATUS_CHECK_CONDITION PQI_AIO_STATUS_CHECK_CONDITION
+#define PQI_RAID_STATUS_CONDITION_MET PQI_AIO_STATUS_CONDITION_MET
+#define PQI_RAID_STATUS_DEVICE_BUSY PQI_AIO_STATUS_DEVICE_BUSY
+#define PQI_RAID_STATUS_INT_GOOD PQI_AIO_STATUS_INT_GOOD
+#define PQI_RAID_STATUS_INT_COND_MET PQI_AIO_STATUS_INT_COND_MET
+#define PQI_RAID_STATUS_RESERV_CONFLICT PQI_AIO_STATUS_RESERV_CONFLICT
+#define PQI_RAID_STATUS_CMD_TERMINATED PQI_AIO_STATUS_CMD_TERMINATED
+#define PQI_RAID_STATUS_QUEUE_FULL PQI_AIO_STATUS_QUEUE_FULL
+#define PQI_RAID_STATUS_TASK_ABORTED PQI_AIO_STATUS_TASK_ABORTED
+#define PQI_RAID_STATUS_UNDERRUN PQI_AIO_STATUS_UNDERRUN
+#define PQI_RAID_STATUS_OVERRUN PQI_AIO_STATUS_OVERRUN
/* VPD inquiry pages */
-#define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */
+#define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */
#define SCSI_VPD_DEVICE_ID 0x83 /* standard page */
#define SA_VPD_PHYS_DEVICE_ID 0xc0 /* vendor-specific page */
#define SA_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */
#define SA_VPD_LV_IOACCEL_STATUS 0xc2 /* vendor-specific page */
#define SA_VPD_LV_STATUS 0xc3 /* vendor-specific page */
-#define VPD_PAGE (1 << 8)
+#define VPD_PAGE (1 << 8)
/* logical volume states */
-#define SA_LV_OK 0x0
-#define SA_LV_NOT_AVAILABLE 0xb
-#define SA_LV_UNDERGOING_ERASE 0xf
-#define SA_LV_UNDERGOING_RPI 0x12
-#define SA_LV_PENDING_RPI 0x13
-#define SA_LV_ENCRYPTED_NO_KEY 0x14
-#define SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER 0x15
-#define SA_LV_UNDERGOING_ENCRYPTION 0x16
-#define SA_LV_UNDERGOING_ENCRYPTION_REKEYING 0x17
-#define SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER 0x18
-#define SA_LV_PENDING_ENCRYPTION 0x19
-#define SA_LV_PENDING_ENCRYPTION_REKEYING 0x1a
-#define SA_LV_STATUS_VPD_UNSUPPORTED 0xff
+#define SA_LV_OK 0x0
+#define SA_LV_FAILED 0x1
+#define SA_LV_NOT_CONFIGURED 0x2
+#define SA_LV_DEGRADED 0x3
+#define SA_LV_READY_FOR_RECOVERY 0x4
+#define SA_LV_UNDERGOING_RECOVERY 0x5
+#define SA_LV_WRONG_PHYSICAL_DRIVE_REPLACED 0x6
+#define SA_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM 0x7
+#define SA_LV_HARDWARE_OVERHEATING 0x8
+#define SA_LV_HARDWARE_HAS_OVERHEATED 0x9
+#define SA_LV_UNDERGOING_EXPANSION 0xA
+#define SA_LV_NOT_AVAILABLE 0xb
+#define SA_LV_QUEUED_FOR_EXPANSION 0xc
+#define SA_LV_DISABLED_SCSI_ID_CONFLICT 0xd
+#define SA_LV_EJECTED 0xe
+#define SA_LV_UNDERGOING_ERASE 0xf
+#define SA_LV_UNDERGOING_RPI 0x12
+#define SA_LV_PENDING_RPI 0x13
+#define SA_LV_ENCRYPTED_NO_KEY 0x14
+#define SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER 0x15
+#define SA_LV_UNDERGOING_ENCRYPTION 0x16
+#define SA_LV_UNDERGOING_ENCRYPTION_REKEYING 0x17
+#define SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER 0x18
+#define SA_LV_PENDING_ENCRYPTION 0x19
+#define SA_LV_PENDING_ENCRYPTION_REKEYING 0x1a
+#define SA_LV_STATUS_VPD_UNSUPPORTED 0xff
+
+
+/* constants for flags field of ciss_vpd_logical_volume_status */
+#define SA_LV_FLAGS_NO_HOST_IO 0x1 /* volume not available for host I/O */
/*
* assume worst case: SATA queue depth of 31 minus 4 internal firmware commands
@@ -494,27 +550,38 @@
/* 0 = no limit */
#define PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH 0
-
-
+#define PQI_LOG_EXT_QUEUE_DEPTH_ENABLED 0x20
+#define PQI_LOG_EXT_QUEUE_ENABLE 0x56
+#define MAX_RAW_M256_QDEPTH 32512
+#define MAX_RAW_M16_QDEPTH 2032
+#define PQI_PTRAID_UPDATE_ON_RESCAN_LUNS 0x80000000
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
#define SA_CACHE_FLUSH 0x1
+#define PQISRC_INQUIRY_TIMEOUT 30
#define SA_INQUIRY 0x12
#define SA_REPORT_LOG 0xc2 /* Report Logical LUNs */
#define SA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
#define SA_CISS_READ 0xc0
#define SA_GET_RAID_MAP 0xc8
-#define SA_REPORT_LOG_EXTENDED 0x1
-#define SA_REPORT_PHYS_EXTENDED 0x2
+#define SCSI_SENSE_RESPONSE_70 0x70
+#define SCSI_SENSE_RESPONSE_71 0x71
+#define SCSI_SENSE_RESPONSE_72 0x72
+#define SCSI_SENSE_RESPONSE_73 0x73
+
+#define SA_REPORT_LOG_EXTENDED 0x1
+#define SA_REPORT_PHYS_EXTENDED 0x2
+
+#define SA_CACHE_FLUSH_BUF_LEN 4
-#define SA_CACHE_FLUSH_BUF_LEN 4
+#define GET_SCSI_SNO(cmd) (cmd->cmdId.serialNumber)
-#define REPORT_LUN_DEV_FLAG_AIO_ENABLED 0x8
-#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U)
-#define RAID_MAP_MAX_ENTRIES 1024
-#define RAID_MAP_ENCRYPTION_ENABLED 0x1
+#define REPORT_LUN_DEV_FLAG_AIO_ENABLED 0x8
+#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U)
+#define RAID_MAP_MAX_ENTRIES 1024
+#define RAID_MAP_ENCRYPTION_ENABLED 0x1
#define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH 27
#define ASC_LUN_NOT_READY 0x4
@@ -532,10 +599,27 @@
#define OFFLOAD_CONFIGURED_BIT 0x1
#define OFFLOAD_ENABLED_BIT 0x2
-#define PQI_RAID_DATA_IN_OUT_GOOD 0x0
-#define PQI_RAID_DATA_IN_OUT_UNDERFLOW 0x1
-#define PQI_RAID_DATA_IN_OUT_UNSOLICITED_ABORT 0xf3
-#define PQI_RAID_DATA_IN_OUT_ABORTED 0xf4
+#define PQI_RAID_DATA_IN_OUT_GOOD 0x0
+#define PQI_RAID_DATA_IN_OUT_UNDERFLOW 0x1
+#define PQI_RAID_DATA_IN_OUT_BUFFER_ERROR 0x40
+#define PQI_RAID_DATA_IN_OUT_BUFFER_OVERFLOW 0x41
+#define PQI_RAID_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA 0x42
+#define PQI_RAID_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE 0x43
+#define PQI_RAID_DATA_IN_OUT_PCIE_FABRIC_ERROR 0x60
+#define PQI_RAID_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT 0x61
+#define PQI_RAID_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED 0x62
+#define PQI_RAID_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ 0x63
+#define PQI_RAID_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED 0x64
+#define PQI_RAID_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST 0x65
+#define PQI_RAID_DATA_IN_OUT_PCIE_ACS_VIOLATION 0x66
+#define PQI_RAID_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED 0x67
+#define PQI_RAID_DATA_IN_OUT_ERROR 0xf0
+#define PQI_RAID_DATA_IN_OUT_PROTOCOL_ERROR 0xf1
+#define PQI_RAID_DATA_IN_OUT_HARDWARE_ERROR 0xf2
+#define PQI_RAID_DATA_IN_OUT_UNSOLICITED_ABORT 0xf3
+#define PQI_RAID_DATA_IN_OUT_ABORTED 0xf4
+#define PQI_RAID_DATA_IN_OUT_TIMEOUT 0xf5
+
#define PQI_PHYSICAL_DEVICE_BUS 0
#define PQI_RAID_VOLUME_BUS 1
@@ -577,6 +661,12 @@
#define SA_RAID_MAX SA_RAID_ADM
#define SA_RAID_UNKNOWN 0xff
+#define BIT0 (1 << 0)
+#define BIT1 (1 << 1)
+#define BIT2 (1 << 2)
+#define BIT3 (1 << 3)
+
+#define BITS_PER_BYTE 8
/* BMIC commands */
#define BMIC_IDENTIFY_CONTROLLER 0x11
#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15
@@ -587,6 +677,8 @@
#define BMIC_CACHE_FLUSH 0xc2
#define BMIC_FLASH_FIRMWARE 0xf7
#define BMIC_WRITE_HOST_WELLNESS 0xa5
+#define BMIC_SET_DIAGS_OPTIONS 0xf4
+#define BMIC_SENSE_DIAGS_OPTIONS 0xf5
#define MASKED_DEVICE(lunid) ((lunid)[3] & 0xC0)
@@ -609,6 +701,10 @@
PQI_RESERVED_IO_SLOTS_TMF + \
PQI_RESERVED_IO_SLOTS_SYNC_REQUESTS)
+/* Defines for print flags */
+#define PRINT_FLAG_HDR_COLUMN 0x0001
+
+
static inline uint16_t GET_LE16(const uint8_t *p)
{
return p[0] | p[1] << 8;
@@ -659,7 +755,6 @@
PUT_BE32(val, p + 4);
}
-
#define OS_FREEBSD
#define SIS_POLL_WAIT
@@ -689,7 +784,7 @@
typedef struct _driver_info
{
unsigned char major_version;
- unsigned char minor_version;
+ unsigned long minor_version;
unsigned char release_version;
unsigned long build_revision;
unsigned long max_targets;
@@ -700,17 +795,17 @@
typedef uint8_t *passthru_buf_type_t;
-#define PQISRC_DRIVER_MAJOR 1
-#define PQISRC_DRIVER_MINOR 0
-#define PQISRC_DRIVER_RELEASE 3
-#define PQISRC_DRIVER_REVISION 239
+#define PQISRC_OS_VERSION 1
+#define PQISRC_FEATURE_VERSION 4014
+#define PQISRC_PATCH_VERSION 0
+#define PQISRC_BUILD_VERSION 105
#define STR(s) # s
-#define PQISRC_VERSION(a, b, c, d) STR(a.b.c-d)
-#define PQISRC_DRIVER_VERSION PQISRC_VERSION(PQISRC_DRIVER_MAJOR, \
- PQISRC_DRIVER_MINOR, \
- PQISRC_DRIVER_RELEASE, \
- PQISRC_DRIVER_REVISION)
+#define PQISRC_VERSION(a, b, c, d) STR(a.b.c.d)
+#define PQISRC_DRIVER_VERSION PQISRC_VERSION(PQISRC_OS_VERSION, \
+ PQISRC_FEATURE_VERSION, \
+ PQISRC_PATCH_VERSION, \
+ PQISRC_BUILD_VERSION)
/* End Management interface */
@@ -718,12 +813,36 @@
#undef ASSERT
#endif
+/*
+ * os_atomic64_cas --
+ *
+ * Atomically read, compare, and conditionally write (compare-and-swap).
+ *
+ * retval True  on success
+ * retval False on failure
+ */
+static inline boolean_t
+os_atomic64_cas(volatile uint64_t* var, uint64_t old_val, uint64_t new_val)
+{
+ return (atomic_cmpset_64(var, old_val, new_val));
+}
+
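
os_atomic64_cas() wraps FreeBSD's atomic_cmpset_64(), which returns non-zero only if the swap took place. The usual consumer is a read-modify-retry loop; a sketch with a hypothetical saturating increment (not driver code):

	static void
	inc_capped(volatile uint64_t *ctr, uint64_t cap)
	{
		uint64_t old;

		do {
			old = atomic_load_acq_64(ctr);
			if (old >= cap)
				return;	/* already at the cap; nothing to do */
		} while (!os_atomic64_cas(ctr, old, old + 1));
	}
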
#define ASSERT(cond) {\
if (!(cond)) { \
printf("Assertion failed at file %s line %d\n",__FILE__,__LINE__); \
} \
}
+/* Atomic */
+typedef volatile uint64_t OS_ATOMIC64_T;
+#define OS_ATOMIC64_READ(p) atomic_load_acq_64(p)
+#define OS_ATOMIC64_INIT(p,val) atomic_store_rel_64(p, val)
+
+/* 64-bit post-increment/decrement atomic operations on the pointed-to value. */
+#define OS_ATOMIC64_DEC(p) (atomic_fetchadd_64(p, -1) - 1)
+#define OS_ATOMIC64_INC(p) (atomic_fetchadd_64(p, 1) + 1)
+
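
On FreeBSD, atomic_fetchadd_64() returns the value held *before* the addition, so the OS_ATOMIC64_INC/DEC macros above adjust the result to behave like post-operation accessors that yield the new value. A short usage sketch (the counter name is hypothetical):

	volatile uint64_t active_io = 0;
	uint64_t now;

	now = OS_ATOMIC64_INC(&active_io);	/* active_io == 1, now == 1 */
	now = OS_ATOMIC64_DEC(&active_io);	/* active_io == 0, now == 0 */
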
#define PQI_MAX_MSIX 64 /* vectors */
#define PQI_MSI_CTX_SIZE sizeof(pqi_intr_ctx)+1
@@ -760,6 +879,7 @@
#define LEGACY_SIS_IQN_H 0xd4 /* inbound queue native mode (high)*/
#define LEGACY_SIS_MAILBOX 0x7fc60 /* mailbox (20 bytes) */
#define LEGACY_SIS_SRCV_MAILBOX 0x1000 /* mailbox (20 bytes) */
+#define LEGACY_SIS_SRCV_OFFSET_MAILBOX_7 0x101C /* mailbox 7 register offset */
#define LEGACY_SIS_ODR_SHIFT 12 /* outbound doorbell shift */
#define LEGACY_SIS_IDR_SHIFT 9 /* inbound doorbell shift */
@@ -793,8 +913,8 @@
#define OS_SLEEP(timeout) \
DELAY(timeout);
-#define OS_HOST_WELLNESS_TIMEOUT (24 * 3600)
-
+/* TMF request timeout is 600 seconds */
+#define OS_TMF_TIMEOUT_SEC (10 * 60)
#define LE_16(x) htole16(x)
#define LE_32(x) htole32(x)
@@ -856,35 +976,48 @@
struct cam_path *path;
struct task event_task;
struct cdev *cdev;
- struct callout_handle wellness_periodic; /* periodic event handling */
- struct callout_handle heartbeat_timeout_id; /* heart beat event handling */
- eventhandler_tag eh;
+ struct callout wellness_periodic; /* periodic event handling */
+ struct callout heartbeat_timeout_id; /* heart beat event handling */
} OS_SPECIFIC_T;
typedef bus_addr_t dma_addr_t;
-/* Atomic */
-typedef volatile uint64_t OS_ATOMIC64_T;
-#define OS_ATOMIC64_SET(_softs, target, val) atomic_set_long(&(_softs)->target, val)
-#define OS_ATOMIC64_READ(_softs, target) atomic_load_acq_64(&(_softs)->target)
-#define OS_ATOMIC64_INC(_softs, target) atomic_add_64(&(_softs)->target, 1)
/* Register access macros */
#define PCI_MEM_GET32( _softs, _absaddr, _offset ) \
- bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
- _softs->pci_mem_handle.pqi_bhandle, _offset)
-
-#define PCI_MEM_GET64( _softs, _absaddr, _offset ) \
- bus_space_read_8(_softs->pci_mem_handle.pqi_btag, \
- _softs->pci_mem_handle.pqi_bhandle, _offset)
+ bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset)
+
+
+#if defined(__i386__)
+#define PCI_MEM_GET64( _softs, _absaddr, _offset ) ({ \
+ (uint64_t)bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset) + \
+ ((uint64_t)bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset + 4) << 32); \
+ })
+#else
+#define PCI_MEM_GET64(_softs, _absaddr, _offset ) \
+ bus_space_read_8(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset)
+#endif
#define PCI_MEM_PUT32( _softs, _absaddr, _offset, _val ) \
- bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \
- _softs->pci_mem_handle.pqi_bhandle, _offset, _val)
+ bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset, _val)
+#if defined(__i386__)
+#define PCI_MEM_PUT64( _softs, _absaddr, _offset, _val ) \
+ bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset, _val); \
+ bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset + 4, _val >> 32);
+#else
#define PCI_MEM_PUT64( _softs, _absaddr, _offset, _val ) \
- bus_space_write_8(_softs->pci_mem_handle.pqi_btag, \
- _softs->pci_mem_handle.pqi_bhandle, _offset, _val)
+ bus_space_write_8(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset, _val)
+#endif
+
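
The i386 variants above exist because 32-bit x86 has no 8-byte bus_space accessor, so a 64-bit register must be accessed as two 32-bit halves. A sketch of the reassembly the read macro performs (little-endian register pair; the two MMIO reads are not atomic as a pair, which is presumably acceptable for monotonic values such as the heartbeat counter — an assumption, not stated in the patch):

	#include <stdint.h>

	static uint64_t
	combine64(uint32_t lo, uint32_t hi)
	{
		/* Low half first, then the high half shifted into place. */
		return (uint64_t)lo | ((uint64_t)hi << 32);
	}
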
#define PCI_MEM_GET_BUF(_softs, _absaddr, _offset, buf, size) \
bus_space_read_region_1(_softs->pci_mem_handle.pqi_btag,\
@@ -894,7 +1027,7 @@
typedef struct mtx OS_LOCK_T;
typedef struct sema OS_SEMA_LOCK_T;
-#define OS_PQILOCK_T OS_LOCK_T
+#define OS_PQILOCK_T OS_LOCK_T
#define OS_ACQUIRE_SPINLOCK(_lock) mtx_lock_spin(_lock)
#define OS_RELEASE_SPINLOCK(_lock) mtx_unlock_spin(_lock)
@@ -905,8 +1038,10 @@
#define PQI_LOCK(_lock) OS_ACQUIRE_SPINLOCK(_lock)
#define PQI_UNLOCK(_lock) OS_RELEASE_SPINLOCK(_lock)
-#define OS_GET_CDBP(rcb) ((rcb->cm_ccb->ccb_h.flags & CAM_CDB_POINTER) ? rcb->cm_ccb->csio.cdb_io.cdb_ptr : rcb->cm_ccb->csio.cdb_io.cdb_bytes)
+#define OS_GET_CDBP(rcb) \
+ ((rcb->cm_ccb->ccb_h.flags & CAM_CDB_POINTER) ? rcb->cm_ccb->csio.cdb_io.cdb_ptr : rcb->cm_ccb->csio.cdb_io.cdb_bytes)
#define GET_SCSI_BUFFLEN(rcb) (rcb->cm_ccb->csio.dxfer_len)
+#define IS_OS_SCSICMD(rcb) (rcb && !rcb->tm_req && rcb->cm_ccb)
#define OS_GET_IO_QINDEX(softs,rcb) curcpu % softs->num_op_obq
#define OS_GET_IO_RESP_QID(softs,rcb) (softs->op_ob_q[(OS_GET_IO_QINDEX(softs,rcb))].q_id)
@@ -915,7 +1050,9 @@
#define OS_GET_TMF_REQ_QINDEX OS_GET_IO_REQ_QINDEX
/* check request type */
-#define is_internal_req(rcb) (!(rcb)->cm_ccb)
+#define is_internal_req(rcb) (!(rcb->cm_ccb))
+
+#define os_io_memcpy(dest, src, len) memcpy(dest, src, len)
/* sg elements addr, len, flags */
#define OS_GET_IO_SG_COUNT(rcb) rcb->nseg
@@ -932,6 +1069,10 @@
#define SCMD_READ_16 READ_16
#define SCMD_WRITE_16 WRITE_16
+/* FreeBSD status macros */
+#define BSD_SUCCESS 0
+
+
/* Debug facility */
#define PQISRC_LOG_LEVEL 0x60
@@ -946,6 +1087,7 @@
#define PQISRC_FLAGS_DISC 0x00000010
#define PQISRC_FLAGS_WARN 0x00000020
#define PQISRC_FLAGS_ERROR 0x00000040
+#define PQISRC_FLAGS_NOTE 0x00000080
#define DBG_INIT(fmt,args...) \
@@ -1017,4 +1159,11 @@
} \
}while(0);
-#endif // _PQI_DEFINES_H
+#define DBG_NOTE(fmt,args...) \
+ do { \
+ if (logging_level & PQISRC_FLAGS_NOTE) { \
+ printf("[INFO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \
+ } \
+ }while(0);
+
+#endif /* _PQI_DEFINES_H */
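[Annotation] A usage sketch for the new DBG_NOTE() level; this assumes the driver-global logging_level mask that the macro body above consults is visible in the calling scope:

    static void
    example_enable_note_logging(void)
    {
            /* Enable the new NOTE level in addition to whatever is set. */
            logging_level |= PQISRC_FLAGS_NOTE;
            DBG_NOTE("note-level logging enabled, mask now 0x%x\n", logging_level);
    }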
diff --git a/sys/dev/smartpqi/smartpqi_discovery.c b/sys/dev/smartpqi/smartpqi_discovery.c
--- a/sys/dev/smartpqi/smartpqi_discovery.c
+++ b/sys/dev/smartpqi/smartpqi_discovery.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,8 +27,12 @@
#include "smartpqi_includes.h"
+#define MAX_RETRIES 3
+#define PQISRC_INQUIRY_TIMEOUT 30
+
/* Validate the scsi sense response code */
-static inline boolean_t pqisrc_scsi_sense_valid(const struct sense_header_scsi *sshdr)
+static inline
+boolean_t pqisrc_scsi_sense_valid(const struct sense_header_scsi *sshdr)
{
DBG_FUNC("IN\n");
@@ -41,8 +44,11 @@
return (sshdr->response_code & 0x70) == 0x70;
}
-/* Initialize target ID pool for HBA/PDs */
-void pqisrc_init_targetid_pool(pqisrc_softstate_t *softs)
+/*
+ * Initialize target ID pool for HBA/PDs.
+ */
+void
+pqisrc_init_targetid_pool(pqisrc_softstate_t *softs)
{
int i, tid = PQI_MAX_PHYSICALS + PQI_MAX_LOGICALS - 1;
@@ -52,8 +58,10 @@
softs->tid_pool.index = i - 1;
}
-int pqisrc_alloc_tid(pqisrc_softstate_t *softs)
+int
+pqisrc_alloc_tid(pqisrc_softstate_t *softs)
{
+
if(softs->tid_pool.index <= -1) {
DBG_ERR("Target ID exhausted\n");
return INVALID_ELEM;
@@ -62,19 +70,21 @@
return softs->tid_pool.tid[softs->tid_pool.index--];
}
-void pqisrc_free_tid(pqisrc_softstate_t *softs, int tid)
+void
+pqisrc_free_tid(pqisrc_softstate_t *softs, int tid)
{
- if(softs->tid_pool.index >= PQI_MAX_PHYSICALS) {
- DBG_ERR("Target ID queue is full\n");
- return;
- }
-
+ if(softs->tid_pool.index >= (PQI_MAX_PHYSICALS - 1)) {
+ DBG_ERR("Target ID queue is full\n");
+ return;
+ }
+
softs->tid_pool.index++;
softs->tid_pool.tid[softs->tid_pool.index] = tid;
}
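[Annotation] The target-ID pool managed by the two functions above is a plain LIFO stack; the change fixes the full-pool check, since an index of PQI_MAX_PHYSICALS - 1 already means the stack is full. A self-contained sketch of the same idea, with hypothetical names:

    #define EXAMPLE_POOL_SIZE 8

    struct example_tid_pool {
            int tid[EXAMPLE_POOL_SIZE];
            int index;                      /* top of stack; -1 when empty */
    };

    static int
    example_alloc_tid(struct example_tid_pool *p)
    {
            if (p->index < 0)
                    return (-1);            /* pool exhausted */
            return (p->tid[p->index--]);
    }

    static void
    example_free_tid(struct example_tid_pool *p, int tid)
    {
            if (p->index >= EXAMPLE_POOL_SIZE - 1)
                    return;                 /* pool already full */
            p->tid[++p->index] = tid;
    }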
/* Update scsi sense info to a local buffer*/
-boolean_t pqisrc_update_scsi_sense(const uint8_t *buff, int len,
+boolean_t
+pqisrc_update_scsi_sense(const uint8_t *buff, int len,
struct sense_header_scsi *header)
{
@@ -122,11 +132,12 @@
/*
* Function used to build the internal raid request and analyze the response
*/
-int pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t *request,
+int
+pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t *request,
void *buff, size_t datasize, uint8_t cmd, uint16_t vpd_page, uint8_t *scsi3addr,
raid_path_error_info_elem_t *error_info)
{
-
+
uint8_t *cdb;
int ret = PQI_STATUS_SUCCESS;
uint32_t tag = 0;
@@ -137,7 +148,7 @@
ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
rcb_t *rcb = NULL;
-
+
DBG_FUNC("IN\n");
memset(&device_mem, 0, sizeof(struct dma_mem));
@@ -149,7 +160,7 @@
device_mem.align = PQISRC_DEFAULT_DMA_ALIGN;
ret = os_dma_mem_alloc(softs, &device_mem);
-
+
if (ret) {
DBG_ERR("failed to allocate dma memory for device_mem return code %d\n", ret);
return ret;
@@ -184,6 +195,9 @@
cdb[2] = (uint8_t)vpd_page;
}
cdb[4] = (uint8_t)datasize;
+ if (softs->timeout_in_passthrough) {
+ request->timeout_in_sec = PQISRC_INQUIRY_TIMEOUT;
+ }
break;
case SA_REPORT_LOG:
case SA_REPORT_PHYS:
@@ -196,6 +210,13 @@
cdb[8] = (uint8_t)((datasize) >> 8);
cdb[9] = (uint8_t)datasize;
break;
+ case PQI_LOG_EXT_QUEUE_ENABLE:
+ request->data_direction = SOP_DATA_DIR_TO_DEVICE;
+ cdb[0] = SA_REPORT_LOG;
+ cdb[1] = (PQI_LOG_EXT_QUEUE_DEPTH_ENABLED | SA_REPORT_LOG_EXTENDED);
+ cdb[8] = (uint8_t)((datasize) >> 8);
+ cdb[9] = (uint8_t)datasize;
+ break;
case TEST_UNIT_READY:
request->data_direction = SOP_DATA_DIR_NONE;
break;
@@ -236,10 +257,11 @@
cdb[6] = cmd;
cdb[7] = (uint8_t)((datasize) << 8);
cdb[8] = (uint8_t)((datasize) >> 8);
- break;
+ break;
default:
DBG_ERR("unknown command 0x%x", cmd);
- break;
+ ret = PQI_STATUS_FAILURE;
+ return ret;
}
tag = pqisrc_get_tag(&softs->taglist);
@@ -266,7 +288,7 @@
goto err_out;
}
- ret = pqisrc_wait_on_condition(softs, rcb);
+ ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Internal RAID request timed out: cmd : 0x%c\n", cmd);
goto err_out;
@@ -278,11 +300,11 @@
}
os_dma_mem_free(softs, &device_mem);
}
-
+
ret = rcb->status;
if (ret) {
if(error_info) {
- memcpy(error_info,
+ memcpy(error_info,
rcb->error_info,
sizeof(*error_info));
@@ -291,9 +313,9 @@
ret = PQI_STATUS_SUCCESS;
}
else{
- DBG_DISC("Error!! Bus=%u Target=%u, Cmd=0x%x,"
- "Ret=%d\n", BMIC_GET_LEVEL_2_BUS(scsi3addr),
- BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
+ DBG_DISC("Error!! Bus=%u Target=%u, Cmd=0x%x,"
+ "Ret=%d\n", BMIC_GET_LEVEL_2_BUS(scsi3addr),
+ BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
cmd, ret);
ret = PQI_STATUS_FAILURE;
}
@@ -311,8 +333,8 @@
return ret;
err_out:
- DBG_ERR("Error!! Bus=%u Target=%u, Cmd=0x%x, Ret=%d\n",
- BMIC_GET_LEVEL_2_BUS(scsi3addr), BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
+ DBG_ERR("Error!! Bus=%u Target=%u, Cmd=0x%x, Ret=%d\n",
+ BMIC_GET_LEVEL_2_BUS(scsi3addr), BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
cmd, ret);
os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist, ((pqisrc_raid_req_t *)request)->request_id);
@@ -324,7 +346,8 @@
}
/* common function used to send report physical and logical luns cmnds*/
-static int pqisrc_report_luns(pqisrc_softstate_t *softs, uint8_t cmd,
+static int
+pqisrc_report_luns(pqisrc_softstate_t *softs, uint8_t cmd,
void *buff, size_t buf_len)
{
int ret;
@@ -333,7 +356,7 @@
DBG_FUNC("IN\n");
memset(&request, 0, sizeof(request));
- ret = pqisrc_build_send_raid_request(softs, &request, buff,
+ ret = pqisrc_build_send_raid_request(softs, &request, buff,
buf_len, cmd, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
DBG_FUNC("OUT\n");
@@ -342,7 +365,8 @@
}
/* subroutine used to get physical and logical luns of the device */
-static int pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd,
+int
+pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd,
reportlun_data_ext_t **buff, size_t *data_length)
{
int ret;
@@ -373,7 +397,7 @@
DBG_ERR("failed to allocate memory for lun_data\n");
return PQI_STATUS_FAILURE;
}
-
+
if (list_len == 0) {
DBG_DISC("list_len is 0\n");
memcpy(lun_data, &report_lun_header, sizeof(report_lun_header));
@@ -406,12 +430,79 @@
return ret;
}
+/*
+ * Function used to grab queue depth ext lun data for logical devices
+ */
+static int
+pqisrc_get_queue_lun_list(pqisrc_softstate_t *softs, uint8_t cmd,
+ reportlun_queue_depth_data_t **buff, size_t *data_length)
+{
+ int ret;
+ size_t list_len;
+ size_t data_len;
+ size_t new_lun_list_length;
+ reportlun_queue_depth_data_t *lun_data;
+ reportlun_header_t report_lun_header;
+
+ DBG_FUNC("IN\n");
+
+ ret = pqisrc_report_luns(softs, cmd, &report_lun_header,
+ sizeof(report_lun_header));
+
+ if (ret) {
+ DBG_ERR("failed return code: %d\n", ret);
+ return ret;
+ }
+ list_len = BE_32(report_lun_header.list_length);
+retry:
+ data_len = sizeof(reportlun_header_t) + list_len;
+ *data_length = data_len;
+ lun_data = os_mem_alloc(softs, data_len);
+
+ if (!lun_data) {
+ DBG_ERR("failed to allocate memory for lun_data\n");
+ return PQI_STATUS_FAILURE;
+ }
+
+ if (list_len == 0) {
+ DBG_INFO("list_len is 0\n");
+ memcpy(lun_data, &report_lun_header, sizeof(report_lun_header));
+ goto out;
+ }
+ ret = pqisrc_report_luns(softs, cmd, lun_data, data_len);
+
+ if (ret) {
+ DBG_ERR("error\n");
+ goto error;
+ }
+ new_lun_list_length = BE_32(lun_data->header.list_length);
+
+ if (new_lun_list_length > list_len) {
+ list_len = new_lun_list_length;
+ os_mem_free(softs, (void *)lun_data, data_len);
+ goto retry;
+ }
+
+out:
+ *buff = lun_data;
+ DBG_FUNC("OUT\n");
+ return 0;
+
+error:
+ os_mem_free(softs, (void *)lun_data, data_len);
+ DBG_ERR("FAILED\n");
+ return ret;
+}
+
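[Annotation] The function above uses a two-phase read: fetch only the header to learn the list length, allocate a buffer, read the full list, and start over if the list grew between the two reads (the RAID-map fetch later in this patch applies the same pattern). A condensed sketch with hypothetical names:

    static void *
    example_read_list(size_t *lenp)
    {
            example_header_t hdr, *buf;
            size_t list_len, data_len;

            example_report(&hdr, sizeof(hdr));      /* phase 1: header only */
            list_len = BE_32(hdr.list_length);
    retry:
            data_len = sizeof(hdr) + list_len;
            buf = malloc(data_len, M_TEMP, M_WAITOK | M_ZERO);
            example_report(buf, data_len);          /* phase 2: full list */
            if (BE_32(buf->list_length) > list_len) {
                    /* The list grew under us; retry with the larger size. */
                    list_len = BE_32(buf->list_length);
                    free(buf, M_TEMP);
                    goto retry;
            }
            *lenp = data_len;
            return (buf);
    }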
/*
* Function used to get physical and logical device list
*/
-static int pqisrc_get_phys_log_device_list(pqisrc_softstate_t *softs,
+static int
+pqisrc_get_phys_log_device_list(pqisrc_softstate_t *softs,
reportlun_data_ext_t **physical_dev_list,
- reportlun_data_ext_t **logical_dev_list,
+ reportlun_data_ext_t **logical_dev_list,
+ reportlun_queue_depth_data_t **queue_dev_list,
+ size_t *queue_data_length,
size_t *phys_data_length,
size_t *log_data_length)
{
@@ -422,7 +513,6 @@
reportlun_data_ext_t *local_logdev_list;
reportlun_data_ext_t *logdev_data;
reportlun_header_t report_lun_header;
-
DBG_FUNC("IN\n");
@@ -438,6 +528,11 @@
return ret;
}
+ ret = pqisrc_get_queue_lun_list(softs, PQI_LOG_EXT_QUEUE_ENABLE, queue_dev_list, queue_data_length);
+ if (ret) {
+ DBG_ERR("report logical LUNs failed");
+ return ret;
+ }
logdev_data = *logical_dev_list;
@@ -480,7 +575,8 @@
}
/* Subroutine used to set Bus-Target-Lun for the requested device */
-static inline void pqisrc_set_btl(pqi_scsi_dev_t *device,
+static inline void
+pqisrc_set_btl(pqi_scsi_dev_t *device,
int bus, int target, int lun)
{
DBG_FUNC("IN\n");
@@ -492,7 +588,8 @@
DBG_FUNC("OUT\n");
}
-inline boolean_t pqisrc_is_external_raid_device(pqi_scsi_dev_t *device)
+inline
+boolean_t pqisrc_is_external_raid_device(pqi_scsi_dev_t *device)
{
return device->is_external_raid_device;
}
@@ -503,7 +600,8 @@
}
/* Function used to assign Bus-Target-Lun for the requested device */
-static void pqisrc_assign_btl(pqi_scsi_dev_t *device)
+static void
+pqisrc_assign_btl(pqi_scsi_dev_t *device)
{
uint8_t *scsi3addr;
uint32_t lunid;
@@ -517,7 +615,7 @@
if (pqisrc_is_hba_lunid(scsi3addr)) {
/* The specified device is the controller. */
- pqisrc_set_btl(device, PQI_HBA_BUS, PQI_CTLR_INDEX, lunid & 0x3fff);
+ pqisrc_set_btl(device, PQI_HBA_BUS, PQI_CTLR_INDEX, (lunid & 0x3fff) + 1);
device->target_lun_valid = true;
return;
}
@@ -530,8 +628,8 @@
lun = lunid & 0xff;
} else {
bus = PQI_RAID_VOLUME_BUS;
- lun = 0;
- target = lunid & 0x3fff;
+ lun = (lunid & 0x3fff) + 1;
+ target = 0;
}
pqisrc_set_btl(device, bus, target, lun);
device->target_lun_valid = true;
@@ -542,7 +640,8 @@
}
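[Annotation] A worked example of the new addressing: logical volumes now all land on target 0 with LUNs starting at 1, and the controller LUN likewise shifts from 0 to 1, so LUN 0 is never exposed.

    /*
     * Illustrative mapping under the new scheme:
     *   controller (RAID_CTLR_LUNID):  PQI_HBA_BUS : PQI_CTLR_INDEX : 1
     *   logical volume, lunid 0x0000:  PQI_RAID_VOLUME_BUS : 0 : 1
     *   logical volume, lunid 0x0001:  PQI_RAID_VOLUME_BUS : 0 : 2
     */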
/* Build and send the internal INQUIRY command to particular device */
-static int pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs,
+int
+pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs,
uint8_t *scsi3addr, uint16_t vpd_page, uint8_t *buff, int buf_len)
{
int ret = PQI_STATUS_SUCCESS;
@@ -552,15 +651,17 @@
DBG_FUNC("IN\n");
memset(&request, 0, sizeof(request));
- ret = pqisrc_build_send_raid_request(softs, &request, buff, buf_len,
+ ret = pqisrc_build_send_raid_request(softs, &request, buff, buf_len,
SA_INQUIRY, vpd_page, scsi3addr, &error_info);
DBG_FUNC("OUT\n");
return ret;
}
+#if 0
/* Function used to parse the sense information from response */
-static void pqisrc_fetch_sense_info(const uint8_t *sense_data,
+static void
+pqisrc_fetch_sense_info(const uint8_t *sense_data,
unsigned sense_data_length, uint8_t *sense_key, uint8_t *asc, uint8_t *ascq)
{
struct sense_header_scsi header;
@@ -581,133 +682,65 @@
DBG_FUNC("OUT\n");
}
+#endif
-/* Function used to validate volume offline status */
-static uint8_t pqisrc_get_volume_offline_status(pqisrc_softstate_t *softs,
- uint8_t *scsi3addr)
+/* Determine logical volume status from the VPD buffer. */
+static void
+pqisrc_get_dev_vol_status(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device)
{
- int ret = PQI_STATUS_SUCCESS;
+ int ret;
uint8_t status = SA_LV_STATUS_VPD_UNSUPPORTED;
- uint8_t size;
- uint8_t *buff = NULL;
+ uint8_t vpd_size = sizeof(vpd_volume_status);
+ uint8_t offline = true;
+ size_t page_length;
+ vpd_volume_status *vpd;
DBG_FUNC("IN\n");
-
- buff = os_mem_alloc(softs, 64);
- if (!buff)
- return PQI_STATUS_FAILURE;
+
+ vpd = os_mem_alloc(softs, vpd_size);
+ if (vpd == NULL)
+ goto out;
/* Get the size of the VPD return buff. */
- ret = pqisrc_send_scsi_inquiry(softs, scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
- buff, SCSI_VPD_HEADER_LENGTH);
+ ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
+ (uint8_t *)vpd, vpd_size);
- if (ret)
+ if (ret) {
+ DBG_WARN("Inquiry returned failed status\n");
goto out;
+ }
- size = buff[3];
+ if (vpd->page_code != SA_VPD_LV_STATUS) {
+ DBG_WARN("Returned invalid buffer\n");
+ goto out;
+ }
- /* Now get the whole VPD buff. */
- ret = pqisrc_send_scsi_inquiry(softs, scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
- buff, size + SCSI_VPD_HEADER_LENGTH);
- if (ret)
+ page_length = offsetof(vpd_volume_status, volume_status) + vpd->page_length;
+ if (page_length < vpd_size)
goto out;
- status = buff[4];
+ status = vpd->volume_status;
+	offline = (vpd->flags & SA_LV_FLAGS_NO_HOST_IO) != 0;
out:
- os_mem_free(softs, (char *)buff, 64);
- DBG_FUNC("OUT\n");
-
- return status;
-}
-
-
-/* Determine offline status of a volume. Returns appropriate SA_LV_* status.*/
-static uint8_t pqisrc_get_dev_vol_status(pqisrc_softstate_t *softs,
- uint8_t *scsi3addr)
-{
- int ret = PQI_STATUS_SUCCESS;
- uint8_t *sense_data;
- unsigned sense_data_len;
- uint8_t sense_key;
- uint8_t asc;
- uint8_t ascq;
- uint8_t off_status;
- uint8_t scsi_status;
- pqisrc_raid_req_t request;
- raid_path_error_info_elem_t error_info;
-
- DBG_FUNC("IN\n");
-
- memset(&request, 0, sizeof(request));
- ret = pqisrc_build_send_raid_request(softs, &request, NULL, 0,
- TEST_UNIT_READY, 0, scsi3addr, &error_info);
-
- if (ret)
- goto error;
- sense_data = error_info.data;
- sense_data_len = LE_16(error_info.sense_data_len);
-
- if (sense_data_len > sizeof(error_info.data))
- sense_data_len = sizeof(error_info.data);
-
- pqisrc_fetch_sense_info(sense_data, sense_data_len, &sense_key, &asc,
- &ascq);
-
- scsi_status = error_info.status;
-
- /* scsi status: "CHECK CONDN" / SK: "not ready" ? */
- if (scsi_status != 2 ||
- sense_key != 2 ||
- asc != ASC_LUN_NOT_READY) {
- return SA_LV_OK;
- }
+ device->volume_offline = offline;
+ device->volume_status = status;
- /* Determine the reason for not ready state. */
- off_status = pqisrc_get_volume_offline_status(softs, scsi3addr);
-
- DBG_DISC("offline_status 0x%x\n", off_status);
-
- /* Keep volume offline in certain cases. */
- switch (off_status) {
- case SA_LV_UNDERGOING_ERASE:
- case SA_LV_NOT_AVAILABLE:
- case SA_LV_UNDERGOING_RPI:
- case SA_LV_PENDING_RPI:
- case SA_LV_ENCRYPTED_NO_KEY:
- case SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
- case SA_LV_UNDERGOING_ENCRYPTION:
- case SA_LV_UNDERGOING_ENCRYPTION_REKEYING:
- case SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
- return off_status;
- case SA_LV_STATUS_VPD_UNSUPPORTED:
- /*
- * If the VPD status page isn't available,
- * use ASC/ASCQ to determine state.
- */
- if (ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS ||
- ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)
- return off_status;
- break;
- }
+ os_mem_free(softs, (char *)vpd, vpd_size);
DBG_FUNC("OUT\n");
- return SA_LV_OK;
-
-error:
- return SA_LV_STATUS_VPD_UNSUPPORTED;
+ return;
}
/* Validate the RAID map parameters */
-static int pqisrc_raid_map_validation(pqisrc_softstate_t *softs,
+static int
+pqisrc_raid_map_validation(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device, pqisrc_raid_map_t *raid_map)
{
char *error_msg;
uint32_t raidmap_size;
uint32_t r5or6_blocks_per_row;
- unsigned phys_dev_num;
- unsigned num_raidmap_entries;
DBG_FUNC("IN\n");
@@ -717,21 +750,11 @@
goto error;
}
- if (raidmap_size > sizeof(*raid_map)) {
- error_msg = "RAID map too large\n";
- goto error;
- }
-
+#if 0
phys_dev_num = LE_16(raid_map->layout_map_count) *
- (LE_16(raid_map->data_disks_per_row) +
- LE_16(raid_map->metadata_disks_per_row));
- num_raidmap_entries = phys_dev_num *
- LE_16(raid_map->row_cnt);
-
- if (num_raidmap_entries > RAID_MAP_MAX_ENTRIES) {
- error_msg = "invalid number of map entries in RAID map\n";
- goto error;
- }
+ (LE_16(raid_map->data_disks_per_row) +
+ LE_16(raid_map->metadata_disks_per_row));
+#endif
if (device->raid_level == SA_RAID_1) {
if (LE_16(raid_map->layout_map_count) != 2) {
@@ -740,7 +763,7 @@
}
} else if (device->raid_level == SA_RAID_ADM) {
if (LE_16(raid_map->layout_map_count) != 3) {
- error_msg = "invalid RAID-1(ADM) map\n";
+ error_msg = "invalid RAID-1(triple) map\n";
goto error;
}
} else if ((device->raid_level == SA_RAID_5 ||
@@ -761,15 +784,17 @@
return 0;
error:
- DBG_ERR("%s\n", error_msg);
+ DBG_NOTE("%s\n", error_msg);
return PQI_STATUS_FAILURE;
}
/* Get device raidmap for the requested device */
-static int pqisrc_get_device_raidmap(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device)
+static int
+pqisrc_get_device_raidmap(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
int ret = PQI_STATUS_SUCCESS;
+ int raidmap_size;
+
pqisrc_raid_req_t request;
pqisrc_raid_map_t *raid_map;
@@ -780,7 +805,7 @@
return PQI_STATUS_FAILURE;
memset(&request, 0, sizeof(request));
- ret = pqisrc_build_send_raid_request(softs, &request, raid_map, sizeof(*raid_map),
+ ret = pqisrc_build_send_raid_request(softs, &request, raid_map, sizeof(*raid_map),
SA_GET_RAID_MAP, 0, device->scsi3addr, NULL);
if (ret) {
@@ -788,9 +813,33 @@
goto err_out;
}
+ raidmap_size = LE_32(raid_map->structure_size);
+ if (raidmap_size > sizeof(*raid_map)) {
+ DBG_NOTE("Raid map is larger than 1024 entries, request once again");
+ os_mem_free(softs, (char*)raid_map, sizeof(*raid_map));
+
+ raid_map = os_mem_alloc(softs, raidmap_size);
+ if (!raid_map)
+ return PQI_STATUS_FAILURE;
+ memset(&request, 0, sizeof(request));
+
+ ret = pqisrc_build_send_raid_request(softs, &request, raid_map, raidmap_size,
+ SA_GET_RAID_MAP, 0, device->scsi3addr, NULL);
+ if (ret) {
+ DBG_ERR("error in build send raid req ret=%d\n", ret);
+ goto err_out;
+ }
+
+ if(LE_32(raid_map->structure_size) != raidmap_size) {
+ DBG_WARN("Expected raid map size %d bytes and got %d bytes\n",
+				raidmap_size, LE_32(raid_map->structure_size));
+ goto err_out;
+ }
+ }
+
ret = pqisrc_raid_map_validation(softs, device, raid_map);
if (ret) {
- DBG_ERR("error in raid map validation ret=%d\n", ret);
+ DBG_NOTE("error in raid map validation ret=%d\n", ret);
goto err_out;
}
@@ -805,7 +854,8 @@
}
/* Get device ioaccel_status to validate the type of device */
-static void pqisrc_get_dev_ioaccel_status(pqisrc_softstate_t *softs,
+static void
+pqisrc_get_dev_ioaccel_status(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device)
{
int ret = PQI_STATUS_SUCCESS;
@@ -824,7 +874,7 @@
DBG_ERR("error in send scsi inquiry ret=%d\n", ret);
goto err_out;
}
-
+
ioaccel_status = buff[IOACCEL_STATUS_BYTE];
device->offload_config =
!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
@@ -835,8 +885,8 @@
if (pqisrc_get_device_raidmap(softs, device))
device->offload_enabled_pending = false;
}
-
- DBG_DISC("offload_config: 0x%x offload_enabled_pending: 0x%x \n",
+
+ DBG_DISC("offload_config: 0x%x offload_enabled_pending: 0x%x \n",
device->offload_config, device->offload_enabled_pending);
err_out:
@@ -845,8 +895,8 @@
}
/* Get RAID level of requested device */
-static void pqisrc_get_dev_raid_level(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device)
+static void
+pqisrc_get_dev_raid_level(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
uint8_t raid_level;
uint8_t *buff;
@@ -874,11 +924,12 @@
}
/* Parse the inquiry response and determine the type of device */
-static int pqisrc_get_dev_data(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device)
+static int
+pqisrc_get_dev_data(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
int ret = PQI_STATUS_SUCCESS;
uint8_t *inq_buff;
+ int retry = MAX_RETRIES;
DBG_FUNC("IN\n");
@@ -886,10 +937,15 @@
if (!inq_buff)
return PQI_STATUS_FAILURE;
- /* Send an inquiry to the device to see what it is. */
- ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, 0, inq_buff,
- OBDR_TAPE_INQ_SIZE);
- if (ret)
+ while(retry--) {
+ /* Send an inquiry to the device to see what it is. */
+ ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, 0, inq_buff,
+ OBDR_TAPE_INQ_SIZE);
+ if (!ret)
+ break;
+ DBG_WARN("Retrying inquiry !!!\n");
+ }
+	if (ret)
goto err_out;
pqisrc_sanitize_inquiry_string(&inq_buff[8], 8);
pqisrc_sanitize_inquiry_string(&inq_buff[16], 16);
@@ -899,20 +955,18 @@
sizeof(device->vendor));
memcpy(device->model, &inq_buff[16],
sizeof(device->model));
- DBG_DISC("DEV_TYPE: %x VENDOR: %s MODEL: %s\n", device->devtype, device->vendor, device->model);
+ DBG_DISC("DEV_TYPE: %x VENDOR: %.8s MODEL: %.16s\n", device->devtype, device->vendor, device->model);
if (pqisrc_is_logical_device(device) && device->devtype == DISK_DEVICE) {
if (pqisrc_is_external_raid_device(device)) {
device->raid_level = SA_RAID_UNKNOWN;
device->volume_status = SA_LV_OK;
device->volume_offline = false;
- }
+ }
else {
pqisrc_get_dev_raid_level(softs, device);
pqisrc_get_dev_ioaccel_status(softs, device);
- device->volume_status = pqisrc_get_dev_vol_status(softs,
- device->scsi3addr);
- device->volume_offline = device->volume_status != SA_LV_OK;
+ pqisrc_get_dev_vol_status(softs, device);
}
}
@@ -934,16 +988,16 @@
* BMIC (Basic Management And Interface Commands) command
* to get the controller identify params
*/
-static int pqisrc_identify_ctrl(pqisrc_softstate_t *softs,
- bmic_ident_ctrl_t *buff)
+static int
+pqisrc_identify_ctrl(pqisrc_softstate_t *softs, bmic_ident_ctrl_t *buff)
{
int ret = PQI_STATUS_SUCCESS;
pqisrc_raid_req_t request;
DBG_FUNC("IN\n");
- memset(&request, 0, sizeof(request));
- ret = pqisrc_build_send_raid_request(softs, &request, buff, sizeof(*buff),
+ memset(&request, 0, sizeof(request));
+ ret = pqisrc_build_send_raid_request(softs, &request, buff, sizeof(*buff),
BMIC_IDENTIFY_CONTROLLER, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
DBG_FUNC("OUT\n");
@@ -951,7 +1005,8 @@
}
/* Get the adapter FW version using BMIC_IDENTIFY_CONTROLLER */
-int pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *softs)
+int
+pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
bmic_ident_ctrl_t *identify_ctrl;
@@ -969,7 +1024,7 @@
ret = pqisrc_identify_ctrl(softs, identify_ctrl);
if (ret)
goto out;
-
+
softs->fw_build_number = identify_ctrl->fw_build_number;
memcpy(softs->fw_version, identify_ctrl->fw_version,
sizeof(identify_ctrl->fw_version));
@@ -980,13 +1035,14 @@
"-%u", identify_ctrl->fw_build_number);
out:
os_mem_free(softs, (char *)identify_ctrl, sizeof(*identify_ctrl));
- DBG_INIT("Firmware version: %s Firmware build number: %d\n", softs->fw_version, softs->fw_build_number);
+ DBG_NOTE("Firmware version: %s Firmware build number: %d\n", softs->fw_version, softs->fw_build_number);
DBG_FUNC("OUT\n");
return ret;
}
/* BMIC command to determine scsi device identify params */
-static int pqisrc_identify_physical_disk(pqisrc_softstate_t *softs,
+static int
+pqisrc_identify_physical_disk(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device,
bmic_ident_physdev_t *buff,
int buf_len)
@@ -998,12 +1054,12 @@
DBG_FUNC("IN\n");
- memset(&request, 0, sizeof(request));
+ memset(&request, 0, sizeof(request));
bmic_device_index = BMIC_GET_DRIVE_NUMBER(device->scsi3addr);
request.cdb[2] = (uint8_t)bmic_device_index;
request.cdb[9] = (uint8_t)(bmic_device_index >> 8);
- ret = pqisrc_build_send_raid_request(softs, &request, buff, buf_len,
+ ret = pqisrc_build_send_raid_request(softs, &request, buff, buf_len,
BMIC_IDENTIFY_PHYSICAL_DEVICE, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
DBG_FUNC("OUT\n");
return ret;
@@ -1013,7 +1069,8 @@
* Function used to get the scsi device information using one of BMIC
* BMIC_IDENTIFY_PHYSICAL_DEVICE
*/
-static void pqisrc_get_physical_device_info(pqisrc_softstate_t *softs,
+static void
+pqisrc_get_physical_device_info(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device,
bmic_ident_physdev_t *id_phys)
{
@@ -1048,9 +1105,9 @@
/* Function used to find the entry of the device in a list */
-static device_status_t pqisrc_scsi_find_entry(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device_to_find,
- pqi_scsi_dev_t **same_device)
+static
+device_status_t pqisrc_scsi_find_entry(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device_to_find, pqi_scsi_dev_t **same_device)
{
pqi_scsi_dev_t *device;
int i,j;
@@ -1079,9 +1136,9 @@
/* Update the newly added devices as existed device */
-static void pqisrc_exist_device_update(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device_exist,
- pqi_scsi_dev_t *new_device)
+static void
+pqisrc_exist_device_update(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device_exist, pqi_scsi_dev_t *new_device)
{
DBG_FUNC("IN\n");
device_exist->expose_device = new_device->expose_device;
@@ -1092,6 +1149,13 @@
device_exist->is_physical_device = new_device->is_physical_device;
device_exist->is_external_raid_device =
new_device->is_external_raid_device;
+
+ if ((device_exist->volume_status == SA_LV_QUEUED_FOR_EXPANSION ||
+ device_exist->volume_status == SA_LV_UNDERGOING_EXPANSION) &&
+ new_device->volume_status == SA_LV_OK) {
+ device_exist->scsi_rescan = true;
+ }
+
device_exist->sas_address = new_device->sas_address;
device_exist->raid_level = new_device->raid_level;
device_exist->queue_depth = new_device->queue_depth;
@@ -1105,7 +1169,6 @@
memcpy(device_exist->phys_connector, new_device->phys_connector,
sizeof(device_exist->phys_connector));
device_exist->offload_config = new_device->offload_config;
- device_exist->offload_enabled = false;
device_exist->offload_enabled_pending =
new_device->offload_enabled_pending;
device_exist->offload_to_mirror = 0;
@@ -1120,12 +1183,13 @@
}
/* Validate the ioaccel_handle for a newly added device */
-static pqi_scsi_dev_t *pqisrc_identify_device_via_ioaccel(
+static
+pqi_scsi_dev_t *pqisrc_identify_device_via_ioaccel(
pqisrc_softstate_t *softs, uint32_t ioaccel_handle)
{
pqi_scsi_dev_t *device;
int i,j;
- DBG_FUNC("IN\n");
+ DBG_FUNC("IN\n");
for(i = 0; i < PQI_MAX_DEVICES; i++) {
for(j = 0; j < PQI_MAX_MULTILUN; j++) {
if(softs->device_list[i][j] == NULL)
@@ -1145,7 +1209,8 @@
}
/* Get the scsi device queue depth */
-static void pqisrc_update_log_dev_qdepth(pqisrc_softstate_t *softs)
+static void
+pqisrc_update_log_dev_qdepth(pqisrc_softstate_t *softs)
{
unsigned i;
unsigned phys_dev_num;
@@ -1209,16 +1274,17 @@
}
/* Function used to add a scsi device to OS scsi subsystem */
-static int pqisrc_add_device(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device)
+static int
+pqisrc_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
DBG_FUNC("IN\n");
- DBG_DISC("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
+ DBG_NOTE("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
device->invalid = false;
if(device->expose_device) {
+ pqisrc_init_device_active_io(softs, device);
/* TBD: Call OS upper layer function to add the device entry */
os_add_device(softs,device);
}
@@ -1228,26 +1294,36 @@
}
/* Function used to remove a scsi device from OS scsi subsystem */
-void pqisrc_remove_device(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device)
+void
+pqisrc_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
DBG_FUNC("IN\n");
- DBG_DISC("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
+ DBG_NOTE("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
- /* TBD: Call OS upper layer function to remove the device entry */
device->invalid = true;
+ if (device->expose_device == false) {
+		/* Masked physical devices are not exposed to the storage
+		 * stack, so free the masked device resources (device
+		 * memory, target ID, etc.) here.
+		 */
+		DBG_NOTE("Freeing masked device resources.\n");
+ pqisrc_free_device(softs,device);
+ return;
+ }
+ /* Wait for device outstanding Io's */
+ pqisrc_wait_for_device_commands_to_complete(softs, device);
+ /* Call OS upper layer function to remove the exposed device entry */
os_remove_device(softs,device);
DBG_FUNC("OUT\n");
}
-
/*
* When exposing new device to OS fails then adjst list according to the
* mid scsi list
*/
-static void pqisrc_adjust_list(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device)
+static void
+pqisrc_adjust_list(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
DBG_FUNC("IN\n");
@@ -1265,8 +1341,8 @@
}
/* Debug routine used to display the RAID volume status of the device */
-static void pqisrc_display_volume_status(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device)
+static void
+pqisrc_display_volume_status(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
char *status;
@@ -1311,6 +1387,29 @@
case SA_LV_STATUS_VPD_UNSUPPORTED:
status = "Volume status is not available through vital product data pages.";
break;
+ case SA_LV_UNDERGOING_EXPANSION:
+ status = "Volume undergoing expansion";
+ break;
+ case SA_LV_QUEUED_FOR_EXPANSION:
+ status = "Volume queued for expansion";
+ case SA_LV_EJECTED:
+ status = "Volume ejected";
+ break;
+ case SA_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
+ status = "Volume has wrong physical drive replaced";
+ break;
+ case SA_LV_DISABLED_SCSI_ID_CONFLICT:
+ status = "Volume disabled scsi id conflict";
+ break;
+ case SA_LV_HARDWARE_HAS_OVERHEATED:
+ status = "Volume hardware has over heated";
+ break;
+ case SA_LV_HARDWARE_OVERHEATING:
+ status = "Volume hardware over heating";
+ break;
+ case SA_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
+ status = "Volume physical drive connection problem";
+ break;
default:
status = "Volume is in an unknown state.";
break;
@@ -1321,7 +1420,8 @@
DBG_FUNC("OUT\n");
}
-void pqisrc_device_mem_free(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+void
+pqisrc_device_mem_free(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
DBG_FUNC("IN\n");
if (!device)
@@ -1331,25 +1431,45 @@
}
os_mem_free(softs, (char *)device,sizeof(*device));
DBG_FUNC("OUT\n");
-
+
}
/* OS should call this function to free the scsi device */
-void pqisrc_free_device(pqisrc_softstate_t * softs,pqi_scsi_dev_t *device)
+void
+pqisrc_free_device(pqisrc_softstate_t * softs,pqi_scsi_dev_t *device)
{
+ rcb_t *rcb;
+ int i;
- OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
- if (!pqisrc_is_logical_device(device)) {
- pqisrc_free_tid(softs,device->target);
+ /* Clear the "device" field in the rcb.
+	 * A response arriving after device removal must not access this field.
+ */
+ for(i = 1; i <= softs->max_outstanding_io; i++)
+ {
+ rcb = &softs->rcb[i];
+ if(rcb->dvp == device) {
+ DBG_WARN("Pending requests for the removing device\n");
+ rcb->dvp = NULL;
}
- pqisrc_device_mem_free(softs, device);
- OS_RELEASE_SPINLOCK(&softs->devlist_lock);
+ }
-}
+ OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
+ if (!pqisrc_is_logical_device(device)) {
+ pqisrc_free_tid(softs,device->target);
+ }
+
+ softs->device_list[device->target][device->lun] = NULL;
+
+ pqisrc_device_mem_free(softs, device);
+
+ OS_RELEASE_SPINLOCK(&softs->devlist_lock);
+
+}
/* Update the newly added devices to the device list */
-static void pqisrc_update_device_list(pqisrc_softstate_t *softs,
+static void
+pqisrc_update_device_list(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *new_device_list[], int num_new_devices)
{
int ret;
@@ -1362,6 +1482,7 @@
int nadded = 0, nremoved = 0;
int j;
int tid = 0;
+ boolean_t driver_queue_depth_flag = false;
DBG_FUNC("IN\n");
@@ -1372,9 +1493,9 @@
DBG_WARN("Out of memory \n");
goto free_and_out;
}
-
+
OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
-
+
for(i = 0; i < PQI_MAX_DEVICES; i++) {
for(j = 0; j < PQI_MAX_MULTILUN; j++) {
if(softs->device_list[i][j] == NULL)
@@ -1430,7 +1551,7 @@
continue;
if (device->volume_offline)
continue;
-
+
/* physical device */
if (!pqisrc_is_logical_device(device)) {
tid = pqisrc_alloc_tid(softs);
@@ -1438,7 +1559,16 @@
pqisrc_set_btl(device, PQI_PHYSICAL_DEVICE_BUS, tid, 0);
}
- softs->device_list[device->target][device->lun] = device;
+ /* This is not expected. We may lose the reference to the old device entry.
+	 * If the target and LUN IDs are the same, it should have been detected
+	 * as an existing device, not as a new one
+ */
+ if(softs->device_list[device->target][device->lun] != NULL) {
+ DBG_WARN("Overwriting T : %d L :%d\n",device->target,device->lun);
+ }
+
+ softs->device_list[device->target][device->lun] = device;
+
DBG_DISC("Added device %p at B : %d T : %d L : %d\n",device,
device->bus,device->target,device->lun);
/* To prevent this entry from being freed later. */
@@ -1447,8 +1577,7 @@
nadded++;
}
- pqisrc_update_log_dev_qdepth(softs);
-
+
for(i = 0; i < PQI_MAX_DEVICES; i++) {
for(j = 0; j < PQI_MAX_MULTILUN; j++) {
if(softs->device_list[i][j] == NULL)
@@ -1464,8 +1593,8 @@
device = removed[i];
if (device == NULL)
continue;
- pqisrc_remove_device(softs, device);
pqisrc_display_device_info(softs, "removed", device);
+ pqisrc_remove_device(softs, device);
}
@@ -1483,8 +1612,20 @@
device->advertised_queue_depth = device->queue_depth;
/* TBD: Call OS upper layer function to change device Q depth */
}
+ if (device->firmware_queue_depth_set == false)
+ driver_queue_depth_flag = true;
+ if (device->scsi_rescan)
+ os_rescan_target(softs, device);
}
}
+ /*
+	 * If the firmware queue depth is corrupt or unusable,
+	 * fall back to the driver method to re-calculate the
+	 * queue depth for all logical devices
+ */
+ if (driver_queue_depth_flag)
+ pqisrc_update_log_dev_qdepth(softs);
+
for(i = 0; i < nadded; i++) {
device = added[i];
if (device->expose_device) {
@@ -1517,10 +1658,10 @@
free_and_out:
if (added)
os_mem_free(softs, (char *)added,
- sizeof(*added) * PQI_MAX_DEVICES);
+ sizeof(*added) * PQI_MAX_DEVICES);
if (removed)
os_mem_free(softs, (char *)removed,
- sizeof(*removed) * PQI_MAX_DEVICES);
+ sizeof(*removed) * PQI_MAX_DEVICES);
DBG_FUNC("OUT\n");
}
@@ -1529,7 +1670,8 @@
* Let the Adapter know about driver version using one of BMIC
* BMIC_WRITE_HOST_WELLNESS
*/
-int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs)
+int
+pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs)
{
int rval = PQI_STATUS_SUCCESS;
struct bmic_host_wellness_driver_version *host_wellness_driver_ver;
@@ -1538,7 +1680,7 @@
DBG_FUNC("IN\n");
- memset(&request, 0, sizeof(request));
+ memset(&request, 0, sizeof(request));
data_length = sizeof(*host_wellness_driver_ver);
host_wellness_driver_ver = os_mem_alloc(softs, data_length);
@@ -1562,6 +1704,7 @@
} else {
DBG_DISC("OS name length(%lu) is longer than buffer of driver_version\n",
strlen(softs->os_name));
+
}
host_wellness_driver_ver->driver_version[sizeof(host_wellness_driver_ver->driver_version) - 1] = '\0';
host_wellness_driver_ver->end_tag[0] = 'Z';
@@ -1571,16 +1714,17 @@
BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
os_mem_free(softs, (char *)host_wellness_driver_ver, data_length);
-
+
DBG_FUNC("OUT");
return rval;
}
-/*
+/*
* Write current RTC time from host to the adapter using
* BMIC_WRITE_HOST_WELLNESS
*/
-int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs)
+int
+pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs)
{
int rval = PQI_STATUS_SUCCESS;
struct bmic_host_wellness_time *host_wellness_time;
@@ -1589,7 +1733,7 @@
DBG_FUNC("IN\n");
- memset(&request, 0, sizeof(request));
+ memset(&request, 0, sizeof(request));
data_length = sizeof(*host_wellness_time);
host_wellness_time = os_mem_alloc(softs, data_length);
@@ -1604,7 +1748,7 @@
host_wellness_time->start_tag[3] = '>';
host_wellness_time->time_tag[0] = 'T';
host_wellness_time->time_tag[1] = 'D';
- host_wellness_time->time_length = LE_16(offsetof(struct bmic_host_wellness_time, time_length) -
+ host_wellness_time->time_length = LE_16(offsetof(struct bmic_host_wellness_time, time_length) -
offsetof(struct bmic_host_wellness_time, century));
os_get_time(host_wellness_time);
@@ -1613,10 +1757,10 @@
host_wellness_time->dont_write_tag[1] = 'W';
host_wellness_time->end_tag[0] = 'Z';
host_wellness_time->end_tag[1] = 'Z';
-
+
rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_time,data_length,
BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
-
+
os_mem_free(softs, (char *)host_wellness_time, data_length);
DBG_FUNC("OUT");
@@ -1627,21 +1771,27 @@
* Function used to perform a rescan of scsi devices
* for any config change events
*/
-int pqisrc_scan_devices(pqisrc_softstate_t *softs)
+int
+pqisrc_scan_devices(pqisrc_softstate_t *softs)
{
boolean_t is_physical_device;
int ret = PQI_STATUS_FAILURE;
int i;
int new_dev_cnt;
int phy_log_dev_cnt;
+ size_t queue_log_data_length;
uint8_t *scsi3addr;
+ uint8_t multiplier;
+ uint16_t qdepth;
uint32_t physical_cnt;
uint32_t logical_cnt;
+ uint32_t logical_queue_cnt;
uint32_t ndev_allocated = 0;
size_t phys_data_length, log_data_length;
reportlun_data_ext_t *physical_dev_list = NULL;
reportlun_data_ext_t *logical_dev_list = NULL;
reportlun_ext_entry_t *lun_ext_entry = NULL;
+ reportlun_queue_depth_data_t *logical_queue_dev_list = NULL;
bmic_ident_physdev_t *bmic_phy_info = NULL;
pqi_scsi_dev_t **new_device_list = NULL;
pqi_scsi_dev_t *device = NULL;
@@ -1650,18 +1800,23 @@
DBG_FUNC("IN\n");
ret = pqisrc_get_phys_log_device_list(softs, &physical_dev_list, &logical_dev_list,
- &phys_data_length, &log_data_length);
+ &logical_queue_dev_list, &queue_log_data_length,
+ &phys_data_length, &log_data_length);
if (ret)
goto err_out;
- physical_cnt = BE_32(physical_dev_list->header.list_length)
+ physical_cnt = BE_32(physical_dev_list->header.list_length)
/ sizeof(physical_dev_list->lun_entries[0]);
-
+
logical_cnt = BE_32(logical_dev_list->header.list_length)
/ sizeof(logical_dev_list->lun_entries[0]);
- DBG_DISC("physical_cnt %d logical_cnt %d\n", physical_cnt, logical_cnt);
+ logical_queue_cnt = BE_32(logical_queue_dev_list->header.list_length)
+ / sizeof(logical_queue_dev_list->lun_entries[0]);
+
+
+ DBG_DISC("physical_cnt %d logical_cnt %d queue_cnt %d\n", physical_cnt, logical_cnt, logical_queue_cnt);
if (physical_cnt) {
bmic_phy_info = os_mem_alloc(softs, sizeof(*bmic_phy_info));
@@ -1706,6 +1861,7 @@
}
scsi3addr = lun_ext_entry->lunid;
+
/* Save the target sas adderess for external raid device */
if(lun_ext_entry->device_type == CONTROLLER_DEVICE) {
int target = lun_ext_entry->lunid[3] & 0x3f;
@@ -1722,10 +1878,34 @@
memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
device->wwid = lun_ext_entry->wwid;
device->is_physical_device = is_physical_device;
- if (!is_physical_device)
+ if (!is_physical_device && logical_queue_cnt--) {
device->is_external_raid_device =
pqisrc_is_external_raid_addr(scsi3addr);
-
+			/* The multiplier scales the raw firmware queue depth
+			 * value to the actual queue depth: if the multiplier
+			 * is 1, multiply by 256; if it is 0, multiply by 16.
+			 */
+ multiplier = logical_queue_dev_list->lun_entries[i - physical_cnt].multiplier;
+ qdepth = logical_queue_dev_list->lun_entries[i - physical_cnt].queue_depth;
+ if (multiplier) {
+ device->firmware_queue_depth_set = true;
+ device->queue_depth = qdepth*256;
+ } else {
+ device->firmware_queue_depth_set = true;
+ device->queue_depth = qdepth*16;
+ }
+ if (device->queue_depth > softs->adapterQDepth) {
+ device->firmware_queue_depth_set = true;
+ device->queue_depth = softs->adapterQDepth;
+ }
+ if ((multiplier == 1) &&
+ (qdepth <= 0 || qdepth >= MAX_RAW_M256_QDEPTH))
+ device->firmware_queue_depth_set = false;
+ if ((multiplier == 0) &&
+ (qdepth <= 0 || qdepth >= MAX_RAW_M16_QDEPTH))
+ device->firmware_queue_depth_set = false;
+ }
+
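[Annotation] Condensing the firmware queue-depth decode above into one place (a sketch with a hypothetical helper; MAX_RAW_M256_QDEPTH and MAX_RAW_M16_QDEPTH are the bounds referenced above):

    static uint32_t
    example_decode_fw_qdepth(uint8_t multiplier, uint16_t qdepth,
        uint32_t adapter_qdepth, boolean_t *fw_valid)
    {
            uint32_t depth = qdepth * (multiplier ? 256 : 16);

            /* Reject raw values outside the sane range for the encoding. */
            if (multiplier ? (qdepth == 0 || qdepth >= MAX_RAW_M256_QDEPTH) :
                (qdepth == 0 || qdepth >= MAX_RAW_M16_QDEPTH)) {
                    *fw_valid = false;      /* caller falls back to driver calc */
                    return (adapter_qdepth);
            }
            *fw_valid = true;
            return (MIN(depth, adapter_qdepth));    /* clamp to adapter limit */
    }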
/* Get device type, vendor, model, device ID. */
ret = pqisrc_get_dev_data(softs, device);
@@ -1735,6 +1915,12 @@
DBG_DISC("INQUIRY FAILED \n");
continue;
}
+			/* For the controller device itself, use the adapter
+			 * queue depth reported to the SCSI midlayer. */
+ if (device->devtype == RAID_DEVICE) {
+ device->firmware_queue_depth_set = true;
+ device->queue_depth = softs->adapterQDepth;
+ }
pqisrc_assign_btl(device);
/*
@@ -1780,7 +1966,7 @@
device->sas_address = BE_64(lun_ext_entry->wwid);
}
new_dev_cnt++;
- break;
+ break;
case TAPE_DEVICE:
case MEDIUM_CHANGER_DEVICE:
new_dev_cnt++;
@@ -1797,13 +1983,14 @@
break;
case SES_DEVICE:
case CONTROLLER_DEVICE:
+ default:
break;
}
}
DBG_DISC("new_dev_cnt %d\n", new_dev_cnt);
pqisrc_update_device_list(softs, new_device_list, new_dev_cnt);
-
+
err_out:
if (new_device_list) {
for (i = 0; i < ndev_allocated; i++) {
@@ -1816,15 +2003,18 @@
}
}
os_mem_free(softs, (char *)new_device_list,
- sizeof(*new_device_list) * ndev_allocated);
+ sizeof(*new_device_list) * ndev_allocated);
}
if(physical_dev_list)
os_mem_free(softs, (char *)physical_dev_list, phys_data_length);
if(logical_dev_list)
os_mem_free(softs, (char *)logical_dev_list, log_data_length);
+ if(logical_queue_dev_list)
+ os_mem_free(softs, (char*)logical_queue_dev_list,
+ queue_log_data_length);
if (bmic_phy_info)
os_mem_free(softs, (char *)bmic_phy_info, sizeof(*bmic_phy_info));
-
+
DBG_FUNC("OUT \n");
return ret;
@@ -1833,16 +2023,17 @@
/*
* Clean up memory allocated for devices.
*/
-void pqisrc_cleanup_devices(pqisrc_softstate_t *softs)
+void
+pqisrc_cleanup_devices(pqisrc_softstate_t *softs)
{
int i = 0,j = 0;
pqi_scsi_dev_t *dvp = NULL;
DBG_FUNC("IN\n");
-
+
for(i = 0; i < PQI_MAX_DEVICES; i++) {
for(j = 0; j < PQI_MAX_MULTILUN; j++) {
- if (softs->device_list[i][j] == NULL)
+ if (softs->device_list[i][j] == NULL)
continue;
dvp = softs->device_list[i][j];
pqisrc_device_mem_free(softs, dvp);
@@ -1850,4 +2041,3 @@
}
DBG_FUNC("OUT\n");
}
-
diff --git a/sys/dev/smartpqi/smartpqi_event.c b/sys/dev/smartpqi/smartpqi_event.c
--- a/sys/dev/smartpqi/smartpqi_event.c
+++ b/sys/dev/smartpqi/smartpqi_event.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,21 +34,22 @@
pqisrc_rescan_devices(pqisrc_softstate_t *softs)
{
int ret;
-
+
DBG_FUNC("IN\n");
-
+
os_sema_lock(&softs->scan_lock);
-
+
ret = pqisrc_scan_devices(softs);
os_sema_unlock(&softs->scan_lock);
-
+
DBG_FUNC("OUT\n");
return ret;
}
-void pqisrc_wait_for_rescan_complete(pqisrc_softstate_t *softs)
+void
+pqisrc_wait_for_rescan_complete(pqisrc_softstate_t *softs)
{
os_sema_lock(&softs->scan_lock);
os_sema_unlock(&softs->scan_lock);
@@ -58,11 +58,11 @@
/*
* Subroutine to acknowledge the events processed by the driver to the adapter.
*/
-static void
-pqisrc_acknowledge_event(pqisrc_softstate_t *softs,
+static void
+pqisrc_acknowledge_event(pqisrc_softstate_t *softs,
struct pqi_event *event)
{
-
+
pqi_event_acknowledge_request_t request;
ib_queue_t *ib_q = &softs->op_raid_ib_q[0];
int tmo = PQISRC_EVENT_ACK_RESP_TIMEOUT;
@@ -71,7 +71,7 @@
DBG_FUNC("IN\n");
request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
- request.header.iu_length = (sizeof(pqi_event_acknowledge_request_t) -
+ request.header.iu_length = (sizeof(pqi_event_acknowledge_request_t) -
PQI_REQUEST_HEADER_LENGTH);
request.event_type = event->event_type;
request.event_id = event->event_id;
@@ -91,9 +91,9 @@
COND_WAIT(((ib_q->pi_local) == *(ib_q->ci_virt_addr)), tmo);
if (tmo <= 0) {
DBG_ERR("wait for event acknowledge timed out\n");
- DBG_ERR("tmo : %d\n",tmo);
- }
-
+ DBG_ERR("tmo : %d\n",tmo);
+ }
+
DBG_FUNC(" OUT\n");
}
@@ -106,7 +106,7 @@
int i;
struct pqi_event *pending_event;
pqisrc_softstate_t *softs = (pqisrc_softstate_t*)arg1;
-
+
DBG_FUNC(" IN\n");
@@ -118,19 +118,19 @@
}
pending_event++;
}
-
+
/* Rescan devices except for heartbeat event */
if ((pqisrc_rescan_devices(softs)) != PQI_STATUS_SUCCESS) {
DBG_ERR(" Failed to Re-Scan devices\n ");
}
DBG_FUNC(" OUT\n");
-
+
}
/*
* Get event index from event type to validate the type of event.
*/
-static int
+static int
pqisrc_event_type_to_event_index(unsigned event_type)
{
int index;
@@ -165,7 +165,7 @@
/*
* Function used to process the events supported by the adapter.
*/
-int
+int
pqisrc_process_event_intr_src(pqisrc_softstate_t *softs,int obq_id)
{
uint32_t obq_pi,obq_ci;
@@ -173,16 +173,14 @@
ob_queue_t *event_q;
struct pqi_event *pending_event;
boolean_t need_delayed_work = false;
-
+
DBG_FUNC(" IN\n");
-
- OS_ATOMIC64_INC(softs, num_intrs);
-
+
event_q = &softs->event_q;
obq_ci = event_q->ci_local;
obq_pi = *(event_q->pi_virt_addr);
DBG_INFO("Initial Event_q ci : %d Event_q pi : %d\n", obq_ci, obq_pi);
-
+
while(1) {
int event_index;
DBG_INFO("queue_id : %d ci : %d pi : %d\n",obq_id, obq_ci, obq_pi);
@@ -208,10 +206,10 @@
pending_event->additional_event_id = response.additional_event_id;
}
}
-
+
obq_ci = (obq_ci + 1) % event_q->num_elem;
}
- /* Update CI */
+ /* Update CI */
event_q->ci_local = obq_ci;
PCI_MEM_PUT32(softs, event_q->ci_register_abs,
event_q->ci_register_offset, event_q->ci_local);
@@ -223,20 +221,21 @@
DBG_FUNC("OUT");
return PQI_STATUS_SUCCESS;
-
+
}
/*
* Function used to send a general management request to adapter.
*/
-int pqisrc_submit_management_req(pqisrc_softstate_t *softs,
+int
+pqisrc_submit_management_req(pqisrc_softstate_t *softs,
pqi_event_config_request_t *request)
-{
+{
int ret = PQI_STATUS_SUCCESS;
ib_queue_t *op_ib_q = &softs->op_raid_ib_q[0];
rcb_t *rcb = NULL;
-
+
DBG_FUNC(" IN\n");
/* Get the tag */
@@ -244,7 +243,7 @@
if (INVALID_ELEM == request->request_id) {
DBG_ERR("Tag not available\n");
ret = PQI_STATUS_FAILURE;
- goto err_out;
+ goto err_out;
}
rcb = &softs->rcb[request->request_id];
@@ -257,19 +256,19 @@
goto err_cmd;
}
- ret = pqisrc_wait_on_condition(softs, rcb);
+ ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Management request timed out !!\n");
goto err_cmd;
}
- os_reset_rcb(rcb);
+ os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist,request->request_id);
DBG_FUNC("OUT\n");
return ret;
-
+
err_cmd:
- os_reset_rcb(rcb);
+ os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist,request->request_id);
err_out:
DBG_FUNC(" failed OUT : %d\n", ret);
@@ -279,19 +278,19 @@
/*
* Build and send the general management request.
*/
-static int
-pqi_event_configure(pqisrc_softstate_t *softs ,
- pqi_event_config_request_t *request,
+static int
+pqi_event_configure(pqisrc_softstate_t *softs ,
+ pqi_event_config_request_t *request,
dma_mem_t *buff)
{
int ret = PQI_STATUS_SUCCESS;
-
+
DBG_FUNC(" IN\n");
-
+
request->header.comp_feature = 0x00;
- request->header.iu_length = sizeof(pqi_event_config_request_t) -
+ request->header.iu_length = sizeof(pqi_event_config_request_t) -
PQI_REQUEST_HEADER_LENGTH; /* excluding IU header length */
-
+
/*Op OQ id where response to be delivered */
request->response_queue_id = softs->op_ob_q[0].q_id;
request->buffer_length = buff->size;
@@ -299,16 +298,16 @@
request->sg_desc.length = buff->size;
request->sg_desc.zero = 0;
request->sg_desc.type = SGL_DESCRIPTOR_CODE_LAST_ALTERNATIVE_SGL_SEGMENT;
-
+
/* submit management req IU*/
ret = pqisrc_submit_management_req(softs,request);
if(ret)
goto err_out;
-
+
DBG_FUNC(" OUT\n");
return ret;
-
+
err_out:
DBG_FUNC("Failed OUT\n");
return ret;
@@ -318,24 +317,25 @@
* Prepare REPORT EVENT CONFIGURATION IU to request that
* event configuration information be reported.
*/
-int pqisrc_report_event_config(pqisrc_softstate_t *softs)
+int
+pqisrc_report_event_config(pqisrc_softstate_t *softs)
{
int ret,i ;
- pqi_event_config_request_t request;
+ pqi_event_config_request_t request;
pqi_event_config_t *event_config_p ;
dma_mem_t buf_report_event ;
/*bytes to be allocaed for report event config data-in buffer */
uint32_t alloc_size = sizeof(pqi_event_config_t) ;
memset(&request, 0 , sizeof(request));
-
+
DBG_FUNC(" IN\n");
-
- memset(&buf_report_event, 0, sizeof(struct dma_mem));
+
+ memset(&buf_report_event, 0, sizeof(struct dma_mem));
buf_report_event.tag = "pqi_report_event_buf" ;
buf_report_event.size = alloc_size;
buf_report_event.align = PQISRC_DEFAULT_DMA_ALIGN;
-
+
/* allocate memory */
ret = os_dma_mem_alloc(softs, &buf_report_event);
if (ret) {
@@ -344,26 +344,26 @@
}
DBG_INFO("buf_report_event.dma_addr = %p \n",(void*)buf_report_event.dma_addr);
DBG_INFO("buf_report_event.virt_addr = %p \n",(void*)buf_report_event.virt_addr);
-
+
request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
-
+
/* Event configuration */
ret=pqi_event_configure(softs,&request,&buf_report_event);
if(ret)
goto free_mem;
-
-
+
+
event_config_p = (pqi_event_config_t*)buf_report_event.virt_addr;
softs->event_config.num_event_descriptors = MIN(event_config_p->num_event_descriptors,
PQI_MAX_EVENT_DESCRIPTORS) ;
-
+
for (i=0; i < softs->event_config.num_event_descriptors ;i++){
- softs->event_config.descriptors[i].event_type =
+ softs->event_config.descriptors[i].event_type =
event_config_p->descriptors[i].event_type;
}
/* free the allocated memory*/
os_dma_mem_free(softs, &buf_report_event);
-
+
DBG_FUNC(" OUT\n");
return ret;
@@ -378,7 +378,8 @@
* Prepare SET EVENT CONFIGURATION IU to request that
* event configuration parameters be set.
*/
-int pqisrc_set_event_config(pqisrc_softstate_t *softs)
+int
+pqisrc_set_event_config(pqisrc_softstate_t *softs)
{
int ret,i;
@@ -395,19 +396,19 @@
buf_set_event.tag = "pqi_set_event_buf";
buf_set_event.size = alloc_size;
buf_set_event.align = PQISRC_DEFAULT_DMA_ALIGN;
-
+
/* allocate memory */
ret = os_dma_mem_alloc(softs, &buf_set_event);
if (ret) {
DBG_ERR("Failed to Allocate set event config buffer : %d\n", ret);
goto err_out;
}
-
+
DBG_INFO("buf_set_event.dma_addr = %p\n",(void*)buf_set_event.dma_addr);
DBG_INFO("buf_set_event.virt_addr = %p\n",(void*)buf_set_event.virt_addr);
request.header.iu_type = PQI_REQUEST_IU_SET_EVENT_CONFIG;
- request.iu_specific.global_event_oq_id = softs->event_q.q_id;
+ request.iu_specific.global_event_oq_id = softs->event_q.q_id;
/*pointer to data-out buffer*/
@@ -415,27 +416,27 @@
event_config_p->num_event_descriptors = softs->event_config.num_event_descriptors;
-
+
for (i=0; i < softs->event_config.num_event_descriptors ; i++){
- event_config_p->descriptors[i].event_type =
+ event_config_p->descriptors[i].event_type =
softs->event_config.descriptors[i].event_type;
if( pqisrc_event_type_to_event_index(event_config_p->descriptors[i].event_type) != -1)
event_config_p->descriptors[i].oq_id = softs->event_q.q_id;
else
event_config_p->descriptors[i].oq_id = 0; /* Not supported this event. */
-
+
}
/* Event configuration */
ret = pqi_event_configure(softs,&request,&buf_set_event);
if(ret)
goto free_mem;
-
+
os_dma_mem_free(softs, &buf_set_event);
-
+
DBG_FUNC(" OUT\n");
return ret;
-
+
free_mem:
os_dma_mem_free(softs, &buf_set_event);
err_out:
diff --git a/sys/dev/smartpqi/smartpqi_helper.c b/sys/dev/smartpqi/smartpqi_helper.c
--- a/sys/dev/smartpqi/smartpqi_helper.c
+++ b/sys/dev/smartpqi/smartpqi_helper.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,10 +27,62 @@
#include "smartpqi_includes.h"
+/* Read and modify the controller diagnostic option PQI_PTRAID_UPDATE_ON_RESCAN_LUNS. */
+void
+pqisrc_ctrl_diagnostic_options(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint32_t diags_options = 0;
+ pqisrc_raid_req_t request;
+
+ DBG_NOTE("IN\n");
+
+ memset(&request, 0, sizeof(request));
+ /* read diags options of controller */
+ ret = pqisrc_build_send_raid_request(softs, &request,
+ (void*)&diags_options,
+ sizeof(diags_options),
+ BMIC_SENSE_DIAGS_OPTIONS,
+ 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_WARN("Request failed for BMIC Sense Diags Option command."
+ "ret:%d\n",ret);
+ return;
+ }
+ DBG_NOTE("diags options data after read: %#x\n",diags_options);
+ diags_options |= PQI_PTRAID_UPDATE_ON_RESCAN_LUNS;
+ DBG_NOTE("diags options data to write: %#x\n",diags_options);
+ memset(&request, 0, sizeof(request));
+ /* write specified diags options to controller */
+ ret = pqisrc_build_send_raid_request(softs, &request,
+ (void*)&diags_options,
+ sizeof(diags_options),
+ BMIC_SET_DIAGS_OPTIONS,
+ 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
+ if (ret != PQI_STATUS_SUCCESS)
+ DBG_WARN("Request failed for BMIC Set Diags Option command."
+ "ret:%d\n",ret);
+#if 0
+ diags_options = 0;
+ memset(&request, 0, sizeof(request));
+ ret = pqisrc_build_send_raid_request(softs, &request,
+ (void*)&diags_options,
+ sizeof(diags_options),
+ BMIC_SENSE_DIAGS_OPTIONS,
+ 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
+ if (ret != PQI_STATUS_SUCCESS)
+ DBG_WARN("Request failed for BMIC Sense Diags Option command."
+ "ret:%d\n",ret);
+ DBG_NOTE("diags options after re-read: %#x\n",diags_options);
+#endif
+ DBG_NOTE("OUT\n");
+}
+
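[Annotation] The helper above is a deliberate read-modify-write so that only the rescan bit changes:

    /*
     * BMIC_SENSE_DIAGS_OPTIONS reads the current option word, the driver
     * ORs in PQI_PTRAID_UPDATE_ON_RESCAN_LUNS, and BMIC_SET_DIAGS_OPTIONS
     * writes the word back, leaving every other diagnostic bit untouched.
     */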
/*
* Function used to validate the adapter health.
*/
-boolean_t pqisrc_ctrl_offline(pqisrc_softstate_t *softs)
+boolean_t
+pqisrc_ctrl_offline(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
@@ -43,29 +94,31 @@
/* Function used set/clear legacy INTx bit in Legacy Interrupt INTx
* mask clear pqi register
*/
-void pqisrc_configure_legacy_intx(pqisrc_softstate_t *softs, boolean_t enable_intx)
+void
+pqisrc_configure_legacy_intx(pqisrc_softstate_t *softs, boolean_t enable_intx)
{
uint32_t intx_mask;
uint32_t *reg_addr = NULL;
-
+
DBG_FUNC("IN\n");
-
+
if (enable_intx)
reg_addr = &softs->pqi_reg->legacy_intr_mask_clr;
else
reg_addr = &softs->pqi_reg->legacy_intr_mask_set;
-
+
intx_mask = PCI_MEM_GET32(softs, reg_addr, PQI_LEGACY_INTR_MASK_CLR);
intx_mask |= PQISRC_LEGACY_INTX_MASK;
PCI_MEM_PUT32(softs, reg_addr, PQI_LEGACY_INTR_MASK_CLR ,intx_mask);
-
+
DBG_FUNC("OUT\n");
}
/*
* Function used to take exposed devices to OS as offline.
*/
-void pqisrc_take_devices_offline(pqisrc_softstate_t *softs)
+void
+pqisrc_take_devices_offline(pqisrc_softstate_t *softs)
{
pqi_scsi_dev_t *device = NULL;
int i,j;
@@ -86,14 +139,26 @@
/*
* Function used to take adapter offline.
*/
-void pqisrc_take_ctrl_offline(pqisrc_softstate_t *softs)
+void
+pqisrc_take_ctrl_offline(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
softs->ctrl_online = false;
- pqisrc_trigger_nmi_sis(softs);
+
+ int lockupcode = 0;
+
+ if (SIS_IS_KERNEL_PANIC(softs)) {
+ lockupcode = PCI_MEM_GET32(softs, &softs->ioa_reg->mb[7], LEGACY_SIS_SRCV_OFFSET_MAILBOX_7);
+ DBG_ERR("Controller FW is not running, Lockup code = %x\n", lockupcode);
+ }
+ else {
+ pqisrc_trigger_nmi_sis(softs);
+ }
+
os_complete_outstanding_cmds_nodevice(softs);
+ pqisrc_wait_for_rescan_complete(softs);
pqisrc_take_devices_offline(softs);
DBG_FUNC("OUT\n");
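[Annotation] In summary, the revised offline path is:

    /*
     * 1. If the SIS registers report a firmware lockup, log the lockup
     *    code from mailbox 7 (an NMI would accomplish nothing); otherwise
     *    trigger an NMI so the firmware captures its state.
     * 2. Fail all outstanding commands back with no device.
     * 3. Wait for any in-flight rescan to drain, then mark all exposed
     *    devices offline.
     */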
@@ -102,46 +167,27 @@
/*
* Timer handler for the adapter heart-beat.
*/
-void pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *softs)
+void
+pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *softs)
{
- uint64_t num_intrs;
uint8_t take_offline = false;
DBG_FUNC("IN\n");
- num_intrs = OS_ATOMIC64_READ(softs, num_intrs);
-
- if (PQI_NEW_HEARTBEAT_MECHANISM(softs)) {
- if (CTRLR_HEARTBEAT_CNT(softs) == softs->prev_heartbeat_count) {
- take_offline = true;
- goto take_ctrl_offline;
- }
- softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs);
- DBG_INFO("CTRLR_HEARTBEAT_CNT(softs) = %lx \
+ if (CTRLR_HEARTBEAT_CNT(softs) == softs->prev_heartbeat_count) {
+ take_offline = true;
+ goto take_ctrl_offline;
+ }
+ softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs);
+ DBG_INFO("CTRLR_HEARTBEAT_CNT(softs) = %lx \
softs->prev_heartbeat_count = %lx\n",
CTRLR_HEARTBEAT_CNT(softs), softs->prev_heartbeat_count);
- } else {
- if (num_intrs == softs->prev_num_intrs) {
- softs->num_heartbeats_requested++;
- if (softs->num_heartbeats_requested > PQI_MAX_HEARTBEAT_REQUESTS) {
- take_offline = true;
- goto take_ctrl_offline;
- }
- softs->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
-
- pqisrc_ack_all_events((void*)softs);
-
- } else {
- softs->num_heartbeats_requested = 0;
- }
- softs->prev_num_intrs = num_intrs;
- }
take_ctrl_offline:
if (take_offline){
DBG_ERR("controller is offline\n");
- pqisrc_take_ctrl_offline(softs);
os_stop_heartbeat_timer(softs);
+ pqisrc_take_ctrl_offline(softs);
}
DBG_FUNC("OUT\n");
}
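[Annotation] The simplified heartbeat check above boils down to comparing a firmware-incremented counter across timer ticks; a minimal sketch with hypothetical names:

    static boolean_t
    example_heartbeat_alive(uint64_t cur_cnt, uint64_t *prev_cnt)
    {
            if (cur_cnt == *prev_cnt)
                    return (false); /* counter stalled: firmware presumed hung */
            *prev_cnt = cur_cnt;    /* progress seen: remember for next tick */
            return (true);
    }

Note the reordering in the hunk above: the timer is now stopped before pqisrc_take_ctrl_offline(), so the handler cannot fire again while the controller is being torn down.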
@@ -149,28 +195,33 @@
/*
* Conditional variable management routine for internal commands.
*/
-int pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb){
-
+int
+pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb,
+ uint32_t timeout_in_msec)
+{
DBG_FUNC("IN\n");
int ret = PQI_STATUS_SUCCESS;
- uint32_t loop_cnt = 0;
-
+
+	/* The loop below polls every 500 usec, i.e. twice per msec */
+	uint32_t loop_cnt = timeout_in_msec * 2;
+ uint32_t i = 0;
+
while (rcb->req_pending == true) {
OS_SLEEP(500); /* Micro sec */
-
- /*Polling needed for FreeBSD : since ithread routine is not scheduled
- during bootup, we could use polling until interrupts are
- enabled (using 'if (cold)'to check for the boot time before
- interrupts are enabled). */
+		/* Polling needed for FreeBSD: since the ithread routine is not
+		 * scheduled during bootup, we poll until interrupts are
+		 * enabled (using 'if (cold)' to check for boot time before
+		 * interrupts are enabled). */
IS_POLLING_REQUIRED(softs);
- if (loop_cnt++ == PQISRC_CMD_TIMEOUT_CNT) {
+ if ((timeout_in_msec != TIMEOUT_INFINITE) && (i++ == loop_cnt)) {
DBG_ERR("ERR: Requested cmd timed out !!!\n");
ret = PQI_STATUS_TIMEOUT;
+ rcb->timedout = true;
break;
}
-
+
if (pqisrc_ctrl_offline(softs)) {
DBG_ERR("Controller is Offline");
ret = PQI_STATUS_FAILURE;
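/*
 * Editorial sketch, not part of the patch: the wait loop above has this
 * shape -- poll every 500 usec, so a millisecond budget buys two
 * iterations, and TIMEOUT_INFINITE disables the bound altogether.
 */
static int
sketch_poll_req_pending(rcb_t *rcb, uint32_t timeout_in_msec)
{
	uint32_t loop_cnt = timeout_in_msec * 2;	/* 2 polls per msec */
	uint32_t i = 0;

	while (rcb->req_pending == true) {
		OS_SLEEP(500);				/* microseconds */
		if ((timeout_in_msec != TIMEOUT_INFINITE) && (i++ == loop_cnt))
			return (PQI_STATUS_TIMEOUT);
	}
	return (PQI_STATUS_SUCCESS);
}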
@@ -186,32 +237,37 @@
}
/* Function used to validate the device wwid. */
-boolean_t pqisrc_device_equal(pqi_scsi_dev_t *dev1,
+boolean_t
+pqisrc_device_equal(pqi_scsi_dev_t *dev1,
pqi_scsi_dev_t *dev2)
{
return dev1->wwid == dev2->wwid;
}
/* Function used to validate the device scsi3addr. */
-boolean_t pqisrc_scsi3addr_equal(uint8_t *scsi3addr1, uint8_t *scsi3addr2)
+boolean_t
+pqisrc_scsi3addr_equal(uint8_t *scsi3addr1, uint8_t *scsi3addr2)
{
return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}
/* Function used to validate hba_lunid */
-boolean_t pqisrc_is_hba_lunid(uint8_t *scsi3addr)
+boolean_t
+pqisrc_is_hba_lunid(uint8_t *scsi3addr)
{
return pqisrc_scsi3addr_equal(scsi3addr, (uint8_t*)RAID_CTLR_LUNID);
}
/* Function used to validate type of device */
-boolean_t pqisrc_is_logical_device(pqi_scsi_dev_t *device)
+boolean_t
+pqisrc_is_logical_device(pqi_scsi_dev_t *device)
{
return !device->is_physical_device;
}
/* Function used to sanitize inquiry string */
-void pqisrc_sanitize_inquiry_string(unsigned char *s, int len)
+void
+pqisrc_sanitize_inquiry_string(unsigned char *s, int len)
{
boolean_t terminated = false;
@@ -238,7 +294,8 @@
};
/* Get the RAID level from the index */
-char *pqisrc_raidlevel_to_string(uint8_t raid_level)
+char *
+pqisrc_raidlevel_to_string(uint8_t raid_level)
{
DBG_FUNC("IN\n");
if (raid_level < ARRAY_SIZE(raid_levels))
@@ -249,7 +306,8 @@
}
/* Debug routine for displaying device info */
-void pqisrc_display_device_info(pqisrc_softstate_t *softs,
+void
+pqisrc_display_device_info(pqisrc_softstate_t *softs,
char *action, pqi_scsi_dev_t *device)
{
DBG_INFO( "%s scsi BTL %d:%d:%d: %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
@@ -268,9 +326,10 @@
}
/* validate the structure sizes */
-void check_struct_sizes()
-{
-
+void
+check_struct_sizes()
+{
+
ASSERT(sizeof(SCSI3Addr_struct)== 2);
ASSERT(sizeof(PhysDevAddr_struct) == 8);
ASSERT(sizeof(LogDevAddr_struct)== 8);
@@ -278,7 +337,10 @@
ASSERT(sizeof(RequestBlock_struct) == 20);
ASSERT(sizeof(MoreErrInfo_struct)== 8);
ASSERT(sizeof(ErrorInfo_struct)== 48);
- ASSERT(sizeof(IOCTL_Command_struct)== 86);
+	/* Check the size of IOCTL_Command_struct on both
+	   64-bit and 32-bit systems */
+ ASSERT(sizeof(IOCTL_Command_struct)== 86 ||
+ sizeof(IOCTL_Command_struct)== 82);
ASSERT(sizeof(struct bmic_host_wellness_driver_version)== 42);
ASSERT(sizeof(struct bmic_host_wellness_time)== 20);
ASSERT(sizeof(struct pqi_dev_adminq_cap)== 8);
@@ -298,7 +360,8 @@
ASSERT(sizeof(pqi_dev_cap_t)== 576);
ASSERT(sizeof(pqi_aio_req_t)== 128);
ASSERT(sizeof(pqisrc_raid_req_t)== 128);
- ASSERT(sizeof(pqi_tmf_req_t)== 32);
+ ASSERT(sizeof(pqi_raid_tmf_req_t)== 32);
+ ASSERT(sizeof(pqi_aio_tmf_req_t)== 32);
ASSERT(sizeof(struct pqi_io_response)== 16);
ASSERT(sizeof(struct sense_header_scsi)== 8);
ASSERT(sizeof(reportlun_header_t)==8);
@@ -308,5 +371,118 @@
ASSERT(sizeof(pqisrc_raid_map_t)== 8256);
ASSERT(sizeof(bmic_ident_ctrl_t)== 325);
ASSERT(sizeof(bmic_ident_physdev_t)==2048);
-
+
+}
+
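/*
 * Editorial note, not part of the patch: the runtime ASSERTs above could
 * also be phrased as compile-time checks with the CTASSERT macro this
 * patch already uses in smartpqi_main.c. The 86/82 pair presumably
 * reflects the packed structure embedding a raw user-space buffer pointer
 * (8 bytes on a 64-bit build, 4 bytes on a 32-bit one).
 */
CTASSERT(sizeof(IOCTL_Command_struct) == 86 ||
    sizeof(IOCTL_Command_struct) == 82);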
+uint32_t
+pqisrc_count_num_scsi_active_requests_on_dev(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+ uint32_t i, active_io = 0;
+ rcb_t* rcb;
+
+ for(i = 1; i <= softs->max_outstanding_io; i++) {
+ rcb = &softs->rcb[i];
+ if(rcb && IS_OS_SCSICMD(rcb) && (rcb->dvp == device) && rcb->req_pending) {
+ active_io++;
+ }
+ }
+ return active_io;
+}
+
+void
+check_device_pending_commands_to_complete(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+ uint32_t tag = softs->max_outstanding_io, active_requests;
+	uint64_t timeout = 0, delay_in_usec = 1000;	/* in microseconds */
+ rcb_t* rcb;
+
+ DBG_FUNC("IN\n");
+
+ active_requests = pqisrc_count_num_scsi_active_requests_on_dev(softs, device);
+
+ DBG_WARN("Device Outstanding IO count = %u\n", active_requests);
+
+ if(!active_requests)
+ return;
+
+ do {
+ rcb = &softs->rcb[tag];
+ if(rcb && IS_OS_SCSICMD(rcb) && (rcb->dvp == device) && rcb->req_pending) {
+ OS_BUSYWAIT(delay_in_usec);
+ timeout += delay_in_usec;
+ }
+ else
+ tag--;
+ if(timeout >= PQISRC_PENDING_IO_TIMEOUT_USEC) {
+ DBG_WARN("timed out waiting for pending IO\n");
+ return;
+ }
+ } while(tag);
+
+}
+
+inline uint64_t
+pqisrc_increment_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+#if PQISRC_DEVICE_IO_COUNTER
+ /*Increment device active io count by one*/
+ return OS_ATOMIC64_INC(&device->active_requests);
+#endif
+}
+
+inline uint64_t
+pqisrc_decrement_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+#if PQISRC_DEVICE_IO_COUNTER
+ /*Decrement device active io count by one*/
+ return OS_ATOMIC64_DEC(&device->active_requests);
+#endif
+}
+
+inline void
+pqisrc_init_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+#if PQISRC_DEVICE_IO_COUNTER
+ /* Reset device count to Zero */
+ OS_ATOMIC64_INIT(&device->active_requests, 0);
+#endif
+}
+
+inline uint64_t
+pqisrc_read_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+#if PQISRC_DEVICE_IO_COUNTER
+ /* read device active count*/
+ return OS_ATOMIC64_READ(&device->active_requests);
+#endif
+}
+
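/*
 * Editorial sketch, not part of the patch: the atomic helpers above are
 * meant to bracket each request -- increment on submit, decrement on
 * completion -- so the drain routine below only has to watch one
 * per-device counter. The two function names here are illustrative.
 */
static void
sketch_submit_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	pqisrc_increment_device_active_io(softs, device);
	/* hand the request to the firmware here */
}

static void
sketch_complete_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	/* finish the request here */
	pqisrc_decrement_device_active_io(softs, device);
}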
+void
+pqisrc_wait_for_device_commands_to_complete(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+	uint64_t timeout_in_usec = 0, delay_in_usec = 1000;	/* in microseconds */
+
+ DBG_FUNC("IN\n");
+
+ if(!softs->ctrl_online)
+ return;
+
+#if PQISRC_DEVICE_IO_COUNTER
+ DBG_NOTE("Device Outstanding IO count = %ld\n", pqisrc_read_device_active_io(softs, device));
+
+ while(pqisrc_read_device_active_io(softs, device)) {
+		OS_BUSYWAIT(delay_in_usec);	/* in microseconds */
+		if(!softs->ctrl_online) {
+			DBG_WARN("Controller offline detected\n");
+ }
+ timeout_in_usec += delay_in_usec;
+ if(timeout_in_usec >= PQISRC_PENDING_IO_TIMEOUT_USEC) {
+ DBG_WARN("timed out waiting for pending IO. DeviceOutStandingIo's=%ld\n",
+ pqisrc_read_device_active_io(softs, device));
+ return;
+ }
+ }
+#else
+ check_device_pending_commands_to_complete(softs, device);
+#endif
}
diff --git a/sys/dev/smartpqi/smartpqi_includes.h b/sys/dev/smartpqi/smartpqi_includes.h
--- a/sys/dev/smartpqi/smartpqi_includes.h
+++ b/sys/dev/smartpqi/smartpqi_includes.h
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -37,8 +36,7 @@
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/disk.h>
-#include <sys/cdefs.h>
-#include <sys/types.h>
+#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/bus.h>
diff --git a/sys/dev/smartpqi/smartpqi_init.c b/sys/dev/smartpqi/smartpqi_init.c
--- a/sys/dev/smartpqi/smartpqi_init.c
+++ b/sys/dev/smartpqi/smartpqi_init.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,10 +27,14 @@
#include "smartpqi_includes.h"
+/* 5 min timeout (in msec) for quiesce */
+#define PQI_QUIESCE_TIMEOUT 300000
+
/*
* Request the adapter to get PQI capabilities supported.
*/
-static int pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
+static int
+pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
@@ -64,7 +67,7 @@
DBG_ERR("Failed to allocate capability DMA buffer : %d\n", ret);
goto err_dma_alloc;
}
-
+
admin_req.fn_code = PQI_FUNCTION_REPORT_DEV_CAP;
admin_req.req_type.general_func.buf_size = pqi_cap_dma_buf.size;
admin_req.req_type.general_func.sg_desc.length = pqi_cap_dma_buf.size;
@@ -79,7 +82,7 @@
} else {
DBG_ERR("Failed to send admin req report pqi device capability\n");
goto err_admin_req;
-
+
}
softs->pqi_dev_cap.max_iqs = capability->max_iqs;
@@ -107,7 +110,7 @@
DBG_INIT("softs->max_ib_iu_length_per_fw: %d\n", softs->max_ib_iu_length_per_fw);
DBG_INIT("softs->ib_spanning_supported: %d\n", softs->ib_spanning_supported);
DBG_INIT("softs->ob_spanning_supported: %d\n", softs->ob_spanning_supported);
-
+
os_mem_free(softs, (void *)capability,
REPORT_PQI_DEV_CAP_DATA_BUF_SIZE);
@@ -130,9 +133,9 @@
/*
* Function used to deallocate the used rcb.
*/
-void pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count)
+void
+pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count)
{
-
uint32_t num_req;
size_t size;
int i;
@@ -151,7 +154,8 @@
/*
* Allocate memory for rcb and SG descriptors.
*/
-static int pqisrc_allocate_rcb(pqisrc_softstate_t *softs)
+static int
+pqisrc_allocate_rcb(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
int i = 0;
@@ -165,7 +169,7 @@
/* Set maximum outstanding requests */
/* The valid tag values are from 1, 2, ..., softs->max_outstanding_io
* The rcb will be accessed by using the tag as index
- * * As 0 tag index is not used, we need to allocate one extra.
+ * As 0 tag index is not used, we need to allocate one extra.
*/
softs->max_outstanding_io = softs->pqi_cap.max_outstanding_io;
num_req = softs->max_outstanding_io + 1;
@@ -181,7 +185,7 @@
goto err_out;
}
softs->rcb = rcb;
-
+
/* Allocate sg dma memory for sg chain */
sg_buf_size = softs->pqi_cap.max_sg_elem *
sizeof(sgt_t);
@@ -219,13 +223,14 @@
* Function used to decide the operational queue configuration params
* - no of ibq/obq, shared/non-shared interrupt resource, IU spanning support
*/
-void pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
+void
+pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
{
uint16_t total_iq_elements;
DBG_FUNC("IN\n");
- DBG_INIT("softs->intr_count : %d softs->num_cpus_online : %d",
+ DBG_INIT("softs->intr_count : %d softs->num_cpus_online : %d",
softs->intr_count, softs->num_cpus_online);
if (softs->intr_count == 1 || softs->num_cpus_online == 1) {
@@ -235,19 +240,15 @@
}
else {
/* Note : One OBQ (OBQ0) reserved for event queue */
- softs->num_op_obq = MIN(softs->num_cpus_online,
+ softs->num_op_obq = MIN(softs->num_cpus_online,
softs->intr_count) - 1;
- softs->num_op_obq = softs->intr_count - 1;
softs->share_opq_and_eventq = false;
}
-
- /*
- * softs->num_cpus_online is set as number of physical CPUs,
- * So we can have more queues/interrupts .
- */
- if (softs->intr_count > 1)
+	/* If the available interrupt count is more than one,
+	   we don't need to share the interrupt for IO and event queues */
+ if (softs->intr_count > 1)
softs->share_opq_and_eventq = false;
-
+
DBG_INIT("softs->num_op_obq : %d\n",softs->num_op_obq);
softs->num_op_raid_ibq = softs->num_op_obq;
@@ -263,23 +264,23 @@
softs->max_ib_iu_length =
(softs->max_ib_iu_length_per_fw / softs->ibq_elem_size) *
softs->ibq_elem_size;
-
+
}
-	/* If Max. Outstanding IO came with Max. Spanning element count then,
-	needed elements per IO are multiplication of
-	Max.Outstanding IO and Max.Spanning element */
+	/* If Max. Outstanding IO came with Max. Spanning element count,
+	   the elements needed per IO are the product of Max. Outstanding IO
+	   and Max. Spanning elements */
- total_iq_elements = (softs->max_outstanding_io *
+ total_iq_elements = (softs->max_outstanding_io *
(softs->max_ib_iu_length / softs->ibq_elem_size));
-
+
softs->num_elem_per_op_ibq = total_iq_elements / softs->num_op_raid_ibq;
- softs->num_elem_per_op_ibq = MIN(softs->num_elem_per_op_ibq,
+ softs->num_elem_per_op_ibq = MIN(softs->num_elem_per_op_ibq,
softs->pqi_dev_cap.max_iq_elements);
-
- softs->num_elem_per_op_obq = softs->max_outstanding_io / softs->num_op_obq;
+
+ softs->num_elem_per_op_obq = softs->max_outstanding_io / softs->num_op_obq;
softs->num_elem_per_op_obq = MIN(softs->num_elem_per_op_obq,
softs->pqi_dev_cap.max_oq_elements);
- softs->max_sg_per_iu = ((softs->max_ib_iu_length -
+ softs->max_sg_per_iu = ((softs->max_ib_iu_length -
softs->ibq_elem_size) /
sizeof(sgt_t)) +
MAX_EMBEDDED_SG_IN_FIRST_IU;
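/*
 * Editorial sketch, not part of the patch: the sizing logic above in one
 * place -- OBQ0 is reserved for the event queue, the remaining outbound
 * queues are split across vectors/CPUs, and the per-queue element count
 * is clamped to the device capability.
 */
static void
sketch_size_obqs(pqisrc_softstate_t *softs)
{
	softs->num_op_obq = MIN(softs->num_cpus_online,
	    softs->intr_count) - 1;
	softs->num_elem_per_op_obq = MIN(
	    softs->max_outstanding_io / softs->num_op_obq,
	    softs->pqi_dev_cap.max_oq_elements);
}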
@@ -295,11 +296,12 @@
/*
* Configure the operational queue parameters.
*/
-int pqisrc_configure_op_queues(pqisrc_softstate_t *softs)
+int
+pqisrc_configure_op_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
-
- /* Get the PQI capability,
+
+ /* Get the PQI capability,
REPORT PQI DEVICE CAPABILITY request */
ret = pqisrc_report_pqi_capability(softs);
if (ret) {
@@ -312,11 +314,11 @@
softs->max_io_for_scsi_ml = softs->max_outstanding_io - PQI_RESERVED_IO_SLOTS_CNT;
/* Decide the Op queue configuration */
- pqisrc_decide_opq_config(softs);
-
+ pqisrc_decide_opq_config(softs);
+
DBG_FUNC("OUT\n");
return ret;
-
+
err_out:
DBG_FUNC("OUT failed\n");
return ret;
@@ -325,7 +327,8 @@
/*
* Validate the PQI mode of adapter.
*/
-int pqisrc_check_pqimode(pqisrc_softstate_t *softs)
+int
+pqisrc_check_pqimode(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_FAILURE;
int tmo = 0;
@@ -337,7 +340,7 @@
tmo = PQISRC_PQIMODE_READY_TIMEOUT;
do {
signature = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->signature, PQI_SIGNATURE));
-
+
if (memcmp(&signature, PQISRC_PQI_DEVICE_SIGNATURE,
sizeof(uint64_t)) == 0) {
ret = PQI_STATUS_SUCCESS;
@@ -382,11 +385,207 @@
return ret;
}
+/* PQI Feature processing */
+static int
+pqisrc_config_table_update(struct pqisrc_softstate *softs,
+ uint16_t first_section, uint16_t last_section)
+{
+ pqi_vendor_general_request_t request;
+ int ret = PQI_STATUS_FAILURE;
+
+ memset(&request, 0, sizeof(request));
+
+ request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
+ request.header.iu_length = sizeof(request) - PQI_REQUEST_HEADER_LENGTH;
+ request.function_code = PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE;
+ request.data.config_table_update.first_section = first_section;
+ request.data.config_table_update.last_section = last_section;
+
+ ret = pqisrc_build_send_vendor_request(softs, &request, NULL);
+
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Failed to submit vendor general request IU, Ret status: %d\n", ret);
+ return PQI_STATUS_FAILURE;
+ }
+
+ return PQI_STATUS_SUCCESS;
+}
+
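/*
 * Editorial note, not part of the patch: the helper above takes an
 * inclusive [first, last] section range; a caller refreshing a single
 * section passes the same id for both bounds, exactly as
 * pqisrc_set_host_requested_firmware_feature() does later in this patch:
 *
 *	ret = pqisrc_config_table_update(softs,
 *	    PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES,
 *	    PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES);
 */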
+static inline boolean_t
+pqi_is_firmware_feature_supported(
+ struct pqi_conf_table_firmware_features *firmware_feature_list,
+ unsigned int bit_position)
+{
+ unsigned int byte_index;
+
+ byte_index = bit_position / BITS_PER_BYTE;
+
+ if (byte_index >= firmware_feature_list->num_elements)
+ return false;
+
+ return firmware_feature_list->features_supported[byte_index] &
+ (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
+}
+
+static inline boolean_t
+pqi_is_firmware_feature_enabled(
+ struct pqi_conf_table_firmware_features *firmware_feature_list,
+ uint8_t *firmware_features_addr, unsigned int bit_position)
+{
+ unsigned int byte_index;
+ uint8_t *feature_enabled_addr;
+
+ byte_index = (bit_position / BITS_PER_BYTE) +
+ (firmware_feature_list->num_elements * 2);
+
+ feature_enabled_addr = firmware_features_addr +
+ offsetof(struct pqi_conf_table_firmware_features,
+ features_supported) + byte_index;
+
+ return *feature_enabled_addr &
+ (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
+}
+
+static inline void
+pqi_request_firmware_feature(
+ struct pqi_conf_table_firmware_features *firmware_feature_list,
+ unsigned int bit_position)
+{
+ unsigned int byte_index;
+
+ byte_index = (bit_position / BITS_PER_BYTE) +
+ firmware_feature_list->num_elements;
+
+ firmware_feature_list->features_supported[byte_index] |=
+ (1 << (bit_position % BITS_PER_BYTE));
+}
+
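/*
 * Editorial sketch, not part of the patch: features_supported[] holds
 * three back-to-back bitmaps of num_elements bytes each -- supported
 * (offset 0), host-requested (offset num_elements) and firmware-enabled
 * (offset 2 * num_elements). That layout is all the byte_index arithmetic
 * in the helpers above encodes:
 */
static inline unsigned int
sketch_feature_byte_index(unsigned int bit_position,
    unsigned int num_elements, unsigned int which_map)
{
	/* which_map: 0 = supported, 1 = requested, 2 = enabled */
	return ((bit_position / BITS_PER_BYTE) + (which_map * num_elements));
}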
+/* Update PQI config table firmware features section and inform the firmware */
+static int
+pqisrc_set_host_requested_firmware_feature(pqisrc_softstate_t *softs,
+ struct pqi_conf_table_firmware_features *firmware_feature_list)
+{
+ uint8_t *request_feature_addr;
+ void *request_feature_abs_addr;
+
+ request_feature_addr = firmware_feature_list->features_supported +
+ firmware_feature_list->num_elements;
+ request_feature_abs_addr = softs->fw_features_section_abs_addr +
+ (request_feature_addr - (uint8_t*)firmware_feature_list);
+
+ os_io_memcpy(request_feature_abs_addr, request_feature_addr,
+ firmware_feature_list->num_elements);
+
+ return pqisrc_config_table_update(softs,
+ PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES,
+ PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES);
+}
+
+/* Check firmware has enabled the feature specified in the respective bit position. */
+inline boolean_t
+pqisrc_is_firmware_feature_enabled(pqisrc_softstate_t *softs,
+ struct pqi_conf_table_firmware_features *firmware_feature_list, uint16_t bit_position)
+{
+ uint16_t byte_index;
+ uint8_t *features_enabled_abs_addr;
+
+ byte_index = (bit_position / BITS_PER_BYTE) +
+ (firmware_feature_list->num_elements * 2);
+
+ features_enabled_abs_addr = softs->fw_features_section_abs_addr +
+ offsetof(struct pqi_conf_table_firmware_features,features_supported) + byte_index;
+
+ return *features_enabled_abs_addr &
+ (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
+}
+
+static void
+pqi_firmware_feature_status(struct pqisrc_softstate *softs,
+ struct pqi_firmware_feature *firmware_feature)
+{
+ switch(firmware_feature->feature_bit) {
+ case PQI_FIRMWARE_FEATURE_OFA:
+ break;
+ case PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT:
+ softs->timeout_in_passthrough = true;
+ break;
+ case PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT:
+ softs->timeout_in_tmf = true;
+ break;
+ default:
+ DBG_NOTE("Nothing to do \n");
+ }
+}
+
+/* Firmware features supported by the driver */
+static struct pqi_firmware_feature
+pqi_firmware_features[] = {
+ {
+ .feature_name = "Support timeout for pass-through commands",
+ .feature_bit = PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "Support timeout for LUN Reset TMF",
+ .feature_bit = PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT,
+ .feature_status = pqi_firmware_feature_status,
+ }
+};
+
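/*
 * Editorial sketch, not part of the patch: supporting one more feature bit
 * is a single-entry addition to the table above; the feature bit named
 * below is hypothetical.
 */
	{
		.feature_name = "Some future capability",
		.feature_bit = PQI_FIRMWARE_FEATURE_EXAMPLE,	/* hypothetical */
		.feature_status = pqi_firmware_feature_status,
	},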
+static void
+pqisrc_process_firmware_features(pqisrc_softstate_t *softs)
+{
+ int rc;
+ struct pqi_conf_table_firmware_features *firmware_feature_list;
+ unsigned int i;
+ unsigned int num_features_requested;
+
+ firmware_feature_list = (struct pqi_conf_table_firmware_features*)
+ softs->fw_features_section_abs_addr;
+
+	/* Check features and request those supported by firmware and driver. */
+ for (i = 0, num_features_requested = 0;
+ i < ARRAY_SIZE(pqi_firmware_features); i++) {
+		/* Does the firmware support it? */
+ if (pqi_is_firmware_feature_supported(firmware_feature_list,
+ pqi_firmware_features[i].feature_bit)) {
+ pqi_request_firmware_feature(firmware_feature_list,
+ pqi_firmware_features[i].feature_bit);
+ pqi_firmware_features[i].supported = true;
+ num_features_requested++;
+ DBG_NOTE("%s supported by driver, requesting firmware to enable it\n",
+ pqi_firmware_features[i].feature_name);
+ } else {
+ DBG_NOTE("%s supported by driver, but not by current firmware\n",
+ pqi_firmware_features[i].feature_name);
+ }
+ }
+ if (num_features_requested == 0)
+ return;
+
+ rc = pqisrc_set_host_requested_firmware_feature(softs, firmware_feature_list);
+ if (rc) {
+ DBG_ERR("Failed to update pqi config table\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
+ if (pqi_is_firmware_feature_enabled(firmware_feature_list,
+ softs->fw_features_section_abs_addr, pqi_firmware_features[i].feature_bit)) {
+ pqi_firmware_features[i].enabled = true;
+ DBG_NOTE("Firmware feature %s enabled \n",pqi_firmware_features[i].feature_name);
+ if(pqi_firmware_features[i].feature_status)
+ pqi_firmware_features[i].feature_status(softs, &(pqi_firmware_features[i]));
+ }
+ }
+}
+
/*
* Get the PQI configuration table parameters.
* Currently using for heart-beat counter scratch-pad register.
*/
-int pqisrc_process_config_table(pqisrc_softstate_t *softs)
+int
+pqisrc_process_config_table(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_FAILURE;
uint32_t config_table_size;
@@ -410,6 +609,13 @@
return ret;
}
+ if (config_table_size < sizeof(conf_table) ||
+ config_table_size > PQI_CONF_TABLE_MAX_LEN) {
+ DBG_ERR("Invalid PQI conf table length of %u\n",
+ config_table_size);
+ goto out;
+ }
+
config_table_abs_addr = (uint8_t *)(softs->pci_mem_base_vaddr +
softs->pqi_cap.conf_tab_off);
@@ -429,19 +635,21 @@
while (section_off) {
if (section_off+ sizeof(*section_hdr) >= config_table_size) {
- DBG_ERR("PQI config table section offset (%u) beyond \
- end of config table (config table length: %u)\n",
- section_off, config_table_size);
+ DBG_INFO("Reached end of PQI config table. Breaking off.\n");
break;
}
-
+
section_hdr = (struct pqi_conf_table_section_header *)((uint8_t *)conf_table + section_off);
-
+
switch (LE_16(section_hdr->section_id)) {
case PQI_CONF_TABLE_SECTION_GENERAL_INFO:
- case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES:
case PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA:
case PQI_CONF_TABLE_SECTION_DEBUG:
+ break;
+ case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES:
+ softs->fw_features_section_off = softs->pqi_cap.conf_tab_off + section_off;
+ softs->fw_features_section_abs_addr = softs->pci_mem_base_vaddr + softs->fw_features_section_off;
+ pqisrc_process_firmware_features(softs);
break;
case PQI_CONF_TABLE_SECTION_HEARTBEAT:
softs->heartbeat_counter_off = softs->pqi_cap.conf_tab_off +
@@ -465,7 +673,8 @@
}
/* Wait for PQI reset completion for the adapter*/
-int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs)
+int
+pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
pqi_reset_reg_t reset_reg;
@@ -481,7 +690,7 @@
while(1) {
if (pqi_reset_timeout++ == max_timeout) {
- return PQI_STATUS_TIMEOUT;
+ return PQI_STATUS_TIMEOUT;
}
OS_SLEEP(PQI_RESET_POLL_INTERVAL);/* 100 msec */
reset_reg.all_bits = PCI_MEM_GET32(softs,
@@ -496,7 +705,8 @@
/*
* Function used to perform PQI hard reset.
*/
-int pqi_reset(pqisrc_softstate_t *softs)
+int
+pqi_reset(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t val = 0;
@@ -504,8 +714,8 @@
DBG_FUNC("IN\n");
- if (true == softs->ctrl_in_pqi_mode) {
-
+ if (true == softs->ctrl_in_pqi_mode) {
+
if (softs->pqi_reset_quiesce_allowed) {
val = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR);
@@ -540,7 +750,8 @@
/*
* Initialize the adapter with supported PQI configuration.
*/
-int pqisrc_pqi_init(pqisrc_softstate_t *softs)
+int
+pqisrc_pqi_init(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
@@ -555,7 +766,7 @@
PQI_SAVE_CTRL_MODE(softs, CTRL_PQI_MODE);
softs->ctrl_in_pqi_mode = true;
-
+
/* Get the No. of Online CPUs,NUMA/Processor config from OS */
ret = os_get_processor_config(softs);
if (ret) {
@@ -563,8 +774,8 @@
ret);
goto err_out;
}
-
- softs->intr_type = INTR_TYPE_NONE;
+
+ softs->intr_type = INTR_TYPE_NONE;
/* Get the interrupt count, type, priority available from OS */
ret = os_get_intr_config(softs);
@@ -582,16 +793,16 @@
sis_enable_intx(softs);
}
- /* Create Admin Queue pair*/
+ /* Create Admin Queue pair*/
ret = pqisrc_create_admin_queue(softs);
if(ret) {
DBG_ERR("Failed to configure admin queue\n");
goto err_admin_queue;
}
- /* For creating event and IO operational queues we have to submit
- admin IU requests.So Allocate resources for submitting IUs */
-
+	/* For creating event and IO operational queues we have to submit
+	   admin IU requests. So allocate resources for submitting IUs */
+
/* Allocate the request container block (rcb) */
ret = pqisrc_allocate_rcb(softs);
if (ret == PQI_STATUS_FAILURE) {
@@ -631,7 +842,7 @@
err_config_opq:
pqisrc_destroy_taglist(softs,&softs->taglist);
err_taglist:
- pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
+ pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
err_rcb:
pqisrc_destroy_admin_queue(softs);
err_admin_queue:
@@ -641,8 +852,8 @@
return PQI_STATUS_FAILURE;
}
-/* */
-int pqisrc_force_sis(pqisrc_softstate_t *softs)
+int
+pqisrc_force_sis(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
@@ -666,7 +877,7 @@
ret = pqi_reset(softs);
if (ret) {
return ret;
- }
+ }
/* Re enable SIS */
ret = pqisrc_reenable_sis(softs);
if (ret) {
@@ -675,27 +886,56 @@
PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
- return ret;
+ return ret;
}
-int pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *softs)
+static int
+pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *softs)
{
+ int count = 0;
int ret = PQI_STATUS_SUCCESS;
- int tmo = PQI_CMND_COMPLETE_TMO;
-
- COND_WAIT((softs->taglist.num_elem == softs->max_outstanding_io), tmo);
- if (!tmo) {
- DBG_ERR("Pending commands %x!!!",softs->taglist.num_elem);
- ret = PQI_STATUS_TIMEOUT;
+
+ DBG_NOTE("softs->taglist.num_elem : %d",softs->taglist.num_elem);
+
+ if (softs->taglist.num_elem == softs->max_outstanding_io)
+ return ret;
+ else {
+ DBG_WARN("%d commands pending\n",
+ softs->max_outstanding_io - softs->taglist.num_elem);
+
+ while(1) {
+
+			/* Since the heartbeat timer is stopped, check firmware status */
+ if (SIS_IS_KERNEL_PANIC(softs)) {
+ DBG_ERR("Controller FW is not running\n");
+ return PQI_STATUS_FAILURE;
+ }
+
+ if (softs->taglist.num_elem != softs->max_outstanding_io) {
+ /* Sleep for 1 msec */
+ OS_SLEEP(1000);
+ count++;
+ if(count % 1000 == 0) {
+ DBG_WARN("Waited for %d seconds", count/1000);
+ }
+ if (count >= PQI_QUIESCE_TIMEOUT) {
+ return PQI_STATUS_FAILURE;
+ }
+ continue;
+ }
+ break;
+ }
}
return ret;
}
-void pqisrc_complete_internal_cmds(pqisrc_softstate_t *softs)
+static void
+pqisrc_complete_internal_cmds(pqisrc_softstate_t *softs)
{
+
int tag = 0;
rcb_t *rcb;
-
+
for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
rcb = &softs->rcb[tag];
if(rcb->req_pending && is_internal_req(rcb)) {
@@ -705,60 +945,66 @@
}
}
+
/*
* Uninitialize the resources used during PQI initialization.
*/
-void pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
+void
+pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
{
int i, ret;
DBG_FUNC("IN\n");
-
+
/* Wait for any rescan to finish */
pqisrc_wait_for_rescan_complete(softs);
/* Wait for commands to complete */
ret = pqisrc_wait_for_cmnd_complete(softs);
-
+
+ /* disable and free the interrupt resources */
+ os_destroy_intr(softs);
+
/* Complete all pending commands. */
if(ret != PQI_STATUS_SUCCESS) {
pqisrc_complete_internal_cmds(softs);
os_complete_outstanding_cmds_nodevice(softs);
}
- if(softs->devlist_lockcreated==true){
- os_uninit_spinlock(&softs->devlist_lock);
- softs->devlist_lockcreated = false;
- }
-
+ if(softs->devlist_lockcreated==true){
+ os_uninit_spinlock(&softs->devlist_lock);
+ softs->devlist_lockcreated = false;
+ }
+
for (i = 0; i < softs->num_op_raid_ibq; i++) {
- /* OP RAID IB Q */
- if(softs->op_raid_ib_q[i].lockcreated==true){
- OS_UNINIT_PQILOCK(&softs->op_raid_ib_q[i].lock);
- softs->op_raid_ib_q[i].lockcreated = false;
- }
-
- /* OP AIO IB Q */
- if(softs->op_aio_ib_q[i].lockcreated==true){
- OS_UNINIT_PQILOCK(&softs->op_aio_ib_q[i].lock);
- softs->op_aio_ib_q[i].lockcreated = false;
- }
+ /* OP RAID IB Q */
+ if(softs->op_raid_ib_q[i].lockcreated==true){
+ OS_UNINIT_PQILOCK(&softs->op_raid_ib_q[i].lock);
+ softs->op_raid_ib_q[i].lockcreated = false;
+ }
+ /* OP AIO IB Q */
+ if(softs->op_aio_ib_q[i].lockcreated==true){
+ OS_UNINIT_PQILOCK(&softs->op_aio_ib_q[i].lock);
+ softs->op_aio_ib_q[i].lockcreated = false;
+ }
}
/* Free Op queues */
os_dma_mem_free(softs, &softs->op_ibq_dma_mem);
os_dma_mem_free(softs, &softs->op_obq_dma_mem);
os_dma_mem_free(softs, &softs->event_q_dma_mem);
-
+
/* Free rcb */
pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
/* Free request id lists */
pqisrc_destroy_taglist(softs,&softs->taglist);
- if(softs->admin_ib_queue.lockcreated==true){
- OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
- softs->admin_ib_queue.lockcreated = false;
+ if(softs->admin_ib_queue.lockcreated==true) {
+ OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
+ softs->admin_ib_queue.lockcreated = false;
}
/* Free Admin Queue */
@@ -775,15 +1021,16 @@
/*
* Function to initialize the adapter settings.
*/
-int pqisrc_init(pqisrc_softstate_t *softs)
+int
+pqisrc_init(pqisrc_softstate_t *softs)
{
int ret = 0;
int i = 0, j = 0;
DBG_FUNC("IN\n");
-
+
check_struct_sizes();
-
+
/* Init the Sync interface */
ret = pqisrc_sis_init(softs);
if (ret) {
@@ -817,7 +1064,7 @@
DBG_ERR(" Failed to configure Report events\n");
goto err_event;
}
-
+
/* Set event configuration*/
ret = pqisrc_set_event_config(softs);
if(ret){
@@ -839,7 +1086,7 @@
goto err_host_wellness;
}
-
+
os_strlcpy(softs->devlist_lock_name, "devlist_lock", LOCKNAME_SIZE);
ret = os_init_spinlock(softs, &softs->devlist_lock, softs->devlist_lock_name);
if(ret){
@@ -848,23 +1095,16 @@
goto err_lock;
}
softs->devlist_lockcreated = true;
-
- OS_ATOMIC64_SET(softs, num_intrs, 0);
- softs->prev_num_intrs = softs->num_intrs;
-
/* Get the PQI configuration table to read heart-beat counter*/
- if (PQI_NEW_HEARTBEAT_MECHANISM(softs)) {
- ret = pqisrc_process_config_table(softs);
- if (ret) {
- DBG_ERR("Failed to process PQI configuration table %d\n", ret);
- goto err_config_tab;
- }
+ ret = pqisrc_process_config_table(softs);
+ if (ret) {
+ DBG_ERR("Failed to process PQI configuration table %d\n", ret);
+ goto err_config_tab;
}
- if (PQI_NEW_HEARTBEAT_MECHANISM(softs))
- softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL;
-
+ softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL;
+
/* Init device list */
for(i = 0; i < PQI_MAX_DEVICES; i++)
for(j = 0; j < PQI_MAX_MULTILUN; j++)
@@ -876,15 +1116,14 @@
return ret;
err_config_tab:
- if(softs->devlist_lockcreated==true){
+ if(softs->devlist_lockcreated==true){
os_uninit_spinlock(&softs->devlist_lock);
softs->devlist_lockcreated = false;
- }
+ }
err_lock:
err_fw_version:
err_event:
err_host_wellness:
- os_destroy_intr(softs);
err_intr:
pqisrc_pqi_uninit(softs);
err_pqi:
@@ -900,7 +1139,8 @@
* Write all data in the adapter's battery-backed cache to
* storage.
*/
-int pqisrc_flush_cache( pqisrc_softstate_t *softs,
+int
+pqisrc_flush_cache( pqisrc_softstate_t *softs,
enum pqisrc_flush_cache_event_type event_type)
{
int rval = PQI_STATUS_SUCCESS;
@@ -912,7 +1152,7 @@
if (pqisrc_ctrl_offline(softs))
return PQI_STATUS_FAILURE;
- flush_buff = os_mem_alloc(softs, sizeof(pqisrc_bmic_flush_cache_t));
+ flush_buff = os_mem_alloc(softs, sizeof(pqisrc_bmic_flush_cache_t));
if (!flush_buff) {
DBG_ERR("Failed to allocate memory for flush cache params\n");
rval = PQI_STATUS_FAILURE;
@@ -942,17 +1182,16 @@
/*
* Uninitialize the adapter.
*/
-void pqisrc_uninit(pqisrc_softstate_t *softs)
+void
+pqisrc_uninit(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
-
+
pqisrc_pqi_uninit(softs);
pqisrc_sis_uninit(softs);
os_destroy_semaphore(&softs->scan_lock);
-
- os_destroy_intr(softs);
pqisrc_cleanup_devices(softs);
diff --git a/sys/dev/smartpqi/smartpqi_intr.c b/sys/dev/smartpqi/smartpqi_intr.c
--- a/sys/dev/smartpqi/smartpqi_intr.c
+++ b/sys/dev/smartpqi/smartpqi_intr.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,34 +27,31 @@
#include "smartpqi_includes.h"
-
/*
* Function to get processor count
*/
-int os_get_processor_config(pqisrc_softstate_t *softs)
+int
+os_get_processor_config(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
softs->num_cpus_online = mp_ncpus;
DBG_FUNC("OUT\n");
-
+
return PQI_STATUS_SUCCESS;
}
/*
* Function to get interrupt count and type supported
*/
-int os_get_intr_config(pqisrc_softstate_t *softs)
+int
+os_get_intr_config(pqisrc_softstate_t *softs)
{
- device_t dev;
- int msi_count = 0;
- int error = 0;
- int ret = PQI_STATUS_SUCCESS;
- dev = softs->os_specific.pqi_dev;
+ device_t dev = softs->os_specific.pqi_dev;
+ int msi_count = pci_msix_count(dev);
+ int error = BSD_SUCCESS;
DBG_FUNC("IN\n");
- msi_count = pci_msix_count(dev);
-
if (msi_count > softs->num_cpus_online)
msi_count = softs->num_cpus_online;
if (msi_count > PQI_MAX_MSIX)
@@ -91,21 +87,21 @@
softs->intr_count = 1;
}
- if(!softs->intr_type) {
- DBG_FUNC("OUT failed\n");
- ret = PQI_STATUS_FAILURE;
- return ret;
- }
DBG_FUNC("OUT\n");
- return ret;
+
+ error = bsd_status_to_pqi_status(BSD_SUCCESS);
+
+ return error;
}
-void os_eventtaskqueue_enqueue(pqisrc_softstate_t *sc)
+void
+os_eventtaskqueue_enqueue(pqisrc_softstate_t *sc)
{
taskqueue_enqueue(taskqueue_swi, &sc->os_specific.event_task);
}
-void pqisrc_event_worker(void *arg1, int arg2)
+void
+pqisrc_event_worker(void *arg1, int arg2)
{
pqisrc_ack_all_events(arg1);
}
@@ -113,14 +109,18 @@
/*
* ithread routine to handle uniprocessor systems
*/
-static void shared_ithread_routine(void *arg)
+static void
+shared_ithread_routine(void *arg)
{
pqi_intr_ctx_t *intr_ctx = (pqi_intr_ctx_t *)arg;
pqisrc_softstate_t *softs = device_get_softc(intr_ctx->pqi_dev);
int oq_id = intr_ctx->oq_id;
-
+
DBG_FUNC("IN\n");
+ if (softs == NULL)
+ return;
+
pqisrc_process_response_queue(softs, oq_id);
pqisrc_process_event_intr_src(softs, oq_id - 1);
@@ -130,20 +130,25 @@
/*
* ithread routine to process non event response
*/
-static void common_ithread_routine(void *arg)
+static void
+common_ithread_routine(void *arg)
{
pqi_intr_ctx_t *intr_ctx = (pqi_intr_ctx_t *)arg;
pqisrc_softstate_t *softs = device_get_softc(intr_ctx->pqi_dev);
int oq_id = intr_ctx->oq_id;
DBG_FUNC("IN\n");
-
+
+ if (softs == NULL)
+ return;
+
pqisrc_process_response_queue(softs, oq_id);
DBG_FUNC("OUT\n");
}
-static void event_ithread_routine(void *arg)
+static void
+event_ithread_routine(void *arg)
{
pqi_intr_ctx_t *intr_ctx = (pqi_intr_ctx_t *)arg;
pqisrc_softstate_t *softs = device_get_softc(intr_ctx->pqi_dev);
@@ -151,6 +156,9 @@
DBG_FUNC("IN\n");
+ if (softs == NULL)
+ return;
+
pqisrc_process_event_intr_src(softs, oq_id);
DBG_FUNC("OUT\n");
@@ -159,34 +167,34 @@
/*
* Registration of legacy interrupt in case MSI is unsupported
*/
-int register_legacy_intr(pqisrc_softstate_t *softs)
+int
+register_legacy_intr(pqisrc_softstate_t *softs)
{
- int error = 0;
- device_t dev;
+ int error = BSD_SUCCESS;
+ device_t dev = softs->os_specific.pqi_dev;
DBG_FUNC("IN\n");
- dev = softs->os_specific.pqi_dev;
-
softs->os_specific.pqi_irq_rid[0] = 0;
softs->os_specific.pqi_irq[0] = bus_alloc_resource_any(dev, \
SYS_RES_IRQ, &softs->os_specific.pqi_irq_rid[0],
RF_ACTIVE | RF_SHAREABLE);
if (NULL == softs->os_specific.pqi_irq[0]) {
DBG_ERR("Failed to allocate resource for interrupt\n");
- return PQI_STATUS_FAILURE;
+ return ENXIO;
}
if ((softs->os_specific.msi_ctx = os_mem_alloc(softs,sizeof(pqi_intr_ctx_t))) == NULL) {
DBG_ERR("Failed to allocate memory for msi_ctx\n");
- return PQI_STATUS_FAILURE;
+ return ENXIO;
}
softs->os_specific.msi_ctx[0].pqi_dev = dev;
+ /* For Legacy support oq_id should be one */
softs->os_specific.msi_ctx[0].oq_id = 1;
error = bus_setup_intr(dev, softs->os_specific.pqi_irq[0],
INTR_TYPE_CAM | INTR_MPSAFE, \
NULL, shared_ithread_routine,
- &softs->os_specific.msi_ctx[0],
+ &softs->os_specific.msi_ctx[0],
&softs->os_specific.intrcookie[0]);
if (error) {
DBG_ERR("Failed to setup legacy interrupt err = %d\n", error);
@@ -200,19 +208,24 @@
}
/*
- * Registration of MSIx
+ * Registration of MSIx
*/
-int register_msix_intr(pqisrc_softstate_t *softs)
+int
+register_msix_intr(pqisrc_softstate_t *softs)
{
- int error = 0;
+ int error = BSD_SUCCESS;
int i = 0;
- device_t dev;
- dev = softs->os_specific.pqi_dev;
+ device_t dev = softs->os_specific.pqi_dev;
int msix_count = softs->intr_count;
DBG_FUNC("IN\n");
softs->os_specific.msi_ctx = os_mem_alloc(softs, sizeof(pqi_intr_ctx_t) * msix_count);
+ if (!softs->os_specific.msi_ctx) {
+ DBG_ERR("Memory allocation failed\n");
+ return ENXIO;
+ }
+
/*Add shared handler */
if (softs->share_opq_and_eventq) {
softs->os_specific.pqi_irq_rid[i] = i+1;
@@ -223,12 +236,12 @@
if (NULL == softs->os_specific.pqi_irq[i]) {
DBG_ERR("Failed to allocate \
event interrupt resource\n");
- return PQI_STATUS_FAILURE;
+ return ENXIO;
}
-
+
softs->os_specific.msi_ctx[i].pqi_dev = dev;
softs->os_specific.msi_ctx[i].oq_id = i+1;
-
+
error = bus_setup_intr(dev,softs->os_specific.pqi_irq[i],
INTR_TYPE_CAM | INTR_MPSAFE,\
NULL,
@@ -237,7 +250,7 @@
&softs->os_specific.intrcookie[i]);
if (error) {
- DBG_ERR("Failed to setup interrupt for events r=%d\n",
+ DBG_ERR("Failed to setup interrupt for events r=%d\n",
error);
return error;
}
@@ -251,15 +264,12 @@
&softs->os_specific.pqi_irq_rid[i],
RF_SHAREABLE | RF_ACTIVE);
if (NULL == softs->os_specific.pqi_irq[i]) {
- DBG_ERR("ERR : Failed to allocate \
- event interrupt resource\n");
- return PQI_STATUS_FAILURE;
+ DBG_ERR("Failed to allocate event interrupt resource\n");
+ return ENXIO;
}
-
-
+
softs->os_specific.msi_ctx[i].pqi_dev = dev;
softs->os_specific.msi_ctx[i].oq_id = i;
-
error = bus_setup_intr(dev,softs->os_specific.pqi_irq[i],
INTR_TYPE_CAM | INTR_MPSAFE,\
@@ -284,7 +294,7 @@
if (NULL == softs->os_specific.pqi_irq[i]) {
DBG_ERR("Failed to allocate \
msi/x interrupt resource\n");
- return PQI_STATUS_FAILURE;
+ return ENXIO;
}
softs->os_specific.msi_ctx[i].pqi_dev = dev;
softs->os_specific.msi_ctx[i].oq_id = i;
@@ -312,38 +322,40 @@
/*
* Setup interrupt depending on the configuration
*/
-int os_setup_intr(pqisrc_softstate_t *softs)
+int
+os_setup_intr(pqisrc_softstate_t *softs)
{
- int error = 0;
+ int bsd_status, pqi_status;
DBG_FUNC("IN\n");
if (softs->intr_type == INTR_TYPE_FIXED) {
- error = register_legacy_intr(softs);
+ bsd_status = register_legacy_intr(softs);
}
else {
- error = register_msix_intr(softs);
- }
- if (error) {
- DBG_FUNC("OUT failed error = %d\n", error);
- return error;
+ bsd_status = register_msix_intr(softs);
}
- DBG_FUNC("OUT error = %d\n", error);
+ if(bsd_status)
+ DBG_WARN("interrupt registration is failed, error = %d\n", bsd_status);
- return error;
+ pqi_status = bsd_status_to_pqi_status(bsd_status);
+
+ DBG_FUNC("OUT\n");
+
+ return pqi_status;
}
/*
* Deregistration of legacy interrupt
*/
-void deregister_pqi_intx(pqisrc_softstate_t *softs)
+void
+deregister_pqi_intx(pqisrc_softstate_t *softs)
{
- device_t dev;
+ device_t dev = softs->os_specific.pqi_dev;
DBG_FUNC("IN\n");
- dev = softs->os_specific.pqi_dev;
if (softs->os_specific.pqi_irq[0] != NULL) {
if (softs->os_specific.intr_registered[0]) {
bus_teardown_intr(dev, softs->os_specific.pqi_irq[0],
@@ -363,15 +375,15 @@
/*
* Deregistration of MSIx interrupt
*/
-void deregister_pqi_msix(pqisrc_softstate_t *softs)
+void
+deregister_pqi_msix(pqisrc_softstate_t *softs)
{
- device_t dev;
- dev = softs->os_specific.pqi_dev;
+ device_t dev = softs->os_specific.pqi_dev;
int msix_count = softs->intr_count;
int i = 0;
DBG_FUNC("IN\n");
-
+
os_mem_free(softs, (char*)softs->os_specific.msi_ctx, sizeof(pqi_intr_ctx_t) * msix_count);
softs->os_specific.msi_ctx = NULL;
@@ -396,10 +408,10 @@
/*
* Function to destroy interrupts registered
*/
-int os_destroy_intr(pqisrc_softstate_t *softs)
+int
+os_destroy_intr(pqisrc_softstate_t *softs)
{
- device_t dev;
- dev = softs->os_specific.pqi_dev;
+ device_t dev = softs->os_specific.pqi_dev;
DBG_FUNC("IN\n");
@@ -421,10 +433,10 @@
/*
* Free interrupt related resources for the adapter
*/
-void os_free_intr_config(pqisrc_softstate_t *softs)
+void
+os_free_intr_config(pqisrc_softstate_t *softs)
{
- device_t dev;
- dev = softs->os_specific.pqi_dev;
+ device_t dev = softs->os_specific.pqi_dev;
DBG_FUNC("IN\n");
diff --git a/sys/dev/smartpqi/smartpqi_ioctl.h b/sys/dev/smartpqi/smartpqi_ioctl.h
--- a/sys/dev/smartpqi/smartpqi_ioctl.h
+++ b/sys/dev/smartpqi/smartpqi_ioctl.h
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,7 +31,7 @@
/* IOCTL passthrough macros and structures */
#define SENSEINFOBYTES 32 /* note that this value may vary
- between host implementations */
+ between host implementations */
/* transfer direction */
#define PQIIOCTL_NONE 0x00
@@ -48,7 +47,6 @@
#define DWORD uint32_t
-
/* Command List Structure */
typedef union _SCSI3Addr_struct {
struct {
@@ -67,7 +65,7 @@
BYTE Targ:6;
BYTE Mode:2; /* b10 */
} LogUnit;
-
+
}OS_ATTRIBUTE_PACKED SCSI3Addr_struct;
typedef struct _PhysDevAddr_struct {
@@ -75,14 +73,14 @@
DWORD Bus:6;
DWORD Mode:2;
SCSI3Addr_struct Target[2]; /* 2 level target device addr */
-
+
}OS_ATTRIBUTE_PACKED PhysDevAddr_struct;
typedef struct _LogDevAddr_struct {
DWORD VolId:30;
DWORD Mode:2;
BYTE reserved[4];
-
+
}OS_ATTRIBUTE_PACKED LogDevAddr_struct;
typedef union _LUNAddr_struct {
@@ -94,7 +92,7 @@
}OS_ATTRIBUTE_PACKED LUNAddr_struct;
typedef struct _RequestBlock_struct {
- BYTE CDBLen;
+ BYTE CDBLen;
struct {
BYTE Type:3;
BYTE Attribute:3;
@@ -103,13 +101,13 @@
HWORD Timeout;
BYTE CDB[16];
-}OS_ATTRIBUTE_PACKED RequestBlock_struct;
+}OS_ATTRIBUTE_PACKED RequestBlock_struct;
typedef union _MoreErrInfo_struct{
struct {
- BYTE Reserved[3];
- BYTE Type;
- DWORD ErrorInfo;
+ BYTE Reserved[3];
+ BYTE Type;
+ DWORD ErrorInfo;
} Common_Info;
struct{
BYTE Reserved[2];
@@ -134,7 +132,7 @@
typedef struct pqi_ioctl_passthruCmd_struct {
LUNAddr_struct LUN_info;
RequestBlock_struct Request;
- ErrorInfo_struct error_info;
+ ErrorInfo_struct error_info;
WORD buf_size; /* size in bytes of the buf */
passthru_buf_type_t buf;
diff --git a/sys/dev/smartpqi/smartpqi_ioctl.c b/sys/dev/smartpqi/smartpqi_ioctl.c
--- a/sys/dev/smartpqi/smartpqi_ioctl.c
+++ b/sys/dev/smartpqi/smartpqi_ioctl.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,7 +34,8 @@
/*
* Wrapper function to copy to user from kernel
*/
-int os_copy_to_user(struct pqisrc_softstate *softs, void *dest_buf,
+int
+os_copy_to_user(struct pqisrc_softstate *softs, void *dest_buf,
void *src_buf, int size, int mode)
{
return(copyout(src_buf, dest_buf, size));
@@ -44,7 +44,8 @@
/*
* Wrapper function to copy from user to kernel
*/
-int os_copy_from_user(struct pqisrc_softstate *softs, void *dest_buf,
+int
+os_copy_from_user(struct pqisrc_softstate *softs, void *dest_buf,
void *src_buf, int size, int mode)
{
return(copyin(src_buf, dest_buf, size));
@@ -53,39 +54,38 @@
/*
* Device open function for ioctl entry
*/
-static int smartpqi_open(struct cdev *cdev, int flags, int devtype,
+static int
+smartpqi_open(struct cdev *cdev, int flags, int devtype,
struct thread *td)
{
- int error = PQI_STATUS_SUCCESS;
-
- return error;
+ return BSD_SUCCESS;
}
/*
- * Device close function for ioctl entry
+ * Device close function for ioctl entry
*/
-static int smartpqi_close(struct cdev *cdev, int flags, int devtype,
+static int
+smartpqi_close(struct cdev *cdev, int flags, int devtype,
struct thread *td)
{
- int error = PQI_STATUS_SUCCESS;
-
- return error;
+ return BSD_SUCCESS;
}
/*
* ioctl for getting driver info
*/
-static void smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev)
+static void
+smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev)
{
struct pqisrc_softstate *softs = cdev->si_drv1;
pdriver_info driver_info = (pdriver_info)udata;
DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
- driver_info->major_version = PQISRC_DRIVER_MAJOR;
- driver_info->minor_version = PQISRC_DRIVER_MINOR;
- driver_info->release_version = PQISRC_DRIVER_RELEASE;
- driver_info->build_revision = PQISRC_DRIVER_REVISION;
+ driver_info->major_version = PQISRC_OS_VERSION;
+ driver_info->minor_version = PQISRC_FEATURE_VERSION;
+ driver_info->release_version = PQISRC_PATCH_VERSION;
+ driver_info->build_revision = PQISRC_BUILD_VERSION;
driver_info->max_targets = PQI_MAX_DEVICES - 1;
driver_info->max_io = softs->max_io_for_scsi_ml;
driver_info->max_transfer_length = softs->pqi_cap.max_transfer_size;
@@ -96,7 +96,8 @@
/*
* ioctl for getting controller info
*/
-static void smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev *cdev)
+static void
+smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev *cdev)
{
struct pqisrc_softstate *softs = cdev->si_drv1;
device_t dev = softs->os_specific.pqi_dev;
@@ -120,50 +121,62 @@
DBG_FUNC("OUT\n");
}
+static inline int
+pqi_status_to_bsd_ioctl_status(int pqi_status)
+{
+ if (PQI_STATUS_SUCCESS == pqi_status)
+ return BSD_SUCCESS;
+ else
+ return EIO;
+}
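/*
 * Editorial sketch, not part of the patch: every PQI-layer call in the
 * ioctl switch below is funnelled through this mapping, so user space only
 * ever sees errno values (BSD_SUCCESS or EIO). A minimal usage example:
 */
static int
sketch_ioctl_rescan(struct pqisrc_softstate *softs)
{
	return (pqi_status_to_bsd_ioctl_status(pqisrc_scan_devices(softs)));
}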
/*
* ioctl entry point for user
*/
-static int smartpqi_ioctl(struct cdev *cdev, u_long cmd, caddr_t udata,
+static int
+smartpqi_ioctl(struct cdev *cdev, u_long cmd, caddr_t udata,
int flags, struct thread *td)
{
- int error = PQI_STATUS_SUCCESS;
+ int bsd_status, pqi_status;
struct pqisrc_softstate *softs = cdev->si_drv1;
DBG_FUNC("IN cmd = 0x%lx udata = %p cdev = %p\n", cmd, udata, cdev);
if (!udata) {
DBG_ERR("udata is null !!\n");
+ return EINVAL;
}
if (pqisrc_ctrl_offline(softs)){
- DBG_ERR("Controller s offline !!\n");
return ENOTTY;
}
switch (cmd) {
case CCISS_GETDRIVVER:
smartpqi_get_driver_info_ioctl(udata, cdev);
+ bsd_status = BSD_SUCCESS;
break;
case CCISS_GETPCIINFO:
smartpqi_get_pci_info_ioctl(udata, cdev);
+ bsd_status = BSD_SUCCESS;
break;
case SMARTPQI_PASS_THRU:
case CCISS_PASSTHRU:
- error = pqisrc_passthru_ioctl(softs, udata, 0);
- error = PQI_STATUS_SUCCESS;
+ pqi_status = pqisrc_passthru_ioctl(softs, udata, 0);
+ bsd_status = pqi_status_to_bsd_ioctl_status(pqi_status);
break;
case CCISS_REGNEWD:
- error = pqisrc_scan_devices(softs);
+ pqi_status = pqisrc_scan_devices(softs);
+ bsd_status = pqi_status_to_bsd_ioctl_status(pqi_status);
break;
default:
- DBG_WARN( "!IOCTL cmd 0x%lx not supported", cmd);
- error = ENOTTY;
+ DBG_WARN( "!IOCTL cmd 0x%lx not supported\n", cmd);
+ bsd_status = ENOTTY;
break;
}
- DBG_FUNC("OUT error = %d\n", error);
- return error;
+ DBG_FUNC("OUT error = %d\n", bsd_status);
+ return bsd_status;
}
static struct cdevsw smartpqi_cdevsw =
@@ -178,9 +191,10 @@
/*
* Function to create device node for ioctl
*/
-int create_char_dev(struct pqisrc_softstate *softs, int card_index)
+int
+create_char_dev(struct pqisrc_softstate *softs, int card_index)
{
- int error = PQI_STATUS_SUCCESS;
+ int error = BSD_SUCCESS;
DBG_FUNC("IN idx = %d\n", card_index);
@@ -190,17 +204,19 @@
if(softs->os_specific.cdev) {
softs->os_specific.cdev->si_drv1 = softs;
} else {
- error = PQI_STATUS_FAILURE;
+ error = ENXIO;
}
DBG_FUNC("OUT error = %d\n", error);
+
return error;
}
/*
* Function to destroy device node for ioctl
*/
-void destroy_char_dev(struct pqisrc_softstate *softs)
+void
+destroy_char_dev(struct pqisrc_softstate *softs)
{
DBG_FUNC("IN\n");
if (softs->os_specific.cdev) {
@@ -230,16 +246,16 @@
memset(&request, 0, sizeof(request));
memset(&error_info, 0, sizeof(error_info));
-
+
DBG_FUNC("IN");
if (pqisrc_ctrl_offline(softs))
return PQI_STATUS_FAILURE;
-
+
if (!arg)
return (PQI_STATUS_FAILURE);
- if (iocommand->buf_size < 1 &&
+ if (iocommand->buf_size < 1 &&
iocommand->Request.Type.Direction != PQIIOCTL_NONE)
return PQI_STATUS_FAILURE;
if (iocommand->Request.CDBLen > sizeof(request.cdb))
@@ -267,28 +283,29 @@
ret = PQI_STATUS_FAILURE;
goto out;
}
-
+
DBG_INFO("ioctl_dma_buf.dma_addr = %p\n",(void*)ioctl_dma_buf.dma_addr);
DBG_INFO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr);
-
+
drv_buf = (char *)ioctl_dma_buf.virt_addr;
if (iocommand->Request.Type.Direction & PQIIOCTL_WRITE) {
- if ((ret = os_copy_from_user(softs, (void *)drv_buf, (void *)iocommand->buf,
- iocommand->buf_size, mode)) != 0) {
+ if ((ret = os_copy_from_user(softs, (void *)drv_buf,
+ (void *)iocommand->buf,
+ iocommand->buf_size, mode)) != 0) {
ret = PQI_STATUS_FAILURE;
goto free_mem;
}
}
}
-
+
request.header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
- request.header.iu_length = offsetof(pqisrc_raid_req_t, sg_descriptors[1]) -
+ request.header.iu_length = offsetof(pqisrc_raid_req_t, sg_descriptors[1]) -
PQI_REQUEST_HEADER_LENGTH;
- memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes,
+ memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes,
sizeof(request.lun_number));
memcpy(request.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen);
request.additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;
-
+
switch (iocommand->Request.Type.Direction) {
case PQIIOCTL_NONE:
request.data_direction = SOP_DATA_DIR_NONE;
@@ -320,8 +337,11 @@
request.request_id = tag;
request.response_queue_id = ob_q->q_id;
request.error_index = request.request_id;
- rcb = &softs->rcb[tag];
+ if (softs->timeout_in_passthrough) {
+ request.timeout_in_sec = iocommand->Request.Timeout;
+ }
+ rcb = &softs->rcb[tag];
rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;
rcb->tag = tag;
@@ -333,7 +353,8 @@
goto err_out;
}
- ret = pqisrc_wait_on_condition(softs, rcb);
+ ret = pqisrc_wait_on_condition(softs, rcb,
+ PQISRC_PASSTHROUGH_CMD_TIMEOUT);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Passthru IOCTL cmd timed out !!\n");
goto err_out;
@@ -352,7 +373,7 @@
if (!sense_data_length)
sense_data_length = error_info.resp_data_len;
- if (sense_data_length &&
+ if (sense_data_length &&
(sense_data_length > sizeof(error_info.data)))
sense_data_length = sizeof(error_info.data);
@@ -366,23 +387,23 @@
iocommand->error_info.SenseLen = sense_data_length;
}
- if (error_info.data_out_result ==
+ if (error_info.data_out_result ==
PQI_RAID_DATA_IN_OUT_UNDERFLOW){
rcb->status = REQUEST_SUCCESS;
}
}
- if (rcb->status == REQUEST_SUCCESS && iocommand->buf_size > 0 &&
+ if (rcb->status == REQUEST_SUCCESS && iocommand->buf_size > 0 &&
(iocommand->Request.Type.Direction & PQIIOCTL_READ)) {
- if ((ret = os_copy_to_user(softs, (void*)iocommand->buf,
+ if ((ret = os_copy_to_user(softs, (void*)iocommand->buf,
(void*)drv_buf, iocommand->buf_size, mode)) != 0) {
- DBG_ERR("Failed to copy the response\n");
+ DBG_ERR("Failed to copy the response\n");
goto err_out;
}
}
- os_reset_rcb(rcb);
+ os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist, request.request_id);
if (iocommand->buf_size > 0)
os_dma_mem_free(softs,&ioctl_dma_buf);
@@ -390,7 +411,7 @@
DBG_FUNC("OUT\n");
return ret;
err_out:
- os_reset_rcb(rcb);
+ os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist, request.request_id);
free_mem:
diff --git a/sys/dev/smartpqi/smartpqi_main.c b/sys/dev/smartpqi/smartpqi_main.c
--- a/sys/dev/smartpqi/smartpqi_main.c
+++ b/sys/dev/smartpqi/smartpqi_main.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -33,6 +32,8 @@
#include "smartpqi_includes.h"
#include "smartpqi_prototypes.h"
+CTASSERT(BSD_SUCCESS == PQI_STATUS_SUCCESS);
+
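/*
 * Editorial sketch, not part of the patch: with both success codes pinned
 * to zero at compile time by the CTASSERT above, the status converters the
 * patch relies on only need to translate failures. One plausible shape for
 * bsd_status_to_pqi_status() -- its real definition is not shown in this
 * diff -- is:
 */
static inline int
sketch_bsd_to_pqi_status(int bsd_status)
{
	return ((bsd_status == BSD_SUCCESS) ?
	    PQI_STATUS_SUCCESS : PQI_STATUS_FAILURE);
}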
/*
* Supported devices
*/
@@ -46,55 +47,69 @@
char *desc;
} pqi_identifiers[] = {
/* (MSCC PM8205 8x12G based) */
- {0x9005, 0x028f, 0x103c, 0x600, PQI_HWIF_SRCV, "P408i-p SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x601, PQI_HWIF_SRCV, "P408e-p SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x602, PQI_HWIF_SRCV, "P408i-a SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x603, PQI_HWIF_SRCV, "P408i-c SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x600, PQI_HWIF_SRCV, "P408i-p SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x601, PQI_HWIF_SRCV, "P408e-p SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x602, PQI_HWIF_SRCV, "P408i-a SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x603, PQI_HWIF_SRCV, "P408i-c SR Gen10"},
{0x9005, 0x028f, 0x1028, 0x1FE0, PQI_HWIF_SRCV, "SmartRAID 3162-8i/eDell"},
- {0x9005, 0x028f, 0x9005, 0x608, PQI_HWIF_SRCV, "SmartRAID 3162-8i/e"},
- {0x9005, 0x028f, 0x103c, 0x609, PQI_HWIF_SRCV, "P408i-sb SR G10"},
+ {0x9005, 0x028f, 0x9005, 0x608, PQI_HWIF_SRCV, "SmartRAID 3162-8i/e"},
+ {0x9005, 0x028f, 0x103c, 0x609, PQI_HWIF_SRCV, "P408i-sb SR G10"},
/* (MSCC PM8225 8x12G based) */
- {0x9005, 0x028f, 0x103c, 0x650, PQI_HWIF_SRCV, "E208i-p SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x651, PQI_HWIF_SRCV, "E208e-p SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x652, PQI_HWIF_SRCV, "E208i-c SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x654, PQI_HWIF_SRCV, "E208i-a SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x655, PQI_HWIF_SRCV, "P408e-m SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x650, PQI_HWIF_SRCV, "E208i-p SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x651, PQI_HWIF_SRCV, "E208e-p SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x652, PQI_HWIF_SRCV, "E208i-c SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x654, PQI_HWIF_SRCV, "E208i-a SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x655, PQI_HWIF_SRCV, "P408e-m SR Gen10"},
/* (MSCC PM8221 8x12G based) */
- {0x9005, 0x028f, 0x103c, 0x700, PQI_HWIF_SRCV, "P204i-c SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x701, PQI_HWIF_SRCV, "P204i-b SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x700, PQI_HWIF_SRCV, "P204i-c SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x701, PQI_HWIF_SRCV, "P204i-b SR Gen10"},
+ {0x9005, 0x028f, 0x193d, 0x1104, PQI_HWIF_SRCV, "UN RAID P2404-Mf-4i-2GB"},
+ {0x9005, 0x028f, 0x193d, 0x1106, PQI_HWIF_SRCV, "UN RAID P2404-Mf-4i-1GB"},
+ {0x9005, 0x028f, 0x193d, 0x1108, PQI_HWIF_SRCV, "UN RAID P4408-Ma-8i-2GB"},
+
/* (MSCC PM8204 8x12G based) */
- {0x9005, 0x028f, 0x9005, 0x800, PQI_HWIF_SRCV, "SmartRAID 3154-8i"},
- {0x9005, 0x028f, 0x9005, 0x801, PQI_HWIF_SRCV, "SmartRAID 3152-8i"},
- {0x9005, 0x028f, 0x9005, 0x802, PQI_HWIF_SRCV, "SmartRAID 3151-4i"},
- {0x9005, 0x028f, 0x9005, 0x803, PQI_HWIF_SRCV, "SmartRAID 3101-4i"},
- {0x9005, 0x028f, 0x9005, 0x804, PQI_HWIF_SRCV, "SmartRAID 3154-8e"},
- {0x9005, 0x028f, 0x9005, 0x805, PQI_HWIF_SRCV, "SmartRAID 3102-8i"},
- {0x9005, 0x028f, 0x9005, 0x806, PQI_HWIF_SRCV, "SmartRAID 3100"},
- {0x9005, 0x028f, 0x9005, 0x807, PQI_HWIF_SRCV, "SmartRAID 3162-8i"},
+ {0x9005, 0x028f, 0x9005, 0x800, PQI_HWIF_SRCV, "SmartRAID 3154-8i"},
+ {0x9005, 0x028f, 0x9005, 0x801, PQI_HWIF_SRCV, "SmartRAID 3152-8i"},
+ {0x9005, 0x028f, 0x9005, 0x802, PQI_HWIF_SRCV, "SmartRAID 3151-4i"},
+ {0x9005, 0x028f, 0x9005, 0x803, PQI_HWIF_SRCV, "SmartRAID 3101-4i"},
+ {0x9005, 0x028f, 0x9005, 0x804, PQI_HWIF_SRCV, "SmartRAID 3154-8e"},
+ {0x9005, 0x028f, 0x9005, 0x805, PQI_HWIF_SRCV, "SmartRAID 3102-8i"},
+ {0x9005, 0x028f, 0x9005, 0x806, PQI_HWIF_SRCV, "SmartRAID 3100"},
+ {0x9005, 0x028f, 0x9005, 0x807, PQI_HWIF_SRCV, "SmartRAID 3162-8i"},
{0x9005, 0x028f, 0x152d, 0x8a22, PQI_HWIF_SRCV, "QS-8204-8i"},
{0x9005, 0x028f, 0x193d, 0xf460, PQI_HWIF_SRCV, "UN RAID P460-M4"},
{0x9005, 0x028f, 0x193d, 0xf461, PQI_HWIF_SRCV, "UN RAID P460-B4"},
- {0x9005, 0x028f, 0x1bd4, 0x004b, PQI_HWIF_SRCV, "INSPUR RAID PM8204-2GB"},
- {0x9005, 0x028f, 0x1bd4, 0x004c, PQI_HWIF_SRCV, "INSPUR RAID PM8204-4GB"},
+ {0x9005, 0x028f, 0x1bd4, 0x004b, PQI_HWIF_SRCV, "INSPUR PM8204-2GB"},
+ {0x9005, 0x028f, 0x1bd4, 0x004c, PQI_HWIF_SRCV, "INSPUR PM8204-4GB"},
+ {0x9005, 0x028f, 0x193d, 0x1105, PQI_HWIF_SRCV, "UN RAID P4408-Mf-8i-2GB"},
+ {0x9005, 0x028f, 0x193d, 0x1107, PQI_HWIF_SRCV, "UN RAID P4408-Mf-8i-4GB"},
+ {0x9005, 0x028f, 0x1d8d, 0x800, PQI_HWIF_SRCV, "Fiberhome SmartRAID AIS-8204-8i"},
+ {0x9005, 0x028f, 0x9005, 0x0808, PQI_HWIF_SRCV, "SmartRAID 3101E-4i"},
+ {0x9005, 0x028f, 0x9005, 0x0809, PQI_HWIF_SRCV, "SmartRAID 3102E-8i"},
+ {0x9005, 0x028f, 0x9005, 0x080a, PQI_HWIF_SRCV, "SmartRAID 3152-8i/N"},
/* (MSCC PM8222 8x12G based) */
- {0x9005, 0x028f, 0x9005, 0x900, PQI_HWIF_SRCV, "SmartHBA 2100-8i"},
- {0x9005, 0x028f, 0x9005, 0x901, PQI_HWIF_SRCV, "SmartHBA 2100-4i"},
- {0x9005, 0x028f, 0x9005, 0x902, PQI_HWIF_SRCV, "HBA 1100-8i"},
- {0x9005, 0x028f, 0x9005, 0x903, PQI_HWIF_SRCV, "HBA 1100-4i"},
- {0x9005, 0x028f, 0x9005, 0x904, PQI_HWIF_SRCV, "SmartHBA 2100-8e"},
- {0x9005, 0x028f, 0x9005, 0x905, PQI_HWIF_SRCV, "HBA 1100-8e"},
- {0x9005, 0x028f, 0x9005, 0x906, PQI_HWIF_SRCV, "SmartHBA 2100-4i4e"},
- {0x9005, 0x028f, 0x9005, 0x907, PQI_HWIF_SRCV, "HBA 1100"},
- {0x9005, 0x028f, 0x9005, 0x908, PQI_HWIF_SRCV, "SmartHBA 2100"},
- {0x9005, 0x028f, 0x9005, 0x90a, PQI_HWIF_SRCV, "SmartHBA 2100A-8i"},
+ {0x9005, 0x028f, 0x9005, 0x900, PQI_HWIF_SRCV, "SmartHBA 2100-8i"},
+ {0x9005, 0x028f, 0x9005, 0x901, PQI_HWIF_SRCV, "SmartHBA 2100-4i"},
+ {0x9005, 0x028f, 0x9005, 0x902, PQI_HWIF_SRCV, "HBA 1100-8i"},
+ {0x9005, 0x028f, 0x9005, 0x903, PQI_HWIF_SRCV, "HBA 1100-4i"},
+ {0x9005, 0x028f, 0x9005, 0x904, PQI_HWIF_SRCV, "SmartHBA 2100-8e"},
+ {0x9005, 0x028f, 0x9005, 0x905, PQI_HWIF_SRCV, "HBA 1100-8e"},
+ {0x9005, 0x028f, 0x9005, 0x906, PQI_HWIF_SRCV, "SmartHBA 2100-4i4e"},
+ {0x9005, 0x028f, 0x9005, 0x907, PQI_HWIF_SRCV, "HBA 1100"},
+ {0x9005, 0x028f, 0x9005, 0x908, PQI_HWIF_SRCV, "SmartHBA 2100"},
+ {0x9005, 0x028f, 0x9005, 0x90a, PQI_HWIF_SRCV, "SmartHBA 2100A-8i"},
{0x9005, 0x028f, 0x193d, 0x8460, PQI_HWIF_SRCV, "UN HBA H460-M1"},
{0x9005, 0x028f, 0x193d, 0x8461, PQI_HWIF_SRCV, "UN HBA H460-B1"},
- {0x9005, 0x028f, 0x1bd4, 0x004a, PQI_HWIF_SRCV, "INSPUR SMART-HBA PM8222-SHBA"},
+ {0x9005, 0x028f, 0x193d, 0xc460, PQI_HWIF_SRCV, "UN RAID P460-M2"},
+ {0x9005, 0x028f, 0x193d, 0xc461, PQI_HWIF_SRCV, "UN RAID P460-B2"},
+ {0x9005, 0x028f, 0x1bd4, 0x004a, PQI_HWIF_SRCV, "INSPUR PM8222-SHBA"},
{0x9005, 0x028f, 0x13fe, 0x8312, PQI_HWIF_SRCV, "MIC-8312BridgeB"},
+ {0x9005, 0x028f, 0x1bd4, 0x004f, PQI_HWIF_SRCV, "INSPUR PM8222-HBA"},
+ {0x9005, 0x028f, 0x1d8d, 0x908, PQI_HWIF_SRCV, "Fiberhome SmartHBA AIS-8222-8i"},
/* (SRCx MSCC FVB 24x12G based) */
{0x9005, 0x028f, 0x103c, 0x1001, PQI_HWIF_SRCV, "MSCC FVB"},
@@ -114,6 +129,7 @@
{0x9005, 0x028f, 0x152d, 0x8a24, PQI_HWIF_SRCV, "QS-8236-16i"},
{0x9005, 0x028f, 0x9005, 0x1380, PQI_HWIF_SRCV, "SmartRAID 3154-16i"},
{0x9005, 0x028f, 0x1bd4, 0x0046, PQI_HWIF_SRCV, "INSPUR RAID 8236-16i"},
+ {0x9005, 0x028f, 0x1d8d, 0x806, PQI_HWIF_SRCV, "Fiberhome SmartRAID AIS-8236-16i"},
/* (MSCC PM8237 24x12G based) */
{0x9005, 0x028f, 0x103c, 0x1100, PQI_HWIF_SRCV, "P816i-a SR Gen10"},
@@ -126,6 +142,8 @@
{0x9005, 0x028f, 0x105b, 0x1211, PQI_HWIF_SRCV, "8238-16i"},
{0x9005, 0x028f, 0x1bd4, 0x0048, PQI_HWIF_SRCV, "INSPUR SMART-HBA 8238-16i"},
{0x9005, 0x028f, 0x9005, 0x1282, PQI_HWIF_SRCV, "SmartHBA 2100-16i"},
+ {0x9005, 0x028f, 0x1d8d, 0x916, PQI_HWIF_SRCV, "Fiberhome SmartHBA AIS-8238-16i"},
+ {0x9005, 0x028f, 0x1458, 0x1000, PQI_HWIF_SRCV, "GIGABYTE SmartHBA CLN1832"},
/* (MSCC PM8240 24x12G based) */
{0x9005, 0x028f, 0x152d, 0x8a36, PQI_HWIF_SRCV, "QS-8240-24i"},
@@ -134,6 +152,14 @@
{0x9005, 0x028f, 0x9005, 0x1202, PQI_HWIF_SRCV, "SmartRAID 3154-8i8e"},
{0x9005, 0x028f, 0x1bd4, 0x0047, PQI_HWIF_SRCV, "INSPUR RAID 8240-24i"},
+ /* Huawei IDs */
+ {0x9005, 0x028f, 0x19e5, 0xd227, PQI_HWIF_SRCV, "SR465C-M 4G"},
+ {0x9005, 0x028f, 0x19e5, 0xd22a, PQI_HWIF_SRCV, "SR765-M"},
+ {0x9005, 0x028f, 0x19e5, 0xd228, PQI_HWIF_SRCV, "SR455C-M 2G"},
+ {0x9005, 0x028f, 0x19e5, 0xd22c, PQI_HWIF_SRCV, "SR455C-M 4G"},
+ {0x9005, 0x028f, 0x19e5, 0xd229, PQI_HWIF_SRCV, "SR155-M"},
+ {0x9005, 0x028f, 0x19e5, 0xd22b, PQI_HWIF_SRCV, "SR455C-ME 4G"},
+
{0, 0, 0, 0, 0, 0}
};
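
The ident table above keys each supported board on the full PCI four-tuple (vendor, device, subvendor, subdevice) and carries the hardware-interface type plus the marketing name used as the device description; the all-zero row terminates the scan. A minimal sketch of the lookup pqi_find_ident() performs over it, assuming illustrative field and table names (only the pci_get_*() accessors appear in the patch itself):

	static struct pqi_ident *
	sketch_find_ident(device_t dev)
	{
		struct pqi_ident *m;
		uint16_t vendid = pci_get_vendor(dev);
		uint16_t devid = pci_get_device(dev);
		uint16_t sub_vendid = pci_get_subvendor(dev);
		uint16_t sub_devid = pci_get_subdevice(dev);

		/* Walk to the zero sentinel; match on the full four-tuple. */
		for (m = pqi_identifiers; m->vendor != 0; m++) {
			if (m->vendor == vendid && m->device == devid &&
			    m->subvendor == sub_vendid &&
			    m->subdevice == sub_devid)
				return (m);
		}
		return (NULL);
	}
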
@@ -146,8 +172,8 @@
/*
* Function to identify the installed adapter.
*/
-static struct pqi_ident *
-pqi_find_ident(device_t dev)
+static struct
+pqi_ident *pqi_find_ident(device_t dev)
{
struct pqi_ident *m;
u_int16_t vendid, devid, sub_vendid, sub_devid;
@@ -183,7 +209,7 @@
struct pqi_ident *id;
if ((id = pqi_find_ident(dev)) != NULL) {
- device_set_desc(dev, id->desc);
+ device_set_desc(dev, id->desc);
return(BUS_PROBE_VENDOR);
}
@@ -193,13 +219,14 @@
/*
* Store Bus/Device/Function in softs
*/
-void pqisrc_save_controller_info(struct pqisrc_softstate *softs)
+void
+pqisrc_save_controller_info(struct pqisrc_softstate *softs)
{
device_t dev = softs->os_specific.pqi_dev;
softs->bus_id = (uint32_t)pci_get_bus(dev);
softs->device_id = (uint32_t)pci_get_device(dev);
- softs->func_id = (uint32_t)pci_get_function(dev);
+ softs->func_id = (uint32_t)pci_get_function(dev);
}
@@ -213,7 +240,7 @@
{
struct pqisrc_softstate *softs = NULL;
struct pqi_ident *id = NULL;
- int error = 0;
+ int error = BSD_SUCCESS;
u_int32_t command = 0, i = 0;
int card_index = device_get_unit(dev);
rcb_t *rcbp = NULL;
@@ -236,7 +263,7 @@
/* assume failure is 'not configured' */
error = ENXIO;
- /*
+ /*
* Verify that the adapter is correctly set up in PCI space.
*/
pci_enable_busmaster(softs->os_specific.pqi_dev);
@@ -247,16 +274,21 @@
goto out;
}
- /*
+ /*
* Detect the hardware interface version, set up the bus interface
* indirection.
*/
id = pqi_find_ident(dev);
+ if (!id) {
+ DBG_ERR("NULL return value from pqi_find_ident\n");
+ goto out;
+ }
+
softs->os_specific.pqi_hwif = id->hwif;
switch(softs->os_specific.pqi_hwif) {
case PQI_HWIF_SRCV:
- DBG_INFO("set hardware up for PMC SRCv for %p", softs);
+ DBG_INFO("set hardware up for PMC SRCv for %p\n", softs);
break;
default:
softs->os_specific.pqi_hwif = PQI_HWIF_UNKNOWN;
@@ -290,7 +322,7 @@
/*
* Allocate the parent bus DMA tag appropriate for our PCI interface.
- *
+ *
* Note that some of these controllers are 64-bit capable.
*/
if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
@@ -312,28 +344,34 @@
softs->os_specific.sim_registered = FALSE;
softs->os_name = "FreeBSD ";
-
+
/* Initialize the PQI library */
error = pqisrc_init(softs);
- if (error) {
+ if (error != PQI_STATUS_SUCCESS) {
DBG_ERR("Failed to initialize pqi lib error = %d\n", error);
- error = PQI_STATUS_FAILURE;
+ error = ENXIO;
goto out;
}
+ else {
+ error = BSD_SUCCESS;
+ }
+
+ mtx_init(&softs->os_specific.cam_lock, "cam_lock", NULL, MTX_DEF);
+ softs->os_specific.mtx_init = TRUE;
+ mtx_init(&softs->os_specific.map_lock, "map_lock", NULL, MTX_DEF);
- mtx_init(&softs->os_specific.cam_lock, "cam_lock", NULL, MTX_DEF);
- softs->os_specific.mtx_init = TRUE;
- mtx_init(&softs->os_specific.map_lock, "map_lock", NULL, MTX_DEF);
+ callout_init(&softs->os_specific.wellness_periodic, 1);
+ callout_init(&softs->os_specific.heartbeat_timeout_id, 1);
- /*
- * Create DMA tag for mapping buffers into controller-addressable space.
- */
- if (bus_dma_tag_create(softs->os_specific.pqi_parent_dmat,/* parent */
- 1, 0, /* algnmnt, boundary */
+ /*
+ * Create DMA tag for mapping buffers into controller-addressable space.
+ */
+ if (bus_dma_tag_create(softs->os_specific.pqi_parent_dmat,/* parent */
+ PAGE_SIZE, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
- softs->pqi_cap.max_sg_elem*PAGE_SIZE,/*maxsize*/
+ (bus_size_t)softs->pqi_cap.max_sg_elem*PAGE_SIZE,/* maxsize */
softs->pqi_cap.max_sg_elem, /* nsegments */
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
@@ -348,40 +386,37 @@
for( i = 1; i <= softs->pqi_cap.max_outstanding_io; i++, rcbp++ ) {
if ((error = bus_dmamap_create(softs->os_specific.pqi_buffer_dmat, 0, &rcbp->cm_datamap)) != 0) {
DBG_ERR("Cant create datamap for buf @"
- "rcbp = %p maxio = %d error = %d\n",
+ "rcbp = %p maxio = %d error = %d\n",
rcbp, softs->pqi_cap.max_outstanding_io, error);
goto dma_out;
}
}
os_start_heartbeat_timer((void *)softs); /* Start the heart-beat timer */
- softs->os_specific.wellness_periodic = timeout( os_wellness_periodic,
- softs, 120*hz);
- /* Register our shutdown handler. */
- softs->os_specific.eh = EVENTHANDLER_REGISTER(shutdown_final,
- smartpqi_shutdown, softs, SHUTDOWN_PRI_DEFAULT);
+ callout_reset(&softs->os_specific.wellness_periodic, 120 * hz,
+ os_wellness_periodic, softs);
error = pqisrc_scan_devices(softs);
- if (error) {
+ if (error != PQI_STATUS_SUCCESS) {
DBG_ERR("Failed to scan lib error = %d\n", error);
- error = PQI_STATUS_FAILURE;
+ error = ENXIO;
goto out;
}
error = register_sim(softs, card_index);
if (error) {
- DBG_ERR("Failed to register sim index = %d error = %d\n",
+ DBG_ERR("Failed to register sim index = %d error = %d\n",
card_index, error);
goto out;
}
- smartpqi_target_rescan(softs);
+ smartpqi_target_rescan(softs);
TASK_INIT(&softs->os_specific.event_task, 0, pqisrc_event_worker,softs);
error = create_char_dev(softs, card_index);
if (error) {
- DBG_ERR("Failed to register character device index=%d r=%d\n",
+ DBG_ERR("Failed to register character device index=%d r=%d\n",
card_index, error);
goto out;
}
@@ -390,7 +425,7 @@
dma_out:
if (softs->os_specific.pqi_regs_res0 != NULL)
bus_release_resource(softs->os_specific.pqi_dev, SYS_RES_MEMORY,
- softs->os_specific.pqi_regs_rid0,
+ softs->os_specific.pqi_regs_rid0,
softs->os_specific.pqi_regs_res0);
out:
DBG_FUNC("OUT error = %d\n", error);
@@ -403,27 +438,35 @@
static int
smartpqi_detach(device_t dev)
{
- struct pqisrc_softstate *softs = NULL;
- softs = device_get_softc(dev);
+ struct pqisrc_softstate *softs = device_get_softc(dev);
+ int rval = BSD_SUCCESS;
+
DBG_FUNC("IN\n");
- EVENTHANDLER_DEREGISTER(shutdown_final, softs->os_specific.eh);
+ if (softs == NULL)
+ return ENXIO;
/* kill the periodic event */
- untimeout(os_wellness_periodic, softs,
- softs->os_specific.wellness_periodic);
+ callout_drain(&softs->os_specific.wellness_periodic);
/* Kill the heart beat event */
- untimeout(os_start_heartbeat_timer, softs,
- softs->os_specific.heartbeat_timeout_id);
+ callout_drain(&softs->os_specific.heartbeat_timeout_id);
+
+ if (!pqisrc_ctrl_offline(softs)) {
+ rval = pqisrc_flush_cache(softs, PQISRC_NONE_CACHE_FLUSH_ONLY);
+ if (rval != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Unable to flush adapter cache! rval = %d\n", rval);
+ rval = EIO;
+ }
+ }
- smartpqi_shutdown(softs);
destroy_char_dev(softs);
pqisrc_uninit(softs);
deregister_sim(softs);
pci_release_msi(dev);
-
+
DBG_FUNC("OUT\n");
- return 0;
+
+ return rval;
}
/*
@@ -432,15 +475,19 @@
static int
smartpqi_suspend(device_t dev)
{
- struct pqisrc_softstate *softs;
- softs = device_get_softc(dev);
+ struct pqisrc_softstate *softs = device_get_softc(dev);
+
DBG_FUNC("IN\n");
+ if (softs == NULL)
+ return ENXIO;
+
DBG_INFO("Suspending the device %p\n", softs);
softs->os_specific.pqi_state |= SMART_STATE_SUSPEND;
DBG_FUNC("OUT\n");
- return(0);
+
+ return BSD_SUCCESS;
}
/*
@@ -449,37 +496,47 @@
static int
smartpqi_resume(device_t dev)
{
- struct pqisrc_softstate *softs;
- softs = device_get_softc(dev);
+ struct pqisrc_softstate *softs = device_get_softc(dev);
+
DBG_FUNC("IN\n");
+ if (softs == NULL)
+ return ENXIO;
+
softs->os_specific.pqi_state &= ~SMART_STATE_SUSPEND;
DBG_FUNC("OUT\n");
- return(0);
+
+ return BSD_SUCCESS;
}
/*
* Do whatever is needed during a system shutdown.
*/
-int
-smartpqi_shutdown(void *arg)
+static int
+smartpqi_shutdown(device_t dev)
{
- struct pqisrc_softstate *softs = NULL;
- int rval = 0;
+ struct pqisrc_softstate *softs = device_get_softc(dev);
+ int bsd_status = BSD_SUCCESS;
+ int pqi_status;
DBG_FUNC("IN\n");
- softs = (struct pqisrc_softstate *)arg;
+ if (softs == NULL)
+ return ENXIO;
+
+ if (pqisrc_ctrl_offline(softs))
+ return BSD_SUCCESS;
- rval = pqisrc_flush_cache(softs, PQISRC_SHUTDOWN);
- if (rval != PQI_STATUS_SUCCESS) {
- DBG_ERR("Unable to flush adapter cache! rval = %d", rval);
+ pqi_status = pqisrc_flush_cache(softs, PQISRC_SHUTDOWN);
+ if (pqi_status != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Unable to flush adapter cache! rval = %d\n", pqi_status);
+ bsd_status = EIO;
}
DBG_FUNC("OUT\n");
- return rval;
+ return bsd_status;
}
/*
@@ -492,10 +549,12 @@
DEVMETHOD(device_detach, smartpqi_detach),
DEVMETHOD(device_suspend, smartpqi_suspend),
DEVMETHOD(device_resume, smartpqi_resume),
+ DEVMETHOD(device_shutdown, smartpqi_shutdown),
{ 0, 0 }
};
-static devclass_t pqi_devclass;
+static devclass_t pqi_devclass;
+
static driver_t smartpqi_pci_driver = {
"smartpqi",
pqi_methods,
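
A recurring change in the attach/detach code above is the retirement of the deprecated timeout(9)/untimeout(9) interface in favor of callout(9): the handles become struct callout members, attach initializes and arms them, the handler re-arms itself, and detach uses callout_drain() so teardown cannot race an in-flight tick. A minimal sketch of that lifecycle, with hypothetical names:

	struct sketch_softc {
		struct callout periodic;
	};

	static void
	sketch_tick(void *arg)
	{
		struct sketch_softc *sc = arg;

		/* ... periodic work ... */
		callout_reset(&sc->periodic, 120 * hz, sketch_tick, sc);
	}

	static void
	sketch_attach(struct sketch_softc *sc)
	{
		callout_init(&sc->periodic, 1);		/* 1 => MPSAFE */
		callout_reset(&sc->periodic, 120 * hz, sketch_tick, sc);
	}

	static void
	sketch_detach(struct sketch_softc *sc)
	{
		callout_drain(&sc->periodic);	/* blocks until no tick runs */
	}

Note the asymmetry the patch keeps: os_stop_heartbeat_timer() uses the non-blocking callout_stop(), while detach uses callout_drain(), the only variant safe to call before the softc is freed.
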
diff --git a/sys/dev/smartpqi/smartpqi_mem.c b/sys/dev/smartpqi/smartpqi_mem.c
--- a/sys/dev/smartpqi/smartpqi_mem.c
+++ b/sys/dev/smartpqi/smartpqi_mem.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,7 +27,7 @@
#include "smartpqi_includes.h"
-MALLOC_DEFINE(M_SMARTRAID, "smartraidbuf", "Buffers for the smartraid driver");
+MALLOC_DEFINE(M_SMARTPQI, "smartpqi", "Buffers for the smartpqi driver");
/*
* DMA map load callback function
@@ -40,15 +39,42 @@
*paddr = segs[0].ds_addr;
}
+int
+os_dma_setup(pqisrc_softstate_t *softs)
+{
+ DBG_FUNC("IN\n");
+ DBG_FUNC("OUT\n");
+ return PQI_STATUS_SUCCESS;
+}
+
+int
+os_dma_destroy(pqisrc_softstate_t *softs)
+{
+ DBG_FUNC("IN\n");
+ DBG_FUNC("OUT\n");
+ return PQI_STATUS_SUCCESS;
+}
+
+void
+os_update_dma_attributes(pqisrc_softstate_t *softs)
+{
+ DBG_FUNC("IN\n");
+ DBG_FUNC("OUT\n");
+}
+
/*
* DMA mem resource allocation wrapper function
*/
-int os_dma_mem_alloc(pqisrc_softstate_t *softs, struct dma_mem *dma_mem)
+int
+os_dma_mem_alloc(pqisrc_softstate_t *softs, struct dma_mem *dma_mem)
{
- int ret = 0;
+ int ret = BSD_SUCCESS;
/* DBG_FUNC("IN\n"); */
+ /* Make sure the alignment is at least 4 bytes */
+ ASSERT(dma_mem->align >= 4);
+
/* DMA memory needed - allocate it */
if ((ret = bus_dma_tag_create(
softs->os_specific.pqi_parent_dmat, /* parent */
@@ -65,14 +91,21 @@
DBG_ERR("can't allocate DMA tag with error = 0x%x\n", ret);
goto err_out;
}
+
+ if (!dma_mem->dma_tag) {
+ DBG_ERR("dma tag is NULL\n");
+ ret = ENOMEM;
+ goto err_out;
+ }
+
if ((ret = bus_dmamem_alloc(dma_mem->dma_tag, (void **)&dma_mem->virt_addr,
- BUS_DMA_NOWAIT, &dma_mem->dma_map)) != 0) {
+ BUS_DMA_WAITOK, &dma_mem->dma_map)) != 0) {
DBG_ERR("can't allocate DMA memory for required object \
with error = 0x%x\n", ret);
goto err_mem;
}
- if((ret = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
+ if((ret = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
dma_mem->virt_addr, dma_mem->size,
os_dma_map, &dma_mem->dma_addr, 0)) != 0) {
DBG_ERR("can't load DMA memory for required \
@@ -82,25 +115,31 @@
memset(dma_mem->virt_addr, 0, dma_mem->size);
+ ret = bsd_status_to_pqi_status(ret);
+
/* DBG_FUNC("OUT\n"); */
return ret;
err_load:
if(dma_mem->virt_addr)
- bus_dmamem_free(dma_mem->dma_tag, dma_mem->virt_addr,
+ bus_dmamem_free(dma_mem->dma_tag, dma_mem->virt_addr,
dma_mem->dma_map);
err_mem:
if(dma_mem->dma_tag)
bus_dma_tag_destroy(dma_mem->dma_tag);
err_out:
DBG_FUNC("failed OUT\n");
+
+ ret = bsd_status_to_pqi_status(ret);
+
return ret;
}
/*
* DMA mem resource deallocation wrapper function
*/
-void os_dma_mem_free(pqisrc_softstate_t *softs, struct dma_mem *dma_mem)
+void
+os_dma_mem_free(pqisrc_softstate_t *softs, struct dma_mem *dma_mem)
{
/* DBG_FUNC("IN\n"); */
@@ -127,16 +166,17 @@
/*
* Mem resource allocation wrapper function
*/
-void *os_mem_alloc(pqisrc_softstate_t *softs, size_t size)
+void
+*os_mem_alloc(pqisrc_softstate_t *softs, size_t size)
{
void *addr = NULL;
- /* DBG_FUNC("IN\n"); */
+ /* DBG_FUNC("IN\n"); */
- addr = malloc((unsigned long)size, M_SMARTRAID,
+ addr = malloc((unsigned long)size, M_SMARTPQI,
M_NOWAIT | M_ZERO);
-/* DBG_FUNC("OUT\n"); */
+ /* DBG_FUNC("OUT\n"); */
return addr;
}
@@ -144,12 +184,12 @@
/*
* Mem resource deallocation wrapper function
*/
-void os_mem_free(pqisrc_softstate_t *softs,
- char *addr, size_t size)
+void
+os_mem_free(pqisrc_softstate_t *softs, char *addr, size_t size)
{
/* DBG_FUNC("IN\n"); */
- free((void*)addr, M_SMARTRAID);
+ free((void*)addr, M_SMARTPQI);
/* DBG_FUNC("OUT\n"); */
}
@@ -157,14 +197,15 @@
/*
* dma/bus resource deallocation wrapper function
*/
-void os_resource_free(pqisrc_softstate_t *softs)
+void
+os_resource_free(pqisrc_softstate_t *softs)
{
if(softs->os_specific.pqi_parent_dmat)
bus_dma_tag_destroy(softs->os_specific.pqi_parent_dmat);
if (softs->os_specific.pqi_regs_res0 != NULL)
bus_release_resource(softs->os_specific.pqi_dev,
- SYS_RES_MEMORY,
+ SYS_RES_MEMORY,
softs->os_specific.pqi_regs_rid0,
softs->os_specific.pqi_regs_res0);
}
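
os_dma_mem_alloc() above follows the canonical three-step busdma sequence: create a tag describing the allocation's constraints, allocate wired kernel memory against that tag, then load the map to learn the bus address through a synchronous callback (os_dma_map() in this file). A condensed sketch of the sequence, error unwinding elided and names illustrative:

	static void
	sketch_map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
	{
		if (error == 0)
			*(bus_addr_t *)arg = segs[0].ds_addr;
	}

	static int
	sketch_dma_alloc(bus_dma_tag_t parent, bus_size_t size,
	    bus_dma_tag_t *tagp, bus_dmamap_t *mapp, void **vaddrp,
	    bus_addr_t *busp)
	{
		int error;

		error = bus_dma_tag_create(parent,
		    4, 0,				/* alignment, boundary */
		    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
		    NULL, NULL,				/* filter, filterarg */
		    size, 1, size,		/* maxsize, nsegs, maxsegsize */
		    0, NULL, NULL, tagp);
		if (error != 0)
			return (error);
		error = bus_dmamem_alloc(*tagp, vaddrp,
		    BUS_DMA_WAITOK | BUS_DMA_ZERO, mapp);
		if (error != 0)
			return (error);
		return (bus_dmamap_load(*tagp, *mapp, *vaddrp, size,
		    sketch_map_cb, busp, BUS_DMA_NOWAIT));
	}

The switch from BUS_DMA_NOWAIT to BUS_DMA_WAITOK in the allocation call lets the request sleep for memory instead of failing under pressure, which is appropriate when the caller can sleep, as during initialization.
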
diff --git a/sys/dev/smartpqi/smartpqi_misc.c b/sys/dev/smartpqi/smartpqi_misc.c
--- a/sys/dev/smartpqi/smartpqi_misc.c
+++ b/sys/dev/smartpqi/smartpqi_misc.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,9 +28,10 @@
#include "smartpqi_includes.h"
/*
- * Populate hostwell time variables in bcd format from FreeBSD format
+ * Populate hostwellness time variables in bcd format from FreeBSD format
*/
-void os_get_time(struct bmic_host_wellness_time *host_wellness_time)
+void
+os_get_time(struct bmic_host_wellness_time *host_wellness_time)
{
struct timespec ts;
struct clocktime ct;
@@ -39,7 +39,6 @@
getnanotime(&ts);
clock_ts_to_ct(&ts, &ct);
-
/* Fill the time In BCD Format */
host_wellness_time->hour= (uint8_t)bin2bcd(ct.hour);
host_wellness_time->min = (uint8_t)bin2bcd(ct.min);
@@ -50,18 +49,18 @@
host_wellness_time->century = (uint8_t)bin2bcd(ct.year / 100);
host_wellness_time->year = (uint8_t)bin2bcd(ct.year % 100);
-}
+}
/*
* Update host time to f/w every 24 hours in a periodic timer.
*/
-void os_wellness_periodic(void *data)
+void
+os_wellness_periodic(void *data)
{
struct pqisrc_softstate *softs = (struct pqisrc_softstate *)data;
int ret = 0;
-
/* update time to FW */
if (!pqisrc_ctrl_offline(softs)){
if( (ret = pqisrc_write_current_time_to_host_wellness(softs)) != 0 )
@@ -69,20 +68,20 @@
}
/* reschedule ourselves */
- softs->os_specific.wellness_periodic = timeout(os_wellness_periodic,
- softs, OS_HOST_WELLNESS_TIMEOUT * hz);
+ callout_reset(&softs->os_specific.wellness_periodic,
+ PQI_HOST_WELLNESS_TIMEOUT_SEC * hz, os_wellness_periodic, softs);
}
/*
* Routine used to stop the heart-beat timer
*/
-void os_stop_heartbeat_timer(pqisrc_softstate_t *softs)
+void
+os_stop_heartbeat_timer(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
/* Kill the heart beat event */
- untimeout(os_start_heartbeat_timer, softs,
- softs->os_specific.heartbeat_timeout_id);
+ callout_stop(&softs->os_specific.heartbeat_timeout_id);
DBG_FUNC("OUT\n");
}
@@ -90,16 +89,17 @@
/*
* Routine used to start the heart-beat timer
*/
-void os_start_heartbeat_timer(void *data)
+void
+os_start_heartbeat_timer(void *data)
{
struct pqisrc_softstate *softs = (struct pqisrc_softstate *)data;
DBG_FUNC("IN\n");
pqisrc_heartbeat_timer_handler(softs);
if (!pqisrc_ctrl_offline(softs)) {
- softs->os_specific.heartbeat_timeout_id =
- timeout(os_start_heartbeat_timer, softs,
- OS_FW_HEARTBEAT_TIMER_INTERVAL * hz);
+ callout_reset(&softs->os_specific.heartbeat_timeout_id,
+ PQI_HEARTBEAT_TIMEOUT_SEC * hz,
+ os_start_heartbeat_timer, softs);
}
DBG_FUNC("OUT\n");
@@ -108,48 +108,49 @@
/*
* Mutex initialization function
*/
-int os_init_spinlock(struct pqisrc_softstate *softs, struct mtx *lock,
+int
+os_init_spinlock(struct pqisrc_softstate *softs, struct mtx *lock,
char *lockname)
{
- mtx_init(lock, lockname, NULL, MTX_SPIN);
- return 0;
-
+ mtx_init(lock, lockname, NULL, MTX_SPIN);
+ return 0;
}
/*
* Mutex uninitialization function
*/
-void os_uninit_spinlock(struct mtx *lock)
+void
+os_uninit_spinlock(struct mtx *lock)
{
- mtx_destroy(lock);
- return;
-
+ mtx_destroy(lock);
+ return;
}
/*
* Semaphore initialization function
*/
-int os_create_semaphore(const char *name, int value, struct sema *sema)
+int
+os_create_semaphore(const char *name, int value, struct sema *sema)
{
- sema_init(sema, value, name);
- return PQI_STATUS_SUCCESS;
-
+ sema_init(sema, value, name);
+ return PQI_STATUS_SUCCESS;
}
/*
* Semaphore uninitialization function
*/
-int os_destroy_semaphore(struct sema *sema)
+int
+os_destroy_semaphore(struct sema *sema)
{
- sema_destroy(sema);
- return PQI_STATUS_SUCCESS;
-
+ sema_destroy(sema);
+ return PQI_STATUS_SUCCESS;
}
/*
* Semaphore grab function
*/
-void inline os_sema_lock(struct sema *sema)
+void inline
+os_sema_lock(struct sema *sema)
{
sema_post(sema);
}
@@ -157,16 +158,26 @@
/*
* Semaphore release function
*/
-void inline os_sema_unlock(struct sema *sema)
+void inline
+os_sema_unlock(struct sema *sema)
{
sema_wait(sema);
}
-
/*
* string copy wrapper function
*/
-int os_strlcpy(char *dst, char *src, int size)
+int
+os_strlcpy(char *dst, char *src, int size)
{
return strlcpy(dst, src, size);
}
+
+int
+bsd_status_to_pqi_status(int bsd_status)
+{
+ if (bsd_status == BSD_SUCCESS)
+ return PQI_STATUS_SUCCESS;
+ else
+ return PQI_STATUS_FAILURE;
+}
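
bsd_status_to_pqi_status() is the seam between the two error domains this revision separates: OS-facing code now returns errno-style values (BSD_SUCCESS, ENXIO, EIO), while the shared pqisrc library keeps PQI_STATUS_*. The conversion happens exactly once at the boundary, as os_dma_mem_alloc() does before returning into library code; a minimal illustration with a hypothetical helper:

	/* Hypothetical OS-layer routine called from pqisrc library code. */
	int
	sketch_os_helper(pqisrc_softstate_t *softs)
	{
		int bsd_status;

		bsd_status = sketch_freebsd_call(softs);	/* 0 or an errno */
		return (bsd_status_to_pqi_status(bsd_status));
	}
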
diff --git a/sys/dev/smartpqi/smartpqi_prototypes.h b/sys/dev/smartpqi/smartpqi_prototypes.h
--- a/sys/dev/smartpqi/smartpqi_prototypes.h
+++ b/sys/dev/smartpqi/smartpqi_prototypes.h
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -38,8 +37,8 @@
int pqisrc_process_config_table(pqisrc_softstate_t *);
int pqisrc_flush_cache(pqisrc_softstate_t *, enum pqisrc_flush_cache_event_type);
int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *);
-int pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *);
-void pqisrc_complete_internal_cmds(pqisrc_softstate_t *);
+inline boolean_t pqisrc_is_firmware_feature_enabled(pqisrc_softstate_t *,
+ struct pqi_conf_table_firmware_features *, uint16_t );
/* pqi_sis.c*/
int pqisrc_sis_init(pqisrc_softstate_t *);
@@ -53,6 +52,7 @@
int pqisrc_sis_wait_for_db_bit_to_clear(pqisrc_softstate_t *, uint32_t);
void sis_disable_interrupt(pqisrc_softstate_t*);
+
/* pqi_queue.c */
int pqisrc_submit_admin_req(pqisrc_softstate_t *,
gen_adm_req_iu_t *, gen_adm_resp_iu_t *);
@@ -90,11 +90,16 @@
void pqisrc_init_targetid_pool(pqisrc_softstate_t *softs);
int pqisrc_alloc_tid(pqisrc_softstate_t *softs);
void pqisrc_free_tid(pqisrc_softstate_t *softs, int);
+int pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd,
+ reportlun_data_ext_t **buff, size_t *data_length);
+int pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs,
+ uint8_t *scsi3addr, uint16_t vpd_page, uint8_t *buff, int buf_len);
/* pqi_helper.c */
boolean_t pqisrc_ctrl_offline(pqisrc_softstate_t *);
void pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *);
-int pqisrc_wait_on_condition(pqisrc_softstate_t *, rcb_t *);
+int pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb,
+ uint32_t timeout);
boolean_t pqisrc_device_equal(pqi_scsi_dev_t *, pqi_scsi_dev_t *);
boolean_t pqisrc_is_hba_lunid(uint8_t *);
boolean_t pqisrc_is_logical_device(pqi_scsi_dev_t *);
@@ -104,6 +109,14 @@
void check_struct_sizes(void);
char *pqisrc_raidlevel_to_string(uint8_t);
void pqisrc_configure_legacy_intx(pqisrc_softstate_t*, boolean_t);
+void pqisrc_ctrl_diagnostic_options(pqisrc_softstate_t *);
+void pqisrc_wait_for_device_commands_to_complete(pqisrc_softstate_t *,
+ pqi_scsi_dev_t *);
+void check_device_pending_commands_to_complete(pqisrc_softstate_t *,
+ pqi_scsi_dev_t *);
+uint32_t pqisrc_count_num_scsi_active_requests_on_dev(pqisrc_softstate_t *,
+ pqi_scsi_dev_t *);
+
/* pqi_response.c */
void pqisrc_signal_event(pqisrc_softstate_t *softs, rcb_t *rcb);
@@ -113,6 +126,7 @@
rcb_t *, uint16_t);
void pqisrc_process_io_response_success(pqisrc_softstate_t *,
rcb_t *);
+void pqisrc_show_sense_data_full(pqisrc_softstate_t *, rcb_t *, sense_data_u_t *sense_data);
void pqisrc_process_aio_response_error(pqisrc_softstate_t *,
rcb_t *, uint16_t);
void pqisrc_process_raid_response_error(pqisrc_softstate_t *,
@@ -120,29 +134,32 @@
void pqisrc_process_response_queue(pqisrc_softstate_t *, int);
+
/* pqi_request.c */
+int pqisrc_build_send_vendor_request(pqisrc_softstate_t*,
+ pqi_vendor_general_request_t *,
+ raid_path_error_info_elem_t *);
int pqisrc_build_send_io(pqisrc_softstate_t *,rcb_t *);
-
+
int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t*);
-
+
int pqisrc_send_tmf(pqisrc_softstate_t *, pqi_scsi_dev_t *,
- rcb_t *, int, int);
+ rcb_t *, rcb_t *, int);
int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs);
int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs);
+void pqisrc_print_buffer(pqisrc_softstate_t *softs, char *msg, void *user_buf, uint32_t total_len, uint32_t flags);
+char *io_path_to_ascii(IO_PATH_T path);
/* pqi_event.c*/
int pqisrc_report_event_config(pqisrc_softstate_t *);
int pqisrc_set_event_config(pqisrc_softstate_t *);
int pqisrc_process_event_intr_src(pqisrc_softstate_t *,int);
void pqisrc_ack_all_events(void *arg);
+void pqisrc_wait_for_rescan_complete(pqisrc_softstate_t *softs);
-
-void pqisrc_event_worker(void *, int);
-int pqisrc_scsi_setup(struct pqisrc_softstate *);
-void pqisrc_scsi_cleanup(struct pqisrc_softstate *);
boolean_t pqisrc_update_scsi_sense(const uint8_t *, int,
struct sense_header_scsi *);
int pqisrc_build_send_raid_request(pqisrc_softstate_t *, pqisrc_raid_req_t *,
@@ -192,8 +209,16 @@
int pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *);
int pqisrc_process_task_management_response(pqisrc_softstate_t *,
pqi_tmf_resp_t *);
-void pqisrc_wait_for_rescan_complete(pqisrc_softstate_t *softs);
+/*Device outstanding Io count*/
+uint64_t pqisrc_increment_device_active_io(pqisrc_softstate_t *,
+ pqi_scsi_dev_t *);
+uint64_t pqisrc_decrement_device_active_io(pqisrc_softstate_t *,
+ pqi_scsi_dev_t *);
+void pqisrc_init_device_active_io(pqisrc_softstate_t *,
+ pqi_scsi_dev_t *);
+uint64_t pqisrc_read_device_active_io(pqisrc_softstate_t *,
+ pqi_scsi_dev_t *);
/* pqi_ioctl.c*/
@@ -201,6 +226,7 @@
pqisrc_passthru_ioctl(struct pqisrc_softstate *, void *, int);
+
/* Functions Prototypes */
/* FreeBSD_mem.c */
int os_dma_mem_alloc(pqisrc_softstate_t *,struct dma_mem *);
@@ -208,6 +234,9 @@
void *os_mem_alloc(pqisrc_softstate_t *,size_t);
void os_mem_free(pqisrc_softstate_t *,char *,size_t);
void os_resource_free(pqisrc_softstate_t *);
+int os_dma_setup(pqisrc_softstate_t *);
+int os_dma_destroy(pqisrc_softstate_t *);
+void os_update_dma_attributes(pqisrc_softstate_t *);
/* FreeBSD intr.c */
int os_get_intr_config(pqisrc_softstate_t *);
@@ -239,16 +268,18 @@
/* FreeBSD_cam.c */
uint8_t os_get_task_attr(rcb_t *);
-void os_wellness_periodic(void *);
void smartpqi_target_rescan(struct pqisrc_softstate *);
+void os_rescan_target(struct pqisrc_softstate *, pqi_scsi_dev_t *);
/* FreeBSD_intr.c FreeBSD_main.c */
+void pqisrc_event_worker(void *, int);
void os_add_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
void os_remove_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
void os_io_response_success(rcb_t *);
void os_aio_response_error(rcb_t *, aio_path_error_info_elem_t *);
void smartpqi_adjust_queue_depth(struct cam_path *, uint32_t );
void os_raid_response_error(rcb_t *, raid_path_error_info_elem_t *);
+void os_wellness_periodic(void *);
void os_reset_rcb( rcb_t *);
int register_sim(struct pqisrc_softstate *, int);
void deregister_sim(struct pqisrc_softstate *);
@@ -261,6 +292,8 @@
void os_get_time(struct bmic_host_wellness_time *);
void os_eventtaskqueue_enqueue(pqisrc_softstate_t *);
void pqisrc_save_controller_info(struct pqisrc_softstate *);
-int smartpqi_shutdown(void *);
+
+/* Domain status conversion */
+int bsd_status_to_pqi_status(int );
#endif // _SMARTPQI_PROTOTYPES_H
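
Among the new prototypes, the pqisrc_*_device_active_io() group adds per-device outstanding-I/O accounting, which pqisrc_wait_for_device_commands_to_complete() can poll before a device is torn down. One plausible shape for such counters, assuming an atomic 64-bit field on pqi_scsi_dev_t (the field name below is invented for illustration):

	#include <machine/atomic.h>

	uint64_t
	sketch_increment_device_active_io(pqi_scsi_dev_t *device)
	{
		/* "active_io" is a hypothetical volatile uint64_t member. */
		return (atomic_fetchadd_64(&device->active_io, 1) + 1);
	}

	uint64_t
	sketch_decrement_device_active_io(pqi_scsi_dev_t *device)
	{
		return (atomic_fetchadd_64(&device->active_io,
		    (uint64_t)-1) - 1);
	}
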
diff --git a/sys/dev/smartpqi/smartpqi_queue.c b/sys/dev/smartpqi/smartpqi_queue.c
--- a/sys/dev/smartpqi/smartpqi_queue.c
+++ b/sys/dev/smartpqi/smartpqi_queue.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,14 +31,15 @@
* Submit an admin IU to the adapter.
* Add interrupt support, if required
*/
-int pqisrc_submit_admin_req(pqisrc_softstate_t *softs,
+int
+pqisrc_submit_admin_req(pqisrc_softstate_t *softs,
gen_adm_req_iu_t *req, gen_adm_resp_iu_t *resp)
{
int ret = PQI_STATUS_SUCCESS;
ob_queue_t *ob_q = &softs->admin_ob_queue;
ib_queue_t *ib_q = &softs->admin_ib_queue;
int tmo = PQISRC_ADMIN_CMD_RESP_TIMEOUT;
-
+
DBG_FUNC("IN\n");
req->header.iu_type =
@@ -48,7 +48,7 @@
req->header.iu_length = PQI_STANDARD_IU_LENGTH;
req->res1 = 0;
req->work = 0;
-
+
/* Get the tag */
req->req_id = pqisrc_get_tag(&softs->taglist);
if (INVALID_ELEM == req->req_id) {
@@ -57,7 +57,7 @@
goto err_out;
}
softs->rcb[req->req_id].tag = req->req_id;
-
+
/* Submit the command to the admin ib queue */
ret = pqisrc_submit_cmnd(softs, ib_q, req);
if (ret != PQI_STATUS_SUCCESS) {
@@ -70,19 +70,20 @@
if (tmo <= 0) {
DBG_ERR("Admin cmd timeout\n");
DBG_ERR("tmo : %d\n",tmo); \
+ /* TODO : PQI device status and error register and report */
ret = PQI_STATUS_TIMEOUT;
goto err_cmd;
}
-
+
/* Copy the response */
memcpy(resp, ob_q->array_virt_addr + (ob_q->ci_local * ob_q->elem_size),
sizeof(gen_adm_resp_iu_t));
-
+
/* Update CI */
ob_q->ci_local = (ob_q->ci_local + 1 ) % ob_q->num_elem;
- PCI_MEM_PUT32(softs, ob_q->ci_register_abs,
+ PCI_MEM_PUT32(softs, ob_q->ci_register_abs,
ob_q->ci_register_offset, LE_32(ob_q->ci_local));
-
+
/* Validate the response data */
ASSERT(req->fn_code == resp->fn_code);
ASSERT(resp->header.iu_type == PQI_IU_TYPE_GENERAL_ADMIN_RESPONSE);
@@ -105,7 +106,8 @@
/*
* Get the administration queue config parameters.
*/
-void pqisrc_get_admin_queue_config(pqisrc_softstate_t *softs)
+void
+pqisrc_get_admin_queue_config(pqisrc_softstate_t *softs)
{
uint64_t val = 0;
@@ -118,7 +120,7 @@
 /* Note : size in units of 16 bytes */
softs->admin_ib_queue.elem_size = ((val & 0xFF0000) >> 16) * 16;
softs->admin_ob_queue.elem_size = ((val & 0xFF000000) >> 24) * 16;
-
+
DBG_FUNC(" softs->admin_ib_queue.num_elem : %d\n",
softs->admin_ib_queue.num_elem);
DBG_FUNC(" softs->admin_ib_queue.elem_size : %d\n",
@@ -126,14 +128,15 @@
}
/*
- * Decide the no of elements in admin ib and ob queues.
+ * Decide the no of elements in admin ib and ob queues.
*/
-void pqisrc_decide_admin_queue_config(pqisrc_softstate_t *softs)
+void
+pqisrc_decide_admin_queue_config(pqisrc_softstate_t *softs)
{
/* Determine num elements in Admin IBQ */
softs->admin_ib_queue.num_elem = MIN(softs->admin_ib_queue.num_elem,
PQISRC_MAX_ADMIN_IB_QUEUE_ELEM_NUM);
-
+
/* Determine num elements in Admin OBQ */
softs->admin_ob_queue.num_elem = MIN(softs->admin_ob_queue.num_elem,
PQISRC_MAX_ADMIN_OB_QUEUE_ELEM_NUM);
@@ -142,7 +145,8 @@
/*
* Allocate DMA memory for admin queue and initialize.
*/
-int pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *softs)
+int
+pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *softs)
{
uint32_t ib_array_size = 0;
uint32_t ob_array_size = 0;
@@ -153,7 +157,7 @@
ib_array_size = (softs->admin_ib_queue.num_elem *
softs->admin_ib_queue.elem_size);
-
+
ob_array_size = (softs->admin_ob_queue.num_elem *
softs->admin_ob_queue.elem_size);
@@ -183,21 +187,21 @@
softs->admin_ob_queue.array_virt_addr = virt_addr + ib_array_size;
softs->admin_ob_queue.array_dma_addr = dma_addr + ib_array_size;
softs->admin_ob_queue.ci_local = 0;
-
+
/* IB CI */
softs->admin_ib_queue.ci_virt_addr =
- (uint32_t*)((uint8_t*)softs->admin_ob_queue.array_virt_addr
+ (uint32_t*)((uint8_t*)softs->admin_ob_queue.array_virt_addr
+ ob_array_size);
softs->admin_ib_queue.ci_dma_addr =
- (dma_addr_t)((uint8_t*)softs->admin_ob_queue.array_dma_addr +
+ (dma_addr_t)((uint8_t*)softs->admin_ob_queue.array_dma_addr +
ob_array_size);
/* OB PI */
softs->admin_ob_queue.pi_virt_addr =
- (uint32_t*)((uint8_t*)(softs->admin_ib_queue.ci_virt_addr) +
+ (uint32_t*)((uint8_t*)(softs->admin_ib_queue.ci_virt_addr) +
PQI_ADDR_ALIGN_MASK_64 + 1);
softs->admin_ob_queue.pi_dma_addr =
- (dma_addr_t)((uint8_t*)(softs->admin_ib_queue.ci_dma_addr) +
+ (dma_addr_t)((uint8_t*)(softs->admin_ib_queue.ci_dma_addr) +
PQI_ADDR_ALIGN_MASK_64 + 1);
DBG_INIT("softs->admin_ib_queue.ci_dma_addr : %p,softs->admin_ob_queue.pi_dma_addr :%p\n",
@@ -224,8 +228,8 @@
/*
* Subroutine used to create (or) delete the admin queue requested.
*/
-int pqisrc_create_delete_adminq(pqisrc_softstate_t *softs,
- uint32_t cmd)
+int
+pqisrc_create_delete_adminq(pqisrc_softstate_t *softs, uint32_t cmd)
{
int tmo = 0;
int ret = PQI_STATUS_SUCCESS;
@@ -233,27 +237,29 @@
/* Create Admin Q pair writing to Admin Q config function reg */
PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_q_config, PQI_ADMINQ_CONFIG, LE_64(cmd));
-
+
if (cmd == PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR)
tmo = PQISRC_ADMIN_QUEUE_CREATE_TIMEOUT;
else
tmo = PQISRC_ADMIN_QUEUE_DELETE_TIMEOUT;
-
+
/* Wait for completion */
COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config, PQI_ADMINQ_CONFIG) ==
PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo);
if (tmo <= 0) {
DBG_ERR("Unable to create/delete admin queue pair\n");
+ /* TODO : PQI device status and error register and report */
ret = PQI_STATUS_TIMEOUT;
}
return ret;
-}
+}
/*
* Debug admin queue configuration params.
*/
-void pqisrc_print_adminq_config(pqisrc_softstate_t *softs)
+void
+pqisrc_print_adminq_config(pqisrc_softstate_t *softs)
{
DBG_INFO(" softs->admin_ib_queue.array_dma_addr : %p\n",
(void*)softs->admin_ib_queue.array_dma_addr);
@@ -280,9 +286,10 @@
/*
* Function used to create an admin queue.
*/
-int pqisrc_create_admin_queue(pqisrc_softstate_t *softs)
+int
+pqisrc_create_admin_queue(pqisrc_softstate_t *softs)
{
- int ret = PQI_STATUS_SUCCESS;;
+ int ret = PQI_STATUS_SUCCESS;
uint32_t admin_q_param = 0;
DBG_FUNC("IN\n");
@@ -299,21 +306,21 @@
DBG_ERR("Failed to Allocate Admin Q ret : %d\n", ret);
goto err_out;
}
-
+
/* Write IB Q element array address */
- PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_ibq_elem_array_addr,
+ PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_ibq_elem_array_addr,
PQI_ADMIN_IBQ_ELEM_ARRAY_ADDR, LE_64(softs->admin_ib_queue.array_dma_addr));
/* Write OB Q element array address */
- PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_obq_elem_array_addr,
+ PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_obq_elem_array_addr,
PQI_ADMIN_OBQ_ELEM_ARRAY_ADDR, LE_64(softs->admin_ob_queue.array_dma_addr));
/* Write IB Q CI address */
- PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_ibq_ci_addr,
+ PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_ibq_ci_addr,
PQI_ADMIN_IBQ_CI_ADDR, LE_64(softs->admin_ib_queue.ci_dma_addr));
/* Write OB Q PI address */
- PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_obq_pi_addr,
+ PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_obq_pi_addr,
PQI_ADMIN_OBQ_PI_ADDR, LE_64(softs->admin_ob_queue.pi_dma_addr));
@@ -322,10 +329,10 @@
admin_q_param = softs->admin_ib_queue.num_elem |
(softs->admin_ob_queue.num_elem << 8)|
PQI_ADMIN_QUEUE_MSIX_DISABLE;
-
- PCI_MEM_PUT32(softs, &softs->pqi_reg->admin_q_param,
+
+ PCI_MEM_PUT32(softs, &softs->pqi_reg->admin_q_param,
PQI_ADMINQ_PARAM, LE_32(admin_q_param));
-
+
/* Submit cmd to create Admin Q pair */
ret = pqisrc_create_delete_adminq(softs,
PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR);
@@ -333,22 +340,22 @@
DBG_ERR("Failed to Allocate Admin Q ret : %d\n", ret);
goto err_q_create;
}
-
+
/* Admin queue created, get ci,pi offset */
softs->admin_ib_queue.pi_register_offset =(PQISRC_PQI_REG_OFFSET +
PCI_MEM_GET64(softs, &softs->pqi_reg->admin_ibq_pi_offset, PQI_ADMIN_IBQ_PI_OFFSET));
-
- softs->admin_ib_queue.pi_register_abs =(uint32_t *)(softs->pci_mem_base_vaddr +
+
+ softs->admin_ib_queue.pi_register_abs =(uint32_t *)(softs->pci_mem_base_vaddr +
softs->admin_ib_queue.pi_register_offset);
-
+
softs->admin_ob_queue.ci_register_offset = (PQISRC_PQI_REG_OFFSET +
PCI_MEM_GET64(softs, &softs->pqi_reg->admin_obq_ci_offset, PQI_ADMIN_OBQ_CI_OFFSET));
- softs->admin_ob_queue.ci_register_abs = (uint32_t *)(softs->pci_mem_base_vaddr +
+ softs->admin_ob_queue.ci_register_abs = (uint32_t *)(softs->pci_mem_base_vaddr +
softs->admin_ob_queue.ci_register_offset);
os_strlcpy(softs->admin_ib_queue.lockname, "admin_ibqlock", LOCKNAME_SIZE);
-
+
ret =OS_INIT_PQILOCK(softs, &softs->admin_ib_queue.lock,
softs->admin_ib_queue.lockname);
if(ret){
@@ -360,7 +367,7 @@
/* Print admin q config details */
pqisrc_print_adminq_config(softs);
-
+
DBG_FUNC("OUT\n");
return ret;
@@ -375,8 +382,9 @@
/*
* Subroutine used to delete an operational queue.
*/
-int pqisrc_delete_op_queue(pqisrc_softstate_t *softs,
- uint32_t q_id, boolean_t ibq)
+int
+pqisrc_delete_op_queue(pqisrc_softstate_t *softs,
+ uint32_t q_id, boolean_t ibq)
{
int ret = PQI_STATUS_SUCCESS;
/* Firmware doesn't support this now */
@@ -397,10 +405,10 @@
admin_req.fn_code = PQI_FUNCTION_DELETE_OPERATIONAL_IQ;
else
admin_req.fn_code = PQI_FUNCTION_DELETE_OPERATIONAL_OQ;
-
-
+
+
ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
-
+
DBG_FUNC("OUT\n");
#endif
return ret;
@@ -409,7 +417,8 @@
/*
* Function used to destroy the event queue.
*/
-void pqisrc_destroy_event_queue(pqisrc_softstate_t *softs)
+void
+pqisrc_destroy_event_queue(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
@@ -421,7 +430,7 @@
}
softs->event_q.created = false;
}
-
+
/* Free the memory */
os_dma_mem_free(softs, &softs->event_q_dma_mem);
@@ -431,7 +440,8 @@
/*
* Function used to destroy operational ib queues.
*/
-void pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *softs)
+void
+pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
ib_queue_t *op_ib_q = NULL;
@@ -446,15 +456,15 @@
ret = pqisrc_delete_op_queue(softs, op_ib_q->q_id, true);
if (ret) {
DBG_ERR("Failed to Delete Raid IB Q %d\n",op_ib_q->q_id);
- }
+ }
op_ib_q->created = false;
}
-
+
if(op_ib_q->lockcreated==true){
OS_UNINIT_PQILOCK(&op_ib_q->lock);
op_ib_q->lockcreated = false;
}
-
+
/* OP AIO IB Q */
op_ib_q = &softs->op_aio_ib_q[i];
if (op_ib_q->created == true) {
@@ -464,7 +474,7 @@
}
op_ib_q->created = false;
}
-
+
if(op_ib_q->lockcreated==true){
OS_UNINIT_PQILOCK(&op_ib_q->lock);
op_ib_q->lockcreated = false;
@@ -479,7 +489,8 @@
/*
* Function used to destroy operational ob queues.
*/
-void pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *softs)
+void
+pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
int i;
@@ -506,7 +517,8 @@
/*
* Function used to destroy an admin queue.
*/
-int pqisrc_destroy_admin_queue(pqisrc_softstate_t *softs)
+int
+pqisrc_destroy_admin_queue(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
@@ -514,9 +526,9 @@
#if 0
ret = pqisrc_create_delete_adminq(softs,
PQI_ADMIN_QUEUE_CONF_FUNC_DEL_Q_PAIR);
-#endif
- os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
-
+#endif
+ os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
+
DBG_FUNC("OUT\n");
return ret;
}
@@ -524,24 +536,25 @@
/*
* Function used to change operational ib queue properties.
*/
-int pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *softs,
+int
+pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *softs,
ib_queue_t *op_ib_q, uint32_t prop)
{
- int ret = PQI_STATUS_SUCCESS;;
+ int ret = PQI_STATUS_SUCCESS;
gen_adm_req_iu_t admin_req;
gen_adm_resp_iu_t admin_resp;
memset(&admin_req, 0, sizeof(admin_req));
memset(&admin_resp, 0, sizeof(admin_resp));
-
+
DBG_FUNC("IN\n");
-
+
admin_req.fn_code = PQI_FUNCTION_CHANGE_OPERATIONAL_IQ_PROP;
admin_req.req_type.change_op_iq_prop.qid = op_ib_q->q_id;
admin_req.req_type.change_op_iq_prop.vend_specific = prop;
-
+
ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
-
+
DBG_FUNC("OUT\n");
return ret;
}
@@ -549,10 +562,11 @@
/*
* Function used to create an operational ob queue.
*/
-int pqisrc_create_op_obq(pqisrc_softstate_t *softs,
+int
+pqisrc_create_op_obq(pqisrc_softstate_t *softs,
ob_queue_t *op_ob_q)
{
- int ret = PQI_STATUS_SUCCESS;;
+ int ret = PQI_STATUS_SUCCESS;
gen_adm_req_iu_t admin_req;
gen_adm_resp_iu_t admin_resp;
@@ -562,7 +576,7 @@
memset(&admin_resp, 0, sizeof(admin_resp));
admin_req.fn_code = PQI_FUNCTION_CREATE_OPERATIONAL_OQ;
- admin_req.req_type.create_op_oq.qid = op_ob_q->q_id;
+ admin_req.req_type.create_op_oq.qid = op_ob_q->q_id;
admin_req.req_type.create_op_oq.intr_msg_num = op_ob_q->intr_msg_num;
admin_req.req_type.create_op_oq.elem_arr_addr = op_ob_q->array_dma_addr;
admin_req.req_type.create_op_oq.ob_pi_addr = op_ob_q->pi_dma_addr;
@@ -574,7 +588,7 @@
ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
if( PQI_STATUS_SUCCESS == ret) {
- op_ob_q->ci_register_offset = (PQISRC_PQI_REG_OFFSET +
+ op_ob_q->ci_register_offset = (PQISRC_PQI_REG_OFFSET +
admin_resp.resp_type.create_op_oq.ci_offset);
op_ob_q->ci_register_abs = (uint32_t *)(softs->pci_mem_base_vaddr +
op_ob_q->ci_register_offset);
@@ -582,7 +596,7 @@
int i = 0;
DBG_WARN("Error Status Descriptors\n");
for(i = 0; i < 4;i++)
- DBG_WARN(" %x ",admin_resp.resp_type.create_op_oq.status_desc[i]);
+ DBG_WARN(" %x ",admin_resp.resp_type.create_op_oq.status_desc[i]);
}
DBG_FUNC("OUT ret : %d\n", ret);
@@ -593,10 +607,11 @@
/*
* Function used to create an operational ib queue.
*/
-int pqisrc_create_op_ibq(pqisrc_softstate_t *softs,
+int
+pqisrc_create_op_ibq(pqisrc_softstate_t *softs,
ib_queue_t *op_ib_q)
{
- int ret = PQI_STATUS_SUCCESS;;
+ int ret = PQI_STATUS_SUCCESS;
gen_adm_req_iu_t admin_req;
gen_adm_resp_iu_t admin_resp;
@@ -611,36 +626,37 @@
admin_req.req_type.create_op_iq.iq_ci_addr = op_ib_q->ci_dma_addr;
admin_req.req_type.create_op_iq.num_elem = op_ib_q->num_elem;
admin_req.req_type.create_op_iq.elem_len = op_ib_q->elem_size / 16;
-
+
ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
-
+
if( PQI_STATUS_SUCCESS == ret) {
- op_ib_q->pi_register_offset =(PQISRC_PQI_REG_OFFSET +
+ op_ib_q->pi_register_offset =(PQISRC_PQI_REG_OFFSET +
admin_resp.resp_type.create_op_iq.pi_offset);
-
+
op_ib_q->pi_register_abs =(uint32_t *)(softs->pci_mem_base_vaddr +
op_ib_q->pi_register_offset);
} else {
int i = 0;
DBG_WARN("Error Status Decsriptors\n");
for(i = 0; i < 4;i++)
- DBG_WARN(" %x ",admin_resp.resp_type.create_op_iq.status_desc[i]);
+ DBG_WARN(" %x ",admin_resp.resp_type.create_op_iq.status_desc[i]);
}
DBG_FUNC("OUT ret : %d\n", ret);
- return ret;
+ return ret;
}
/*
* subroutine used to create an operational ib queue for AIO.
*/
-int pqisrc_create_op_aio_ibq(pqisrc_softstate_t *softs,
+int
+pqisrc_create_op_aio_ibq(pqisrc_softstate_t *softs,
ib_queue_t *op_aio_ib_q)
{
int ret = PQI_STATUS_SUCCESS;
DBG_FUNC("IN\n");
-
+
ret = pqisrc_create_op_ibq(softs,op_aio_ib_q);
if ( PQI_STATUS_SUCCESS == ret)
ret = pqisrc_change_op_ibq_queue_prop(softs,
@@ -653,15 +669,16 @@
/*
* subroutine used to create an operational ib queue for RAID.
*/
-int pqisrc_create_op_raid_ibq(pqisrc_softstate_t *softs,
+int
+pqisrc_create_op_raid_ibq(pqisrc_softstate_t *softs,
ib_queue_t *op_raid_ib_q)
{
- int ret = PQI_STATUS_SUCCESS;;
-
+ int ret = PQI_STATUS_SUCCESS;
+
DBG_FUNC("IN\n");
-
+
ret = pqisrc_create_op_ibq(softs,op_raid_ib_q);
-
+
DBG_FUNC("OUT\n");
return ret;
}
@@ -669,30 +686,31 @@
/*
* Allocate and create an event queue to process supported events.
*/
-int pqisrc_alloc_and_create_event_queue(pqisrc_softstate_t *softs)
+int
+pqisrc_alloc_and_create_event_queue(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t alloc_size = 0;
uint32_t num_elem;
char *virt_addr = NULL;
dma_addr_t dma_addr = 0;
- uint32_t event_q_pi_dma_start_offset = 0;
+ uint64_t event_q_pi_dma_start_offset = 0;
uint32_t event_q_pi_virt_start_offset = 0;
char *event_q_pi_virt_start_addr = NULL;
ob_queue_t *event_q = NULL;
-
+
DBG_FUNC("IN\n");
- /*
- * Calculate memory requirements.
- * If event queue is shared for IO response, number of
- * elements in event queue depends on num elements in OP OB Q
- * also. Since event queue element size (32) is more than IO
+ /*
+ * Calculate memory requirements.
+ * If event queue is shared for IO response, number of
+ * elements in event queue depends on num elements in OP OB Q
+ * also. Since event queue element size (32) is more than IO
* response size , event queue element size need not be checked
* for queue size calculation.
*/
-#ifdef SHARE_EVENT_QUEUE_FOR_IO
+#ifdef SHARE_EVENT_QUEUE_FOR_IO
num_elem = MIN(softs->num_elem_per_op_obq, PQISRC_NUM_EVENT_Q_ELEM);
#else
num_elem = PQISRC_NUM_EVENT_Q_ELEM;
@@ -719,7 +737,7 @@
dma_addr = softs->event_q_dma_mem.dma_addr;
event_q_pi_dma_start_offset += dma_addr;
event_q_pi_virt_start_addr = virt_addr + event_q_pi_virt_start_offset;
-
+
event_q = &softs->event_q;
ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
FILL_QUEUE_ARRAY_ADDR(event_q,virt_addr,dma_addr);
@@ -740,7 +758,7 @@
DBG_FUNC("OUT\n");
return ret;
-
+
err_out_create:
pqisrc_destroy_event_queue(softs);
err_out:
@@ -750,20 +768,21 @@
/*
* Allocate DMA memory and create operational ib queues.
- */
-int pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *softs)
+ */
+int
+pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t alloc_size = 0;
char *virt_addr = NULL;
dma_addr_t dma_addr = 0;
uint32_t ibq_size = 0;
- uint32_t ib_ci_dma_start_offset = 0;
+ uint64_t ib_ci_dma_start_offset = 0;
char *ib_ci_virt_start_addr = NULL;
- uint32_t ib_ci_virt_start_offset = 0;
+ uint32_t ib_ci_virt_start_offset = 0;
uint32_t ibq_id = PQI_MIN_OP_IB_QUEUE_ID;
ib_queue_t *op_ib_q = NULL;
- uint32_t num_op_ibq = softs->num_op_raid_ibq +
+ uint32_t num_op_ibq = softs->num_op_raid_ibq +
softs->num_op_aio_ibq;
int i = 0;
@@ -772,7 +791,7 @@
/* Calculate memory requirements */
ibq_size = softs->num_elem_per_op_ibq * softs->ibq_elem_size;
alloc_size = num_op_ibq * ibq_size;
- /* CI indexes starts after Queue element array */
+ /* CI indexes starts after Queue element array */
ib_ci_dma_start_offset = alloc_size;
ib_ci_virt_start_offset = alloc_size;
alloc_size += num_op_ibq * sizeof(uint32_t); /*For IBQ CI*/
@@ -795,7 +814,7 @@
ib_ci_virt_start_addr = virt_addr + ib_ci_virt_start_offset;
ASSERT(softs->num_op_raid_ibq == softs->num_op_aio_ibq);
-
+
for (i = 0; i < softs->num_op_raid_ibq; i++) {
/* OP RAID IB Q */
op_ib_q = &softs->op_raid_ib_q[i];
@@ -803,21 +822,24 @@
FILL_QUEUE_ARRAY_ADDR(op_ib_q,virt_addr,dma_addr);
op_ib_q->q_id = ibq_id++;
- snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "raid_ibqlock%d", i);
+ snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "raid_ibqlock%d", i);
ret = OS_INIT_PQILOCK(softs, &op_ib_q->lock, op_ib_q->lockname);
- if(ret){
- DBG_ERR("raid_ibqlock %d init failed\n", i);
- op_ib_q->lockcreated = false;
- goto err_lock;
+ if(ret){
+ /* TODO: error handling */
+ DBG_ERR("raid_ibqlock %d init failed\n", i);
+ op_ib_q->lockcreated = false;
+ goto err_lock;
}
- op_ib_q->lockcreated = true;
- op_ib_q->num_elem = softs->num_elem_per_op_ibq;
+ op_ib_q->lockcreated = true;
+
+ op_ib_q->num_elem = softs->num_elem_per_op_ibq;
op_ib_q->elem_size = softs->ibq_elem_size;
op_ib_q->ci_dma_addr = ib_ci_dma_start_offset +
(2 * i * sizeof(uint32_t));
op_ib_q->ci_virt_addr = (uint32_t*)(ib_ci_virt_start_addr +
(2 * i * sizeof(uint32_t)));
- ASSERT(!(op_ib_q->ci_dma_addr & PQI_ADDR_ALIGN_MASK_4));
+ ASSERT(!(op_ib_q->ci_dma_addr & PQI_ADDR_ALIGN_MASK_4));
+
ret = pqisrc_create_op_raid_ibq(softs, op_ib_q);
if (ret) {
DBG_ERR("[ %s ] Failed to Create OP Raid IBQ %d\n",
@@ -833,28 +855,31 @@
ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
FILL_QUEUE_ARRAY_ADDR(op_ib_q,virt_addr,dma_addr);
op_ib_q->q_id = ibq_id++;
- snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "aio_ibqlock%d", i);
+ snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "aio_ibqlock%d", i);
ret = OS_INIT_PQILOCK(softs, &op_ib_q->lock, op_ib_q->lockname);
- if(ret){
- DBG_ERR("aio_ibqlock %d init failed\n", i);
- op_ib_q->lockcreated = false;
- goto err_lock;
- }
- op_ib_q->lockcreated = true;
- op_ib_q->num_elem = softs->num_elem_per_op_ibq;
+ if(ret){
+ /* TODO: error handling */
+ DBG_ERR("aio_ibqlock %d init failed\n", i);
+ op_ib_q->lockcreated = false;
+ goto err_lock;
+ }
+ op_ib_q->lockcreated = true;
+
+ op_ib_q->num_elem = softs->num_elem_per_op_ibq;
op_ib_q->elem_size = softs->ibq_elem_size;
op_ib_q->ci_dma_addr = ib_ci_dma_start_offset +
(((2 * i) + 1) * sizeof(uint32_t));
op_ib_q->ci_virt_addr = (uint32_t*)(ib_ci_virt_start_addr +
(((2 * i) + 1) * sizeof(uint32_t)));
ASSERT(!(op_ib_q->ci_dma_addr & PQI_ADDR_ALIGN_MASK_4));
+
ret = pqisrc_create_op_aio_ibq(softs, op_ib_q);
if (ret) {
DBG_ERR("Failed to Create OP AIO IBQ %d\n",op_ib_q->q_id);
goto err_out_create;
}
op_ib_q->created = true;
-
+
virt_addr += ibq_size;
dma_addr += ibq_size;
}
@@ -872,15 +897,16 @@
/*
* Allocate DMA memory and create operational ob queues.
- */
-int pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *softs)
+ */
+int
+pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t alloc_size = 0;
char *virt_addr = NULL;
dma_addr_t dma_addr = 0;
uint32_t obq_size = 0;
- uint32_t ob_pi_dma_start_offset = 0;
+ uint64_t ob_pi_dma_start_offset = 0;
uint32_t ob_pi_virt_start_offset = 0;
char *ob_pi_virt_start_addr = NULL;
uint32_t obq_id = PQI_MIN_OP_OB_QUEUE_ID;
@@ -890,17 +916,17 @@
DBG_FUNC("IN\n");
- /*
- * OB Q element array should be 64 byte aligned.
- * So the number of elements in OB Q should be multiple
- * of 4, so that OB Queue element size (16) * num elements
+ /*
+ * OB Q element array should be 64 byte aligned.
+ * So the number of elements in OB Q should be multiple
+ * of 4, so that OB Queue element size (16) * num elements
* will be multiple of 64.
*/
-
+
ALIGN_BOUNDARY(softs->num_elem_per_op_obq, 4);
obq_size = softs->num_elem_per_op_obq * softs->obq_elem_size;
alloc_size += num_op_obq * obq_size;
- /* PI indexes starts after Queue element array */
+ /* PI indexes starts after Queue element array */
ob_pi_dma_start_offset = alloc_size;
ob_pi_virt_start_offset = alloc_size;
alloc_size += num_op_obq * sizeof(uint32_t); /*For OBQ PI*/
@@ -924,15 +950,15 @@
DBG_INFO("softs->num_op_obq %d\n",softs->num_op_obq);
- for (i = 0; i < softs->num_op_obq; i++) {
+ for (i = 0; i < softs->num_op_obq; i++) {
op_ob_q = &softs->op_ob_q[i];
ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
FILL_QUEUE_ARRAY_ADDR(op_ob_q,virt_addr,dma_addr);
op_ob_q->q_id = obq_id++;
if(softs->share_opq_and_eventq == true)
- op_ob_q->intr_msg_num = i;
+ op_ob_q->intr_msg_num = i;
else
- op_ob_q->intr_msg_num = i + 1; /* msg num zero for event */
+ op_ob_q->intr_msg_num = i + 1; /* msg num zero for event */
op_ob_q->num_elem = softs->num_elem_per_op_obq;
op_ob_q->elem_size = softs->obq_elem_size;
op_ob_q->pi_dma_addr = ob_pi_dma_start_offset +
@@ -940,7 +966,7 @@
op_ob_q->pi_virt_addr = (uint32_t*)(ob_pi_virt_start_addr +
(i * sizeof(uint32_t)));
ASSERT(!(op_ob_q->pi_dma_addr & PQI_ADDR_ALIGN_MASK_4));
-
+
ret = pqisrc_create_op_obq(softs,op_ob_q);
if (ret) {
DBG_ERR("Failed to Create OP OBQ %d\n",op_ob_q->q_id);
@@ -950,10 +976,10 @@
virt_addr += obq_size;
dma_addr += obq_size;
}
-
+
DBG_FUNC("OUT\n");
return ret;
-
+
err_out_create:
pqisrc_destroy_op_ob_queues(softs);
err_out:
@@ -963,13 +989,14 @@
/*
* Function used to create operational queues for the adapter.
- */
-int pqisrc_create_op_queues(pqisrc_softstate_t *softs)
+ */
+int
+pqisrc_create_op_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
DBG_FUNC("IN\n");
-
+
/* Create Operational IB queues */
ret = pqisrc_alloc_and_create_ib_queues(softs);
if (ret)
@@ -982,12 +1009,12 @@
/* Create Event queue */
ret = pqisrc_alloc_and_create_event_queue(softs);
if (ret)
- goto err_out_eventq;
+ goto err_out_eventq;
DBG_FUNC("OUT\n");
return ret;
err_out_eventq:
- pqisrc_destroy_op_ob_queues(softs);
+ pqisrc_destroy_op_ob_queues(softs);
err_out_obq:
pqisrc_destroy_op_ib_queues(softs);
err_out:
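
pqisrc_create_op_queues() keeps the driver's standard goto-unwind idiom: each resource acquired gains a label that later failures fall through, releasing everything in reverse order of creation. The generic shape, with hypothetical create/destroy pairs:

	static int
	sketch_create_all(void)
	{
		int ret;

		if ((ret = sketch_create_a()) != 0)
			goto err_out;
		if ((ret = sketch_create_b()) != 0)
			goto err_a;
		if ((ret = sketch_create_c()) != 0)
			goto err_b;
		return (0);

	err_b:
		sketch_destroy_b();
	err_a:
		sketch_destroy_a();
	err_out:
		return (ret);
	}
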
diff --git a/sys/dev/smartpqi/smartpqi_request.c b/sys/dev/smartpqi/smartpqi_request.c
--- a/sys/dev/smartpqi/smartpqi_request.c
+++ b/sys/dev/smartpqi/smartpqi_request.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,12 +27,26 @@
#include "smartpqi_includes.h"
+/*
+ * Attempt to perform offload RAID mapping for a logical volume I/O.
+ */
+
+#define HPSA_RAID_0 0
+#define HPSA_RAID_4 1
+#define HPSA_RAID_1 2 /* also used for RAID 10 */
+#define HPSA_RAID_5 3 /* also used for RAID 50 */
+#define HPSA_RAID_51 4
+#define HPSA_RAID_6 5 /* also used for RAID 60 */
+#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
+#define HPSA_RAID_MAX HPSA_RAID_ADM
+#define HPSA_RAID_UNKNOWN 0xff
+
#define SG_FLAG_LAST 0x40000000
#define SG_FLAG_CHAIN 0x80000000
/* Subroutine to find out embedded sgl count in IU */
-static inline
-uint32_t pqisrc_embedded_sgl_count(uint32_t elem_alloted)
+static inline uint32_t
+pqisrc_embedded_sgl_count(uint32_t elem_alloted)
{
uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU;
DBG_FUNC(" IN ");
@@ -47,19 +60,19 @@
DBG_FUNC(" OUT ");
return embedded_sgl_count;
-
+
}
/* Subroutine to find out contiguous free elem in IU */
-static inline
-uint32_t pqisrc_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q)
+static inline uint32_t
+pqisrc_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q)
{
uint32_t contiguous_free_elem = 0;
DBG_FUNC(" IN ");
if(pi >= ci) {
- contiguous_free_elem = (elem_in_q - pi);
+ contiguous_free_elem = (elem_in_q - pi);
if(ci == 0)
contiguous_free_elem -= 1;
} else {
@@ -80,7 +93,7 @@
DBG_FUNC(" IN ");
DBG_IO("SGL_Count :%d",SG_Count);
/********
- If SG_Count greater than max sg per IU i.e 4 or 68
+ If SG_Count greater than max sg per IU i.e 4 or 68
(4 is with out spanning or 68 is with spanning) chaining is required.
OR, If SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU then,
on these two cases one element is enough.
@@ -97,13 +110,13 @@
}
/* Subroutine to build SG list for the IU submission*/
-static
-boolean_t pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr,
+static boolean_t
+pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr,
uint32_t num_elem_alloted)
{
uint32_t i;
uint32_t num_sg = OS_GET_IO_SG_COUNT(rcb);
- sgt_t *sgt = sg_array;
+ sgt_t *sgt = sg_array;
sgt_t *sg_chain = NULL;
boolean_t partial = false;
@@ -120,7 +133,7 @@
sgt->len= OS_GET_IO_SG_LEN(rcb,i);
sgt->flags= 0;
}
-
+
sg_array[num_sg - 1].flags = SG_FLAG_LAST;
} else {
/**
@@ -130,15 +143,15 @@
sgt->addr = rcb->sg_chain_dma;
sgt->len = num_sg * sizeof(sgt_t);
sgt->flags = SG_FLAG_CHAIN;
-
+
sgt = sg_chain;
for (i = 0; i < num_sg; i++, sgt++) {
sgt->addr = OS_GET_IO_SG_ADDR(rcb,i);
sgt->len = OS_GET_IO_SG_LEN(rcb,i);
sgt->flags = 0;
}
-
- sg_chain[num_sg - 1].flags = SG_FLAG_LAST;
+
+ sg_chain[num_sg - 1].flags = SG_FLAG_LAST;
num_sg = 1;
partial = true;
@@ -147,16 +160,16 @@
iu_hdr->iu_length = num_sg * sizeof(sgt_t);
DBG_FUNC(" OUT ");
return partial;
-
+
}
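
pqisrc_build_sgl() above emits one of two scatter-gather layouts. When the segment count fits in the IU, the descriptors are embedded directly and the final one carries SG_FLAG_LAST; otherwise a single embedded descriptor tagged SG_FLAG_CHAIN points at the pre-allocated chain buffer (rcb->sg_chain_dma) holding the full list. Schematically (illustrative layout notes, not driver code):

	/*
	 * embedded (num_sg fits in the IU):
	 *   IU: [seg0][seg1]...[segN-1 + SG_FLAG_LAST]
	 *
	 * chained (num_sg exceeds the embedded limit):
	 *   IU:           [addr = sg_chain_dma, len = num_sg * sizeof(sgt_t),
	 *                  flags = SG_FLAG_CHAIN]
	 *   chain buffer: [seg0][seg1]...[segN-1 + SG_FLAG_LAST]
	 */

Either way the IU header's iu_length is set to num_sg * sizeof(sgt_t), where num_sg collapses to 1 in the chained case, and the returned partial flag records that chaining was used.
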
/*Subroutine used to Build the RAID request */
-static void
-pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
+static void
+pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
pqisrc_raid_req_t *raid_req, uint32_t num_elem_alloted)
{
DBG_FUNC(" IN ");
-
+
raid_req->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
raid_req->header.comp_feature = 0;
raid_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
@@ -165,8 +178,8 @@
raid_req->request_id = rcb->tag;
raid_req->nexus_id = 0;
raid_req->buffer_length = GET_SCSI_BUFFLEN(rcb);
- memcpy(raid_req->lun_number, rcb->dvp->scsi3addr,
- sizeof(raid_req->lun_number));
+ memcpy(raid_req->lun_number, rcb->dvp->scsi3addr,
+ sizeof(raid_req->lun_number));
raid_req->protocol_spec = 0;
raid_req->data_direction = rcb->data_dir;
raid_req->reserved1 = 0;
@@ -179,7 +192,7 @@
raid_req->reserved4 = 0;
raid_req->reserved5 = 0;
- /* As cdb and additional_cdb_bytes are contiguous,
+ /* As cdb and additional_cdb_bytes are contiguous,
update them in a single statement */
memcpy(raid_req->cdb, rcb->cdbp, rcb->cmdlen);
#if 0
@@ -187,7 +200,7 @@
for(i = 0; i < rcb->cmdlen ; i++)
DBG_IO(" 0x%x \n ",raid_req->cdb[i]);
#endif
-
+
switch (rcb->cmdlen) {
case 6:
case 10:
@@ -214,14 +227,14 @@
PQI_ADDITIONAL_CDB_BYTES_16;
break;
}
-
+
/* Frame SGL Descriptor */
raid_req->partial = pqisrc_build_sgl(&raid_req->sg_descriptors[0], rcb,
- &raid_req->header, num_elem_alloted);
+ &raid_req->header, num_elem_alloted);
- raid_req->header.iu_length +=
+ raid_req->header.iu_length +=
offsetof(pqisrc_raid_req_t, sg_descriptors) - sizeof(iu_header_t);
-
+
#if 0
DBG_IO("raid_req->header.iu_type : 0x%x", raid_req->header.iu_type);
DBG_IO("raid_req->response_queue_id :%d\n"raid_req->response_queue_id);
@@ -233,18 +246,18 @@
DBG_IO("raid_req->sg_descriptors[0].addr : %p", (void*)raid_req->sg_descriptors[0].addr);
DBG_IO("raid_req->sg_descriptors[0].len : 0x%x", raid_req->sg_descriptors[0].len);
DBG_IO("raid_req->sg_descriptors[0].flags : 0%x", raid_req->sg_descriptors[0].flags);
-#endif
- rcb->success_cmp_callback = pqisrc_process_io_response_success;
- rcb->error_cmp_callback = pqisrc_process_raid_response_error;
+#endif
+ rcb->success_cmp_callback = pqisrc_process_io_response_success;
+ rcb->error_cmp_callback = pqisrc_process_raid_response_error;
rcb->resp_qid = raid_req->response_queue_id;
-
+
DBG_FUNC(" OUT ");
-
+
}
/*Subroutine used to Build the AIO request */
static void
-pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
+pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
pqi_aio_req_t *aio_req, uint32_t num_elem_alloted)
{
DBG_FUNC(" IN ");
@@ -263,11 +276,12 @@
aio_req->mem_type = 0;
aio_req->fence = 0;
aio_req->res2 = 0;
- aio_req->task_attr = OS_GET_TASK_ATTR(rcb);
+ aio_req->task_attr = OS_GET_TASK_ATTR(rcb);
aio_req->cmd_prio = 0;
aio_req->res3 = 0;
aio_req->err_idx = aio_req->req_id;
aio_req->cdb_len = rcb->cmdlen;
+
if(rcb->cmdlen > sizeof(aio_req->cdb))
rcb->cmdlen = sizeof(aio_req->cdb);
memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen);
@@ -278,7 +292,7 @@
#endif
memset(aio_req->lun,0,sizeof(aio_req->lun));
memset(aio_req->res4,0,sizeof(aio_req->res4));
-
+
if(rcb->encrypt_enable == true) {
aio_req->encrypt_enable = true;
aio_req->encrypt_key_index = LE_16(rcb->enc_info.data_enc_key_index);
@@ -289,8 +303,8 @@
aio_req->encrypt_key_index = 0;
aio_req->encrypt_twk_high = 0;
aio_req->encrypt_twk_low = 0;
- }
-
+ }
+
/* Frame SGL Descriptor */
aio_req->partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
&aio_req->header, num_elem_alloted);
@@ -298,8 +312,8 @@
aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t);
DBG_INFO("aio_req->num_sg :%d",aio_req->num_sg);
-
- aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) -
+
+ aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) -
sizeof(iu_header_t);
#if 0
DBG_IO("aio_req->header.iu_type : 0x%x \n",aio_req->header.iu_type);
@@ -315,9 +329,9 @@
DBG_IO("aio_req->sg_desc[0].len : 0%x \n", aio_req->sg_desc[0].len);
DBG_IO("aio_req->sg_desc[0].flags : 0%x \n", aio_req->sg_desc[0].flags);
#endif
-
- rcb->success_cmp_callback = pqisrc_process_io_response_success;
- rcb->error_cmp_callback = pqisrc_process_aio_response_error;
+
+ rcb->success_cmp_callback = pqisrc_process_io_response_success;
+ rcb->error_cmp_callback = pqisrc_process_aio_response_error;
rcb->resp_qid = aio_req->response_queue_id;
DBG_FUNC(" OUT ");
@@ -325,59 +339,65 @@
}
/*Function used to build and send RAID/AIO */
-int pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
+int
+pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
{
ib_queue_t *ib_q_array = softs->op_aio_ib_q;
ib_queue_t *ib_q = NULL;
- char *ib_iu = NULL;
+ char *ib_iu = NULL;
IO_PATH_T io_path = AIO_PATH;
- uint32_t TraverseCount = 0;
- int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb);
+ uint32_t TraverseCount = 0;
+ int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb);
int qindex = first_qindex;
uint32_t num_op_ib_q = softs->num_op_aio_ibq;
uint32_t num_elem_needed;
uint32_t num_elem_alloted = 0;
pqi_scsi_dev_t *devp = rcb->dvp;
uint8_t raidbypass_cdb[16];
-
- DBG_FUNC(" IN ");
+ DBG_FUNC(" IN ");
- rcb->cdbp = OS_GET_CDBP(rcb);
-
- if(IS_AIO_PATH(devp)) {
- /** IO for Physical Drive **/
- /** Send in AIO PATH**/
- rcb->ioaccel_handle = devp->ioaccel_handle;
- } else {
- int ret = PQI_STATUS_FAILURE;
- /** IO for RAID Volume **/
- if (devp->offload_enabled) {
- /** ByPass IO ,Send in AIO PATH **/
- ret = pqisrc_send_scsi_cmd_raidbypass(softs,
- devp, rcb, raidbypass_cdb);
- }
-
- if (PQI_STATUS_FAILURE == ret) {
- /** Send in RAID PATH **/
- io_path = RAID_PATH;
- num_op_ib_q = softs->num_op_raid_ibq;
- ib_q_array = softs->op_raid_ib_q;
+ if(!rcb->aio_retry) {
+ rcb->cdbp = OS_GET_CDBP(rcb);
+ if(IS_AIO_PATH(devp)) {
+ /** IO for Physical Drive **/
+ /** Send in AIO PATH**/
+ rcb->ioaccel_handle = devp->ioaccel_handle;
} else {
- rcb->cdbp = raidbypass_cdb;
+ int ret = PQI_STATUS_FAILURE;
+ /** IO for RAID Volume **/
+ if (devp->offload_enabled) {
+ /** Bypass IO, send in AIO PATH **/
+ ret = pqisrc_send_scsi_cmd_raidbypass(softs,
+ devp, rcb, raidbypass_cdb);
+ }
+ if (PQI_STATUS_FAILURE == ret) {
+ /** Send in RAID PATH **/
+ io_path = RAID_PATH;
+ num_op_ib_q = softs->num_op_raid_ibq;
+ ib_q_array = softs->op_raid_ib_q;
+ } else {
+ rcb->cdbp = raidbypass_cdb;
+ }
}
+ } else {
+ /* Retrying failed AIO IO */
+ io_path = RAID_PATH;
+ rcb->cdbp = OS_GET_CDBP(rcb);
+ num_op_ib_q = softs->num_op_raid_ibq;
+ ib_q_array = softs->op_raid_ib_q;
}
-
+
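/*
 * [editorial sketch] The new aio_retry branch above completes the path
 * selection. Condensed restatement (IS_AIO_PATH and offload_enabled are the
 * driver's; the helper and enum below are hypothetical):
 */
typedef enum { SKETCH_AIO, SKETCH_RAID } sketch_path_t;

static sketch_path_t
sketch_select_path(boolean_t is_physical, boolean_t offload_enabled,
    boolean_t bypass_ok, boolean_t aio_retry)
{
	if (aio_retry)
		return SKETCH_RAID;	/* a failed AIO request retries on the RAID path */
	if (is_physical)
		return SKETCH_AIO;	/* physical drives always take the AIO path */
	if (offload_enabled && bypass_ok)
		return SKETCH_AIO;	/* RAID-bypass I/O rides the AIO path */
	return SKETCH_RAID;		/* everything else goes down the RAID path */
}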
num_elem_needed = pqisrc_num_elem_needed(softs, OS_GET_IO_SG_COUNT(rcb));
DBG_IO("num_elem_needed :%d",num_elem_needed);
-
+
do {
uint32_t num_elem_available;
ib_q = (ib_q_array + qindex);
- PQI_LOCK(&ib_q->lock);
+ PQI_LOCK(&ib_q->lock);
num_elem_available = pqisrc_contiguous_free_elem(ib_q->pi_local,
*(ib_q->ci_virt_addr), ib_q->num_elem);
-
+
DBG_IO("num_elem_avialable :%d\n",num_elem_available);
if(num_elem_available >= num_elem_needed) {
num_elem_alloted = num_elem_needed;
@@ -385,7 +405,7 @@
}
DBG_IO("Current queue is busy! Hop to next queue\n");
- PQI_UNLOCK(&ib_q->lock);
+ PQI_UNLOCK(&ib_q->lock);
qindex = (qindex + 1) % num_op_ib_q;
if(qindex == first_qindex) {
if (num_elem_needed == 1)
@@ -394,16 +414,18 @@
num_elem_needed = 1;
}
}while(TraverseCount < 2);
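/*
 * [editorial sketch] pqisrc_contiguous_free_elem() (not shown in this diff)
 * reports how many IU slots can be filled without wrapping the ring, which is
 * why the loop above drops num_elem_needed to 1 after a full pass over the
 * queues: a single slot can always hold a chained SGL. A plausible shape of
 * that computation:
 */
static uint32_t
sketch_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q)
{
	uint32_t free_elem;

	if (pi >= ci) {
		free_elem = elem_in_q - pi;	/* room up to the end of the ring */
		if (ci == 0)
			free_elem -= 1;		/* keep pi from landing on ci */
	} else {
		free_elem = ci - pi - 1;	/* room between pi and ci */
	}
	return free_elem;
}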
-
+
DBG_IO("num_elem_alloted :%d",num_elem_alloted);
if (num_elem_alloted == 0) {
DBG_WARN("OUT: IB Queues were full\n");
return PQI_STATUS_QFULL;
- }
-
+ }
+
+ pqisrc_increment_device_active_io(softs,devp);
+
/* Get IB Queue Slot address to build IU */
ib_iu = ib_q->array_virt_addr + (ib_q->pi_local * ib_q->elem_size);
-
+
if(io_path == AIO_PATH) {
/** Build AIO structure **/
pqisrc_build_aio_io(softs, rcb, (pqi_aio_req_t*)ib_iu,
@@ -413,9 +435,11 @@
pqisrc_build_raid_io(softs, rcb, (pqisrc_raid_req_t*)ib_iu,
num_elem_alloted);
}
-
+
rcb->req_pending = true;
-
+ rcb->req_q = ib_q;
+ rcb->path = io_path;
+
/* Update the local PI */
ib_q->pi_local = (ib_q->pi_local + num_elem_alloted) % ib_q->num_elem;
@@ -424,16 +448,16 @@
/* Inform the fw about the new IU */
PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local);
-
- PQI_UNLOCK(&ib_q->lock);
+
+ PQI_UNLOCK(&ib_q->lock);
DBG_FUNC(" OUT ");
return PQI_STATUS_SUCCESS;
}
/* Subroutine used to set encryption info as part of RAID bypass IO*/
-static inline void pqisrc_set_enc_info(
- struct pqi_enc_info *enc_info, struct raid_map *raid_map,
- uint64_t first_block)
+static inline void
+pqisrc_set_enc_info(struct pqi_enc_info *enc_info,
+ struct raid_map *raid_map, uint64_t first_block)
{
uint32_t volume_blk_size;
@@ -452,25 +476,12 @@
enc_info->encrypt_tweak_lower = ((uint32_t)(first_block));
}
-
-/*
- * Attempt to perform offload RAID mapping for a logical volume I/O.
- */
-
-#define HPSA_RAID_0 0
-#define HPSA_RAID_4 1
-#define HPSA_RAID_1 2 /* also used for RAID 10 */
-#define HPSA_RAID_5 3 /* also used for RAID 50 */
-#define HPSA_RAID_51 4
-#define HPSA_RAID_6 5 /* also used for RAID 60 */
-#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
-#define HPSA_RAID_MAX HPSA_RAID_ADM
-#define HPSA_RAID_UNKNOWN 0xff
-
/* Subroutine used to parse the scsi opcode and build the CDB for RAID bypass*/
-int check_for_scsi_opcode(uint8_t *cdb, boolean_t *is_write, uint64_t *fst_blk,
- uint32_t *blk_cnt) {
-
+int
+check_for_scsi_opcode(uint8_t *cdb, boolean_t *is_write, uint64_t *fst_blk,
+ uint32_t *blk_cnt)
+{
+
switch (cdb[0]) {
case SCMD_WRITE_6:
*is_write = true;
@@ -506,10 +517,58 @@
return PQI_STATUS_SUCCESS;
}
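/*
 * [editorial sketch] For reference, a 10-byte READ/WRITE CDB carries the LBA
 * in bytes 2-5 and the transfer length in bytes 7-8, both big-endian, so the
 * body of check_for_scsi_opcode() (elided by this hunk) extracts roughly:
 */
static void
sketch_decode_rw10(const uint8_t *cdb, uint64_t *fst_blk, uint32_t *blk_cnt)
{
	*fst_blk = ((uint64_t)cdb[2] << 24) | ((uint64_t)cdb[3] << 16) |
	    ((uint64_t)cdb[4] << 8) | (uint64_t)cdb[5];
	*blk_cnt = ((uint32_t)cdb[7] << 8) | (uint32_t)cdb[8];
}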
+/* print any arbitrary buffer of length total_len */
+void
+pqisrc_print_buffer(pqisrc_softstate_t *softs, char *msg, void *user_buf,
+ uint32_t total_len, uint32_t flags)
+{
+#define LINE_BUF_LEN 60
+#define INDEX_PER_LINE 16
+ uint32_t buf_consumed = 0;
+ int ii;
+ char line_buf[LINE_BUF_LEN];
+ int line_len; /* written length per line */
+ uint8_t this_char;
+
+ if (user_buf == NULL)
+ return;
+
+ /* Print index columns */
+ if (flags & PRINT_FLAG_HDR_COLUMN)
+ {
+ for (ii = 0, line_len = 0; ii < MIN(total_len, 16); ii++)
+ {
+ line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02d ", ii);
+ if ((line_len + 4) >= LINE_BUF_LEN)
+ break;
+ }
+ DBG_NOTE("%15.15s:[ %s ]\n", "header", line_buf);
+ }
+
+ /* Print the buffer contents, 16 bytes per line */
+ while(buf_consumed < total_len)
+ {
+ memset(line_buf, 0, LINE_BUF_LEN);
+
+ for (ii = 0, line_len = 0; ii < INDEX_PER_LINE; ii++)
+ {
+ this_char = *((char*)(user_buf) + buf_consumed);
+ line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02x ", this_char);
+
+ buf_consumed++;
+ if (buf_consumed >= total_len || (line_len + 4) >= LINE_BUF_LEN)
+ break;
+ }
+ DBG_NOTE("%15.15s:[ %s ]\n", msg, line_buf);
+ }
+}
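/*
 * [editorial sketch] Typical use of the new dump helper; the wrapper below is
 * hypothetical, but pqisrc_print_buffer() and PRINT_FLAG_HDR_COLUMN are from
 * this patch:
 */
static void
sketch_dump_cdb(pqisrc_softstate_t *softs, rcb_t *rcb)
{
	/* logs an index header row, then the CDB as one 16-byte hex row */
	pqisrc_print_buffer(softs, "cdb", rcb->cdbp, rcb->cmdlen,
	    PRINT_FLAG_HDR_COLUMN);
}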
+
+
/*
* Function used to build and send RAID bypass request to the adapter
*/
-int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
+int
+pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t *cdb)
{
struct raid_map *raid_map;
@@ -540,17 +599,17 @@
/* Check for eligible opcode, get LBA and block count. */
memcpy(cdb, OS_GET_CDBP(rcb), rcb->cmdlen);
-
+
for(i = 0; i < rcb->cmdlen ; i++)
DBG_IO(" CDB [ %d ] : %x\n",i,cdb[i]);
- if(check_for_scsi_opcode(cdb, &is_write,
+ if(check_for_scsi_opcode(cdb, &is_write,
&fst_blk, &blk_cnt) == PQI_STATUS_FAILURE)
return PQI_STATUS_FAILURE;
/* Check for write to non-RAID-0. */
if (is_write && device->raid_level != SA_RAID_0)
- return PQI_STATUS_FAILURE;;
+ return PQI_STATUS_FAILURE;
- if(blk_cnt == 0)
+ if(blk_cnt == 0)
return PQI_STATUS_FAILURE;
lst_blk = fst_blk + blk_cnt - 1;
@@ -568,7 +627,8 @@
/* Calculate stripe information for the request. */
blks_per_row = data_disks_per_row * strip_sz;
if (!blks_per_row)
- return PQI_STATUS_FAILURE;
+ return PQI_STATUS_FAILURE; /* Send the IO on the RAID path itself, not AIO or raidbypass */
+
/* use __udivdi3 ? */
fst_row = fst_blk / blks_per_row;
lst_row = lst_blk / blks_per_row;
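/*
 * [editorial example] Worked numbers for the stripe math above, with an
 * assumed geometry of strip_sz = 128 blocks and data_disks_per_row = 3,
 * giving blks_per_row = 384. For fst_blk = 1000:
 *
 *	fst_row    = 1000 / 384 = 2	(third stripe row)
 *	row offset = 1000 % 384 = 232
 *	column     =  232 / 128 = 1	(second data disk in the row)
 *
 * The zero check just added matters because blks_per_row is derived from the
 * firmware-supplied raid map; dividing by zero here would panic the host.
 */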
@@ -693,9 +753,6 @@
(map_row * total_disks_per_row) + fst_col;
}
- if (map_idx >= RAID_MAP_MAX_ENTRIES)
- return PQI_STATUS_FAILURE;
-
rcb->ioaccel_handle = raid_map->dev_data[map_idx].ioaccel_handle;
disk_block = GET_LE64((uint8_t *)(&raid_map->disk_starting_blk)) +
fst_row * strip_sz +
@@ -740,47 +797,60 @@
}
rcb->cmdlen = cdb_length;
-
-
+
+
DBG_FUNC("OUT");
-
+
return PQI_STATUS_SUCCESS;
}
-/* Function used to submit a TMF to the adater */
-int pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
- rcb_t *rcb, int req_id, int tmf_type)
+/* Function used to submit an AIO TMF to the adapter.
+ * DEVICE_RESET is not supported.
+ */
+static int
+pqisrc_send_aio_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
+ rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
{
int rval = PQI_STATUS_SUCCESS;
- pqi_tmf_req_t tmf_req;
+ pqi_aio_tmf_req_t tmf_req;
+ ib_queue_t *op_ib_q = NULL;
- memset(&tmf_req, 0, sizeof(pqi_tmf_req_t));
+ memset(&tmf_req, 0, sizeof(pqi_aio_tmf_req_t));
DBG_FUNC("IN");
- tmf_req.header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
+ tmf_req.header.iu_type = PQI_REQUEST_IU_AIO_TASK_MANAGEMENT;
tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
tmf_req.req_id = rcb->tag;
-
- memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
+ tmf_req.error_idx = rcb->tag;
+ tmf_req.nexus = devp->ioaccel_handle;
+ //memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
tmf_req.tmf = tmf_type;
- tmf_req.req_id_to_manage = req_id;
tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
- tmf_req.obq_id_to_manage = rcb->resp_qid;
+ op_ib_q = &softs->op_aio_ib_q[0];
+
+ if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
+ tmf_req.req_id_to_manage = rcb_to_manage->tag;
+ tmf_req.nexus = rcb_to_manage->ioaccel_handle;
+ }
+
+ DBG_INFO("tmf_req.header.iu_type : %x tmf_req.req_id_to_manage :%d \n",tmf_req.header.iu_type,tmf_req.req_id_to_manage);
+ DBG_INFO("tmf_req.req_id : %d tmf_req.nexus : %x tmf_req.tmf %x QID : %d\n",tmf_req.req_id,tmf_req.nexus,tmf_req.tmf,op_ib_q->q_id);
rcb->req_pending = true;
+ /* Timedout tmf response goes here */
+ rcb->error_cmp_callback = pqisrc_process_aio_response_error;
- rval = pqisrc_submit_cmnd(softs,
- &softs->op_raid_ib_q[OS_GET_TMF_REQ_QINDEX(softs, rcb)], &tmf_req);
+ rval = pqisrc_submit_cmnd(softs, op_ib_q, &tmf_req);
if (rval != PQI_STATUS_SUCCESS) {
DBG_ERR("Unable to submit command rval=%d\n", rval);
return rval;
}
- rval = pqisrc_wait_on_condition(softs, rcb);
+ rval = pqisrc_wait_on_condition(softs, rcb, PQISRC_TMF_TIMEOUT);
if (rval != PQI_STATUS_SUCCESS){
DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type);
- rcb->status = REQUEST_FAILED;
+ rcb->status = rval;
}
if (rcb->status != REQUEST_SUCCESS) {
@@ -792,3 +862,186 @@
DBG_FUNC("OUT");
return rval;
}
+
+/* Function used to submit a RAID TMF to the adapter */
+static int
+pqisrc_send_raid_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
+ rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
+{
+ int rval = PQI_STATUS_SUCCESS;
+ pqi_raid_tmf_req_t tmf_req;
+ ib_queue_t *op_ib_q = NULL;
+
+ memset(&tmf_req, 0, sizeof(pqi_raid_tmf_req_t));
+
+ DBG_FUNC("IN");
+
+ tmf_req.header.iu_type = PQI_REQUEST_IU_RAID_TASK_MANAGEMENT;
+ tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
+ tmf_req.req_id = rcb->tag;
+
+ memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
+ tmf_req.tmf = tmf_type;
+ tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
+
+ /* Decide the queue where the tmf request should be submitted */
+ if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
+ tmf_req.obq_id_to_manage = rcb_to_manage->resp_qid;
+ tmf_req.req_id_to_manage = rcb_to_manage->tag;
+ }
+
+ if (softs->timeout_in_tmf &&
+ tmf_type == SOP_TASK_MANAGEMENT_LUN_RESET) {
+ /* OS_TMF_TIMEOUT_SEC - 1 to accommodate driver processing */
+ tmf_req.timeout_in_sec = OS_TMF_TIMEOUT_SEC - 1;
+ /* if OS tmf timeout is 0, set minimum value for timeout */
+ if (!tmf_req.timeout_in_sec)
+ tmf_req.timeout_in_sec = 1;
+ }
+
+ op_ib_q = &softs->op_raid_ib_q[0];
+ rcb->req_pending = true;
+ /* Timedout tmf response goes here */
+ rcb->error_cmp_callback = pqisrc_process_raid_response_error;
+
+ rval = pqisrc_submit_cmnd(softs, op_ib_q, &tmf_req);
+ if (rval != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Unable to submit command rval=%d\n", rval);
+ return rval;
+ }
+
+ rval = pqisrc_wait_on_condition(softs, rcb, PQISRC_TMF_TIMEOUT);
+ if (rval != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type);
+ rcb->status = rval;
+ }
+
+ if (rcb->status != REQUEST_SUCCESS) {
+ DBG_NOTE("Task Management failed tmf_type:%d "
+ "stat:0x%x\n", tmf_type, rcb->status);
+ rval = PQI_STATUS_FAILURE;
+ }
+
+ DBG_FUNC("OUT");
+ return rval;
+}
+
+int
+pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
+ rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
+{
+ int ret = PQI_STATUS_SUCCESS;
+
+ DBG_FUNC("IN");
+
+ if(!devp->is_physical_device) {
+ if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
+ if(rcb_to_manage->path == AIO_PATH) {
+ if(devp->offload_enabled)
+ ret = pqisrc_send_aio_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
+ }
+ else {
+ DBG_INFO("TASK ABORT not supported in raid\n");
+ ret = PQI_STATUS_FAILURE;
+ }
+ }
+ else {
+ ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
+ }
+ } else {
+ if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK)
+ ret = pqisrc_send_aio_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
+ else
+ ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
+ }
+
+ DBG_FUNC("IN");
+
+ return ret;
+}
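/*
 * [editorial note] Routing in pqisrc_send_tmf() above: ABORT_TASK is only
 * valid on the path the victim request went out on, so it is sent as an AIO
 * TMF for AIO-path requests and rejected for RAID-path I/O on logical
 * volumes; every other TMF (e.g. a LUN reset) goes out as a RAID TMF.
 * An illustrative call to abort an outstanding request:
 *
 *	ret = pqisrc_send_tmf(softs, devp, tmf_rcb, stuck_rcb,
 *	    SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK);
 *
 * where tmf_rcb carries the TMF's own tag and stuck_rcb is the request being
 * aborted (both variable names are hypothetical).
 */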
+
+/*
+ * Function used to build and send the vendor general request
+ * Used for configuring PQI feature bits between firmware and driver
+ */
+int
+pqisrc_build_send_vendor_request(
+ pqisrc_softstate_t *softs,
+ pqi_vendor_general_request_t *request,
+ raid_path_error_info_elem_t *error_info)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ ib_queue_t *op_ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
+ ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
+
+ rcb_t *rcb = NULL;
+
+ uint16_t request_id = 0;
+
+ /* Get the tag */
+ request_id = pqisrc_get_tag(&softs->taglist);
+ if (INVALID_ELEM == request_id) {
+ DBG_ERR("Tag not available\n");
+ ret = PQI_STATUS_FAILURE;
+ goto err_notag;
+ }
+
+ ((pqi_vendor_general_request_t *)request)->request_id = request_id;
+ ((pqi_vendor_general_request_t *)request)->response_queue_id = ob_q->q_id;
+
+ rcb = &softs->rcb[request_id];
+
+ rcb->req_pending = true;
+ rcb->tag = request_id;
+
+ ret = pqisrc_submit_cmnd(softs, op_ib_q, request);
+
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Unable to submit command\n");
+ goto err_out;
+ }
+
+ ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Management request timed out!\n");
+ goto err_out;
+ }
+
+ ret = rcb->status;
+ if (ret) {
+ ret = PQI_STATUS_FAILURE;
+ if(error_info) {
+ // TODO: config table err handling.
+ }
+ } else {
+ if(error_info) {
+ ret = PQI_STATUS_SUCCESS;
+ memset(error_info, 0, sizeof(*error_info));
+ }
+ }
+
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist, ((pqi_vendor_general_request_t *)request)->request_id);
+ DBG_FUNC("OUT\n");
+ return ret;
+
+err_out:
+ DBG_ERR("Vender general request submission failed.\n");
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist, ((pqi_vendor_general_request_t *)request)->request_id);
+err_notag:
+ DBG_FUNC("FAILED \n");
+ return ret;
+}
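/*
 * [editorial sketch] A caller configuring firmware features might fill the
 * request like this; the helper above supplies request_id and
 * response_queue_id itself. Treat the IU-type and function-code constants as
 * assumptions made for illustration.
 */
static int
sketch_config_table_update(pqisrc_softstate_t *softs,
    uint16_t first, uint16_t last)
{
	pqi_vendor_general_request_t req;

	memset(&req, 0, sizeof(req));
	req.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;	/* assumed */
	req.header.iu_length = sizeof(req) - sizeof(iu_header_t);
	req.function_code = PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE; /* assumed */
	req.data.config_table_update.first_section = first;
	req.data.config_table_update.last_section = last;

	return pqisrc_build_send_vendor_request(softs, &req, NULL);
}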
+
+/* return the path as ASCII-string */
+char *
+io_path_to_ascii(IO_PATH_T path)
+{
+ switch (path)
+ {
+ case AIO_PATH: return "Aio";
+ case RAID_PATH: return "Raid";
+ default: return "Unknown";
+ }
+}
diff --git a/sys/dev/smartpqi/smartpqi_response.c b/sys/dev/smartpqi/smartpqi_response.c
--- a/sys/dev/smartpqi/smartpqi_response.c
+++ b/sys/dev/smartpqi/smartpqi_response.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,8 +30,8 @@
/*
* Process internal RAID response in the case of success.
*/
-void pqisrc_process_internal_raid_response_success(pqisrc_softstate_t *softs,
- rcb_t *rcb)
+void
+pqisrc_process_internal_raid_response_success(pqisrc_softstate_t *softs,rcb_t *rcb)
{
DBG_FUNC("IN");
@@ -45,7 +44,8 @@
/*
* Process internal RAID response in the case of failure.
*/
-void pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *softs,
+void
+pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *softs,
rcb_t *rcb, uint16_t err_idx)
{
raid_path_error_info_elem_t error_info;
@@ -54,18 +54,25 @@
rcb->error_info = (char *) (softs->err_buf_dma_mem.virt_addr) +
(err_idx * PQI_ERROR_BUFFER_ELEMENT_LENGTH);
- rcb->status = REQUEST_SUCCESS;
+
memcpy(&error_info, rcb->error_info, sizeof(error_info));
DBG_INFO("error_status 0x%x data_in_result 0x%x data_out_result 0x%x\n",
error_info.status, error_info.data_in_result, error_info.data_out_result);
- if (error_info.status != 0)
- rcb->status = REQUEST_FAILED;
- if (error_info.data_in_result != PQI_RAID_DATA_IN_OUT_GOOD)
- rcb->status = REQUEST_FAILED;
- if (error_info.data_out_result != PQI_RAID_DATA_IN_OUT_GOOD)
- rcb->status = REQUEST_FAILED;
+ rcb->status = REQUEST_FAILED;
+
+ switch (error_info.data_out_result) {
+ case PQI_RAID_DATA_IN_OUT_GOOD:
+ if (error_info.status == PQI_RAID_DATA_IN_OUT_GOOD)
+ rcb->status = REQUEST_SUCCESS;
+ break;
+ case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
+ if (error_info.status == PQI_RAID_DATA_IN_OUT_GOOD ||
+ error_info.status == PQI_RAID_STATUS_CHECK_CONDITION)
+ rcb->status = REQUEST_SUCCESS;
+ break;
+ }
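	/* [editorial note] The switch above succeeds only for data_out_result
	 * GOOD with SCSI status GOOD, or UNDERFLOW with status GOOD or CHECK
	 * CONDITION; every other combination stays REQUEST_FAILED, replacing
	 * the three independent checks deleted above. */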
rcb->req_pending = false;
@@ -75,8 +82,8 @@
/*
* Process the AIO/RAID IO in the case of success.
*/
-void pqisrc_process_io_response_success(pqisrc_softstate_t *softs,
- rcb_t *rcb)
+void
+pqisrc_process_io_response_success(pqisrc_softstate_t *softs, rcb_t *rcb)
{
DBG_FUNC("IN");
@@ -85,25 +92,79 @@
DBG_FUNC("OUT");
}
+static void
+pqisrc_extract_sense_data(sense_data_u_t *sense_data, uint8_t *key, uint8_t *asc, uint8_t *ascq)
+{
+ if (sense_data->fixed_format.response_code == SCSI_SENSE_RESPONSE_70 ||
+ sense_data->fixed_format.response_code == SCSI_SENSE_RESPONSE_71)
+ {
+ sense_data_fixed_t *fixed = &sense_data->fixed_format;
+
+ *key = fixed->sense_key;
+ *asc = fixed->sense_code;
+ *ascq = fixed->sense_qual;
+ }
+ else if (sense_data->descriptor_format.response_code == SCSI_SENSE_RESPONSE_72 ||
+ sense_data->descriptor_format.response_code == SCSI_SENSE_RESPONSE_73)
+ {
+ sense_data_descriptor_t *desc = &sense_data->descriptor_format;
+
+ *key = desc->sense_key;
+ *asc = desc->sense_code;
+ *ascq = desc->sense_qual;
+ }
+ else
+ {
+ *key = 0xFF;
+ *asc = 0xFF;
+ *ascq = 0xFF;
+ }
+}
+
+static void
+pqisrc_show_sense_data_simple(pqisrc_softstate_t *softs, rcb_t *rcb, sense_data_u_t *sense_data)
+{
+ uint8_t opcode = rcb->cdbp ? rcb->cdbp[0] : 0xFF;
+ char *path = io_path_to_ascii(rcb->path);
+ uint8_t key, asc, ascq;
+ pqisrc_extract_sense_data(sense_data, &key, &asc, &ascq);
+
+ DBG_NOTE("[ERR INFO] BTL: %d:%d:%d op=0x%x path=%s K:C:Q: %x:%x:%x\n",
+ rcb->dvp->bus, rcb->dvp->target, rcb->dvp->lun,
+ opcode, path, key, asc, ascq);
+}
+
+void
+pqisrc_show_sense_data_full(pqisrc_softstate_t *softs, rcb_t *rcb, sense_data_u_t *sense_data)
+{
+ pqisrc_print_buffer(softs, "sense data", sense_data, 32, 0);
+
+ pqisrc_show_sense_data_simple(softs, rcb, sense_data);
+
+ /* add more detail here as needed */
+}
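/*
 * [editorial note] The helpers above normalize both sense layouts (fixed
 * format, response codes 0x70/0x71, and descriptor format, 0x72/0x73) into a
 * key/ASC/ASCQ triple. With the union added to raid_path_error_info_elem_t
 * later in this patch, an error handler can log a failed request's sense
 * bytes with:
 *
 *	pqisrc_show_sense_data_full(softs, rcb, &err_info->sense_data);
 */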
+
+
/*
* Process the error info for AIO in the case of failure.
*/
-void pqisrc_process_aio_response_error(pqisrc_softstate_t *softs,
+void
+pqisrc_process_aio_response_error(pqisrc_softstate_t *softs,
rcb_t *rcb, uint16_t err_idx)
{
aio_path_error_info_elem_t *err_info = NULL;
-
+
DBG_FUNC("IN");
-
+
err_info = (aio_path_error_info_elem_t*)
- softs->err_buf_dma_mem.virt_addr +
+ softs->err_buf_dma_mem.virt_addr +
err_idx;
if(err_info == NULL) {
DBG_ERR("err_info structure is NULL err_idx :%x", err_idx);
return;
}
-
+
os_aio_response_error(rcb, err_info);
DBG_FUNC("OUT");
@@ -112,22 +173,23 @@
/*
* Process the error info for RAID IO in the case of failure.
*/
-void pqisrc_process_raid_response_error(pqisrc_softstate_t *softs,
+void
+pqisrc_process_raid_response_error(pqisrc_softstate_t *softs,
rcb_t *rcb, uint16_t err_idx)
{
raid_path_error_info_elem_t *err_info = NULL;
-
+
DBG_FUNC("IN");
-
+
err_info = (raid_path_error_info_elem_t*)
- softs->err_buf_dma_mem.virt_addr +
+ softs->err_buf_dma_mem.virt_addr +
err_idx;
-
+
if(err_info == NULL) {
DBG_ERR("err_info structure is NULL err_idx :%x", err_idx);
return;
}
-
+
os_raid_response_error(rcb, err_info);
DBG_FUNC("OUT");
@@ -136,7 +198,8 @@
/*
* Process the Task Management function response.
*/
-int pqisrc_process_task_management_response(pqisrc_softstate_t *softs,
+int
+pqisrc_process_task_management_response(pqisrc_softstate_t *softs,
pqi_tmf_resp_t *tmf_resp)
{
int ret = REQUEST_SUCCESS;
@@ -153,7 +216,7 @@
ret = REQUEST_SUCCESS;
break;
default:
- DBG_ERR("TMF Failed, Response code : 0x%x\n", tmf_resp->resp_code);
+ DBG_WARN("TMF Failed, Response code : 0x%x\n", tmf_resp->resp_code);
ret = REQUEST_FAILED;
break;
}
@@ -165,21 +228,39 @@
return ret;
}
+static int
+pqisrc_process_vendor_general_response(pqi_vendor_general_response_t *response)
+{
+
+ int ret = REQUEST_SUCCESS;
+
+ switch(response->status) {
+ case PQI_VENDOR_RESPONSE_IU_SUCCESS:
+ break;
+ case PQI_VENDOR_RESPONSE_IU_UNSUCCESS:
+ case PQI_VENDOR_RESPONSE_IU_INVALID_PARAM:
+ case PQI_VENDOR_RESPONSE_IU_INSUFF_RESRC:
+ ret = REQUEST_FAILED;
+ break;
+ }
+
+ return ret;
+}
+
/*
* Function used to process the response from the adapter
* which is invoked by IRQ handler.
*/
-void
+void
pqisrc_process_response_queue(pqisrc_softstate_t *softs, int oq_id)
{
ob_queue_t *ob_q;
struct pqi_io_response *response;
uint32_t oq_pi, oq_ci;
+ pqi_scsi_dev_t *dvp = NULL;
DBG_FUNC("IN");
- OS_ATOMIC64_INC(softs, num_intrs);
-
ob_q = &softs->op_ob_q[oq_id - 1]; /* zero for event Q */
oq_ci = ob_q->ci_local;
oq_pi = *(ob_q->pi_virt_addr);
@@ -190,18 +271,42 @@
rcb_t *rcb = NULL;
uint32_t tag = 0;
uint32_t offset;
+ boolean_t os_scsi_cmd = false;
if (oq_pi == oq_ci)
break;
/* Get the response */
offset = oq_ci * ob_q->elem_size;
- response = (struct pqi_io_response *)(ob_q->array_virt_addr +
+ response = (struct pqi_io_response *)(ob_q->array_virt_addr +
offset);
tag = response->request_id;
rcb = &softs->rcb[tag];
- /* Make sure we are processing a valid response. */
- ASSERT(rcb->tag == tag && rcb->req_pending);
- rcb->req_pending = false;
+ /* Make sure we are processing a valid response. */
+ if ((rcb->tag != tag) || (rcb->req_pending == false)) {
+ DBG_ERR("No such request pending with tag : %x", tag);
+ oq_ci = (oq_ci + 1) % ob_q->num_elem;
+ break;
+ }
+ /* A timed-out request has been completed. This should not be hit
+ * if the timeout is set to TIMEOUT_INFINITE when calling
+ * pqisrc_wait_on_condition(softs, rcb, timeout).
+ */
+ if (rcb->timedout) {
+ DBG_WARN("timed out request completing from firmware, driver already completed it with failure , free the tag %d\n", tag);
+ oq_ci = (oq_ci + 1) % ob_q->num_elem;
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist, tag);
+ break;
+ }
+
+ if (IS_OS_SCSICMD(rcb)) {
+ dvp = rcb->dvp;
+ if (dvp)
+ os_scsi_cmd = true;
+ else
+ DBG_WARN("Received IO completion for the Null device!!!\n");
+ }
+
DBG_INFO("response.header.iu_type : %x \n", response->header.iu_type);
@@ -209,14 +314,24 @@
case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
rcb->success_cmp_callback(softs, rcb);
+ if (os_scsi_cmd)
+ pqisrc_decrement_device_active_io(softs, dvp);
+
break;
case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
rcb->error_cmp_callback(softs, rcb, LE_16(response->error_index));
+ if (os_scsi_cmd)
+ pqisrc_decrement_device_active_io(softs, dvp);
break;
case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
rcb->req_pending = false;
break;
+ case PQI_RESPONSE_IU_VENDOR_GENERAL:
+ rcb->req_pending = false;
+ rcb->status = pqisrc_process_vendor_general_response(
+ (pqi_vendor_general_response_t *)response);
+ break;
case PQI_RESPONSE_IU_TASK_MANAGEMENT:
rcb->status = pqisrc_process_task_management_response(softs, (void *)response);
break;
@@ -230,7 +345,7 @@
}
ob_q->ci_local = oq_ci;
- PCI_MEM_PUT32(softs, ob_q->ci_register_abs,
+ PCI_MEM_PUT32(softs, ob_q->ci_register_abs,
ob_q->ci_register_offset, ob_q->ci_local );
DBG_FUNC("OUT");
}
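/*
 * [editorial sketch] The consumer loop above walks a single-consumer response
 * ring: firmware advances the producer index (oq_pi) in host memory, the
 * driver consumes until its local consumer index catches up, then publishes
 * the new CI back through PCI_MEM_PUT32(). The count of pending responses at
 * any instant is the ring distance:
 */
static inline uint32_t
sketch_responses_pending(uint32_t pi, uint32_t ci, uint32_t num_elem)
{
	return (pi - ci + num_elem) % num_elem;
}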
diff --git a/sys/dev/smartpqi/smartpqi_sis.c b/sys/dev/smartpqi/smartpqi_sis.c
--- a/sys/dev/smartpqi/smartpqi_sis.c
+++ b/sys/dev/smartpqi/smartpqi_sis.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,8 +27,9 @@
#include "smartpqi_includes.h"
-/* */
-void sis_disable_msix(pqisrc_softstate_t *softs)
+/* Function for disabling MSI-X interrupts */
+void
+sis_disable_msix(pqisrc_softstate_t *softs)
{
uint32_t db_reg;
@@ -44,25 +44,27 @@
DBG_FUNC("OUT\n");
}
-void sis_enable_intx(pqisrc_softstate_t *softs)
+void
+sis_enable_intx(pqisrc_softstate_t *softs)
{
uint32_t db_reg;
DBG_FUNC("IN\n");
db_reg = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
- LEGACY_SIS_IDBR);
+ LEGACY_SIS_IDBR);
db_reg |= SIS_ENABLE_INTX;
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, db_reg);
- if (pqisrc_sis_wait_for_db_bit_to_clear(softs,SIS_ENABLE_INTX)
- != PQI_STATUS_SUCCESS) {
+ if (pqisrc_sis_wait_for_db_bit_to_clear(softs,SIS_ENABLE_INTX)
+ != PQI_STATUS_SUCCESS) {
DBG_ERR("Failed to wait for enable intx db bit to clear\n");
}
DBG_FUNC("OUT\n");
}
-void sis_disable_intx(pqisrc_softstate_t *softs)
+void
+sis_disable_intx(pqisrc_softstate_t *softs)
{
uint32_t db_reg;
@@ -77,10 +79,11 @@
DBG_FUNC("OUT\n");
}
-void sis_disable_interrupt(pqisrc_softstate_t *softs)
+void
+sis_disable_interrupt(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN");
-
+
switch(softs->intr_type) {
case INTR_TYPE_FIXED:
pqisrc_configure_legacy_intx(softs,false);
@@ -94,30 +97,33 @@
DBG_ERR("Inerrupt mode none!\n");
break;
}
-
+
DBG_FUNC("OUT");
}
+
/* Trigger a NMI as part of taking controller offline procedure */
-void pqisrc_trigger_nmi_sis(pqisrc_softstate_t *softs)
+void
+pqisrc_trigger_nmi_sis(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
- PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, LE_32(TRIGGER_NMI_SIS));
DBG_FUNC("OUT\n");
}
/* Switch the adapter back to SIS mode during uninitialization */
-int pqisrc_reenable_sis(pqisrc_softstate_t *softs)
+int
+pqisrc_reenable_sis(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t timeout = SIS_ENABLE_TIMEOUT;
DBG_FUNC("IN\n");
- PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, LE_32(REENABLE_SIS));
COND_WAIT(((PCI_MEM_GET32(softs, &softs->ioa_reg->ioa_to_host_db, LEGACY_SIS_ODBR_R) &
@@ -126,13 +132,14 @@
DBG_WARN(" [ %s ] failed to re enable sis\n",__func__);
ret = PQI_STATUS_TIMEOUT;
}
-
+
DBG_FUNC("OUT\n");
return ret;
}
/* Validate the FW status PQI_CTRL_KERNEL_UP_AND_RUNNING */
-int pqisrc_check_fw_status(pqisrc_softstate_t *softs)
+int
+pqisrc_check_fw_status(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t timeout = SIS_STATUS_OK_TIMEOUT;
@@ -152,8 +159,8 @@
}
/* Function used to submit a SIS command to the adapter */
-static int pqisrc_send_sis_cmd(pqisrc_softstate_t *softs,
- uint32_t *mb)
+static int
+pqisrc_send_sis_cmd(pqisrc_softstate_t *softs, uint32_t *mb)
{
int ret = PQI_STATUS_SUCCESS;
int i = 0;
@@ -166,14 +173,15 @@
/* Copy Command to mailbox */
for (i = 0; i < 6; i++)
- PCI_MEM_PUT32(softs, &softs->ioa_reg->mb[i],
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->mb[i],
LEGACY_SIS_SRCV_MAILBOX+i*4, LE_32(mb[i]));
-
- PCI_MEM_PUT32(softs, &softs->ioa_reg->ioa_to_host_db_clr,
+
+ /* TODO: Switch to INTx mode? */
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->ioa_to_host_db_clr,
LEGACY_SIS_ODBR_R, LE_32(0x1000));
/* Submit the command */
- PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, LE_32(SIS_CMD_SUBMIT));
#ifdef SIS_POLL_WAIT
@@ -215,7 +223,8 @@
}
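/*
 * [editorial sketch] A SIS command is six 32-bit words pushed through the
 * legacy mailbox; the reply comes back in the same mb[] array, as the callers
 * below read it. Shape of a typical caller (the reply word position is an
 * assumption shown for illustration):
 */
static int
sketch_sis_query(pqisrc_softstate_t *softs, uint32_t cmd,
    uint32_t *reply_word1)
{
	uint32_t mb[6] = {0};
	int ret;

	mb[0] = cmd;			/* one of the SIS_CMD_* opcodes */
	ret = pqisrc_send_sis_cmd(softs, mb);
	if (ret == PQI_STATUS_SUCCESS)
		*reply_word1 = mb[1];	/* firmware writes its reply into mb[] */
	return ret;
}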
/* First SIS command for the adapter to check PQI support */
-int pqisrc_get_adapter_properties(pqisrc_softstate_t *softs,
+int
+pqisrc_get_adapter_properties(pqisrc_softstate_t *softs,
uint32_t *prop, uint32_t *ext_prop)
{
int ret = PQI_STATUS_SUCCESS;
@@ -237,7 +246,8 @@
}
/* Second SIS command to the adapter GET_COMM_PREFERRED_SETTINGS */
-int pqisrc_get_preferred_settings(pqisrc_softstate_t *softs)
+int
+pqisrc_get_preferred_settings(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t mb[6] = {0};
@@ -261,7 +271,8 @@
}
/* Get supported PQI capabilities from the adapter */
-int pqisrc_get_sis_pqi_cap(pqisrc_softstate_t *softs)
+int
+pqisrc_get_sis_pqi_cap(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t mb[6] = {0};
@@ -277,6 +288,8 @@
softs->pqi_cap.conf_tab_off = mb[4];
softs->pqi_cap.conf_tab_sz = mb[5];
+ os_update_dma_attributes(softs);
+
DBG_INIT("max_sg_elem = %x\n",
softs->pqi_cap.max_sg_elem);
DBG_INIT("max_transfer_size = %x\n",
@@ -290,7 +303,8 @@
}
/* Send INIT STRUCT BASE ADDR - one of the SIS command */
-int pqisrc_init_struct_base(pqisrc_softstate_t *softs)
+int
+pqisrc_init_struct_base(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t elem_size = 0;
@@ -370,7 +384,8 @@
* - GET_PQI_CAPABILITIES
* - INIT_STRUCT_BASE ADDR
*/
-int pqisrc_sis_init(pqisrc_softstate_t *softs)
+int
+pqisrc_sis_init(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t prop = 0;
@@ -422,34 +437,51 @@
goto err_out;
}
+ /* We need to allocate DMA memory here, so
+ * do any OS-specific DMA setup.
+ */
+ ret = os_dma_setup(softs);
+ if (ret) {
+ DBG_ERR("Failed to Setup DMA\n");
+ goto err_out;
+ }
+
/* Init struct base addr */
ret = pqisrc_init_struct_base(softs);
if (ret) {
DBG_ERR("Failed to set init struct base addr\n");
- goto err_out;
+ goto err_dma;
}
+
DBG_FUNC("OUT\n");
return ret;
+err_dma:
+ os_dma_destroy(softs);
err_out:
DBG_FUNC("OUT failed\n");
return ret;
}
/* Deallocate the resources used during SIS initialization */
-void pqisrc_sis_uninit(pqisrc_softstate_t *softs)
+void
+pqisrc_sis_uninit(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
os_dma_mem_free(softs, &softs->err_buf_dma_mem);
+
+ os_dma_destroy(softs);
os_resource_free(softs);
pqi_reset(softs);
+
DBG_FUNC("OUT\n");
}
-int pqisrc_sis_wait_for_db_bit_to_clear(pqisrc_softstate_t *softs, uint32_t bit)
+int
+pqisrc_sis_wait_for_db_bit_to_clear(pqisrc_softstate_t *softs, uint32_t bit)
{
int rcode = PQI_STATUS_SUCCESS;
uint32_t db_reg;
diff --git a/sys/dev/smartpqi/smartpqi_structures.h b/sys/dev/smartpqi/smartpqi_structures.h
--- a/sys/dev/smartpqi/smartpqi_structures.h
+++ b/sys/dev/smartpqi/smartpqi_structures.h
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,9 +28,6 @@
#ifndef _PQI_STRUCTURES_H
#define _PQI_STRUCTURES_H
-
-
-
struct bmic_host_wellness_driver_version {
uint8_t start_tag[4];
uint8_t driver_version_tag[2];
@@ -41,7 +37,6 @@
}OS_ATTRIBUTE_PACKED;
-
struct bmic_host_wellness_time {
uint8_t start_tag[4];
uint8_t time_tag[2];
@@ -59,14 +54,13 @@
}OS_ATTRIBUTE_PACKED;
-
/* As per PQI Spec pqi-2r00a , 6.2.2. */
/* device capability register , for admin q table 24 */
struct pqi_dev_adminq_cap {
uint8_t max_admin_ibq_elem;
uint8_t max_admin_obq_elem;
- uint8_t admin_ibq_elem_len;
+ uint8_t admin_ibq_elem_len;
uint8_t admin_obq_elem_len;
uint16_t max_pqi_dev_reset_tmo;
uint8_t res[2];
@@ -96,7 +90,7 @@
uint64_t admin_obq_elem_array_addr;
uint64_t admin_ibq_ci_addr;
uint64_t admin_obq_pi_addr;
- uint32_t admin_q_param;
+ uint32_t admin_q_param;
uint8_t res3[4];
uint32_t pqi_dev_err;
uint8_t res4[4];
@@ -110,17 +104,17 @@
* IOA controller registers
* Mapped in PCIe BAR 0.
*/
-
+
struct ioa_registers {
- uint8_t res1[0x18];
+ uint8_t res1[0x18];
uint32_t host_to_ioa_db_mask_clr; /* 18h */
- uint8_t res2[4];
+ uint8_t res2[4];
uint32_t host_to_ioa_db; /* 20h */
uint8_t res3[4];
uint32_t host_to_ioa_db_clr; /* 28h */
uint8_t res4[8];
uint32_t ioa_to_host_glob_int_mask; /* 34h */
- uint8_t res5[0x64];
+ uint8_t res5[0x64];
uint32_t ioa_to_host_db; /* 9Ch */
uint32_t ioa_to_host_db_clr; /* A0h */
uint8_t res6[4];
@@ -191,10 +185,10 @@
uint32_t all_bits;
}pqi_reset_reg_t;
-/* Memory descriptor for DMA memory allocation */
+/* Memory descriptor for DMA memory allocation */
typedef struct dma_mem {
void *virt_addr;
- dma_addr_t dma_addr;
+ dma_addr_t dma_addr;
uint32_t size;
uint32_t align;
char *tag;
@@ -228,9 +222,10 @@
};
/* lock-free stack used to push and pop the tag used for IO request */
typedef struct lockless_stack {
- uint32_t *next_index_array;
- uint32_t num_elements;
- volatile union head_list head OS_ATTRIBUTE_ALIGNED(8);
+ uint32_t *next_index_array;
+ uint32_t max_elem; /* No. of total elements */
+ uint32_t num_elem; /* No. of present elements */
+ volatile union head_list head OS_ATTRIBUTE_ALIGNED(8);
}lockless_stack_t;
#endif /* LOCKFREE_STACK */
@@ -254,12 +249,12 @@
uint64_t addr; /* !< Bytes 0-7. The starting 64-bit memory byte address of the data block. */
uint32_t length; /* !< Bytes 8-11. The length in bytes of the data block. Set to 0x00000000 specifies that no data be transferred. */
uint8_t res[3]; /* !< Bytes 12-14. */
- uint8_t zero : 4; /* !< Byte 15, Bits 0-3. */
- uint8_t type : 4; /* !< Byte 15, Bits 4-7. sgl descriptor type */
+ uint8_t zero : 4; /* !< Byte 15, Bits 0-3. */
+ uint8_t type : 4; /* !< Byte 15, Bits 4-7. sgl descriptor type */
} sg_desc_t;
/* PQI IUs */
-typedef struct iu_header
+typedef struct iu_header
{
uint8_t iu_type;
uint8_t comp_feature;
@@ -278,7 +273,7 @@
struct {
uint8_t res2[33]; /* !< Bytes 11-43. function specific */
uint32_t buf_size; /* !< Bytes 44-47. size in bytes of the Data-In/Out Buffer */
- sg_desc_t sg_desc; /* !< Bytes 48-63. SGL */
+ sg_desc_t sg_desc; /* !< Bytes 48-63. SGL */
} OS_ATTRIBUTE_PACKED general_func;
struct {
@@ -357,15 +352,15 @@
typedef struct pqi_event_config_request {
iu_header_t header;
- uint16_t response_queue_id; /* specifies the OQ where the response
- IU is to be delivered */
- uint8_t work_area[2]; /* reserved for driver use */
- uint16_t request_id;
+ uint16_t response_queue_id; /* specifies the OQ where the response
+ IU is to be delivered */
+ uint8_t work_area[2]; /* reserved for driver use */
+ uint16_t request_id;
union {
- uint16_t reserved; /* Report event config iu */
+ uint16_t reserved; /* Report event config iu */
uint16_t global_event_oq_id; /* Set event config iu */
}iu_specific;
- uint32_t buffer_length;
+ uint32_t buffer_length;
sg_desc_t sg_desc;
}pqi_event_config_request_t;
#if 0
@@ -380,9 +375,9 @@
sg_desc_t sg_desc;
}pqi_set_event_config_request_t;
#endif
-
+
/* Report/Set event config data-in/data-out buffer structure */
-
+
#define PQI_MAX_EVENT_DESCRIPTORS 255
struct pqi_event_descriptor {
@@ -390,7 +385,7 @@
uint8_t reserved;
uint16_t oq_id;
};
-
+
typedef struct pqi_event_config {
uint8_t reserved[2];
uint8_t num_event_descriptors;
@@ -410,7 +405,7 @@
}pqi_management_response_t;
/*Event response IU*/
typedef struct pqi_event_response {
- iu_header_t header;
+ iu_header_t header;
uint16_t reserved1;
uint8_t work_area[2];
uint8_t event_type;
@@ -423,7 +418,7 @@
/*event acknowledge IU*/
typedef struct pqi_event_acknowledge_request {
- iu_header_t header;
+ iu_header_t header;
uint16_t reserved1;
uint8_t work_area[2];
uint8_t event_type;
@@ -439,22 +434,114 @@
uint32_t additional_event_id;
};
+typedef struct pqi_vendor_general_request {
+ iu_header_t header;
+ uint16_t response_queue_id;
+ uint8_t work_area[2];
+ uint16_t request_id;
+ uint16_t function_code;
+ union {
+ struct {
+ uint16_t first_section;
+ uint16_t last_section;
+ uint8_t reserved1[48];
+ } OS_ATTRIBUTE_PACKED config_table_update;
+
+ struct {
+ uint64_t buffer_address;
+ uint32_t buffer_length;
+ uint8_t reserved2[40];
+ } OS_ATTRIBUTE_PACKED ofa_memory_allocation;
+ } data;
+} OS_ATTRIBUTE_PACKED pqi_vendor_general_request_t;
+
+typedef struct pqi_vendor_general_response {
+ iu_header_t header;
+ uint16_t reserved1;
+ uint8_t work_area[2];
+ uint16_t request_id;
+ uint16_t function_code;
+ uint16_t status;
+ uint8_t reserved2[2];
+} OS_ATTRIBUTE_PACKED pqi_vendor_general_response_t;
typedef struct op_q_params
{
- uint8_t fn_code;
- uint16_t qid;
+ uint8_t fn_code;
+ uint16_t qid;
uint16_t num_elem;
uint16_t elem_len;
uint16_t int_msg_num;
-
+
} OS_ATTRIBUTE_PACKED op_q_params;
+/* "Fixed Format Sense Data" (0x70 or 0x71) (Table 45 in SPC5) */
+typedef struct sense_data_fixed {
+ uint8_t response_code : 7; // Byte 0, 0x70 or 0x71
+ uint8_t valid : 1; // Byte 0, bit 7
+ uint8_t byte_1; // Byte 1
+ uint8_t sense_key : 4; // Byte 2, bit 0-3 (Key)
+ uint8_t byte_2_other : 4; // Byte 2, bit 4-7
+ uint32_t information; // Byte 3-6, big-endian like block # in CDB
+ uint8_t addtnl_length; // Byte 7
+ uint8_t cmd_specific[4]; // Byte 8-11
+ uint8_t sense_code; // Byte 12 (ASC)
+ uint8_t sense_qual; // Byte 13 (ASCQ)
+ uint8_t fru_code; // Byte 14
+ uint8_t sense_key_specific[3]; // Byte 15-17
+ uint8_t addtnl_sense[1]; // Byte 18+
+} OS_ATTRIBUTE_PACKED sense_data_fixed_t;
+
+
+/* Generic Sense Data Descriptor (Table 29 in SPC5) */
+typedef struct descriptor_entry
+{
+ uint8_t desc_type; // Byte 9/0
+ uint8_t desc_type_length; // Byte 10/1
+ union
+ {
+ /* Sense data descriptor specific */
+ uint8_t bytes[1];
+
+ /* Information (Type 0) (Table 31 in SPC5) */
+ struct {
+ uint8_t byte_2_rsvd : 7; // Byte 11/2
+ uint8_t valid : 1; // Byte 11/2, bit 7
+ uint8_t byte_3; // Byte 12/3
+ uint8_t information[8]; // Byte 13-20/4-11
+ } OS_ATTRIBUTE_PACKED type_0;
+
+ }u;
+} OS_ATTRIBUTE_PACKED descriptor_entry_t;
+
+/* "Descriptor Format Sense Data" (0x72 or 0x73) (Table 28 in SPC5) */
+typedef struct sense_data_descriptor {
+ uint8_t response_code : 7; // Byte 0, 0x72 or 0x73
+ uint8_t byte_0_rsvd: 1; // Byte 0, bit 7
+ uint8_t sense_key : 4; // Byte 1, bit 0-3 (Key)
+ uint8_t byte_1_other : 4; // Byte 1, bit 4-7
+ uint8_t sense_code; // Byte 2 (ASC)
+ uint8_t sense_qual; // Byte 3 (ASCQ)
+ uint8_t byte4_6[3]; // Byte 4-6
+ uint8_t more_length; // Byte 7
+ descriptor_entry_t descriptor_list; // Bytes 8+
+
+} OS_ATTRIBUTE_PACKED sense_data_descriptor_t;
+
+typedef union sense_data_u
+{
+ sense_data_fixed_t fixed_format;
+ sense_data_descriptor_t descriptor_format;
+ uint8_t data[256];
+} sense_data_u_t;
+
+
+
-/* Driver will use this structure to interpret the error
+/* Driver will use this structure to interpret the error
info element returned from failed requests */
typedef struct raid_path_error_info_elem {
- uint8_t data_in_result; /* !< Byte 0. See SOP spec Table 77. */
+ uint8_t data_in_result; /* !< Byte 0. See SOP spec Table 77. */
uint8_t data_out_result; /* !< Byte 1. See SOP spec Table 78. */
uint8_t reserved[3]; /* !< Bytes 2-4. */
uint8_t status; /* !< Byte 5. See SAM-5 specification "Status" codes Table 40. Defined in Storport.h */
@@ -463,29 +550,33 @@
uint16_t resp_data_len; /* !< Bytes 10-11. See SOP specification table 79. */
uint32_t data_in_transferred; /* !< Bytes 12-15. If "data_in_result = 0x01 (DATA_IN BUFFER UNDERFLOW)", indicates the number of contiguous bytes starting with offset zero in Data-In buffer else Ignored. */
uint32_t data_out_transferred; /* !< Bytes 16-19. If "data_out_result = 0x01 (DATA_OUT BUFFER UNDERFLOW)", Indicates the number of contiguous bytes starting with offset zero in Data-Out buffer else Ignored. */
- uint8_t data[256]; /* !< Bytes 20-275. Response Data buffer or Sense Data buffer but not both. */
+ union
+ {
+ sense_data_u_t sense_data;
+ uint8_t data[256]; /* !< Bytes 20-275. Response Data buffer or Sense Data buffer but not both. */
+ };
}OS_ATTRIBUTE_PACKED raid_path_error_info_elem_t;
#define PQI_ERROR_BUFFER_ELEMENT_LENGTH sizeof(raid_path_error_info_elem_t)
-typedef enum error_data_present
+typedef enum error_data_present
{
- DATA_PRESENT_NO_DATA = 0, /* !< No data present in Data buffer. */
+ DATA_PRESENT_NO_DATA = 0, /* !< No data present in Data buffer. */
DATA_PRESENT_RESPONSE_DATA = 1, /* !< Response data is present in Data buffer. */
DATA_PRESENT_SENSE_DATA = 2 /* !< Sense data is present in Data buffer. */
} error_data_present_t;
-typedef struct aio_path_error_info_elem
+typedef struct aio_path_error_info_elem
{
uint8_t status; /* !< Byte 0. See SAM-5 specification "SCSI Status" codes Table 40. Defined in Storport.h */
- uint8_t service_resp; /* !< Byte 1. SCSI Service Response. */
+ uint8_t service_resp; /* !< Byte 1. SCSI Service Response. */
uint8_t data_pres; /* !< Byte 2. Bits [7:2] reserved. Bits [1:0] - 0=No data, 1=Response data, 2=Sense data. */
- uint8_t reserved1; /* !< Byte 3. Reserved. */
+ uint8_t reserved1; /* !< Byte 3. Reserved. */
uint32_t resd_count; /* !< Bytes 4-7. The residual data length in bytes. Need the original transfer size and if Status is OverRun or UnderRun. */
- uint16_t data_len; /* !< Bytes 8-9. The amount of Sense data or Response data returned in Response/Sense Data buffer. */
+ uint16_t data_len; /* !< Bytes 8-9. The amount of Sense data or Response data returned in Response/Sense Data buffer. */
uint16_t reserved2; /* !< Bytes 10. Reserved. */
uint8_t data[256]; /* !< Bytes 11-267. Response data buffer or Sense data buffer but not both. */
- uint8_t padding[8]; /* !< Bytes 268-275. Padding to make AIO_PATH_ERROR_INFO_ELEMENT = RAID_PATH_ERROR_INFO_ELEMENT */
+ uint8_t padding[8]; /* !< Bytes 268-275. Padding to make AIO_PATH_ERROR_INFO_ELEMENT = RAID_PATH_ERROR_INFO_ELEMENT */
}OS_ATTRIBUTE_PACKED aio_path_error_info_elem_t;
struct init_base_struct {
@@ -495,7 +586,7 @@
uint32_t err_buf_paddr_h; /* upper 32 bits of physical address of error buffer */
uint32_t err_buf_elem_len; /* length of each element in error buffer (in bytes) */
uint32_t err_buf_num_elem; /* number of elements in error buffer */
-}OS_ATTRIBUTE_PACKED;
+}OS_ATTRIBUTE_PACKED;
/* Queue details */
typedef struct ib_queue {
@@ -611,40 +702,42 @@
typedef struct pqisrc_raid_request {
- iu_header_t header;
- uint16_t response_queue_id; /* specifies the OQ where the response
+ iu_header_t header;
+ uint16_t response_queue_id; /* specifies the OQ where the response
IU is to be delivered */
- uint8_t work_area[2]; /* reserved for driver use */
- uint16_t request_id;
- uint16_t nexus_id;
- uint32_t buffer_length;
- uint8_t lun_number[8];
- uint16_t protocol_spec;
- uint8_t data_direction : 2;
- uint8_t partial : 1;
- uint8_t reserved1 : 4;
- uint8_t fence : 1;
- uint16_t error_index;
- uint8_t reserved2;
- uint8_t task_attribute : 3;
- uint8_t command_priority : 4;
- uint8_t reserved3 : 1;
- uint8_t reserved4 : 2;
- uint8_t additional_cdb_bytes_usage : 3;
- uint8_t reserved5 : 3;
- uint8_t cdb[16];
- uint8_t additional_cdb_bytes[16];
- sgt_t sg_descriptors[4];
-}OS_ATTRIBUTE_PACKED pqisrc_raid_req_t;
-
-
-typedef struct pqi_tmf_req {
+ uint8_t work_area[2]; /* reserved for driver use */
+ uint16_t request_id;
+ uint16_t nexus_id;
+ uint32_t buffer_length;
+ uint8_t lun_number[8];
+ uint16_t protocol_spec;
+ uint8_t data_direction : 2;
+ uint8_t partial : 1;
+ uint8_t reserved1 : 4;
+ uint8_t fence : 1;
+ uint16_t error_index;
+ uint8_t reserved2;
+ uint8_t task_attribute : 3;
+ uint8_t command_priority : 4;
+ uint8_t reserved3 : 1;
+ uint8_t reserved4 : 2;
+ uint8_t additional_cdb_bytes_usage : 3;
+ uint8_t reserved5 : 3;
+ uint8_t cdb[16];
+ uint8_t reserved[12];
+ uint32_t timeout_in_sec;
+ sgt_t sg_descriptors[4];
+} OS_ATTRIBUTE_PACKED pqisrc_raid_req_t;
+
+
+typedef struct pqi_raid_tmf_req {
iu_header_t header;
uint16_t resp_qid;
uint8_t work_area[2];
uint16_t req_id;
uint16_t nexus;
- uint8_t res1[4];
+ uint8_t res1[2];
+ uint16_t timeout_in_sec;
uint8_t lun[8];
uint16_t protocol_spec;
uint16_t obq_id_to_manage;
@@ -652,8 +745,22 @@
uint8_t tmf;
uint8_t res2 : 7;
uint8_t fence : 1;
-}OS_ATTRIBUTE_PACKED pqi_tmf_req_t;
-
+} OS_ATTRIBUTE_PACKED pqi_raid_tmf_req_t;
+
+typedef struct pqi_aio_tmf_req {
+ iu_header_t header;
+ uint16_t resp_qid;
+ uint8_t work_area[2];
+ uint16_t req_id;
+ uint16_t res1;
+ uint32_t nexus;
+ uint8_t lun[8];
+ uint32_t req_id_to_manage;
+ uint8_t tmf;
+ uint8_t res2 : 7;
+ uint8_t fence : 1;
+ uint16_t error_idx;
+}OS_ATTRIBUTE_PACKED pqi_aio_tmf_req_t;
typedef struct pqi_tmf_resp {
iu_header_t header;
@@ -669,7 +776,7 @@
struct pqi_io_response {
iu_header_t header;
uint16_t queue_id;
- uint8_t work_area[2];
+ uint8_t work_area[2];
uint16_t request_id;
uint16_t error_index;
uint8_t reserved[4];
@@ -685,100 +792,156 @@
typedef struct pqi_scsi_device {
device_type_t devtype; /* as reported by INQUIRY command */
- uint8_t device_type; /* as reported by
+ uint8_t device_type; /* as reported by
BMIC_IDENTIFY_PHYSICAL_DEVICE - only
valid for devtype = TYPE_DISK */
- int bus;
- int target;
- int lun;
- uint8_t flags;
- uint8_t scsi3addr[8];
+ int bus;
+ int target;
+ int lun;
+ uint8_t flags;
+ uint8_t scsi3addr[8];
uint64_t wwid;
- uint8_t is_physical_device : 1;
- uint8_t is_external_raid_device : 1;
- uint8_t target_lun_valid : 1;
- uint8_t expose_device : 1;
- uint8_t no_uld_attach : 1;
- uint8_t is_obdr_device : 1;
- uint8_t aio_enabled : 1;
- uint8_t device_gone : 1;
- uint8_t new_device : 1;
- uint8_t volume_offline : 1;
- uint8_t vendor[8]; /* bytes 8-15 of inquiry data */
- uint8_t model[16]; /* bytes 16-31 of inquiry data */
+ uint8_t is_physical_device : 1;
+ uint8_t is_external_raid_device : 1;
+ uint8_t target_lun_valid : 1;
+ uint8_t expose_device : 1;
+ uint8_t no_uld_attach : 1;
+ uint8_t is_obdr_device : 1;
+ uint8_t aio_enabled : 1;
+ uint8_t device_gone : 1;
+ uint8_t new_device : 1;
+ uint8_t volume_offline : 1;
+ uint8_t scsi_rescan : 1;
+ uint8_t vendor[8]; /* bytes 8-15 of inquiry data */
+ uint8_t model[16]; /* bytes 16-31 of inquiry data */
uint64_t sas_address;
- uint8_t raid_level;
+ uint8_t raid_level;
uint16_t queue_depth; /* max. queue_depth for this device */
uint16_t advertised_queue_depth;
uint32_t ioaccel_handle;
- uint8_t volume_status;
- uint8_t active_path_index;
- uint8_t path_map;
- uint8_t bay;
- uint8_t box[8];
+ uint8_t volume_status;
+ uint8_t active_path_index;
+ uint8_t path_map;
+ uint8_t bay;
+ uint8_t box[8];
uint16_t phys_connector[8];
- int offload_config; /* I/O accel RAID offload configured */
- int offload_enabled; /* I/O accel RAID offload enabled */
- int offload_enabled_pending;
- int offload_to_mirror; /* Send next I/O accelerator RAID
- offload request to mirror drive. */
+ int offload_config; /* I/O accel RAID offload configured */
+ int offload_enabled; /* I/O accel RAID offload enabled */
+ int offload_enabled_pending;
+ int offload_to_mirror; /* Send next I/O accelerator RAID
+ offload request to mirror drive. */
struct raid_map *raid_map; /* I/O accelerator RAID map */
- int reset_in_progress;
+
+ int reset_in_progress;
+ int logical_unit_number;
os_dev_info_t *dip; /*os specific scsi device information*/
- boolean_t invalid;
+ boolean_t invalid;
+ boolean_t path_destroyed;
+ boolean_t firmware_queue_depth_set;
+ OS_ATOMIC64_T active_requests;
}pqi_scsi_dev_t;
+typedef struct pqisrc_softstate pqisrc_softstate_t;
+typedef struct pqi_firmware_feature pqi_firmware_feature_t;
+typedef void (*feature_status_fn)(pqisrc_softstate_t *softs,
+ pqi_firmware_feature_t *firmware_feature);
+
+struct pqi_firmware_feature {
+ char *feature_name;
+ unsigned int feature_bit;
+ boolean_t supported;
+ boolean_t enabled;
+ feature_status_fn feature_status;
+};
+
+struct pqi_conf_table_firmware_features {
+ struct pqi_conf_table_section_header header;
+ uint16_t num_elements;
+ uint8_t features_supported[];
+};
+
+struct pqi_conf_table_section_info {
+ struct pqisrc_softstate *softs;
+ void *section;
+ uint32_t section_offset;
+ void *section_addr;
+};
struct sense_header_scsi { /* See SPC-3 section 4.5 */
- uint8_t response_code; /* permit: 0x0, 0x70, 0x71, 0x72, 0x73 */
- uint8_t sense_key;
- uint8_t asc;
- uint8_t ascq;
- uint8_t byte4;
- uint8_t byte5;
- uint8_t byte6;
- uint8_t additional_length; /* always 0 for fixed sense format */
+ uint8_t response_code; /* permit: 0x0, 0x70, 0x71, 0x72, 0x73 */
+ uint8_t sense_key;
+ uint8_t asc;
+ uint8_t ascq;
+ uint8_t byte4;
+ uint8_t byte5;
+ uint8_t byte6;
+ uint8_t additional_length; /* always 0 for fixed sense format */
}OS_ATTRIBUTE_PACKED;
typedef struct report_lun_header {
- uint32_t list_length;
- uint8_t extended_response;
- uint8_t reserved[3];
+ uint32_t list_length;
+ uint8_t extended_response;
+ uint8_t reserved[3];
}OS_ATTRIBUTE_PACKED reportlun_header_t;
typedef struct report_lun_ext_entry {
- uint8_t lunid[8];
- uint64_t wwid;
- uint8_t device_type;
- uint8_t device_flags;
- uint8_t lun_count; /* number of LUNs in a multi-LUN device */
- uint8_t redundant_paths;
- uint32_t ioaccel_handle;
+ uint8_t lunid[8];
+ uint64_t wwid;
+ uint8_t device_type;
+ uint8_t device_flags;
+ uint8_t lun_count; /* number of LUNs in a multi-LUN device */
+ uint8_t redundant_paths;
+ uint32_t ioaccel_handle;
}OS_ATTRIBUTE_PACKED reportlun_ext_entry_t;
typedef struct report_lun_data_ext {
- reportlun_header_t header;
- reportlun_ext_entry_t lun_entries[1];
+ reportlun_header_t header;
+ reportlun_ext_entry_t lun_entries[1];
}OS_ATTRIBUTE_PACKED reportlun_data_ext_t;
+typedef struct reportlun_queue_depth_entry {
+ uint8_t logical_unit_num;
+ uint8_t reserved_1:6;
+ uint8_t address:2;
+ uint8_t box_bus_num;
+ uint8_t reserved_2:6;
+ uint8_t mode:2;
+ uint8_t bus_ident;
+
+ /* Byte 5 */
+ uint8_t queue_depth:7;
+ uint8_t multiplier:1;
+
+ /* Byte 6 */
+ uint8_t drive_type_mix_flags;
+ uint8_t level_2_bus:6;
+ uint8_t level_2_mode:2;
+ uint8_t unused_bytes[16];
+}OS_ATTRIBUTE_PACKED reportlun_queue_depth_entry_t;
+
+typedef struct reportlun_queue_depth_data {
+ reportlun_header_t header;
+ reportlun_queue_depth_entry_t lun_entries[1]; /* lun list with Queue Depth values for each lun */
+}OS_ATTRIBUTE_PACKED reportlun_queue_depth_data_t;
+
typedef struct raidmap_data {
- uint32_t ioaccel_handle;
- uint8_t xor_mult[2];
- uint8_t reserved[2];
+ uint32_t ioaccel_handle;
+ uint8_t xor_mult[2];
+ uint8_t reserved[2];
}OS_ATTRIBUTE_PACKED raidmap_data_t;
typedef struct raid_map {
uint32_t structure_size; /* size of entire structure in bytes */
uint32_t volume_blk_size; /* bytes / block in the volume */
uint64_t volume_blk_cnt; /* logical blocks on the volume */
- uint8_t phys_blk_shift; /* shift factor to convert between
+ uint8_t phys_blk_shift; /* shift factor to convert between
units of logical blocks and physical
disk blocks */
- uint8_t parity_rotation_shift; /* shift factor to convert between units
+ uint8_t parity_rotation_shift; /* shift factor to convert between units
of logical stripes and physical
stripes */
uint16_t strip_size; /* blocks used on each disk / stripe */
@@ -792,8 +955,8 @@
group) */
uint16_t flags;
uint16_t data_encryption_key_index;
- uint8_t reserved[16];
- raidmap_data_t dev_data[RAID_MAP_MAX_ENTRIES];
+ uint8_t reserved[16];
+ raidmap_data_t dev_data[RAID_MAP_MAX_ENTRIES];
}OS_ATTRIBUTE_PACKED pqisrc_raid_map_t;
@@ -813,79 +976,79 @@
}OS_ATTRIBUTE_PACKED bmic_ident_ctrl_t;
typedef struct bmic_identify_physical_device {
- uint8_t scsi_bus; /* SCSI Bus number on controller */
- uint8_t scsi_id; /* SCSI ID on this bus */
+ uint8_t scsi_bus; /* SCSI Bus number on controller */
+ uint8_t scsi_id; /* SCSI ID on this bus */
uint16_t block_size; /* sector size in bytes */
uint32_t total_blocks; /* number for sectors on drive */
uint32_t reserved_blocks; /* controller reserved (RIS) */
- uint8_t model[40]; /* Physical Drive Model */
- uint8_t serial_number[40]; /* Drive Serial Number */
- uint8_t firmware_revision[8]; /* drive firmware revision */
- uint8_t scsi_inquiry_bits; /* inquiry byte 7 bits */
- uint8_t compaq_drive_stamp; /* 0 means drive not stamped */
- uint8_t last_failure_reason;
- uint8_t flags;
- uint8_t more_flags;
- uint8_t scsi_lun; /* SCSI LUN for phys drive */
- uint8_t yet_more_flags;
- uint8_t even_more_flags;
+ uint8_t model[40]; /* Physical Drive Model */
+ uint8_t serial_number[40]; /* Drive Serial Number */
+ uint8_t firmware_revision[8]; /* drive firmware revision */
+ uint8_t scsi_inquiry_bits; /* inquiry byte 7 bits */
+ uint8_t compaq_drive_stamp; /* 0 means drive not stamped */
+ uint8_t last_failure_reason;
+ uint8_t flags;
+ uint8_t more_flags;
+ uint8_t scsi_lun; /* SCSI LUN for phys drive */
+ uint8_t yet_more_flags;
+ uint8_t even_more_flags;
uint32_t spi_speed_rules;
- uint8_t phys_connector[2]; /* connector number on controller */
- uint8_t phys_box_on_bus; /* phys enclosure this drive resides */
- uint8_t phys_bay_in_box; /* phys drv bay this drive resides */
+ uint8_t phys_connector[2]; /* connector number on controller */
+ uint8_t phys_box_on_bus; /* phys enclosure this drive resides */
+ uint8_t phys_bay_in_box; /* phys drv bay this drive resides */
uint32_t rpm; /* drive rotational speed in RPM */
- uint8_t device_type; /* type of drive */
- uint8_t sata_version; /* only valid when device_type =
+ uint8_t device_type; /* type of drive */
+ uint8_t sata_version; /* only valid when device_type =
BMIC_DEVICE_TYPE_SATA */
uint64_t big_total_block_count;
uint64_t ris_starting_lba;
uint32_t ris_size;
- uint8_t wwid[20];
- uint8_t controller_phy_map[32];
+ uint8_t wwid[20];
+ uint8_t controller_phy_map[32];
uint16_t phy_count;
- uint8_t phy_connected_dev_type[256];
- uint8_t phy_to_drive_bay_num[256];
+ uint8_t phy_connected_dev_type[256];
+ uint8_t phy_to_drive_bay_num[256];
uint16_t phy_to_attached_dev_index[256];
- uint8_t box_index;
- uint8_t reserved;
+ uint8_t box_index;
+ uint8_t reserved;
uint16_t extra_physical_drive_flags;
- uint8_t negotiated_link_rate[256];
- uint8_t phy_to_phy_map[256];
- uint8_t redundant_path_present_map;
- uint8_t redundant_path_failure_map;
- uint8_t active_path_number;
+ uint8_t negotiated_link_rate[256];
+ uint8_t phy_to_phy_map[256];
+ uint8_t redundant_path_present_map;
+ uint8_t redundant_path_failure_map;
+ uint8_t active_path_number;
uint16_t alternate_paths_phys_connector[8];
- uint8_t alternate_paths_phys_box_on_port[8];
- uint8_t multi_lun_device_lun_count;
- uint8_t minimum_good_fw_revision[8];
- uint8_t unique_inquiry_bytes[20];
- uint8_t current_temperature_degreesC;
- uint8_t temperature_threshold_degreesC;
- uint8_t max_temperature_degreesC;
- uint8_t logical_blocks_per_phys_block_exp;
+ uint8_t alternate_paths_phys_box_on_port[8];
+ uint8_t multi_lun_device_lun_count;
+ uint8_t minimum_good_fw_revision[8];
+ uint8_t unique_inquiry_bytes[20];
+ uint8_t current_temperature_degreesC;
+ uint8_t temperature_threshold_degreesC;
+ uint8_t max_temperature_degreesC;
+ uint8_t logical_blocks_per_phys_block_exp;
uint16_t current_queue_depth_limit;
- uint8_t switch_name[10];
+ uint8_t switch_name[10];
uint16_t switch_port;
- uint8_t alternate_paths_switch_name[40];
- uint8_t alternate_paths_switch_port[8];
+ uint8_t alternate_paths_switch_name[40];
+ uint8_t alternate_paths_switch_port[8];
uint16_t power_on_hours;
uint16_t percent_endurance_used;
- uint8_t drive_authentication;
- uint8_t smart_carrier_authentication;
- uint8_t smart_carrier_app_fw_version;
- uint8_t smart_carrier_bootloader_fw_version;
- uint8_t encryption_key_name[64];
+ uint8_t drive_authentication;
+ uint8_t smart_carrier_authentication;
+ uint8_t smart_carrier_app_fw_version;
+ uint8_t smart_carrier_bootloader_fw_version;
+ uint8_t encryption_key_name[64];
uint32_t misc_drive_flags;
uint16_t dek_index;
uint8_t padding[112];
}OS_ATTRIBUTE_PACKED bmic_ident_physdev_t;
typedef struct pqisrc_bmic_flush_cache {
- uint8_t disable_cache;
- uint8_t power_action;
- uint8_t ndu_flush_cache;
- uint8_t halt_event;
- uint8_t reserved[28];
+ uint8_t disable_cache;
+ uint8_t power_action;
+ uint8_t ndu_flush_cache;
+ uint8_t halt_event;
+ uint8_t reserved[28];
} OS_ATTRIBUTE_PACKED pqisrc_bmic_flush_cache_t;
/* for halt_event member of pqisrc_bmic_flush_cache_t */
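
The flush-cache block above is a fixed 32-byte command payload; halt_event selects what the controller does after the flush, and PQISRC_RESTART = 4 is the only enum value visible in this hunk. A hedged sketch of populating it, where the helper name and the zeroing policy are illustrative rather than the driver's actual call path:

	#include <stdint.h>
	#include <string.h>

	struct flush_cache_req {
		uint8_t disable_cache;
		uint8_t power_action;
		uint8_t ndu_flush_cache;
		uint8_t halt_event;      /* e.g. PQISRC_RESTART (4), per the enum above */
		uint8_t reserved[28];
	} __attribute__((packed));

	static void prepare_flush_for_restart(struct flush_cache_req *req)
	{
		memset(req, 0, sizeof(*req)); /* reserved bytes go to firmware zeroed */
		req->halt_event = 4;          /* PQISRC_RESTART */
	}
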
@@ -897,7 +1060,6 @@
PQISRC_RESTART = 4
};
-struct pqisrc_softstate;
struct request_container_block;
typedef void (*success_callback)(struct pqisrc_softstate *, struct request_container_block *);
typedef void (*error_callback)(struct pqisrc_softstate *, struct request_container_block *, uint16_t);
@@ -921,14 +1083,20 @@
uint32_t ioaccel_handle;
boolean_t encrypt_enable;
struct pqi_enc_info enc_info;
+ ib_queue_t *req_q;
+ int path;
+ int resp_qid;
+ boolean_t req_pending;
+ boolean_t timedout;
+ int tm_req;
+ int aio_retry;
int cm_flags;
void *cm_data; /* pointer to data in kernel space */
bus_dmamap_t cm_datamap;
uint32_t nseg;
union ccb *cm_ccb;
sgt_t *sgt; /* sg table */
- int resp_qid;
- boolean_t req_pending;
+
}rcb_t;
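
The new rcb fields track per-request state for the accelerated I/O path: req_q and path record where the command was issued, while timedout, tm_req, and aio_retry let the completion side tell apart a task-management request, a timed-out command, and a bounded AIO retry. A compact sketch of the retry decision such fields enable (field names mirror the hunk; the policy shown is illustrative, not the driver's):

	#include <stdbool.h>

	struct mini_rcb {
		bool req_pending;
		bool timedout;
		int  aio_retry;     /* attempts already made on the ioaccel path */
	};

	/* Retry on the accelerated path only while under budget and not timed out. */
	static bool should_retry_aio(struct mini_rcb *rcb, int max_retry)
	{
		if (rcb->timedout)
			return false; /* timed-out commands take the error path instead */
		return rcb->aio_retry++ < max_retry;
	}
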
typedef struct tid_pool {
@@ -936,12 +1104,12 @@
int index;
}tid_pool_t;
-typedef struct pqisrc_softstate {
- OS_SPECIFIC_T os_specific;
- struct ioa_registers *ioa_reg;
- struct pqi_registers *pqi_reg;
- char *pci_mem_base_vaddr;
- PCI_ACC_HANDLE_T pci_mem_handle;
+struct pqisrc_softstate {
+ OS_SPECIFIC_T os_specific;
+ struct ioa_registers *ioa_reg;
+ struct pqi_registers *pqi_reg;
+ uint8_t *pci_mem_base_vaddr;
+ PCI_ACC_HANDLE_T pci_mem_handle;
struct pqi_cap pqi_cap;
struct pqi_pref_settings pref_settings;
char fw_version[11];
@@ -951,7 +1119,7 @@
uint16_t subvendid; /* sub vendor id */
uint16_t devid; /* device id */
uint16_t subsysid; /* sub system id */
- controller_state_t ctlr_state;
+ controller_state_t ctlr_state;
struct dma_mem err_buf_dma_mem;
struct dma_mem admin_queue_dma_mem;
struct dma_mem op_ibq_dma_mem;
@@ -979,7 +1147,7 @@
unsigned max_sg_per_iu;
uint8_t ib_spanning_supported : 1;
uint8_t ob_spanning_supported : 1;
- pqi_event_config_t event_config;
+ pqi_event_config_t event_config;
struct pqi_event pending_events[PQI_NUM_SUPPORTED_EVENTS];
int intr_type;
int intr_count;
@@ -991,19 +1159,18 @@
#else
lockless_stack_t taglist;
#endif /* LOCKFREE_STACK */
- boolean_t devlist_lockcreated;
+ boolean_t devlist_lockcreated;
OS_LOCK_T devlist_lock OS_ATTRIBUTE_ALIGNED(8);
char devlist_lock_name[LOCKNAME_SIZE];
pqi_scsi_dev_t *device_list[PQI_MAX_DEVICES][PQI_MAX_MULTILUN];
OS_SEMA_LOCK_T scan_lock;
uint8_t lun_count[PQI_MAX_DEVICES];
- uint64_t target_sas_addr[PQI_MAX_EXT_TARGETS];
- OS_ATOMIC64_T num_intrs;
- uint64_t prev_num_intrs;
+ uint64_t target_sas_addr[PQI_MAX_EXT_TARGETS];
uint64_t prev_heartbeat_count;
uint64_t *heartbeat_counter_abs_addr;
uint64_t heartbeat_counter_off;
- uint64_t num_heartbeats_requested;
+ uint8_t *fw_features_section_abs_addr;
+ uint64_t fw_features_section_off;
uint32_t bus_id;
uint32_t device_id;
uint32_t func_id;
@@ -1011,7 +1178,21 @@
boolean_t ctrl_online;
uint8_t pqi_reset_quiesce_allowed : 1;
boolean_t ctrl_in_pqi_mode;
- tid_pool_t tid_pool;
-}pqisrc_softstate_t;
+ tid_pool_t tid_pool;
+ uint32_t adapterQDepth;
+ uint32_t dma_mem_consumed;
+ boolean_t timeout_in_passthrough;
+ boolean_t timeout_in_tmf;
+};
+
+typedef struct vpd_logical_volume_status {
+ uint8_t peripheral_info;
+ uint8_t page_code;
+ uint8_t reserved;
+ uint8_t page_length;
+ uint8_t volume_status;
+ uint8_t reserved2[3];
+ uint32_t flags;
+}vpd_volume_status;
#endif
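
The header now defines struct pqisrc_softstate with a plain tag and drops the separate forward declaration the callback typedefs used to need, which implies the pqisrc_softstate_t typedef is established earlier in the header. The C pattern at work, as a self-contained sketch (the file layout and member names here are hypothetical):

	#include <stdint.h>

	struct request_container_block;                     /* forward declaration */
	typedef struct pqisrc_softstate pqisrc_softstate_t; /* typedef before the body */

	/* Pointer parameters only need the incomplete types above. */
	typedef void (*success_cb)(pqisrc_softstate_t *,
	                           struct request_container_block *);

	struct pqisrc_softstate {                           /* full definition later on */
		success_cb sample_cb;                       /* illustrative member */
		uint32_t   bus_id;
	};
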
diff --git a/sys/dev/smartpqi/smartpqi_tag.c b/sys/dev/smartpqi/smartpqi_tag.c
--- a/sys/dev/smartpqi/smartpqi_tag.c
+++ b/sys/dev/smartpqi/smartpqi_tag.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -33,11 +32,12 @@
/*
* Function used to release the tag from taglist.
*/
-void pqisrc_put_tag(pqi_taglist_t *taglist, uint32_t elem)
+void
+pqisrc_put_tag(pqi_taglist_t *taglist, uint32_t elem)
{
OS_ACQUIRE_SPINLOCK(&(taglist->lock));
- /*DBG_FUNC("IN\n");*/
+ DBG_FUNC("IN\n");
ASSERT(taglist->num_elem < taglist->max_elem);
@@ -49,17 +49,18 @@
OS_RELEASE_SPINLOCK(&taglist->lock);
- /*DBG_FUNC("OUT\n");*/
+ DBG_FUNC("OUT\n");
}
/*
* Function used to get an unoccupied tag from the tag list.
*/
-uint32_t pqisrc_get_tag(pqi_taglist_t *taglist)
+uint32_t
+pqisrc_get_tag(pqi_taglist_t *taglist)
{
uint32_t elem = INVALID_ELEM;
- /*DBG_FUNC("IN\n");*/
+/* DBG_FUNC("IN\n");*/
OS_ACQUIRE_SPINLOCK(&taglist->lock);
@@ -73,19 +74,20 @@
OS_RELEASE_SPINLOCK(&taglist->lock);
- /*DBG_FUNC("OUT got %d\n", elem);*/
+/* DBG_FUNC("OUT got %d\n", elem);*/
return elem;
}
/*
* Initialize circular queue implementation of tag list.
*/
-int pqisrc_init_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist,
+int
+pqisrc_init_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist,
uint32_t max_elem)
{
int ret = PQI_STATUS_SUCCESS;
int i = 0;
-
+
DBG_FUNC("IN\n");
taglist->max_elem = max_elem;
@@ -93,34 +95,34 @@
taglist->head = 0;
taglist->tail = 0;
taglist->elem_array = os_mem_alloc(softs,
- (max_elem * sizeof(uint32_t)));
+ (max_elem * sizeof(uint32_t)));
if (!(taglist->elem_array)) {
DBG_FUNC("Unable to allocate memory for taglist\n");
ret = PQI_STATUS_FAILURE;
goto err_out;
}
-
- os_strlcpy(taglist->lockname, "tag_lock", LOCKNAME_SIZE);
- ret = os_init_spinlock(softs, &taglist->lock, taglist->lockname);
- if(ret){
- DBG_ERR("tag lock initialization failed\n");
- taglist->lockcreated=false;
- goto err_lock;
+
+ os_strlcpy(taglist->lockname, "tag_lock", LOCKNAME_SIZE);
+ ret = os_init_spinlock(softs, &taglist->lock, taglist->lockname);
+ if(ret){
+ DBG_ERR("tag lock initialization failed\n");
+ taglist->lockcreated=false;
+ goto err_lock;
}
- taglist->lockcreated = true;
-
+ taglist->lockcreated = true;
+
/* indices 1 to max_elem are considered as valid tags */
for (i=1; i <= max_elem; i++) {
softs->rcb[i].tag = INVALID_ELEM;
pqisrc_put_tag(taglist, i);
}
-
+
DBG_FUNC("OUT\n");
return ret;
err_lock:
- os_mem_free(softs, (char *)taglist->elem_array,
- (taglist->max_elem * sizeof(uint32_t)));
+ os_mem_free(softs, (char *)taglist->elem_array,
+ (taglist->max_elem * sizeof(uint32_t)));
taglist->elem_array = NULL;
err_out:
DBG_FUNC("OUT failed\n");
@@ -130,54 +132,56 @@
/*
* Destroy circular queue implementation of tag list.
*/
-void pqisrc_destroy_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist)
+void
+pqisrc_destroy_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist)
{
DBG_FUNC("IN\n");
- os_mem_free(softs, (char *)taglist->elem_array,
+ os_mem_free(softs, (char *)taglist->elem_array,
(taglist->max_elem * sizeof(uint32_t)));
taglist->elem_array = NULL;
-
- if(taglist->lockcreated==true){
- os_uninit_spinlock(&taglist->lock);
- taglist->lockcreated = false;
- }
-
+
+ if(taglist->lockcreated==true){
+ os_uninit_spinlock(&taglist->lock);
+ taglist->lockcreated = false;
+ }
+
DBG_FUNC("OUT\n");
}
-#else /* LOCKFREE_STACK */
+#else /* LOCKFREE_STACK */
/*
* Initialize circular queue implementation of tag list.
*/
-int pqisrc_init_taglist(pqisrc_softstate_t *softs, lockless_stack_t *stack,
+int
+pqisrc_init_taglist(pqisrc_softstate_t *softs, lockless_stack_t *stack,
uint32_t max_elem)
{
int ret = PQI_STATUS_SUCCESS;
int index = 0;
-
+
DBG_FUNC("IN\n");
-
+
/* indices 1 to max_elem are considered as valid tags */
- stack->num_elements = max_elem + 1;
- stack->head.data = 0;
+ stack->max_elem = max_elem + 1;
+ stack->head.data = 0;
DBG_INFO("Stack head address :%p\n",&stack->head);
-
+
/*Allocate memory for stack*/
stack->next_index_array = (uint32_t*)os_mem_alloc(softs,
- (stack->num_elements * sizeof(uint32_t)));
+ (stack->max_elem * sizeof(uint32_t)));
if (!(stack->next_index_array)) {
DBG_ERR("Unable to allocate memory for stack\n");
ret = PQI_STATUS_FAILURE;
goto err_out;
- }
+ }
/* push all the entries to the stack */
- for (index = 1; index < stack->num_elements ; index++) {
+ for (index = 1; index < stack->max_elem ; index++) {
softs->rcb[index].tag = INVALID_ELEM;
pqisrc_put_tag(stack, index);
}
-
+
DBG_FUNC("OUT\n");
return ret;
err_out:
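
Despite the "circular queue" comments carried over from the spinlock variant, this is the lock-free build: a single array allocation doubles as the whole free list, since next_index_array[i] records which tag sat on top of the stack when i was pushed (the hunk also renames num_elements to max_elem to match the locked variant). The linking, shown single-threaded for clarity (names are illustrative; the real push/pop wrap this in the CAS loops further down):

	#include <stdint.h>

	struct idx_stack {
		uint32_t *next;         /* next[i] = tag under i; 0 = end of list */
		uint32_t  top;          /* 0 = empty; valid tags are 1..max_elem */
	};

	static void idx_push(struct idx_stack *s, uint32_t i)
	{
		s->next[i] = s->top;    /* link the new top to the previous one */
		s->top = i;
	}

	static uint32_t idx_pop(struct idx_stack *s)
	{
		uint32_t i = s->top;

		if (i != 0) {
			s->top = s->next[i];
			s->next[i] = 0; /* mark the slot free, as pqisrc_get_tag does */
		}
		return i;               /* 0 means the stack was empty */
	}
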
@@ -188,39 +192,41 @@
/*
* Destroy circular queue implementation of tag list.
*/
-void pqisrc_destroy_taglist(pqisrc_softstate_t *softs, lockless_stack_t *stack)
+void
+pqisrc_destroy_taglist(pqisrc_softstate_t *softs, lockless_stack_t *stack)
{
DBG_FUNC("IN\n");
-
+
/* de-allocate stack memory */
if (stack->next_index_array) {
os_mem_free(softs,(char*)stack->next_index_array,
- (stack->num_elements * sizeof(uint32_t)));
+ (stack->max_elem * sizeof(uint32_t)));
stack->next_index_array = NULL;
}
-
+
DBG_FUNC("OUT\n");
}
/*
* Function used to release the tag from taglist.
*/
-void pqisrc_put_tag(lockless_stack_t *stack, uint32_t index)
+void
+pqisrc_put_tag(lockless_stack_t *stack, uint32_t index)
{
- union head_list cur_head, new_head;
+ union head_list cur_head, new_head;
DBG_FUNC("IN\n");
DBG_INFO("push tag :%d\n",index);
- if ( index >= stack->num_elements ) {
+ if (index >= stack->max_elem) {
ASSERT(false);
- DBG_ERR("Pushed Invalid index\n"); /* stack full */
+ DBG_INFO("Pushed Invalid index\n"); /* stack full */
return;
}
-
- if ( stack->next_index_array[index] != 0) {
+
+ if (stack->next_index_array[index] != 0) {
ASSERT(false);
- DBG_ERR("Index already present as tag in the stack\n");
+ DBG_INFO("Index already present as tag in the stack\n");
return;
}
@@ -232,8 +238,8 @@
new_head.top.index = index;
/* Create a link to the previous index */
stack->next_index_array[index] = cur_head.top.index;
- }while(OS_ATOMIC64_CAS(&stack->head.data,cur_head.data,new_head.data)
- != cur_head.data);
+ }while(!os_atomic64_cas(&stack->head.data,cur_head.data,new_head.data));
+ stack->num_elem++;
DBG_FUNC("OUT\n");
return;
}
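
The loop condition above changes from comparing the old value returned by OS_ATOMIC64_CAS against cur_head.data to testing a boolean os_atomic64_cas, so the primitive's contract is now "true iff the head was swapped". A portable C11 equivalent of such a primitive, as a sketch (the driver's real implementation presumably wraps the platform's 64-bit compare-and-set, e.g. FreeBSD's atomic_cmpset_64(9)):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Returns true iff *p still held 'expected' and was set to 'desired'. */
	static bool os_atomic64_cas_sketch(_Atomic uint64_t *p,
	                                   uint64_t expected, uint64_t desired)
	{
		return atomic_compare_exchange_strong(p, &expected, desired);
	}

Also worth noting: the new stack->num_elem counter is updated with a plain increment/decrement outside the CAS loop, so under concurrent put/get it is best read as advisory bookkeeping rather than an exact count.
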
@@ -241,7 +247,8 @@
/*
* Function used to get an unoccupied tag from the tag list.
*/
-uint32_t pqisrc_get_tag(lockless_stack_t *stack)
+uint32_t
+pqisrc_get_tag(lockless_stack_t *stack)
{
union head_list cur_head, new_head;
@@ -254,9 +261,9 @@
new_head.top.seq_no = cur_head.top.seq_no + 1;
/* update the index at the top of the stack with the next index */
new_head.top.index = stack->next_index_array[cur_head.top.index];
- }while(OS_ATOMIC64_CAS(&stack->head.data,cur_head.data,new_head.data)
- != cur_head.data);
+ }while(!os_atomic64_cas(&stack->head.data,cur_head.data,new_head.data));
stack->next_index_array[cur_head.top.index] = 0;
+ stack->num_elem--;
DBG_INFO("pop tag: %d\n",cur_head.top.index);
DBG_FUNC("OUT\n");
