git: 9fac68fc3853 - main - Newly added features and bug fixes in the latest Microchip SmartPQI driver

Warner Losh imp at FreeBSD.org
Fri May 28 22:42:43 UTC 2021


The branch main has been updated by imp:

URL: https://cgit.FreeBSD.org/src/commit/?id=9fac68fc3853b696c8479bb3a8181d62cb9f59c9

commit 9fac68fc3853b696c8479bb3a8181d62cb9f59c9
Author:     PAPANI SRIKANTH <papani.srikanth at microchip.com>
AuthorDate: 2021-05-28 06:17:56 +0000
Commit:     Warner Losh <imp at FreeBSD.org>
CommitDate: 2021-05-28 22:40:23 +0000

    Newly added features and bug fixes in the latest Microchip SmartPQI driver
    
    It includes:
    
    1) Added the new TMF (task management function) feature.
    2) Added new Huawei and Inspur PCI IDs.
    3) Fixed smartpqi driver hangs with ZFS pools (Z-Pool) on FreeBSD 12.1.
    4) Fixed kernel dmesg flooding when ioctls are issued while the controller is offline.
    5) Avoided unnecessary host memory allocation for rcb SG buffers.
    6) Fixed race conditions while accessing the internal rcb structure.
    7) Fixed logical volumes exposing two different names to the OS, caused by system memory being overwritten with stale DMA data.
    8) Fixed dynamic unloading of the smartpqi driver.
    9) Added a device_shutdown callback instead of the deprecated shutdown_final kernel event in the smartpqi driver (see the sketch after this list).
    10) Fixed an OS crash during physical drive hot removal under heavy I/O.
    11) Fixed an OS crash when the controller locks up or goes offline under heavy I/O.
    12) Fixed Coverity issues in the smartpqi driver.
    13) Fixed a system crash while creating and deleting logical volumes in a continuous loop.
    14) Fixed the expanded volume size not being exposed to the OS.
    15) Added HC3 PCI IDs.
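    
    A minimal sketch of the item 9 approach (illustrative only, not the
    committed code; the function name smartpqi_shutdown and its body here
    are assumptions): a newbus device_shutdown method is supplied through
    the driver's method table rather than a shutdown_final event handler,
    so the teardown runs as part of normal device shutdown ordering.
    
        #include <sys/param.h>
        #include <sys/kernel.h>
        #include <sys/module.h>
        #include <sys/bus.h>
    
        static int
        smartpqi_shutdown(device_t dev)
        {
                /*
                 * Quiesce the controller (flush its write cache, stop
                 * accepting new requests) before power-off or reboot.
                 */
                return (0);
        }
    
        static device_method_t smartpqi_methods[] = {
                /* probe/attach/detach entries omitted for brevity */
                /* device_shutdown replaces the deprecated shutdown_final hook */
                DEVMETHOD(device_shutdown, smartpqi_shutdown),
                DEVMETHOD_END
        };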
    
    Reviewed by:            Scott Benesh (microsemi), Murthy Bhat (microsemi), imp
    Differential Revision:  https://reviews.freebsd.org/D30182
    
    Sponsored by:           Netflix
---
 sys/dev/smartpqi/smartpqi_cam.c        | 586 ++++++++++++++++++-----------
 sys/dev/smartpqi/smartpqi_cmd.c        |  11 +-
 sys/dev/smartpqi/smartpqi_defines.h    | 459 +++++++++++++++-------
 sys/dev/smartpqi/smartpqi_discovery.c  | 668 +++++++++++++++++++++------------
 sys/dev/smartpqi/smartpqi_event.c      |  94 ++---
 sys/dev/smartpqi/smartpqi_helper.c     | 289 +++++++++++---
 sys/dev/smartpqi/smartpqi_includes.h   |   6 +-
 sys/dev/smartpqi/smartpqi_init.c       | 455 ++++++++++++++++------
 sys/dev/smartpqi/smartpqi_intr.c       | 151 ++++----
 sys/dev/smartpqi/smartpqi_ioctl.c      | 122 +++---
 sys/dev/smartpqi/smartpqi_ioctl.h      |  25 +-
 sys/dev/smartpqi/smartpqi_main.c       | 247 +++++++-----
 sys/dev/smartpqi/smartpqi_mem.c        |  74 +++-
 sys/dev/smartpqi/smartpqi_misc.c       |  77 ++--
 sys/dev/smartpqi/smartpqi_prototypes.h |  65 +++-
 sys/dev/smartpqi/smartpqi_queue.c      | 236 +++++++-----
 sys/dev/smartpqi/smartpqi_request.c    | 461 ++++++++++++++++++-----
 sys/dev/smartpqi/smartpqi_response.c   | 171 +++++++--
 sys/dev/smartpqi/smartpqi_sis.c        |  93 +++--
 sys/dev/smartpqi/smartpqi_structures.h | 597 +++++++++++++++++++----------
 sys/dev/smartpqi/smartpqi_tag.c        | 103 ++---
 21 files changed, 3392 insertions(+), 1598 deletions(-)

diff --git a/sys/dev/smartpqi/smartpqi_cam.c b/sys/dev/smartpqi/smartpqi_cam.c
index c81b5a049da6..3d39e1ffbdd4 100644
--- a/sys/dev/smartpqi/smartpqi_cam.c
+++ b/sys/dev/smartpqi/smartpqi_cam.c
@@ -1,6 +1,5 @@
 /*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -34,11 +33,15 @@
 /*
  * Set cam sim properties of the smartpqi adapter.
  */
-static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
+static void
+update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
 {
 
 	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
 					cam_sim_softc(sim);
+
+	device_t dev = softs->os_specific.pqi_dev;
+
 	DBG_FUNC("IN\n");
 
 	cpi->version_num = 1;
@@ -50,9 +53,9 @@ static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
 	cpi->max_target = 1088;
 	cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE;
 	cpi->initiator_id = 255;
-	strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
-	strncpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
-	strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
+	strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
+	strlcpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
+	strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
 	cpi->unit_number = cam_sim_unit(sim);
 	cpi->bus_id = cam_sim_bus(sim);
 	cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */
@@ -61,6 +64,11 @@ static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
 	cpi->transport = XPORT_SPI;
 	cpi->transport_version = 2;
 	cpi->ccb_h.status = CAM_REQ_CMP;
+	cpi->hba_vendor = pci_get_vendor(dev);
+	cpi->hba_device = pci_get_device(dev);
+	cpi->hba_subvendor = pci_get_subvendor(dev);
+	cpi->hba_subdevice = pci_get_subdevice(dev);
+
 
 	DBG_FUNC("OUT\n");
 }
@@ -68,7 +76,8 @@ static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
 /*
  * Get transport settings of the smartpqi adapter 
  */
-static void get_transport_settings(struct pqisrc_softstate *softs,
+static void
+get_transport_settings(struct pqisrc_softstate *softs,
 		struct ccb_trans_settings *cts)
 {
 	struct ccb_trans_settings_scsi	*scsi = &cts->proto_specific.scsi;
@@ -76,7 +85,7 @@ static void get_transport_settings(struct pqisrc_softstate *softs,
 	struct ccb_trans_settings_spi	*spi = &cts->xport_specific.spi;
 
 	DBG_FUNC("IN\n");
-
+	
 	cts->protocol = PROTO_SCSI;
 	cts->protocol_version = SCSI_REV_SPC4;
 	cts->transport = XPORT_SPI;
@@ -94,8 +103,10 @@ static void get_transport_settings(struct pqisrc_softstate *softs,
 /*
  *  Add the target to CAM layer and rescan, when a new device is found
  */
-void os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) {
-	union ccb			*ccb;
+void
+os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+	union ccb *ccb;
 
 	DBG_FUNC("IN\n");
 
@@ -121,12 +132,13 @@ void os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) {
 /*
  * Remove the device from CAM layer when deleted or hot removed
  */
-void os_remove_device(pqisrc_softstate_t *softs,
-        pqi_scsi_dev_t *device) {
+void
+os_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
 	struct cam_path *tmppath;
 
 	DBG_FUNC("IN\n");
-
+	
 	if(softs->os_specific.sim_registered) {
 		if (xpt_create_path(&tmppath, NULL, 
 			cam_sim_path(softs->os_specific.sim),
@@ -136,6 +148,7 @@ void os_remove_device(pqisrc_softstate_t *softs,
 		}
 		xpt_async(AC_LOST_DEVICE, tmppath, NULL);
 		xpt_free_path(tmppath);
+		softs->device_list[device->target][device->lun] = NULL;
 		pqisrc_free_device(softs, device);
 	}
 
@@ -146,7 +159,8 @@ void os_remove_device(pqisrc_softstate_t *softs,
 /*
  * Function to release the frozen simq
  */
-static void pqi_release_camq( rcb_t *rcb )
+static void
+pqi_release_camq(rcb_t *rcb)
 {
 	pqisrc_softstate_t *softs;
 	struct ccb_scsiio *csio;
@@ -167,18 +181,12 @@ static void pqi_release_camq( rcb_t *rcb )
 	DBG_FUNC("OUT\n");
 }
 
-/*
- * Function to dma-unmap the completed request
- */
-static void pqi_unmap_request(void *arg)
+static void
+pqi_synch_request(rcb_t *rcb)
 {
-	pqisrc_softstate_t *softs;
-	rcb_t *rcb;
-
-	DBG_IO("IN rcb = %p\n", arg);
+	pqisrc_softstate_t *softs = rcb->softs;
 
-	rcb = (rcb_t *)arg;
-	softs = rcb->softs;
+	DBG_IO("IN rcb = %p\n", rcb);
 
 	if (!(rcb->cm_flags & PQI_CMD_MAPPED))
 		return;
@@ -199,9 +207,21 @@ static void pqi_unmap_request(void *arg)
 
 	if(rcb->sgt && rcb->nseg)
 		os_mem_free(rcb->softs, (void*)rcb->sgt,
-			rcb->nseg*sizeof(sgt_t));
+				rcb->nseg*sizeof(sgt_t));
 
-	pqisrc_put_tag(&softs->taglist, rcb->tag);
+	DBG_IO("OUT\n");
+}
+
+/*
+ * Function to dma-unmap the completed request
+ */
+static inline void
+pqi_unmap_request(rcb_t *rcb)
+{
+	DBG_IO("IN rcb = %p\n", rcb);
+
+	pqi_synch_request(rcb);
+	pqisrc_put_tag(&rcb->softs->taglist, rcb->tag);
 
 	DBG_IO("OUT\n");
 }
@@ -218,61 +238,103 @@ smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio)
 
 	DBG_FUNC("IN\n");
 
+	if (pqisrc_ctrl_offline(softs))
+		return;
+
  	cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
 		(uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;
-	if(cdb[0] == INQUIRY && 
+	if(cdb[0] == INQUIRY &&
 		(cdb[1] & SI_EVPD) == 0 &&
 		(csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
 		csio->dxfer_len >= SHORT_INQUIRY_LENGTH) {
+
 		inq = (struct scsi_inquiry_data *)csio->data_ptr;
 
 		device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun];
 
 		/* Let the disks be probed and dealt with via CAM. Only for LD
 		  let it fall through and inquiry be tweaked */
-		if( !device || 	!pqisrc_is_logical_device(device) ||
-				(device->devtype != DISK_DEVICE)  || 
+		if (!device || !pqisrc_is_logical_device(device) ||
+				(device->devtype != DISK_DEVICE) ||
 				pqisrc_is_external_raid_device(device)) {
  	 		return;
 		}
 
-		strncpy(inq->vendor, "MSCC",
-       			SID_VENDOR_SIZE);
-		strncpy(inq->product, 
-			pqisrc_raidlevel_to_string(device->raid_level),
-       			SID_PRODUCT_SIZE);
+		strncpy(inq->vendor, device->vendor,
+				SID_VENDOR_SIZE);
+		strncpy(inq->product,
+				pqisrc_raidlevel_to_string(device->raid_level),
+				SID_PRODUCT_SIZE);
 		strncpy(inq->revision, device->volume_offline?"OFF":"OK",
-       			SID_REVISION_SIZE);
+				SID_REVISION_SIZE);
     	}
 
 	DBG_FUNC("OUT\n");
 }
 
+static void
+pqi_complete_scsi_io(struct ccb_scsiio *csio, rcb_t *rcb)
+{
+	uint32_t release_tag;
+	pqisrc_softstate_t *softs = rcb->softs;
+
+	DBG_IO("IN scsi io = %p\n", csio);
+
+	pqi_synch_request(rcb);
+	smartpqi_fix_ld_inquiry(rcb->softs, csio);
+	pqi_release_camq(rcb);
+	release_tag = rcb->tag;
+	os_reset_rcb(rcb);
+	pqisrc_put_tag(&softs->taglist, release_tag);
+	xpt_done((union ccb *)csio);
+
+	DBG_FUNC("OUT\n");
+}
+
 /*
  * Handle completion of a command - pass results back through the CCB
  */
 void
 os_io_response_success(rcb_t *rcb)
 {
-	struct ccb_scsiio		*csio;
+	struct ccb_scsiio *csio;
 
 	DBG_IO("IN rcb = %p\n", rcb);
 
-	if (rcb == NULL) 
+	if (rcb == NULL)
 		panic("rcb is null");
 
 	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
 
-	if (csio == NULL) 
+	if (csio == NULL)
 		panic("csio is null");
 
 	rcb->status = REQUEST_SUCCESS;
 	csio->ccb_h.status = CAM_REQ_CMP;
 
-	smartpqi_fix_ld_inquiry(rcb->softs, csio);
-	pqi_release_camq(rcb);
-	pqi_unmap_request(rcb);
-	xpt_done((union ccb *)csio);
+	pqi_complete_scsi_io(csio, rcb);
+
+	DBG_IO("OUT\n");
+}
+
+static void
+copy_sense_data_to_csio(struct ccb_scsiio *csio,
+		uint8_t *sense_data, uint16_t sense_data_len)
+{
+	DBG_IO("IN csio = %p\n", csio);
+
+	memset(&csio->sense_data, 0, csio->sense_len);
+
+	sense_data_len = (sense_data_len > csio->sense_len) ?
+		csio->sense_len : sense_data_len;
+
+	if (sense_data)
+		memcpy(&csio->sense_data, sense_data, sense_data_len);
+
+	if (csio->sense_len > sense_data_len)
+		csio->sense_resid = csio->sense_len - sense_data_len;
+	else
+		csio->sense_resid = 0;
 
 	DBG_IO("OUT\n");
 }
@@ -280,7 +342,8 @@ os_io_response_success(rcb_t *rcb)
 /*
  * Error response handling for raid IO
  */
-void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
+void
+os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
 {
 	struct ccb_scsiio *csio;
 	pqisrc_softstate_t *softs;
@@ -294,10 +357,16 @@ void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
 
 	softs = rcb->softs;
 
-	ASSERT(err_info != NULL);
-	csio->scsi_status = err_info->status;
 	csio->ccb_h.status = CAM_REQ_CMP_ERR;
 
+	if (!err_info || !rcb->dvp) {
+		DBG_ERR("couldn't be accessed! error info = %p, rcb->dvp = %p\n",
+				err_info, rcb->dvp);
+		goto error_out;
+	}
+
+	csio->scsi_status = err_info->status;
+
 	if (csio->ccb_h.func_code == XPT_SCSI_IO) {
 		/*
 		 * Handle specific SCSI status values.
@@ -305,59 +374,40 @@ void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
 		switch(csio->scsi_status) {
 			case PQI_RAID_STATUS_QUEUE_FULL:
 				csio->ccb_h.status = CAM_REQ_CMP;
-				DBG_ERR("Queue Full error");
+				DBG_ERR("Queue Full error\n");
 				break;
 				/* check condition, sense data included */
 			case PQI_RAID_STATUS_CHECK_CONDITION:
 				{
-				uint16_t sense_data_len = 
-					LE_16(err_info->sense_data_len);
-				uint8_t *sense_data = NULL;
-				if (sense_data_len)
-					sense_data = err_info->data;
-				memset(&csio->sense_data, 0, csio->sense_len);
-				sense_data_len = (sense_data_len >
-						csio->sense_len) ?
-						csio->sense_len :
-						sense_data_len;
-				if (sense_data)
-					memcpy(&csio->sense_data, sense_data,
-						sense_data_len);
-				if (csio->sense_len > sense_data_len)
-					csio->sense_resid = csio->sense_len
-							- sense_data_len;
-					else
-						csio->sense_resid = 0;
-				csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
+					uint16_t sense_data_len =
+						LE_16(err_info->sense_data_len);
+					uint8_t *sense_data = NULL;
+					if (sense_data_len)
+						sense_data = err_info->data;
+					copy_sense_data_to_csio(csio, sense_data, sense_data_len);
+					csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
 							| CAM_AUTOSNS_VALID
 							| CAM_REQ_CMP_ERR;
+
 				}
 				break;
 
 			case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
 				{
-				uint32_t resid = 0;
-				resid = rcb->bcount-err_info->data_out_transferred;
-			    	csio->resid  = resid;
-				csio->ccb_h.status = CAM_REQ_CMP;
-				break;
+					uint32_t resid = 0;
+					resid = rcb->bcount-err_info->data_out_transferred;
+					csio->resid  = resid;
+					csio->ccb_h.status = CAM_REQ_CMP;
 				}
+				break;
 			default:
 				csio->ccb_h.status = CAM_REQ_CMP;
 				break;
 		}
 	}
 
-	if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
-		softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
-		if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
-			xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
-		else
-			csio->ccb_h.status |= CAM_RELEASE_SIMQ;
-	}
-
-	pqi_unmap_request(rcb);
-	xpt_done((union ccb *)csio);
+error_out:
+	pqi_complete_scsi_io(csio, rcb);
 
 	DBG_IO("OUT\n");
 }
@@ -365,14 +415,15 @@ void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
 /*
  * Error response handling for aio.
  */
-void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
+void
+os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
 {
 	struct ccb_scsiio *csio;
 	pqisrc_softstate_t *softs;
 
 	DBG_IO("IN\n");
 
-        if (rcb == NULL)
+	if (rcb == NULL)
 		panic("rcb is null");
 
 	rcb->status = REQUEST_SUCCESS;
@@ -382,6 +433,13 @@ void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
 
 	softs = rcb->softs;
 
+	if (!err_info || !rcb->dvp) {
+		csio->ccb_h.status = CAM_REQ_CMP_ERR;
+		DBG_ERR("couldn't be accessed! error info = %p, rcb->dvp = %p\n",
+				err_info, rcb->dvp);
+		goto error_out;
+	}
+
 	switch (err_info->service_resp) {
 		case PQI_AIO_SERV_RESPONSE_COMPLETE:
 			csio->ccb_h.status = err_info->status;
@@ -402,6 +460,14 @@ void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
 					break;
 				case PQI_AIO_STATUS_AIO_PATH_DISABLED:
 					DBG_WARN_BTL(rcb->dvp,"AIO Path Disabled\n");
+					/* Timed out TMF response comes here */
+					if (rcb->tm_req) {
+						rcb->req_pending = false;
+						rcb->status = REQUEST_SUCCESS;
+						DBG_ERR("AIO Disabled for TMF\n");
+						return;
+					}
+					rcb->dvp->aio_enabled = false;
 					rcb->dvp->offload_enabled = false;
 					csio->ccb_h.status |= CAM_REQUEUE_REQ;
 					break;
@@ -417,18 +483,24 @@ void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
 			break;
 		case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
 		case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
-			csio->ccb_h.status = CAM_REQ_CMP;
-			break;
+			DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
+				(err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_COMPLETE) ? "COMPLETE" : "SUCCEEDED");
+			rcb->status = REQUEST_SUCCESS;
+			rcb->req_pending = false;
+			return;
 		case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
 		case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
-			DBG_WARN_BTL(rcb->dvp,"TMF rejected/Incorrect Lun\n");
-			csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
-			break;
+			DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
+				(err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_REJECTED) ? "REJECTED" : "INCORRECT LUN");
+			rcb->status = REQUEST_FAILED;
+			rcb->req_pending = false;
+			return;
 		default:
 			DBG_WARN_BTL(rcb->dvp,"Scsi Status Error\n");
 			csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
 			break;
 	}
+
 	if(err_info->data_pres == DATA_PRESENT_SENSE_DATA ) {
 		csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION;
 		uint8_t *sense_data = NULL;
@@ -437,21 +509,12 @@ void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
 			sense_data = err_info->data;
 		DBG_ERR_BTL(rcb->dvp, "SCSI_STATUS_CHECK_COND  sense size %u\n",
 			sense_data_len);
-		memset(&csio->sense_data, 0, csio->sense_len);
-		if (sense_data)
-			memcpy(&csio->sense_data, sense_data, ((sense_data_len >
-                        	csio->sense_len) ? csio->sense_len : sense_data_len));
-		if (csio->sense_len > sense_data_len)
-			csio->sense_resid = csio->sense_len - sense_data_len;
-        	else
-			csio->sense_resid = 0;
+		copy_sense_data_to_csio(csio, sense_data, sense_data_len);
 		csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
 	}
 
-	smartpqi_fix_ld_inquiry(softs, csio);
-	pqi_release_camq(rcb);
-	pqi_unmap_request(rcb);
-	xpt_done((union ccb *)csio);
+error_out:
+	pqi_complete_scsi_io(csio, rcb);
 	DBG_IO("OUT\n");
 }
 
@@ -470,31 +533,21 @@ pqi_freeze_ccb(union ccb *ccb)
 static void
 pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 {
-	pqisrc_softstate_t *softs;
-	rcb_t *rcb;
-
-	rcb = (rcb_t *)arg;
-	softs = rcb->softs;
+	rcb_t *rcb = (rcb_t *)arg;
+	pqisrc_softstate_t *softs = rcb->softs;
+	union ccb *ccb;
 
-	if(  error || nseg > softs->pqi_cap.max_sg_elem )
-	{
-		rcb->cm_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
-		pqi_freeze_ccb(rcb->cm_ccb);
-		DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n", 
+	if (error || nseg > softs->pqi_cap.max_sg_elem) {
+		DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n",
 			error, nseg, softs->pqi_cap.max_sg_elem);
-		pqi_unmap_request(rcb);
-		xpt_done((union ccb *)rcb->cm_ccb);
-		return;
+		goto error_io;
 	}
 
-	rcb->sgt = os_mem_alloc(softs, nseg * sizeof(rcb_t));
-	if (rcb->sgt == NULL) {
-		rcb->cm_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
-		pqi_freeze_ccb(rcb->cm_ccb);
+	rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t));
+
+	if (!rcb->sgt) {
 		DBG_ERR_BTL(rcb->dvp, "os_mem_alloc() failed; nseg = %d\n", nseg);
-		pqi_unmap_request(rcb);
-		xpt_done((union ccb *)rcb->cm_ccb);
-		return;
+		goto error_io;
 	}
 
 	rcb->nseg = nseg;
@@ -518,38 +571,46 @@ pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 
 	if (error) {
 		rcb->req_pending = false;
-		rcb->cm_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
-		pqi_freeze_ccb(rcb->cm_ccb);
 		DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error);
-	   	pqi_unmap_request(rcb);
-		xpt_done((union ccb *)rcb->cm_ccb);
+	} else {
+		/* Successfully IO was submitted to the device. */
 		return;
 	}
+
+error_io:
+	ccb = rcb->cm_ccb;
+	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+	pqi_freeze_ccb(ccb);
+	pqi_unmap_request(rcb);
+	xpt_done(ccb);
+	return;
 }
 
 /*
- * Function to dma-map the request buffer 
+ * Function to dma-map the request buffer
  */
-static int pqi_map_request( rcb_t *rcb )
+static int
+pqi_map_request(rcb_t *rcb)
 {
 	pqisrc_softstate_t *softs = rcb->softs;
-	int error = PQI_STATUS_SUCCESS;
+	int bsd_status = BSD_SUCCESS;
 	union ccb *ccb = rcb->cm_ccb;
 
 	DBG_FUNC("IN\n");
 
 	/* check that mapping is necessary */
 	if (rcb->cm_flags & PQI_CMD_MAPPED)
-		return(0);
+		return BSD_SUCCESS;
+
 	rcb->cm_flags |= PQI_CMD_MAPPED;
 
 	if (rcb->bcount) {
-		error = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
+		bsd_status = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
 			rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
-		if (error != 0){
-			DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed = %d count = %d\n", 
-					error, rcb->bcount);
-			return error;
+		if (bsd_status != BSD_SUCCESS && bsd_status != EINPROGRESS) {
+			DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed, return status = %d transfer length = %d\n",
+					bsd_status, rcb->bcount);
+			return bsd_status;
 		}
 	} else {
 		/*
@@ -560,18 +621,21 @@ static int pqi_map_request( rcb_t *rcb )
 		/* Call IO functions depending on pd or ld */
 		rcb->status = REQUEST_PENDING;
 
-		error = pqisrc_build_send_io(softs, rcb);
+		if (pqisrc_build_send_io(softs, rcb) != PQI_STATUS_SUCCESS) {
+			bsd_status = EIO;
+		}
 	}
 
-	DBG_FUNC("OUT error = %d\n", error);
+	DBG_FUNC("OUT error = %d\n", bsd_status);
 
-	return error;
+	return bsd_status;
 }
 
 /*
  * Function to clear the request control block
  */
-void os_reset_rcb( rcb_t *rcb )
+void
+os_reset_rcb(rcb_t *rcb)
 {
 	rcb->error_info = NULL;
 	rcb->req = NULL;
@@ -582,7 +646,7 @@ void os_reset_rcb( rcb_t *rcb )
 	rcb->softs = NULL;
 	rcb->cm_flags = 0;
 	rcb->cm_data = NULL;
-	rcb->bcount = 0;	
+	rcb->bcount = 0;
 	rcb->nseg = 0;
 	rcb->sgt = NULL;
 	rcb->cm_ccb = NULL;
@@ -590,30 +654,39 @@ void os_reset_rcb( rcb_t *rcb )
 	rcb->ioaccel_handle = 0;
 	rcb->resp_qid = 0;
 	rcb->req_pending = false;
+	rcb->tm_req = false;
 }
 
 /*
  * Callback function for the lun rescan
  */
-static void smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
+static void
+smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
 {
         xpt_free_path(ccb->ccb_h.path);
         xpt_free_ccb(ccb);
 }
 
+
 /*
  * Function to rescan the lun
  */
-static void smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target, 
+static void
+smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
 			int lun)
 {
-	union ccb   *ccb = NULL;
-	cam_status  status = 0;
-	struct cam_path     *path = NULL;	
+	union ccb *ccb = NULL;
+	cam_status status = 0;
+	struct cam_path *path = NULL;
 
 	DBG_FUNC("IN\n");
 
 	ccb = xpt_alloc_ccb_nowait();
+	if (ccb == NULL) {
+		DBG_ERR("Unable to alloc ccb for lun rescan\n");
+		return;
+	}
+
 	status = xpt_create_path(&path, NULL,
 				cam_sim_path(softs->os_specific.sim), target, lun);
 	if (status != CAM_REQ_CMP) {
@@ -623,6 +696,7 @@ static void smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
 		return;
 	}
 
+	bzero(ccb, sizeof(union ccb));
 	xpt_setup_ccb(&ccb->ccb_h, path, 5);
 	ccb->ccb_h.func_code = XPT_SCAN_LUN;
 	ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb;
@@ -636,7 +710,8 @@ static void smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
 /*
  * Function to rescan the lun under each target
  */
-void smartpqi_target_rescan(struct pqisrc_softstate *softs)
+void
+smartpqi_target_rescan(struct pqisrc_softstate *softs)
 {
 	int target = 0, lun = 0;
 
@@ -656,7 +731,8 @@ void smartpqi_target_rescan(struct pqisrc_softstate *softs)
 /*
  * Set the mode of tagged command queueing for the current task.
  */
-uint8_t os_get_task_attr(rcb_t *rcb) 
+uint8_t
+os_get_task_attr(rcb_t *rcb)
 {
 	union ccb *ccb = rcb->cm_ccb;
 	uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
@@ -679,19 +755,24 @@ uint8_t os_get_task_attr(rcb_t *rcb)
 /*
  * Complete all outstanding commands
  */
-void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
+void
+os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
 {
 	int tag = 0;
+	pqi_scsi_dev_t  *dvp = NULL;
 
 	DBG_FUNC("IN\n");
 
-	for (tag = 1; tag < softs->max_outstanding_io; tag++) {
+	for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
 		rcb_t *prcb = &softs->rcb[tag];
+		dvp = prcb->dvp;
 		if(prcb->req_pending && prcb->cm_ccb ) {
 			prcb->req_pending = false;
 			prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP;
-			xpt_done((union ccb *)prcb->cm_ccb);
-			prcb->cm_ccb = NULL;
+			pqi_complete_scsi_io(&prcb->cm_ccb->csio, prcb);
+			if (dvp)
+				pqisrc_decrement_device_active_io(softs, dvp);
+
 		}
 	}
 
@@ -701,21 +782,22 @@ void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
 /*
  * IO handling functionality entry point
  */
-static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
+static int
+pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
 {
 	rcb_t *rcb;
 	uint32_t tag, no_transfer = 0;
 	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
 					cam_sim_softc(sim);
-	int32_t error = PQI_STATUS_FAILURE;
+	int32_t error;
 	pqi_scsi_dev_t *dvp;
 
 	DBG_FUNC("IN\n");
 
-	if( softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL ) {
+	if (softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL) {
 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 		DBG_INFO("Device  = %d not there\n", ccb->ccb_h.target_id);
-		return PQI_STATUS_FAILURE;
+		return ENXIO;
 	}
 
 	dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
@@ -724,40 +806,40 @@ static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET
 					| CAM_BUSY | CAM_REQ_INPROG;
 		DBG_WARN("Device  = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id);
-		return error;
+		return ENXIO;
 	}
 	/* Check device state */
 	if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) {
 		ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP;
 		DBG_WARN("Device  = %d GONE/OFFLINE\n", ccb->ccb_h.target_id);
-		return error;
+		return ENXIO;
 	}
 	/* Check device reset */
-	if (dvp->reset_in_progress) {
+	if (DEVICE_RESET(dvp)) {
 		ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY;
 		DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id);
-		return error;
+		return EBUSY;
 	}
 
 	if (dvp->expose_device == false) {
 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 		DBG_INFO("Device  = %d not exposed\n", ccb->ccb_h.target_id);
-		return error;
+		return ENXIO;
 	}
 
 	tag = pqisrc_get_tag(&softs->taglist);
-	if( tag == INVALID_ELEM ) {
+	if (tag == INVALID_ELEM) {
 		DBG_ERR("Get Tag failed\n");
 		xpt_freeze_simq(softs->os_specific.sim, 1);
 		softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
 		ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ);
-		return PQI_STATUS_FAILURE;
+		return EIO;
 	}
 
 	DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist);
 
 	rcb = &softs->rcb[tag];
-	os_reset_rcb( rcb );
+	os_reset_rcb(rcb);
 	rcb->tag = tag;
 	rcb->softs = softs;
 	rcb->cmdlen = ccb->csio.cdb_len;
@@ -794,57 +876,72 @@ static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
 	 * if we ever learn a transport layer other than simple, may fail
 	 * if the adapter rejects the command).
 	 */
-	if ((error = pqi_map_request(rcb)) != 0) {
-		rcb->req_pending = false;
+	if ((error = pqi_map_request(rcb)) != BSD_SUCCESS) {
 		xpt_freeze_simq(softs->os_specific.sim, 1);
-		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
 		if (error == EINPROGRESS) {
-			DBG_WARN("In Progress on %d\n", ccb->ccb_h.target_id);
-			error = 0;
+			/* Release simq in the completion */
+			softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
+			error = BSD_SUCCESS;
 		} else {
-			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
+			rcb->req_pending = false;
+			ccb->ccb_h.status |= CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
 			DBG_WARN("Requeue req error = %d target = %d\n", error,
 				ccb->ccb_h.target_id);
 			pqi_unmap_request(rcb);
+			error = EIO;
 		}
 	}
 
 	DBG_FUNC("OUT error = %d\n", error);
+
 	return error;
 }
 
+static inline int
+pqi_tmf_status_to_bsd_tmf_status(int pqi_status, rcb_t *rcb)
+{
+	if (PQI_STATUS_SUCCESS == pqi_status &&
+			REQUEST_SUCCESS == rcb->status)
+		return BSD_SUCCESS;
+	else
+		return EIO;
+}
+
 /*
  * Abort a task, task management functionality
  */
 static int
 pqisrc_scsi_abort_task(pqisrc_softstate_t *softs,  union ccb *ccb)
 {
-	rcb_t *rcb = ccb->ccb_h.sim_priv.entries[0].ptr;
-	uint32_t abort_tag = rcb->tag;
-	uint32_t tag = 0;
-	int rval = PQI_STATUS_SUCCESS;
-	uint16_t qid;
-
-    DBG_FUNC("IN\n");
+	struct ccb_hdr *ccb_h = &ccb->ccb_h;
+	rcb_t *rcb = NULL;
+	rcb_t *prcb = ccb->ccb_h.sim_priv.entries[0].ptr;
+	uint32_t tag;
+	int rval;
 
-	qid = (uint16_t)rcb->resp_qid;
+	DBG_FUNC("IN\n");
 
 	tag = pqisrc_get_tag(&softs->taglist);
 	rcb = &softs->rcb[tag];
 	rcb->tag = tag;
-	rcb->resp_qid = qid;
 
-	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, abort_tag,
+	if (!rcb->dvp) {
+		DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
+		rval = ENXIO;
+		goto error_tmf;
+	}
+
+	rcb->tm_req = true;
+
+	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, prcb,
 		SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK);
 
-	if (PQI_STATUS_SUCCESS == rval) {
-		rval = rcb->status;
-		if (REQUEST_SUCCESS == rval) {
-			ccb->ccb_h.status = CAM_REQ_ABORTED;
-		}
-	}
-	pqisrc_put_tag(&softs->taglist, abort_tag);
-	pqisrc_put_tag(&softs->taglist,rcb->tag);
+	if ((rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb)) == BSD_SUCCESS)
+		ccb->ccb_h.status = CAM_REQ_ABORTED;
+
+error_tmf:
+	os_reset_rcb(rcb);
+	pqisrc_put_tag(&softs->taglist, tag);
 
 	DBG_FUNC("OUT rval = %d\n", rval);
 
@@ -857,9 +954,10 @@ pqisrc_scsi_abort_task(pqisrc_softstate_t *softs,  union ccb *ccb)
 static int
 pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
 {
+	struct ccb_hdr *ccb_h = &ccb->ccb_h;
 	rcb_t *rcb = NULL;
-	uint32_t tag = 0;
-	int rval = PQI_STATUS_SUCCESS;
+	uint32_t tag;
+	int rval;
 
 	DBG_FUNC("IN\n");
 
@@ -867,14 +965,22 @@ pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
 	rcb = &softs->rcb[tag];
 	rcb->tag = tag;
 
-	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, 0,
+	if (!rcb->dvp) {
+		DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
+		rval = ENXIO;
+		goto error_tmf;
*** 9201 LINES SKIPPED ***

