svn commit: r265648 - stable/9/sys/cam/ctl

Alexander Motin mav at FreeBSD.org
Thu May 8 08:15:43 UTC 2014


Author: mav
Date: Thu May  8 08:15:42 2014
New Revision: 265648
URL: http://svnweb.freebsd.org/changeset/base/265648

Log:
  MFC r257946:
  Introduce separate mutex lock to protect CTL I/O pools, slightly
  reducing global CTL lock scope and congestion.
  
  While there, simplify CTL I/O pools KPI, hiding implementation details.

Modified:
  stable/9/sys/cam/ctl/ctl.c
  stable/9/sys/cam/ctl/ctl_frontend.c
  stable/9/sys/cam/ctl/ctl_private.h
Directory Properties:
  stable/9/   (props changed)
  stable/9/sys/   (props changed)

Modified: stable/9/sys/cam/ctl/ctl.c
==============================================================================
--- stable/9/sys/cam/ctl/ctl.c	Thu May  8 08:11:44 2014	(r265647)
+++ stable/9/sys/cam/ctl/ctl.c	Thu May  8 08:15:42 2014	(r265648)
@@ -364,7 +364,6 @@ static union ctl_io *ctl_malloc_io(ctl_i
 				   int can_wait);
 static void ctl_kfree_io(union ctl_io *io);
 #endif /* unused */
-static void ctl_free_io_internal(union ctl_io *io, int have_lock);
 static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
 			 struct ctl_be_lun *be_lun, struct ctl_id target_id);
 static int ctl_free_lun(struct ctl_lun *lun);
@@ -1001,6 +1000,7 @@ ctl_init(void)
 		       "Report no lun possible for invalid LUNs");
 
 	mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
+	mtx_init(&softc->pool_lock, "CTL pool mutex", NULL, MTX_DEF);
 	softc->open_count = 0;
 
 	/*
@@ -1060,7 +1060,7 @@ ctl_init(void)
 			    CTL_POOL_ENTRIES_EMERGENCY, &emergency_pool) != 0) {
 		printf("ctl: can't allocate %d entry emergency pool, "
 		       "exiting\n", CTL_POOL_ENTRIES_EMERGENCY);
-		ctl_pool_free(softc, internal_pool);
+		ctl_pool_free(internal_pool);
 		return (ENOMEM);
 	}
 
@@ -1069,8 +1069,8 @@ ctl_init(void)
 	{
 		printf("ctl: can't allocate %d entry other SC pool, "
 		       "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
-		ctl_pool_free(softc, internal_pool);
-		ctl_pool_free(softc, emergency_pool);
+		ctl_pool_free(internal_pool);
+		ctl_pool_free(emergency_pool);
 		return (ENOMEM);
 	}
 
@@ -1078,12 +1078,6 @@ ctl_init(void)
 	softc->emergency_pool = emergency_pool;
 	softc->othersc_pool = other_pool;
 
-	mtx_lock(&softc->ctl_lock);
-	ctl_pool_acquire(internal_pool);
-	ctl_pool_acquire(emergency_pool);
-	ctl_pool_acquire(other_pool);
-	mtx_unlock(&softc->ctl_lock);
-
 	/*
 	 * We used to allocate a processor LUN here.  The new scheme is to
 	 * just let the user allocate LUNs as he sees fit.
@@ -1100,10 +1094,10 @@ ctl_init(void)
 		printf("error creating CTL work thread!\n");
 		mtx_lock(&softc->ctl_lock);
 		ctl_free_lun(lun);
-		ctl_pool_free(softc, internal_pool);
-		ctl_pool_free(softc, emergency_pool);
-		ctl_pool_free(softc, other_pool);
 		mtx_unlock(&softc->ctl_lock);
+		ctl_pool_free(internal_pool);
+		ctl_pool_free(emergency_pool);
+		ctl_pool_free(other_pool);
 		return (error);
 	}
 	printf("ctl: CAM Target Layer loaded\n");
@@ -1156,7 +1150,7 @@ ctl_shutdown(void)
 {
 	struct ctl_softc *softc;
 	struct ctl_lun *lun, *next_lun;
-	struct ctl_io_pool *pool, *next_pool;
+	struct ctl_io_pool *pool;
 
 	softc = (struct ctl_softc *)control_softc;
 
@@ -1173,6 +1167,8 @@ ctl_shutdown(void)
 		ctl_free_lun(lun);
 	}
 
+	mtx_unlock(&softc->ctl_lock);
+
 	/*
 	 * This will rip the rug out from under any FETDs or anyone else
 	 * that has a pool allocated.  Since we increment our module
@@ -1181,18 +1177,14 @@ ctl_shutdown(void)
 	 * able to unload the CTL module until client modules have
 	 * successfully unloaded.
 	 */
-	for (pool = STAILQ_FIRST(&softc->io_pools); pool != NULL;
-	     pool = next_pool) {
-		next_pool = STAILQ_NEXT(pool, links);
-		ctl_pool_free(softc, pool);
-	}
-
-	mtx_unlock(&softc->ctl_lock);
+	while ((pool = STAILQ_FIRST(&softc->io_pools)) != NULL)
+		ctl_pool_free(pool);
 
 #if 0
 	ctl_shutdown_thread(softc->work_thread);
 #endif
 
+	mtx_destroy(&softc->pool_lock);
 	mtx_destroy(&softc->ctl_lock);
 
 	destroy_dev(softc->dev);
@@ -3342,11 +3334,12 @@ ctl_pool_create(struct ctl_softc *ctl_so
 	pool->type = pool_type;
 	pool->ctl_softc = ctl_softc;
 
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&ctl_softc->pool_lock);
 	pool->id = ctl_softc->cur_pool_id++;
-	mtx_unlock(&ctl_softc->ctl_lock);
+	mtx_unlock(&ctl_softc->pool_lock);
 
 	pool->flags = CTL_POOL_FLAG_NONE;
+	pool->refcount = 1;		/* Reference for validity. */
 	STAILQ_INIT(&pool->free_queue);
 
 	/*
@@ -3382,7 +3375,7 @@ ctl_pool_create(struct ctl_softc *ctl_so
 		free(pool, M_CTL);
 		goto bailout;
 	}
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&ctl_softc->pool_lock);
 	ctl_softc->num_pools++;
 	STAILQ_INSERT_TAIL(&ctl_softc->io_pools, pool, links);
 	/*
@@ -3401,7 +3394,7 @@ ctl_pool_create(struct ctl_softc *ctl_so
 		MOD_INC_USE_COUNT;
 #endif
 
-	mtx_unlock(&ctl_softc->ctl_lock);
+	mtx_unlock(&ctl_softc->pool_lock);
 
 	*npool = pool;
 
@@ -3410,14 +3403,11 @@ bailout:
 	return (retval);
 }
 
-int
+static int
 ctl_pool_acquire(struct ctl_io_pool *pool)
 {
 
-	mtx_assert(&control_softc->ctl_lock, MA_OWNED);
-
-	if (pool == NULL)
-		return (-EINVAL);
+	mtx_assert(&pool->ctl_softc->pool_lock, MA_OWNED);
 
 	if (pool->flags & CTL_POOL_FLAG_INVALID)
 		return (-EINVAL);
@@ -3427,51 +3417,21 @@ ctl_pool_acquire(struct ctl_io_pool *poo
 	return (0);
 }
 
-int
-ctl_pool_invalidate(struct ctl_io_pool *pool)
-{
-
-	mtx_assert(&control_softc->ctl_lock, MA_OWNED);
-
-	if (pool == NULL)
-		return (-EINVAL);
-
-	pool->flags |= CTL_POOL_FLAG_INVALID;
-
-	return (0);
-}
-
-int
+static void
 ctl_pool_release(struct ctl_io_pool *pool)
 {
+	struct ctl_softc *ctl_softc = pool->ctl_softc;
+	union ctl_io *io;
 
-	mtx_assert(&control_softc->ctl_lock, MA_OWNED);
-
-	if (pool == NULL)
-		return (-EINVAL);
-
-	if ((--pool->refcount == 0)
-	 && (pool->flags & CTL_POOL_FLAG_INVALID)) {
-		ctl_pool_free(pool->ctl_softc, pool);
-	}
-
-	return (0);
-}
-
-void
-ctl_pool_free(struct ctl_softc *ctl_softc, struct ctl_io_pool *pool)
-{
-	union ctl_io *cur_io, *next_io;
+	mtx_assert(&ctl_softc->pool_lock, MA_OWNED);
 
-	mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
+	if (--pool->refcount != 0)
+		return;
 
-	for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
-	     cur_io != NULL; cur_io = next_io) {
-		next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr,
-						      links);
-		STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr, ctl_io_hdr,
+	while ((io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue)) != NULL) {
+		STAILQ_REMOVE(&pool->free_queue, &io->io_hdr, ctl_io_hdr,
 			      links);
-		free(cur_io, M_CTL);
+		free(io, M_CTL);
 	}
 
 	STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links);
@@ -3490,6 +3450,21 @@ ctl_pool_free(struct ctl_softc *ctl_soft
 	free(pool, M_CTL);
 }
 
+void
+ctl_pool_free(struct ctl_io_pool *pool)
+{
+	struct ctl_softc *ctl_softc;
+
+	if (pool == NULL)
+		return;
+
+	ctl_softc = pool->ctl_softc;
+	mtx_lock(&ctl_softc->pool_lock);
+	pool->flags |= CTL_POOL_FLAG_INVALID;
+	ctl_pool_release(pool);
+	mtx_unlock(&ctl_softc->pool_lock);
+}
+
 /*
  * This routine does not block (except for spinlocks of course).
  * It tries to allocate a ctl_io union from the caller's pool as quickly as
@@ -3514,7 +3489,7 @@ ctl_alloc_io(void *pool_ref)
 
 	ctl_softc = pool->ctl_softc;
 
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&ctl_softc->pool_lock);
 	/*
 	 * First, try to get the io structure from the user's pool.
 	 */
@@ -3524,7 +3499,7 @@ ctl_alloc_io(void *pool_ref)
 			STAILQ_REMOVE_HEAD(&pool->free_queue, links);
 			pool->total_allocated++;
 			pool->free_ctl_io--;
-			mtx_unlock(&ctl_softc->ctl_lock);
+			mtx_unlock(&ctl_softc->pool_lock);
 			return (io);
 		} else
 			ctl_pool_release(pool);
@@ -3547,14 +3522,14 @@ ctl_alloc_io(void *pool_ref)
 			STAILQ_REMOVE_HEAD(&npool->free_queue, links);
 			npool->total_allocated++;
 			npool->free_ctl_io--;
-			mtx_unlock(&ctl_softc->ctl_lock);
+			mtx_unlock(&ctl_softc->pool_lock);
 			return (io);
 		} else
 			ctl_pool_release(npool);
 	}
 
 	/* Drop the spinlock before we malloc */
-	mtx_unlock(&ctl_softc->ctl_lock);
+	mtx_unlock(&ctl_softc->pool_lock);
 
 	/*
 	 * The emergency pool (if it exists) didn't have one, so try an
@@ -3567,7 +3542,7 @@ ctl_alloc_io(void *pool_ref)
 		 * ctl_io to its list when it gets freed.
 		 */
 		if (emergency_pool != NULL) {
-			mtx_lock(&ctl_softc->ctl_lock);
+			mtx_lock(&ctl_softc->pool_lock);
 			if (ctl_pool_acquire(emergency_pool) == 0) {
 				io->io_hdr.pool = emergency_pool;
 				emergency_pool->total_ctl_io++;
@@ -3579,7 +3554,7 @@ ctl_alloc_io(void *pool_ref)
 				 */
 				emergency_pool->total_allocated++;
 			}
-			mtx_unlock(&ctl_softc->ctl_lock);
+			mtx_unlock(&ctl_softc->pool_lock);
 		} else
 			io->io_hdr.pool = NULL;
 	}
@@ -3587,8 +3562,8 @@ ctl_alloc_io(void *pool_ref)
 	return (io);
 }
 
-static void
-ctl_free_io_internal(union ctl_io *io, int have_lock)
+void
+ctl_free_io(union ctl_io *io)
 {
 	if (io == NULL)
 		return;
@@ -3609,8 +3584,7 @@ ctl_free_io_internal(union ctl_io *io, i
 
 		pool = (struct ctl_io_pool *)io->io_hdr.pool;
 
-		if (have_lock == 0)
-			mtx_lock(&pool->ctl_softc->ctl_lock);
+		mtx_lock(&pool->ctl_softc->pool_lock);
 #if 0
 		save_flags(xflags);
 
@@ -3647,8 +3621,7 @@ ctl_free_io_internal(union ctl_io *io, i
 		pool->total_freed++;
 		pool->free_ctl_io++;
 		ctl_pool_release(pool);
-		if (have_lock == 0)
-			mtx_unlock(&pool->ctl_softc->ctl_lock);
+		mtx_unlock(&pool->ctl_softc->pool_lock);
 	} else {
 		/*
 		 * Otherwise, just free it.  We probably malloced it and
@@ -3660,12 +3633,6 @@ ctl_free_io_internal(union ctl_io *io, i
 }
 
 void
-ctl_free_io(union ctl_io *io)
-{
-	ctl_free_io_internal(io, /*have_lock*/ 0);
-}
-
-void
 ctl_zero_io(union ctl_io *io)
 {
 	void *pool_ref;
@@ -4466,7 +4433,7 @@ ctl_free_lun(struct ctl_lun *lun)
 	     io = next_io) {
 		next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, ooa_links);
 		TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
-		ctl_free_io_internal(io, /*have_lock*/ 1);
+		ctl_free_io(io);
 	}
 
 	softc->num_luns--;
@@ -10160,7 +10127,7 @@ ctl_failover(void)
 					TAILQ_REMOVE(&lun->ooa_queue,
 						     &io->io_hdr, ooa_links);
 
-					ctl_free_io_internal(io, 1);
+					ctl_free_io(io);
 				}
 			}
 
@@ -10176,7 +10143,7 @@ ctl_failover(void)
 						&io->io_hdr,
 					     	ooa_links);
 
-					ctl_free_io_internal(io, 1);
+					ctl_free_io(io);
 				}
 			}
 			ctl_check_blocked(lun);
@@ -11057,7 +11024,7 @@ ctl_run_task_queue(struct ctl_softc *ctl
 			       io->taskio.tag_num : io->scsiio.tag_num);
 			STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr,
 				      ctl_io_hdr, links);
-			ctl_free_io_internal(io, 1);
+			ctl_free_io(io);
 			break;
 		}
 		}
@@ -11150,7 +11117,7 @@ ctl_handle_isc(union ctl_io *io)
 		break;
 	}
 	if (free_io)
-		ctl_free_io_internal(io, 0);
+		ctl_free_io(io);
 
 }
 
@@ -12298,7 +12265,7 @@ ctl_process_done(union ctl_io *io, int h
 	case CTL_IO_TASK:
 		ctl_io_error_print(io, NULL);
 		if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
-			ctl_free_io_internal(io, /*have_lock*/ 0);
+			ctl_free_io(io);
 		else
 			fe_done(io);
 		return (CTL_RETVAL_COMPLETE);
@@ -12617,7 +12584,7 @@ ctl_process_done(union ctl_io *io, int h
 			/* XXX do something here */
 		}
 
-		ctl_free_io_internal(io, /*have_lock*/ 0);
+		ctl_free_io(io);
 	} else 
 		fe_done(io);
 

Modified: stable/9/sys/cam/ctl/ctl_frontend.c
==============================================================================
--- stable/9/sys/cam/ctl/ctl_frontend.c	Thu May  8 08:11:44 2014	(r265647)
+++ stable/9/sys/cam/ctl/ctl_frontend.c	Thu May  8 08:15:42 2014	(r265648)
@@ -114,7 +114,6 @@ ctl_frontend_register(struct ctl_fronten
 	fe->targ_port = port_num + (master_shelf!=0 ? 0 : CTL_MAX_PORTS);
 	fe->max_initiators = CTL_MAX_INIT_PER_PORT;
 	STAILQ_INSERT_TAIL(&control_softc->fe_list, fe, links);
-	ctl_pool_acquire(pool);
 	control_softc->ctl_ports[port_num] = fe;
 
 	mtx_unlock(&control_softc->ctl_lock);
@@ -141,10 +140,6 @@ ctl_frontend_deregister(struct ctl_front
 	}
 
 	mtx_lock(&control_softc->ctl_lock);
-
-	ctl_pool_invalidate(pool);
-	ctl_pool_release(pool);
-
 	STAILQ_REMOVE(&control_softc->fe_list, fe, ctl_frontend, links);
 	control_softc->num_frontends--;
 	port_num = (fe->targ_port < CTL_MAX_PORTS) ? fe->targ_port :
@@ -152,6 +147,9 @@ ctl_frontend_deregister(struct ctl_front
 	ctl_clear_mask(&control_softc->ctl_port_mask, port_num);
 	control_softc->ctl_ports[port_num] = NULL;
 	mtx_unlock(&control_softc->ctl_lock);
+
+	ctl_pool_free(pool);
+
 bailout:
 	return (retval);
 }

Modified: stable/9/sys/cam/ctl/ctl_private.h
==============================================================================
--- stable/9/sys/cam/ctl/ctl_private.h	Thu May  8 08:11:44 2014	(r265647)
+++ stable/9/sys/cam/ctl/ctl_private.h	Thu May  8 08:15:42 2014	(r265648)
@@ -448,6 +448,7 @@ struct ctl_softc {
 	struct ctl_frontend *ctl_ports[CTL_MAX_PORTS];
 	uint32_t num_backends;
 	STAILQ_HEAD(, ctl_backend_driver) be_list;
+	struct mtx pool_lock;
 	uint32_t num_pools;
 	uint32_t cur_pool_id;
 	STAILQ_HEAD(, ctl_io_pool) io_pools;
@@ -462,10 +463,7 @@ extern struct ctl_cmd_entry ctl_cmd_tabl
 uint32_t ctl_get_initindex(struct ctl_nexus *nexus);
 int ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
 		    uint32_t total_ctl_io, struct ctl_io_pool **npool);
-int ctl_pool_acquire(struct ctl_io_pool *pool);
-int ctl_pool_invalidate(struct ctl_io_pool *pool);
-int ctl_pool_release(struct ctl_io_pool *pool);
-void ctl_pool_free(struct ctl_softc *ctl_softc, struct ctl_io_pool *pool);
+void ctl_pool_free(struct ctl_io_pool *pool);
 int ctl_scsi_release(struct ctl_scsiio *ctsio);
 int ctl_scsi_reserve(struct ctl_scsiio *ctsio);
 int ctl_start_stop(struct ctl_scsiio *ctsio);


More information about the svn-src-stable-9 mailing list