git: b785f83e987d - stable/14 - tcp: small cleanup

From: Michael Tuexen <tuexen@FreeBSD.org>
Date: Thu, 31 Oct 2024 13:58:46 UTC
The branch stable/14 has been updated by tuexen:

URL: https://cgit.FreeBSD.org/src/commit/?id=b785f83e987dd59d4a2812fc2e300436c47e08c0

commit b785f83e987dd59d4a2812fc2e300436c47e08c0
Author:     Michael Tuexen <tuexen@FreeBSD.org>
AuthorDate: 2024-10-01 15:32:18 +0000
Commit:     Michael Tuexen <tuexen@FreeBSD.org>
CommitDate: 2024-10-31 11:37:05 +0000

    tcp: small cleanup
    
    No functional change intended.
    
    Reviewed by:            cc, glebius, markj, rscheff
    Sponsored by:           Netflix, Inc.
    Differential Revision:  https://reviews.freebsd.org/D46850
    
    (cherry picked from commit 2eacb0841c7dfc92030abc433e53cd31383a0648)
---
 sys/netinet/tcp_syncache.c | 59 ++++++++++++++++++++++------------------------
 1 file changed, 28 insertions(+), 31 deletions(-)

diff --git a/sys/netinet/tcp_syncache.c b/sys/netinet/tcp_syncache.c
index 6d171b2b7ea6..15244a61d8da 100644
--- a/sys/netinet/tcp_syncache.c
+++ b/sys/netinet/tcp_syncache.c
@@ -1577,49 +1577,46 @@ syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
 		goto donenoprobe;
 	}
 
-	if (tfo_cookie_valid) {
-		bzero(&scs, sizeof(scs));
-		sc = &scs;
-		goto skip_alloc;
-	}
-
+	KASSERT(sc == NULL, ("sc(%p) != NULL", sc));
 	/*
 	 * Skip allocating a syncache entry if we are just going to discard
 	 * it later.
 	 */
-	if (!locked) {
+	if (!locked || tfo_cookie_valid) {
 		bzero(&scs, sizeof(scs));
 		sc = &scs;
-	} else
-		sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
-	if (sc == NULL) {
-		/*
-		 * The zone allocator couldn't provide more entries.
-		 * Treat this as if the cache was full; drop the oldest
-		 * entry and insert the new one.
-		 */
-		TCPSTAT_INC(tcps_sc_zonefail);
-		if ((sc = TAILQ_LAST(&sch->sch_bucket, sch_head)) != NULL) {
-			sch->sch_last_overflow = time_uptime;
-			syncache_drop(sc, sch);
-			syncache_pause(inc);
-		}
+	} else {
 		sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
 		if (sc == NULL) {
-			if (V_tcp_syncookies) {
-				bzero(&scs, sizeof(scs));
-				sc = &scs;
-			} else {
-				KASSERT(locked,
-				    ("%s: bucket unexpectedly unlocked",
-				    __func__));
-				SCH_UNLOCK(sch);
-				goto done;
+			/*
+			 * The zone allocator couldn't provide more entries.
+			 * Treat this as if the cache was full; drop the oldest
+			 * entry and insert the new one.
+			 */
+			TCPSTAT_INC(tcps_sc_zonefail);
+			sc = TAILQ_LAST(&sch->sch_bucket, sch_head);
+			if (sc != NULL) {
+				sch->sch_last_overflow = time_uptime;
+				syncache_drop(sc, sch);
+				syncache_pause(inc);
+			}
+			sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
+			if (sc == NULL) {
+				if (V_tcp_syncookies) {
+					bzero(&scs, sizeof(scs));
+					sc = &scs;
+				} else {
+					KASSERT(locked,
+					    ("%s: bucket unexpectedly unlocked",
+					    __func__));
+					SCH_UNLOCK(sch);
+					goto done;
+				}
 			}
 		}
 	}
 
-skip_alloc:
+	KASSERT(sc != NULL, ("sc == NULL"));
 	if (!tfo_cookie_valid && tfo_response_cookie_valid)
 		sc->sc_tfo_cookie = &tfo_response_cookie;