git: a69d075e96a6 - stable/13 - if_pfsync: lock buckets during pfsync_drop()
Date: Wed, 17 Jul 2024 21:55:26 UTC
The branch stable/13 has been updated by kp:

URL: https://cgit.FreeBSD.org/src/commit/?id=a69d075e96a60273a9b39005b3e765e6476a8b53

commit a69d075e96a60273a9b39005b3e765e6476a8b53
Author:     Kristof Provost <kp@FreeBSD.org>
AuthorDate: 2024-07-07 14:42:48 +0000
Commit:     Kristof Provost <kp@FreeBSD.org>
CommitDate: 2024-07-17 21:54:35 +0000

    if_pfsync: lock buckets during pfsync_drop()

    We failed to lock buckets while dropping messages, which could potentially
    lead to crashes, and is the likely cause of panics like:

    > pfsync_drop: st->sync_state == q
    > # pfsync_drop
    > # pfsync_q_ins
    > # pfsync_insert_state
    > # pf_state_insert
    > ...

    Handle this by only processing the currently relevant (and thus locked)
    bucket. This ensures that the bucket is locked while we manipulate it.

    While here, also log slightly more information in the KASSERT().

    MFC after:	2 weeks
    Sponsored by:	Orange Business Services

    (cherry picked from commit 5f75cd390a67cbec06993c4c66f784f0f777c854)
---
 sys/netpfil/pf/if_pfsync.c | 70 ++++++++++++++++++++++++++++------------------
 1 file changed, 43 insertions(+), 27 deletions(-)

diff --git a/sys/netpfil/pf/if_pfsync.c b/sys/netpfil/pf/if_pfsync.c
index 1dde4e52e3f8..b8632e1fd61b 100644
--- a/sys/netpfil/pf/if_pfsync.c
+++ b/sys/netpfil/pf/if_pfsync.c
@@ -300,7 +300,8 @@ static void	pfsync_defer_tmo(void *);
 static void	pfsync_request_update(u_int32_t, u_int64_t);
 static bool	pfsync_update_state_req(struct pf_kstate *);
 
-static void	pfsync_drop(struct pfsync_softc *);
+static void	pfsync_drop_all(struct pfsync_softc *);
+static void	pfsync_drop(struct pfsync_softc *, int);
 static void	pfsync_sendout(int, int);
 static void	pfsync_send_plus(void *, size_t);
 
@@ -434,7 +435,7 @@ pfsync_clone_destroy(struct ifnet *ifp)
 	bpfdetach(ifp);
 	if_detach(ifp);
 
-	pfsync_drop(sc);
+	pfsync_drop_all(sc);
 
 	if_free(ifp);
 	pfsync_multicast_cleanup(sc);
@@ -1532,39 +1533,54 @@ pfsync_out_del(struct pf_kstate *st, void *buf)
 }
 
 static void
-pfsync_drop(struct pfsync_softc *sc)
+pfsync_drop_all(struct pfsync_softc *sc)
 {
-	struct pf_kstate *st, *next;
-	struct pfsync_upd_req_item *ur;
 	struct pfsync_bucket *b;
-	int c, q;
+	int c;
 
 	for (c = 0; c < pfsync_buckets; c++) {
 		b = &sc->sc_buckets[c];
-		for (q = 0; q < PFSYNC_S_COUNT; q++) {
-			if (TAILQ_EMPTY(&b->b_qs[q]))
-				continue;
-			TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, next) {
-				KASSERT(st->sync_state == q,
-					("%s: st->sync_state == q",
-						__func__));
-				st->sync_state = PFSYNC_S_NONE;
-				pf_release_state(st);
-			}
-			TAILQ_INIT(&b->b_qs[q]);
-		}
 
+		PFSYNC_BUCKET_LOCK(b);
+		pfsync_drop(sc, c);
+		PFSYNC_BUCKET_UNLOCK(b);
+	}
+}
 
-		while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) {
-			TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry);
-			free(ur, M_PFSYNC);
+static void
+pfsync_drop(struct pfsync_softc *sc, int c)
+{
+	struct pf_kstate *st, *next;
+	struct pfsync_upd_req_item *ur;
+	struct pfsync_bucket *b;
+	int q;
+
+	b = &sc->sc_buckets[c];
+	PFSYNC_BUCKET_LOCK_ASSERT(b);
+
+	for (q = 0; q < PFSYNC_S_COUNT; q++) {
+		if (TAILQ_EMPTY(&b->b_qs[q]))
+			continue;
+
+		TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, next) {
+			KASSERT(st->sync_state == q,
+				("%s: st->sync_state %d == q %d",
+					__func__, st->sync_state, q));
+			st->sync_state = PFSYNC_S_NONE;
+			pf_release_state(st);
 		}
+		TAILQ_INIT(&b->b_qs[q]);
+	}
 
-		b->b_len = PFSYNC_MINPKT;
-		free(b->b_plus, M_PFSYNC);
-		b->b_plus = NULL;
-		b->b_pluslen = 0;
+	while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) {
+		TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry);
+		free(ur, M_PFSYNC);
 	}
+
+	b->b_len = PFSYNC_MINPKT;
+	free(b->b_plus, M_PFSYNC);
+	b->b_plus = NULL;
+	b->b_pluslen = 0;
 }
 
 static void
@@ -1588,7 +1604,7 @@ pfsync_sendout(int schedswi, int c)
 	PFSYNC_BUCKET_LOCK_ASSERT(b);
 
 	if (!bpf_peers_present(ifp->if_bpf) && sc->sc_sync_if == NULL) {
-		pfsync_drop(sc);
+		pfsync_drop(sc, c);
 		return;
 	}
 
@@ -2130,8 +2146,8 @@ pfsync_q_ins(struct pf_kstate *st, int q, bool ref)
 	}
 
 	b->b_len += nlen;
-	TAILQ_INSERT_TAIL(&b->b_qs[q], st, sync_list);
 	st->sync_state = q;
+	TAILQ_INSERT_TAIL(&b->b_qs[q], st, sync_list);
 	if (ref)
 		pf_ref_state(st);
 }
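
As a rough illustration of the locking pattern this change adopts, the userspace sketch below mimics the split between pfsync_drop_all() and pfsync_drop(): the "drop all" routine takes each bucket's lock before calling the per-bucket routine, which only touches the bucket it was handed. The bucket structure, item counter, and pthread mutexes are made-up stand-ins for the kernel's pfsync data structures and the PFSYNC_BUCKET_LOCK()/PFSYNC_BUCKET_UNLOCK()/PFSYNC_BUCKET_LOCK_ASSERT() macros, not the real code.

/*
 * Illustrative sketch only: per-bucket locking in the style of
 * pfsync_drop_all() / pfsync_drop(), with pthread mutexes standing in
 * for the kernel bucket lock macros.
 */
#include <pthread.h>
#include <stdio.h>

#define NBUCKETS	4

struct bucket {
	pthread_mutex_t	lock;
	int		nitems;		/* stand-in for the queued pfsync work */
};

static struct bucket buckets[NBUCKETS];

/* Drop everything queued in one bucket; the caller must hold b->lock. */
static void
drop_one(struct bucket *b)
{
	b->nitems = 0;
}

/* Drop everything in every bucket, taking each bucket's lock in turn. */
static void
drop_all(void)
{
	for (int c = 0; c < NBUCKETS; c++) {
		struct bucket *b = &buckets[c];

		pthread_mutex_lock(&b->lock);	/* like PFSYNC_BUCKET_LOCK() */
		drop_one(b);
		pthread_mutex_unlock(&b->lock);	/* like PFSYNC_BUCKET_UNLOCK() */
	}
}

int
main(void)
{
	for (int c = 0; c < NBUCKETS; c++) {
		pthread_mutex_init(&buckets[c].lock, NULL);
		buckets[c].nitems = c + 1;
	}

	drop_all();

	for (int c = 0; c < NBUCKETS; c++)
		printf("bucket %d: %d items\n", c, buckets[c].nitems);

	return (0);
}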