git: 2e6cdfe29355 - main - pf: change pf_rules_lock and pf_ioctl_lock to per-vnet locks
Date: Wed, 19 Apr 2023 07:51:35 UTC
The branch main has been updated by kp:

URL: https://cgit.FreeBSD.org/src/commit/?id=2e6cdfe29355cd81a4e2299d61e6ed57f6798a99

commit 2e6cdfe29355cd81a4e2299d61e6ed57f6798a99
Author:     Kristof Provost <kp@FreeBSD.org>
AuthorDate: 2023-04-18 14:06:36 +0000
Commit:     Kristof Provost <kp@FreeBSD.org>
CommitDate: 2023-04-19 07:50:52 +0000

    pf: change pf_rules_lock and pf_ioctl_lock to per-vnet locks

    Both pf_rules_lock and pf_ioctl_lock only ever affect one vnet, so
    there's no point in having these locks affect other vnets. (In fact,
    the only lock in pf that can affect multiple vnets is pf_end_lock.)
    That's especially important for the rules lock, because taking the
    write lock suspends all network traffic until it's released.

    This will reduce the impact a vnet running pf can have on other
    vnets, and improve concurrency on machines running multiple
    pf-enabled vnets.

    Reviewed by:    zlei
    Sponsored by:   Rubicon Communications, LLC ("Netgate")
    Differential Revision:  https://reviews.freebsd.org/D39658
---
 sys/net/pfvar.h           | 20 +++++++++++---------
 sys/netpfil/pf/pf_ioctl.c | 23 +++++++++++++----------
 2 files changed, 24 insertions(+), 19 deletions(-)

diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h
index e9e23d985cfa..2f017923afa1 100644
--- a/sys/net/pfvar.h
+++ b/sys/net/pfvar.h
@@ -385,16 +385,18 @@ extern struct sx pf_config_lock;
 #define PF_CONFIG_UNLOCK()      sx_xunlock(&pf_config_lock)
 #define PF_CONFIG_ASSERT()      sx_assert(&pf_config_lock, SA_XLOCKED)
 
-extern struct rmlock pf_rules_lock;
+VNET_DECLARE(struct rmlock, pf_rules_lock);
+#define V_pf_rules_lock         VNET(pf_rules_lock)
+
 #define PF_RULES_RLOCK_TRACKER  struct rm_priotracker _pf_rules_tracker
-#define PF_RULES_RLOCK()        rm_rlock(&pf_rules_lock, &_pf_rules_tracker)
-#define PF_RULES_RUNLOCK()      rm_runlock(&pf_rules_lock, &_pf_rules_tracker)
-#define PF_RULES_WLOCK()        rm_wlock(&pf_rules_lock)
-#define PF_RULES_WUNLOCK()      rm_wunlock(&pf_rules_lock)
-#define PF_RULES_WOWNED()       rm_wowned(&pf_rules_lock)
-#define PF_RULES_ASSERT()       rm_assert(&pf_rules_lock, RA_LOCKED)
-#define PF_RULES_RASSERT()      rm_assert(&pf_rules_lock, RA_RLOCKED)
-#define PF_RULES_WASSERT()      rm_assert(&pf_rules_lock, RA_WLOCKED)
+#define PF_RULES_RLOCK()        rm_rlock(&V_pf_rules_lock, &_pf_rules_tracker)
+#define PF_RULES_RUNLOCK()      rm_runlock(&V_pf_rules_lock, &_pf_rules_tracker)
+#define PF_RULES_WLOCK()        rm_wlock(&V_pf_rules_lock)
+#define PF_RULES_WUNLOCK()      rm_wunlock(&V_pf_rules_lock)
+#define PF_RULES_WOWNED()       rm_wowned(&V_pf_rules_lock)
+#define PF_RULES_ASSERT()       rm_assert(&V_pf_rules_lock, RA_LOCKED)
+#define PF_RULES_RASSERT()      rm_assert(&V_pf_rules_lock, RA_RLOCKED)
+#define PF_RULES_WASSERT()      rm_assert(&V_pf_rules_lock, RA_WLOCKED)
 
 extern struct mtx_padalign pf_table_stats_lock;
 #define PF_TABLE_STATS_LOCK()   mtx_lock(&pf_table_stats_lock)
diff --git a/sys/netpfil/pf/pf_ioctl.c b/sys/netpfil/pf/pf_ioctl.c
index 5dc0072451a7..c800d2048547 100644
--- a/sys/netpfil/pf/pf_ioctl.c
+++ b/sys/netpfil/pf/pf_ioctl.c
@@ -277,8 +277,9 @@ VNET_DEFINE(int, pf_vnet_active);
 int                     pf_end_threads;
 struct proc             *pf_purge_proc;
 
-struct rmlock           pf_rules_lock;
-struct sx               pf_ioctl_lock;
+VNET_DEFINE(struct rmlock, pf_rules_lock);
+VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
+#define V_pf_ioctl_lock         VNET(pf_ioctl_lock)
 struct sx               pf_end_lock;
 
 /* pfsync */
@@ -2606,7 +2607,7 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
 
 	switch (cmd) {
 	case DIOCSTART:
-		sx_xlock(&pf_ioctl_lock);
+		sx_xlock(&V_pf_ioctl_lock);
 		if (V_pf_status.running)
 			error = EEXIST;
 		else {
@@ -2622,7 +2623,7 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
 		break;
 
 	case DIOCSTOP:
-		sx_xlock(&pf_ioctl_lock);
+		sx_xlock(&V_pf_ioctl_lock);
 		if (!V_pf_status.running)
 			error = ENOENT;
 		else {
@@ -5652,8 +5653,8 @@ DIOCCHANGEADDR_error:
 		break;
 	}
 fail:
-	if (sx_xlocked(&pf_ioctl_lock))
-		sx_xunlock(&pf_ioctl_lock);
+	if (sx_xlocked(&V_pf_ioctl_lock))
+		sx_xunlock(&V_pf_ioctl_lock);
 	CURVNET_RESTORE();
 
 #undef ERROUT_IOCTL
@@ -6692,6 +6693,9 @@ pf_load_vnet(void)
 	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 
+	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
+	sx_init(&V_pf_ioctl_lock, "pf ioctl");
+
 	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
 	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
 #ifdef ALTQ
@@ -6710,8 +6714,6 @@ pf_load(void)
 {
 	int error;
 
-	rm_init_flags(&pf_rules_lock, "pf rulesets", RM_RECURSE);
-	sx_init(&pf_ioctl_lock, "pf ioctl");
 	sx_init(&pf_end_lock, "pf end thread");
 
 	pf_mtag_initialize();
@@ -6815,6 +6817,9 @@ pf_unload_vnet(void)
 		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
 	for (int i = 0; i < SCNT_MAX; i++)
 		counter_u64_free(V_pf_status.scounters[i]);
+
+	rm_destroy(&V_pf_rules_lock);
+	sx_destroy(&V_pf_ioctl_lock);
 }
 
 static void
@@ -6834,8 +6839,6 @@ pf_unload(void)
 
 	pfi_cleanup();
 
-	rm_destroy(&pf_rules_lock);
-	sx_destroy(&pf_ioctl_lock);
 	sx_destroy(&pf_end_lock);
 }
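
For readers less familiar with FreeBSD's virtualized globals, the mechanism the commit relies on is the standard VNET pattern: a global is turned into a per-vnet variable with VNET_DECLARE()/VNET_DEFINE(), accessed through a V_-prefixed macro, and each vnet initializes and destroys its own copy. The sketch below is a minimal, illustrative example of that pattern for a hypothetical per-vnet rmlock. The example_* names, the use of VNET_SYSINIT/VNET_SYSUNINIT, and the read-side helper are assumptions made for illustration only and are not part of this commit; pf itself performs the equivalent init/destroy calls in pf_load_vnet() and pf_unload_vnet(), as the diff above shows.

/*
 * Minimal sketch of a per-vnet rmlock, following the same VNET pattern
 * this commit applies to pf_rules_lock.  Names prefixed "example_" are
 * hypothetical; only the VNET and rmlock APIs themselves are real.
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <net/vnet.h>

/* One instance of the lock exists per vnet. */
VNET_DEFINE_STATIC(struct rmlock, example_lock);
#define V_example_lock          VNET(example_lock)

/* Convenience wrappers, analogous to the PF_RULES_*() macros. */
#define EXAMPLE_RLOCK_TRACKER   struct rm_priotracker _example_tracker
#define EXAMPLE_RLOCK()         rm_rlock(&V_example_lock, &_example_tracker)
#define EXAMPLE_RUNLOCK()       rm_runlock(&V_example_lock, &_example_tracker)
#define EXAMPLE_WLOCK()         rm_wlock(&V_example_lock)
#define EXAMPLE_WUNLOCK()       rm_wunlock(&V_example_lock)

/* Each vnet initializes and destroys its own copy of the lock. */
static void
example_vnet_init(const void *unused __unused)
{
	rm_init_flags(&V_example_lock, "example lock", RM_RECURSE);
}
VNET_SYSINIT(example_vnet_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY,
    example_vnet_init, NULL);

static void
example_vnet_uninit(const void *unused __unused)
{
	rm_destroy(&V_example_lock);
}
VNET_SYSUNINIT(example_vnet_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY,
    example_vnet_uninit, NULL);

/* Example read-side use: the priority tracker lives on the caller's stack. */
static int
example_read_state(void)
{
	EXAMPLE_RLOCK_TRACKER;
	int value;

	EXAMPLE_RLOCK();
	value = 0;	/* read per-vnet state protected by the lock here */
	EXAMPLE_RUNLOCK();

	return (value);
}

Because the lock lives in per-vnet storage, a write lock taken in one vnet (which, for the rules lock, suspends that vnet's traffic until release) no longer blocks readers or writers in any other vnet; that is the concurrency improvement the commit message describes.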