svn commit: r232330 - projects/pf/head/sys/contrib/pf/net
Gleb Smirnoff
glebius at FreeBSD.org
Thu Mar 1 10:55:02 UTC 2012
Author: glebius
Date: Thu Mar 1 10:55:01 2012
New Revision: 232330
URL: http://svn.freebsd.org/changeset/base/232330
Log:
Locking pf normalization code:
- Make struct pf_frent, pf_frcache, pf_fragment
private to pf_norm.c.
- Make fragment UMA zones, TAILQs and RB-trees
static to pf_norm.c.
- Introduce mutex pf_frag_mtx for the above fragment
storage structures.
- Assert the mutex in storage manipulation functions,
obtain it in pf_normalize_ip() and in pf_normalize_ip6(),
when we are dealing with a fragment.
- TODO: lock pf_normalize_tcp_stateful()
Other bits in progress:
- Rename the main pf_task_mtx, to pf_mtx, description
"pf Giant".
- Introduce rwlock pf_rules_lock, that protects rules.
- Obtain the rwlock in pf_test(), pf_test6().
- Micro-optimise pf_test(), pf_test6() by obtaining the pf Giant
only after initial assertions and basic checks.
Modified:
projects/pf/head/sys/contrib/pf/net/if_pfsync.c
projects/pf/head/sys/contrib/pf/net/pf.c
projects/pf/head/sys/contrib/pf/net/pf_ioctl.c
projects/pf/head/sys/contrib/pf/net/pf_norm.c
projects/pf/head/sys/contrib/pf/net/pf_ruleset.c
projects/pf/head/sys/contrib/pf/net/pfvar.h
Modified: projects/pf/head/sys/contrib/pf/net/if_pfsync.c
==============================================================================
--- projects/pf/head/sys/contrib/pf/net/if_pfsync.c Thu Mar 1 10:21:10 2012 (r232329)
+++ projects/pf/head/sys/contrib/pf/net/if_pfsync.c Thu Mar 1 10:55:01 2012 (r232330)
@@ -348,7 +348,7 @@ pfsync_clone_create(struct if_clone *ifc
ifp->if_hdrlen = sizeof(struct pfsync_header);
ifp->if_mtu = ETHERMTU;
callout_init(&sc->sc_tmo, CALLOUT_MPSAFE);
- callout_init_mtx(&sc->sc_bulk_tmo, &pf_task_mtx, 0);
+ callout_init_mtx(&sc->sc_bulk_tmo, &pf_mtx, 0);
callout_init(&sc->sc_bulkfail_tmo, CALLOUT_MPSAFE);
if_attach(ifp);
Modified: projects/pf/head/sys/contrib/pf/net/pf.c
==============================================================================
--- projects/pf/head/sys/contrib/pf/net/pf.c Thu Mar 1 10:21:10 2012 (r232329)
+++ projects/pf/head/sys/contrib/pf/net/pf.c Thu Mar 1 10:55:01 2012 (r232330)
@@ -5354,48 +5354,41 @@ pf_test(int dir, struct ifnet *ifp, stru
struct pf_pdesc pd;
int off, dirndx, pqid = 0;
- PF_LOCK();
+ M_ASSERTPKTHDR(m);
+
if (!V_pf_status.running)
- {
- PF_UNLOCK();
return (PF_PASS);
- }
memset(&pd, 0, sizeof(pd));
if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
- PF_UNLOCK();
DPFPRINTF(PF_DEBUG_URGENT,
("pf_test: pf_get_mtag returned NULL\n"));
return (PF_DROP);
}
- kif = (struct pfi_kif *)ifp->if_pf_kif;
+ kif = (struct pfi_kif *)ifp->if_pf_kif;
if (kif == NULL) {
- PF_UNLOCK();
DPFPRINTF(PF_DEBUG_URGENT,
("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
return (PF_DROP);
}
if (kif->pfik_flags & PFI_IFLAG_SKIP)
- {
- PF_UNLOCK();
return (PF_PASS);
- }
- M_ASSERTPKTHDR(m);
-
- if (m->m_pkthdr.len < (int)sizeof(*h)) {
+ if (m->m_flags & M_SKIP_FIREWALL)
+ return (PF_PASS);
+
+ if (m->m_pkthdr.len < (int)sizeof(struct ip)) {
action = PF_DROP;
REASON_SET(&reason, PFRES_SHORT);
log = 1;
+ PF_LOCK();
goto done;
}
- if (m->m_flags & M_SKIP_FIREWALL) {
- PF_UNLOCK();
- return (PF_PASS);
- }
-
+ PF_LOCK();
+ PF_RULES_RLOCK();
+
if (ip_divert_ptr != NULL &&
((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
@@ -5407,9 +5400,8 @@ pf_test(int dir, struct ifnet *ifp, stru
m->m_flags |= M_FASTFWD_OURS;
pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
}
- } else
- /* We do IP header normalization and packet reassembly here */
- if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
+ } else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
+ /* We do IP header normalization and packet reassembly here */
action = PF_DROP;
goto done;
}
@@ -5417,7 +5409,7 @@ pf_test(int dir, struct ifnet *ifp, stru
h = mtod(m, struct ip *);
off = h->ip_hl << 2;
- if (off < (int)sizeof(*h)) {
+ if (off < (int)sizeof(struct ip)) {
action = PF_DROP;
REASON_SET(&reason, PFRES_SHORT);
log = 1;
@@ -5591,28 +5583,27 @@ done:
(ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
m->m_flags |= M_SKIP_FIREWALL;
- if (action == PF_PASS && r->divert.port &&
- ip_divert_ptr != NULL && !PACKET_LOOPED()) {
+ if (action == PF_PASS && r->divert.port && ip_divert_ptr != NULL &&
+ !PACKET_LOOPED()) {
ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
- sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
+ sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
if (ipfwtag != NULL) {
((struct ipfw_rule_ref *)(ipfwtag+1))->info =
ntohs(r->divert.port);
((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir;
- m_tag_prepend(m, ipfwtag);
-
+ PF_RULES_RUNLOCK();
PF_UNLOCK();
+ m_tag_prepend(m, ipfwtag);
if (m->m_flags & M_FASTFWD_OURS) {
pd.pf_mtag->flags |= PF_FASTFWD_OURS_PRESENT;
m->m_flags &= ~M_FASTFWD_OURS;
}
-
- ip_divert_ptr(*m0,
- dir == PF_IN ? DIR_IN : DIR_OUT);
+ ip_divert_ptr(*m0, dir == PF_IN ? DIR_IN : DIR_OUT);
*m0 = NULL;
+
return (action);
} else {
/* XXX: ipfw has the same behaviour! */
@@ -5697,7 +5688,9 @@ done:
pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
break;
}
+ PF_RULES_RUNLOCK();
PF_UNLOCK();
+
return (action);
}
#endif /* INET */
@@ -5717,46 +5710,40 @@ pf_test6(int dir, struct ifnet *ifp, str
struct pf_pdesc pd;
int off, terminal = 0, dirndx, rh_cnt = 0;
- PF_LOCK();
- if (!V_pf_status.running) {
- PF_UNLOCK();
+ M_ASSERTPKTHDR(m);
+
+ if (!V_pf_status.running)
return (PF_PASS);
- }
memset(&pd, 0, sizeof(pd));
if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
- PF_UNLOCK();
DPFPRINTF(PF_DEBUG_URGENT,
("pf_test: pf_get_mtag returned NULL\n"));
return (PF_DROP);
}
- kif = (struct pfi_kif *)ifp->if_pf_kif;
+ if (pd.pf_mtag->flags & PF_TAG_GENERATED)
+ return (PF_PASS);
+
+ kif = (struct pfi_kif *)ifp->if_pf_kif;
if (kif == NULL) {
- PF_UNLOCK();
DPFPRINTF(PF_DEBUG_URGENT,
("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
return (PF_DROP);
}
if (kif->pfik_flags & PFI_IFLAG_SKIP)
- {
- PF_UNLOCK();
return (PF_PASS);
- }
-
- M_ASSERTPKTHDR(m);
if (m->m_pkthdr.len < (int)sizeof(*h)) {
action = PF_DROP;
REASON_SET(&reason, PFRES_SHORT);
log = 1;
+ PF_LOCK();
goto done;
}
- if (pd.pf_mtag->flags & PF_TAG_GENERATED) {
- PF_UNLOCK();
- return (PF_PASS);
- }
+ PF_LOCK();
+ PF_RULES_RLOCK();
/* We do IP header normalization and packet reassembly here */
if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
@@ -6081,6 +6068,7 @@ done:
break;
}
+ PF_RULES_RUNLOCK();
PF_UNLOCK();
return (action);
}
Modified: projects/pf/head/sys/contrib/pf/net/pf_ioctl.c
==============================================================================
--- projects/pf/head/sys/contrib/pf/net/pf_ioctl.c Thu Mar 1 10:21:10 2012 (r232329)
+++ projects/pf/head/sys/contrib/pf/net/pf_ioctl.c Thu Mar 1 10:55:01 2012 (r232330)
@@ -104,7 +104,6 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <net/pfil.h>
-static void init_zone_var(void);
static void cleanup_pf_zone(void);
int pfattach(void);
static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
@@ -208,7 +207,9 @@ static struct cdevsw pf_cdevsw = {
static volatile VNET_DEFINE(int, pf_pfil_hooked);
#define V_pf_pfil_hooked VNET(pf_pfil_hooked)
VNET_DEFINE(int, pf_end_threads);
-struct mtx pf_task_mtx;
+
+struct mtx pf_mtx;
+struct rwlock pf_rules_lock;
/* pfsync */
pfsync_state_import_t *pfsync_state_import_ptr = NULL;
@@ -233,25 +234,19 @@ static void
init_pf_mutex(void)
{
- mtx_init(&pf_task_mtx, "pf task mtx", NULL, MTX_DEF);
+ mtx_init(&pf_mtx, "pf Giant", NULL, MTX_DEF);
+ rw_init(&pf_rules_lock, "pf rulesets");
+ /* XXXGL: name */
+ sx_init(&V_pf_consistency_lock, "pf_statetbl_lock");
}
static void
destroy_pf_mutex(void)
{
- mtx_destroy(&pf_task_mtx);
-}
-
-static void
-init_zone_var(void)
-{
- V_pf_src_tree_pl = V_pf_rule_pl = NULL;
- V_pf_state_pl = V_pf_state_key_pl = V_pf_state_item_pl = NULL;
- V_pf_altq_pl = V_pf_pooladdr_pl = NULL;
- V_pf_frent_pl = V_pf_frag_pl = V_pf_cache_pl = V_pf_cent_pl = NULL;
- V_pf_state_scrub_pl = NULL;
- V_pfr_ktable_pl = V_pfr_kentry_pl = NULL;
+ mtx_destroy(&pf_mtx);
+ rw_destroy(&pf_rules_lock);
+ sx_destroy(&V_pf_consistency_lock);
}
static void
@@ -265,13 +260,8 @@ cleanup_pf_zone(void)
uma_zdestroy(V_pf_state_item_pl);
uma_zdestroy(V_pf_altq_pl);
uma_zdestroy(V_pf_pooladdr_pl);
- uma_zdestroy(V_pf_frent_pl);
- uma_zdestroy(V_pf_frag_pl);
- uma_zdestroy(V_pf_cache_pl);
- uma_zdestroy(V_pf_cent_pl);
uma_zdestroy(V_pfr_ktable_pl);
uma_zdestroy(V_pfr_kentry_pl);
- uma_zdestroy(V_pf_state_scrub_pl);
uma_zdestroy(V_pfi_addr_pl);
}
@@ -300,29 +290,17 @@ pfattach(void)
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
V_pfr_kentry_pl = uma_zcreate("pfrkentry", sizeof(struct pfr_kentry),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
- V_pf_frent_pl = uma_zcreate("pffrent", sizeof(struct pf_frent),
- NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
- V_pf_frag_pl = uma_zcreate("pffrag", sizeof(struct pf_fragment),
- NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
- V_pf_cache_pl = uma_zcreate("pffrcache", sizeof(struct pf_fragment),
- NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
- V_pf_cent_pl = uma_zcreate("pffrcent", sizeof(struct pf_frcache),
- NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
- V_pf_state_scrub_pl = uma_zcreate("pfstatescrub",
- sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
- UMA_ALIGN_PTR, 0);
V_pfi_addr_pl = uma_zcreate("pfiaddrpl", sizeof(struct pfi_dynaddr),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
pfr_initialize();
pfi_initialize();
pf_osfp_initialize();
+ pf_normalize_init();
V_pf_pool_limits[PF_LIMIT_STATES].pp = V_pf_state_pl;
V_pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
V_pf_pool_limits[PF_LIMIT_SRC_NODES].pp = V_pf_src_tree_pl;
V_pf_pool_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
- V_pf_pool_limits[PF_LIMIT_FRAGS].pp = V_pf_frent_pl;
- V_pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
V_pf_pool_limits[PF_LIMIT_TABLES].pp = V_pfr_ktable_pl;
V_pf_pool_limits[PF_LIMIT_TABLES].limit = PFR_KTABLE_HIWAT;
V_pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].pp = V_pfr_kentry_pl;
@@ -369,8 +347,6 @@ pfattach(void)
my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
- pf_normalize_init();
-
bzero(&V_pf_status, sizeof(V_pf_status));
V_pf_status.debug = PF_DEBUG_URGENT;
@@ -3533,8 +3509,6 @@ pf_load(void)
init_pf_mutex();
pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
- init_zone_var();
- sx_init(&V_pf_consistency_lock, "pf_statetbl_lock");
if ((error = pfattach()) != 0)
return (error);
@@ -3565,8 +3539,9 @@ pf_unload(void)
V_pf_end_threads = 1;
while (V_pf_end_threads < 2) {
wakeup_one(pf_purge_thread);
- msleep(pf_purge_thread, &pf_task_mtx, 0, "pftmo", hz);
+ msleep(pf_purge_thread, &pf_mtx, 0, "pftmo", hz);
}
+ pf_normalize_cleanup();
pfi_cleanup();
pf_osfp_flush();
pf_osfp_cleanup();
@@ -3574,8 +3549,8 @@ pf_unload(void)
PF_UNLOCK();
destroy_dev(pf_dev);
destroy_pf_mutex();
- sx_destroy(&V_pf_consistency_lock);
- return error;
+
+ return (error);
}
static int
Modified: projects/pf/head/sys/contrib/pf/net/pf_norm.c
==============================================================================
--- projects/pf/head/sys/contrib/pf/net/pf_norm.c Thu Mar 1 10:21:10 2012 (r232329)
+++ projects/pf/head/sys/contrib/pf/net/pf_norm.c Thu Mar 1 10:55:01 2012 (r232330)
@@ -68,30 +68,75 @@ __FBSDID("$FreeBSD$");
#include <net/pfvar.h>
-
+struct pf_frent {
+ LIST_ENTRY(pf_frent) fr_next;
+ struct ip *fr_ip;
+ struct mbuf *fr_m;
+};
+
+struct pf_frcache {
+ LIST_ENTRY(pf_frcache) fr_next;
+ uint16_t fr_off;
+ uint16_t fr_end;
+};
+
+struct pf_fragment {
+ RB_ENTRY(pf_fragment) fr_entry;
+ TAILQ_ENTRY(pf_fragment) frag_next;
+ struct in_addr fr_src;
+ struct in_addr fr_dst;
+ u_int8_t fr_p; /* protocol of this fragment */
+ u_int8_t fr_flags; /* status flags */
#define PFFRAG_SEENLAST 0x0001 /* Seen the last fragment for this */
#define PFFRAG_NOBUFFER 0x0002 /* Non-buffering fragment cache */
#define PFFRAG_DROP 0x0004 /* Drop all fragments */
#define BUFFER_FRAGMENTS(fr) (!((fr)->fr_flags & PFFRAG_NOBUFFER))
-
+ u_int16_t fr_id; /* fragment id for reassemble */
+ u_int16_t fr_max; /* fragment data max */
+ u_int32_t fr_timeout;
+#define fr_queue fr_u.fru_queue
+#define fr_cache fr_u.fru_cache
+ union {
+ LIST_HEAD(pf_fragq, pf_frent) fru_queue; /* buffering */
+ LIST_HEAD(pf_cacheq, pf_frcache) fru_cache; /* non-buf */
+ } fr_u;
+};
+
+static struct mtx pf_frag_mtx;
+#define PF_FRAG_LOCK() mtx_lock(&pf_frag_mtx)
+#define PF_FRAG_UNLOCK() mtx_unlock(&pf_frag_mtx)
+#define PF_FRAG_ASSERT() mtx_assert(&pf_frag_mtx, MA_OWNED)
+
+VNET_DEFINE(uma_zone_t, pf_state_scrub_pl); /* XXX: shared with pfsync */
+
+static VNET_DEFINE(uma_zone_t, pf_frent_pl);
+#define V_pf_frent_pl VNET(pf_frent_pl)
+static VNET_DEFINE(uma_zone_t, pf_frag_pl);
+#define V_pf_frag_pl VNET(pf_frag_pl)
+static VNET_DEFINE(uma_zone_t, pf_cache_pl);
+#define V_pf_cache_pl VNET(pf_cache_pl)
+static VNET_DEFINE(uma_zone_t, pf_cent_pl);
+#define V_pf_cent_pl VNET(pf_cent_pl)
+static VNET_DEFINE(int, pf_nfrents);
+#define V_pf_nfrents VNET(pf_nfrents)
+static VNET_DEFINE(int, pf_ncache);
+#define V_pf_ncache VNET(pf_ncache)
TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
-VNET_DEFINE(struct pf_fragqueue, pf_fragqueue);
+static VNET_DEFINE(struct pf_fragqueue, pf_fragqueue);
#define V_pf_fragqueue VNET(pf_fragqueue)
-VNET_DEFINE(struct pf_cachequeue, pf_cachequeue);
+static VNET_DEFINE(struct pf_cachequeue, pf_cachequeue);
#define V_pf_cachequeue VNET(pf_cachequeue)
-
-static int pf_frag_compare(struct pf_fragment *,
- struct pf_fragment *);
-
RB_HEAD(pf_frag_tree, pf_fragment);
-VNET_DEFINE(struct pf_frag_tree, pf_frag_tree);
+static VNET_DEFINE(struct pf_frag_tree, pf_frag_tree);
#define V_pf_frag_tree VNET(pf_frag_tree)
-VNET_DEFINE(struct pf_frag_tree, pf_cache_tree);
+static VNET_DEFINE(struct pf_frag_tree, pf_cache_tree);
#define V_pf_cache_tree VNET(pf_cache_tree)
-RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
-RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
+static int pf_frag_compare(struct pf_fragment *,
+ struct pf_fragment *);
+static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
+static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
/* Private prototypes */
static void pf_ip2key(struct pf_fragment *, struct ip *);
@@ -117,21 +162,23 @@ static void pf_scrub_ip6(struct mbuf *
} \
} while(0)
-/* Globals */
-VNET_DEFINE(uma_zone_t, pf_frent_pl);
-VNET_DEFINE(uma_zone_t, pf_frag_pl);
-VNET_DEFINE(uma_zone_t, pf_cache_pl);
-VNET_DEFINE(uma_zone_t, pf_cent_pl);
-VNET_DEFINE(uma_zone_t, pf_state_scrub_pl);
-
-VNET_DEFINE(int, pf_nfrents);
-#define V_pf_nfrents VNET(pf_nfrents)
-VNET_DEFINE(int, pf_ncache);
-#define V_pf_ncache VNET(pf_ncache)
-
void
pf_normalize_init(void)
{
+
+ V_pf_frent_pl = uma_zcreate("pffrent", sizeof(struct pf_frent),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+ /* XXXGL: two zones of struct pf_fragment */
+ V_pf_frag_pl = uma_zcreate("pffrag", sizeof(struct pf_fragment),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+ V_pf_cache_pl = uma_zcreate("pffrcache", sizeof(struct pf_fragment),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+ V_pf_cent_pl = uma_zcreate("pffrcent", sizeof(struct pf_frcache),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+ V_pf_state_scrub_pl = uma_zcreate("pfstatescrub",
+ sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
+ UMA_ALIGN_PTR, 0);
+
/*
* XXX
* No high water mark support(It's hint not hard limit).
@@ -141,10 +188,28 @@ pf_normalize_init(void)
uma_zone_set_max(V_pf_cache_pl, PFFRAG_FRCACHE_HIWAT);
uma_zone_set_max(V_pf_cent_pl, PFFRAG_FRCENT_HIWAT);
+ V_pf_pool_limits[PF_LIMIT_FRAGS].pp = V_pf_frent_pl;
+ V_pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
+
+ mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF);
+
TAILQ_INIT(&V_pf_fragqueue);
TAILQ_INIT(&V_pf_cachequeue);
}
+void
+pf_normalize_cleanup(void)
+{
+
+ uma_zdestroy(V_pf_frent_pl);
+ uma_zdestroy(V_pf_frag_pl);
+ uma_zdestroy(V_pf_cache_pl);
+ uma_zdestroy(V_pf_cent_pl);
+ uma_zdestroy(V_pf_state_scrub_pl);
+
+ mtx_destroy(&pf_frag_mtx);
+}
+
static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
@@ -172,6 +237,7 @@ pf_purge_expired_fragments(void)
u_int32_t expire = time_second -
V_pf_default_rule.timeout[PFTM_FRAG];
+ PF_FRAG_LOCK();
while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
KASSERT((BUFFER_FRAGMENTS(frag)),
("BUFFER_FRAGMENTS(frag) == 0: %s", __FUNCTION__));
@@ -195,6 +261,7 @@ pf_purge_expired_fragments(void)
("!(TAILQ_EMPTY() || TAILQ_LAST() == farg): %s",
__FUNCTION__));
}
+ PF_FRAG_UNLOCK();
}
/*
@@ -207,6 +274,8 @@ pf_flush_fragments(void)
struct pf_fragment *frag;
int goal;
+ PF_FRAG_ASSERT();
+
goal = V_pf_nfrents * 9 / 10;
DPFPRINTF(("trying to free > %d frents\n",
V_pf_nfrents - goal));
@@ -237,6 +306,8 @@ pf_free_fragment(struct pf_fragment *fra
struct pf_frent *frent;
struct pf_frcache *frcache;
+ PF_FRAG_ASSERT();
+
/* Free all fragments */
if (BUFFER_FRAGMENTS(frag)) {
for (frent = LIST_FIRST(&frag->fr_queue); frent;
@@ -281,6 +352,8 @@ pf_find_fragment(struct ip *ip, struct p
struct pf_fragment key;
struct pf_fragment *frag;
+ PF_FRAG_ASSERT();
+
pf_ip2key(&key, ip);
frag = RB_FIND(pf_frag_tree, tree, &key);
@@ -304,6 +377,9 @@ pf_find_fragment(struct ip *ip, struct p
static void
pf_remove_fragment(struct pf_fragment *frag)
{
+
+ PF_FRAG_ASSERT();
+
if (BUFFER_FRAGMENTS(frag)) {
RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
@@ -329,6 +405,7 @@ pf_reassemble(struct mbuf **m0, struct p
u_int16_t ip_len = ntohs(ip->ip_len) - ip->ip_hl * 4;
u_int16_t max = ip_len + off;
+ PF_FRAG_ASSERT();
KASSERT((*frag == NULL || BUFFER_FRAGMENTS(*frag)),
("! (*frag == NULL || BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));
@@ -533,6 +610,7 @@ pf_fragcache(struct mbuf **m0, struct ip
u_int16_t max = ip_len + off;
int hosed = 0;
+ PF_FRAG_ASSERT();
KASSERT((*frag == NULL || !BUFFER_FRAGMENTS(*frag)),
("!(*frag == NULL || !BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));
@@ -847,6 +925,8 @@ pf_normalize_ip(struct mbuf **m0, int di
int ip_off;
int tag = -1;
+ PF_RULES_RASSERT();
+
r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
while (r != NULL) {
r->evaluations++;
@@ -924,8 +1004,9 @@ pf_normalize_ip(struct mbuf **m0, int di
max = fragoff + ip_len;
if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {
- /* Fully buffer all of the fragments */
+ /* Fully buffer all of the fragments */
+ PF_FRAG_LOCK();
frag = pf_find_fragment(h, &V_pf_frag_tree);
/* Check if we saw the last fragment already */
@@ -936,6 +1017,7 @@ pf_normalize_ip(struct mbuf **m0, int di
/* Get an entry for the fragment queue */
frent = uma_zalloc(V_pf_frent_pl, M_NOWAIT);
if (frent == NULL) {
+ PF_FRAG_UNLOCK();
REASON_SET(reason, PFRES_MEMORY);
return (PF_DROP);
}
@@ -947,11 +1029,14 @@ pf_normalize_ip(struct mbuf **m0, int di
DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
*m0 = m = pf_reassemble(m0, &frag, frent, mff);
- if (m == NULL)
+ if (m == NULL) {
+ PF_FRAG_UNLOCK();
return (PF_DROP);
+ }
/* use mtag from concatenated mbuf chain */
pd->pf_mtag = pf_find_mtag(m);
+ PF_FRAG_UNLOCK();
#ifdef DIAGNOSTIC
if (pd->pf_mtag == NULL) {
printf("%s: pf_find_mtag returned NULL(1)\n", __func__);
@@ -979,6 +1064,7 @@ pf_normalize_ip(struct mbuf **m0, int di
goto fragment_pass;
}
+ PF_FRAG_LOCK();
frag = pf_find_fragment(h, &V_pf_cache_tree);
/* Check if we saw the last fragment already */
@@ -992,6 +1078,7 @@ pf_normalize_ip(struct mbuf **m0, int di
*m0 = m = pf_fragcache(m0, h, &frag, mff,
(r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
if (m == NULL) {
+ PF_FRAG_UNLOCK();
if (nomem)
goto no_mem;
goto drop;
@@ -999,6 +1086,7 @@ pf_normalize_ip(struct mbuf **m0, int di
/* use mtag from copied and trimmed mbuf chain */
pd->pf_mtag = pf_find_mtag(m);
+ PF_FRAG_UNLOCK();
#ifdef DIAGNOSTIC
if (pd->pf_mtag == NULL) {
printf("%s: pf_find_mtag returned NULL(2)\n", __func__);
@@ -1051,8 +1139,10 @@ pf_normalize_ip(struct mbuf **m0, int di
DPFPRINTF(("dropping bad fragment\n"));
/* Free associated fragments */
- if (frag != NULL)
+ if (frag != NULL) {
pf_free_fragment(frag);
+ PF_FRAG_UNLOCK();
+ }
REASON_SET(reason, PFRES_FRAG);
if (r != NULL && r->log)
@@ -1082,6 +1172,8 @@ pf_normalize_ip6(struct mbuf **m0, int d
u_int8_t proto;
int terminal;
+ PF_RULES_RASSERT();
+
r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
while (r != NULL) {
r->evaluations++;
@@ -1250,6 +1342,8 @@ pf_normalize_tcp(int dir, struct pfi_kif
u_int8_t flags;
sa_family_t af = pd->af;
+ PF_RULES_RASSERT();
+
r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
while (r != NULL) {
r->evaluations++;
@@ -1462,6 +1556,8 @@ pf_normalize_tcp_stateful(struct mbuf *m
int copyback = 0;
int got_ts = 0;
+ /* XXXGL: lock */
+
KASSERT((src->scrub || dst->scrub),
("pf_normalize_tcp_statefull: src->scrub && dst->scrub!"));
Modified: projects/pf/head/sys/contrib/pf/net/pf_ruleset.c
==============================================================================
--- projects/pf/head/sys/contrib/pf/net/pf_ruleset.c Thu Mar 1 10:21:10 2012 (r232329)
+++ projects/pf/head/sys/contrib/pf/net/pf_ruleset.c Thu Mar 1 10:55:01 2012 (r232330)
@@ -87,14 +87,14 @@ __FBSDID("$FreeBSD$");
#ifdef _KERNEL
VNET_DEFINE(struct pf_anchor_global, pf_anchors);
VNET_DEFINE(struct pf_anchor, pf_main_anchor);
-#else
+#else /* ! _KERNEL */
struct pf_anchor_global pf_anchors;
struct pf_anchor pf_main_anchor;
#undef V_pf_anchors
#define V_pf_anchors pf_anchors
#undef pf_main_ruleset
#define pf_main_ruleset pf_main_anchor.ruleset
-#endif
+#endif /* _KERNEL */
static __inline int pf_anchor_compare(struct pf_anchor *, struct pf_anchor *);
Modified: projects/pf/head/sys/contrib/pf/net/pfvar.h
==============================================================================
--- projects/pf/head/sys/contrib/pf/net/pfvar.h Thu Mar 1 10:21:10 2012 (r232329)
+++ projects/pf/head/sys/contrib/pf/net/pfvar.h Thu Mar 1 10:55:01 2012 (r232330)
@@ -202,12 +202,19 @@ struct pfi_dynaddr {
#define PF_NAME "pf"
-extern struct mtx pf_task_mtx;
-
-#define PF_LOCK_ASSERT() mtx_assert(&pf_task_mtx, MA_OWNED)
-#define PF_UNLOCK_ASSERT() mtx_assert(&pf_task_mtx, MA_NOTOWNED)
-#define PF_LOCK() mtx_lock(&pf_task_mtx)
-#define PF_UNLOCK() mtx_unlock(&pf_task_mtx)
+extern struct mtx pf_mtx;
+#define PF_LOCK_ASSERT() mtx_assert(&pf_mtx, MA_OWNED)
+#define PF_UNLOCK_ASSERT() mtx_assert(&pf_mtx, MA_NOTOWNED)
+#define PF_LOCK() mtx_lock(&pf_mtx)
+#define PF_UNLOCK() mtx_unlock(&pf_mtx)
+
+extern struct rwlock pf_rules_lock;
+#define PF_RULES_RLOCK() rw_rlock(&pf_rules_lock)
+#define PF_RULES_RUNLOCK() rw_runlock(&pf_rules_lock)
+#define PF_RULES_RASSERT() rw_assert(&pf_rules_lock, RA_RLOCKED)
+#define PF_RULES_WLOCK() rw_wlock(&pf_rules_lock)
+#define PF_RULES_WUNLOCK() rw_wunlock(&pf_rules_lock)
+#define PF_RULES_WASSERT() rw_assert(&pf_rules_lock, RA_WLOCKED)
#define PF_COPYIN(uaddr, kaddr, len, r) do { \
PF_UNLOCK(); \
@@ -1769,10 +1776,6 @@ VNET_DECLARE(uma_zone_t, pfr_ktable_pl
#define V_pfr_ktable_pl VNET(pfr_ktable_pl)
VNET_DECLARE(uma_zone_t, pfr_kentry_pl);
#define V_pfr_kentry_pl VNET(pfr_kentry_pl)
-VNET_DECLARE(uma_zone_t, pf_cache_pl);
-#define V_pf_cache_pl VNET(pf_cache_pl)
-VNET_DECLARE(uma_zone_t, pf_cent_pl);
-#define V_pf_cent_pl VNET(pf_cent_pl)
VNET_DECLARE(uma_zone_t, pf_state_scrub_pl);
#define V_pf_state_scrub_pl VNET(pf_state_scrub_pl)
VNET_DECLARE(uma_zone_t, pfi_addr_pl);
@@ -1826,6 +1829,7 @@ int pf_match_addr_range(struct pf_addr *
int pf_match_port(u_int8_t, u_int16_t, u_int16_t, u_int16_t);
void pf_normalize_init(void);
+void pf_normalize_cleanup(void);
int pf_normalize_ip(struct mbuf **, int, struct pfi_kif *, u_short *,
struct pf_pdesc *);
int pf_normalize_ip6(struct mbuf **, int, struct pfi_kif *, u_short *,
@@ -1912,10 +1916,6 @@ void pf_qid2qname(u_int32_t, char *);
VNET_DECLARE(struct pf_status, pf_status);
#define V_pf_status VNET(pf_status)
-VNET_DECLARE(uma_zone_t, pf_frent_pl);
-#define V_pf_frent_pl VNET(pf_frent_pl)
-VNET_DECLARE(uma_zone_t, pf_frag_pl);
-#define V_pf_frag_pl VNET(pf_frag_pl)
VNET_DECLARE(struct sx, pf_consistency_lock);
#define V_pf_consistency_lock VNET(pf_consistency_lock)
@@ -1926,36 +1926,6 @@ struct pf_pool_limit {
VNET_DECLARE(struct pf_pool_limit, pf_pool_limits[PF_LIMIT_MAX]);
#define V_pf_pool_limits VNET(pf_pool_limits)
-struct pf_frent {
- LIST_ENTRY(pf_frent) fr_next;
- struct ip *fr_ip;
- struct mbuf *fr_m;
-};
-
-struct pf_frcache {
- LIST_ENTRY(pf_frcache) fr_next;
- uint16_t fr_off;
- uint16_t fr_end;
-};
-
-struct pf_fragment {
- RB_ENTRY(pf_fragment) fr_entry;
- TAILQ_ENTRY(pf_fragment) frag_next;
- struct in_addr fr_src;
- struct in_addr fr_dst;
- u_int8_t fr_p; /* protocol of this fragment */
- u_int8_t fr_flags; /* status flags */
- u_int16_t fr_id; /* fragment id for reassemble */
- u_int16_t fr_max; /* fragment data max */
- u_int32_t fr_timeout;
-#define fr_queue fr_u.fru_queue
-#define fr_cache fr_u.fru_cache
- union {
- LIST_HEAD(pf_fragq, pf_frent) fru_queue; /* buffering */
- LIST_HEAD(pf_cacheq, pf_frcache) fru_cache; /* non-buf */
- } fr_u;
-};
-
#endif /* _KERNEL */
#ifdef _KERNEL
More information about the svn-src-projects
mailing list