PERFORCE change 118389 for review
Marko Zec <zec at FreeBSD.org>
Thu Apr 19 04:13:57 UTC 2007
http://perforce.freebsd.org/chv.cgi?CH=118389
Change 118389 by zec at zec_tca51 on 2007/04/19 04:13:05
Initial attempt at virtualizing pf (modulo pflog and pfsync).
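The change applies one mechanical pattern throughout: each formerly global pf symbol becomes a per-vnet instance reached through a V_ macro, and every function that touches such state first resolves the current vnet's pf container via INIT_VNET_PF(curvnet), while non-VIMAGE builds keep the plain globals (see the #ifndef VIMAGE blocks in pf.c below). The following standalone sketch only illustrates that idea; it is an assumption-laden toy, not the vimage framework code — the real macros dereference curvnet inside the kernel, and the container layout (struct vnet_pf) and macro bodies here are simplified.

/*
 * Toy model of the virtualization pattern used in this change.  The real
 * kernel resolves "curvnet" through the vimage framework; the macro bodies
 * and struct vnet_pf layout below are simplified assumptions made only to
 * show how INIT_VNET_PF and the V_ accessors fit together.
 */
#include <stdio.h>

struct pf_status_s { int debug; int states; };

struct vnet_pf {			/* per-vnet pf container */
	struct pf_status_s _pf_status;
};

#ifdef VIMAGE
/* Each function resolves its vnet's pf container once, then uses V_*. */
#define	INIT_VNET_PF(vnet)	struct vnet_pf *vnet_pf = (vnet)
#define	V_pf_status		(vnet_pf->_pf_status)
#else
/* Non-VIMAGE builds collapse the macros back onto the single global. */
static struct pf_status_s pf_status;
#define	INIT_VNET_PF(vnet)	(void)(vnet)
#define	V_pf_status		pf_status
#endif

#ifdef VIMAGE
static struct vnet_pf vnet0, vnet1;	/* two independent network stacks */
#endif

static void
bump_states(void *curvnet)		/* stands in for the real curvnet */
{
	INIT_VNET_PF(curvnet);		/* mirrors INIT_VNET_PF(curvnet) */
	V_pf_status.states++;		/* mirrors the V_pf_status.* accesses */
}

int
main(void)
{
#ifdef VIMAGE
	bump_states(&vnet0);
	bump_states(&vnet0);
	bump_states(&vnet1);
	printf("vnet0 %d, vnet1 %d\n", vnet0._pf_status.states,
	    vnet1._pf_status.states);	/* prints: vnet0 2, vnet1 1 */
#else
	bump_states(NULL);
	printf("global %d\n", pf_status.states);	/* prints: global 1 */
#endif
	return (0);
}

Built with -DVIMAGE the two containers count independently; built without it the same call sites fall through to the single global, which is why the non-VIMAGE build can remain behaviourally unchanged by this diff.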
Affected files ...
.. //depot/projects/vimage/src/sys/contrib/pf/net/if_pflog.c#2 edit
.. //depot/projects/vimage/src/sys/contrib/pf/net/if_pflog.h#2 edit
.. //depot/projects/vimage/src/sys/contrib/pf/net/if_pfsync.c#4 edit
.. //depot/projects/vimage/src/sys/contrib/pf/net/if_pfsync.h#3 edit
.. //depot/projects/vimage/src/sys/contrib/pf/net/pf.c#4 edit
.. //depot/projects/vimage/src/sys/contrib/pf/net/pf_if.c#3 edit
.. //depot/projects/vimage/src/sys/contrib/pf/net/pf_ioctl.c#4 edit
.. //depot/projects/vimage/src/sys/contrib/pf/net/pf_norm.c#2 edit
.. //depot/projects/vimage/src/sys/contrib/pf/net/pf_osfp.c#2 edit
.. //depot/projects/vimage/src/sys/contrib/pf/net/pf_subr.c#2 edit
.. //depot/projects/vimage/src/sys/contrib/pf/net/pf_table.c#2 edit
.. //depot/projects/vimage/src/sys/contrib/pf/net/pfvar.h#2 edit
Differences ...
==== //depot/projects/vimage/src/sys/contrib/pf/net/if_pflog.c#2 (text+ko) ====
@@ -38,6 +38,7 @@
#ifdef __FreeBSD__
#include "opt_inet.h"
#include "opt_inet6.h"
+#include "opt_vimage.h"
#endif
#ifndef __FreeBSD__
==== //depot/projects/vimage/src/sys/contrib/pf/net/if_pflog.h#2 (text+ko) ====
==== //depot/projects/vimage/src/sys/contrib/pf/net/if_pfsync.c#4 (text+ko) ====
@@ -30,6 +30,7 @@
#ifdef __FreeBSD__
#include "opt_inet.h"
#include "opt_inet6.h"
+#include "opt_vimage.h"
#endif
#ifndef __FreeBSD__
@@ -71,6 +72,7 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
+#include <sys/vimage.h>
#else
#include <sys/ioctl.h>
#include <sys/timeout.h>
@@ -353,6 +355,7 @@
int
pfsync_insert_net_state(struct pfsync_state *sp)
{
+ INIT_VNET_PF(curvnet);
struct pf_state *st = NULL;
struct pf_rule *r = NULL;
struct pfi_kif *kif;
@@ -360,7 +363,7 @@
#ifdef __FreeBSD__
PF_ASSERT(MA_OWNED);
#endif
- if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) {
+ if (sp->creatorid == 0 && V_pf_status.debug >= PF_DEBUG_MISC) {
printf("pfsync_insert_net_state: invalid creator id:"
" %08x\n", ntohl(sp->creatorid));
return (EINVAL);
@@ -368,7 +371,7 @@
kif = pfi_lookup_create(sp->ifname);
if (kif == NULL) {
- if (pf_status.debug >= PF_DEBUG_MISC)
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("pfsync_insert_net_state: "
"unknown interface: %s\n", sp->ifname);
/* skip this state */
@@ -379,7 +382,7 @@
* Just use the default rule until we have infrastructure to find the
* best matching rule.
*/
- r = &pf_default_rule;
+ r = &V_pf_default_rule;
if (!r->max_states || r->states < r->max_states)
st = pool_get(&pf_state_pl, PR_NOWAIT);
@@ -437,6 +440,7 @@
pfsync_input(struct mbuf *m, ...)
#endif
{
+ INIT_VNET_PF(curvnet);
struct ip *ip = mtod(m, struct ip *);
struct pfsync_header *ph;
#ifdef __FreeBSD__
@@ -458,7 +462,7 @@
pfsyncstats.pfsyncs_ipackets++;
/* verify that we have a sync interface configured */
- if (!sc->sc_sync_ifp || !pf_status.running) /* XXX PF_LOCK? */
+ if (!sc->sc_sync_ifp || !V_pf_status.running) /* XXX PF_LOCK? */
goto done;
/* verify that the packet came in on the right interface */
@@ -525,9 +529,9 @@
PF_LOCK();
#endif
if (cp->ifname[0] == '\0') {
- for (st = RB_MIN(pf_state_tree_id, &tree_id);
+ for (st = RB_MIN(pf_state_tree_id, &V_tree_id);
st; st = nexts) {
- nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
+ nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, st);
if (st->creatorid == creatorid) {
st->timeout = PFTM_PURGE;
pf_purge_expired_state(st);
@@ -536,7 +540,7 @@
} else {
kif = pfi_lookup_if(cp->ifname);
if (kif == NULL) {
- if (pf_status.debug >= PF_DEBUG_MISC)
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("pfsync_input: PFSYNC_ACT_CLR "
"bad interface: %s\n", cp->ifname);
splx(s);
@@ -581,7 +585,7 @@
sp->dst.state > PF_TCPS_PROXY_DST ||
sp->direction > PF_OUT ||
(sp->af != AF_INET && sp->af != AF_INET6)) {
- if (pf_status.debug >= PF_DEBUG_MISC)
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("pfsync_insert: PFSYNC_ACT_INS: "
"invalid value\n");
pfsyncstats.pfsyncs_badstate++;
@@ -623,7 +627,7 @@
if (sp->timeout >= PFTM_MAX ||
sp->src.state > PF_TCPS_PROXY_DST ||
sp->dst.state > PF_TCPS_PROXY_DST) {
- if (pf_status.debug >= PF_DEBUG_MISC)
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("pfsync_insert: PFSYNC_ACT_UPD: "
"invalid value\n");
pfsyncstats.pfsyncs_badstate++;
@@ -679,7 +683,7 @@
sfail = 6;
}
if (sfail) {
- if (pf_status.debug >= PF_DEBUG_MISC)
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("pfsync: %s stale update "
"(%d) id: %016llx "
"creatorid: %08x\n",
@@ -769,7 +773,7 @@
if (up->timeout >= PFTM_MAX ||
up->src.state > PF_TCPS_PROXY_DST ||
up->dst.state > PF_TCPS_PROXY_DST) {
- if (pf_status.debug >= PF_DEBUG_MISC)
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("pfsync_insert: "
"PFSYNC_ACT_UPD_C: "
"invalid value\n");
@@ -822,7 +826,7 @@
sfail = 6;
}
if (sfail) {
- if (pf_status.debug >= PF_DEBUG_MISC)
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("pfsync: ignoring stale update "
"(%d) id: %016llx "
"creatorid: %08x\n", sfail,
@@ -914,7 +918,7 @@
if (key.id == 0 && key.creatorid == 0) {
sc->sc_ureq_received = time_uptime;
- if (pf_status.debug >= PF_DEBUG_MISC)
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("pfsync: received "
"bulk update request\n");
pfsync_send_bus(sc, PFSYNC_BUS_START);
@@ -966,7 +970,7 @@
pf_pool_limits[PF_LIMIT_STATES].limit /
(PFSYNC_BULKPACKETS * sc->sc_maxcount));
#endif
- if (pf_status.debug >= PF_DEBUG_MISC)
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("pfsync: received bulk "
"update start\n");
break;
@@ -986,11 +990,11 @@
carp_suppress_preempt--;
#endif
pfsync_sync_ok = 1;
- if (pf_status.debug >= PF_DEBUG_MISC)
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("pfsync: received valid "
"bulk update end\n");
} else {
- if (pf_status.debug >= PF_DEBUG_MISC)
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("pfsync: received invalid "
"bulk update end: bad timestamp\n");
}
@@ -1016,6 +1020,7 @@
int
pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
+ INIT_VNET_PF(curvnet);
#ifndef __FreeBSD__
struct proc *p = curproc;
#endif
@@ -1203,7 +1208,7 @@
carp_suppress_preempt++;
#endif
pfsync_sync_ok = 0;
- if (pf_status.debug >= PF_DEBUG_MISC)
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("pfsync: requesting bulk update\n");
#ifdef __FreeBSD__
callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
@@ -1341,6 +1346,7 @@
int
pfsync_pack_state(u_int8_t action, struct pf_state *st, int flags)
{
+ INIT_VNET_PF(curvnet);
#ifdef __FreeBSD__
struct ifnet *ifp = SCP2IFP(LIST_FIRST(&pfsync_list));
#else
@@ -1425,8 +1431,8 @@
secs = time_second;
st->pfsync_time = time_uptime;
- TAILQ_REMOVE(&state_updates, st, u.s.entry_updates);
- TAILQ_INSERT_TAIL(&state_updates, st, u.s.entry_updates);
+ TAILQ_REMOVE(&V_state_updates, st, u.s.entry_updates);
+ TAILQ_INSERT_TAIL(&V_state_updates, st, u.s.entry_updates);
if (sp == NULL) {
/* not a "duplicate" update */
@@ -1651,6 +1657,7 @@
void
pfsync_send_bus(struct pfsync_softc *sc, u_int8_t status)
{
+ INIT_VNET_PF(curvnet);
struct pfsync_state_bus *bus;
#ifdef __FreeBSD__
@@ -1664,7 +1671,7 @@
(void *)&sc->sc_statep.b)) != NULL) {
sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*bus);
bus = sc->sc_statep.b;
- bus->creatorid = pf_status.hostid;
+ bus->creatorid = V_pf_status.hostid;
bus->status = status;
bus->endtime = htonl(time_uptime - sc->sc_ureq_received);
pfsync_sendout(sc);
@@ -1674,6 +1681,7 @@
void
pfsync_bulk_update(void *v)
{
+ INIT_VNET_PF(curvnet);
struct pfsync_softc *sc = v;
int s, i = 0;
struct pf_state *state;
@@ -1689,7 +1697,7 @@
* Grab at most PFSYNC_BULKPACKETS worth of states which have not
* been sent since the latest request was made.
*/
- while ((state = TAILQ_FIRST(&state_updates)) != NULL &&
+ while ((state = TAILQ_FIRST(&V_state_updates)) != NULL &&
++i < (sc->sc_maxcount * PFSYNC_BULKPACKETS)) {
if (state->pfsync_time > sc->sc_ureq_received) {
/* we're done */
@@ -1700,7 +1708,7 @@
#else
timeout_del(&sc->sc_bulk_tmo);
#endif
- if (pf_status.debug >= PF_DEBUG_MISC)
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("pfsync: bulk update complete\n");
break;
} else {
@@ -1708,8 +1716,8 @@
if (!state->sync_flags)
pfsync_pack_state(PFSYNC_ACT_UPD, state, 0);
state->pfsync_time = time_uptime;
- TAILQ_REMOVE(&state_updates, state, u.s.entry_updates);
- TAILQ_INSERT_TAIL(&state_updates, state,
+ TAILQ_REMOVE(&V_state_updates, state, u.s.entry_updates);
+ TAILQ_INSERT_TAIL(&V_state_updates, state,
u.s.entry_updates);
/* look again for more in a bit */
@@ -1732,6 +1740,7 @@
void
pfsync_bulkfail(void *v)
{
+ INIT_VNET_PF(curvnet);
struct pfsync_softc *sc = v;
int s, error;
@@ -1749,7 +1758,7 @@
s = splnet();
error = pfsync_request_update(NULL, NULL);
if (error == ENOMEM) {
- if (pf_status.debug >= PF_DEBUG_MISC)
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("pfsync: cannot allocate mbufs for "
"bulk update\n");
} else
@@ -1764,7 +1773,7 @@
carp_suppress_preempt--;
#endif
pfsync_sync_ok = 1;
- if (pf_status.debug >= PF_DEBUG_MISC)
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("pfsync: failed to receive "
"bulk update status\n");
#ifdef __FreeBSD__
==== //depot/projects/vimage/src/sys/contrib/pf/net/if_pfsync.h#3 (text+ko) ====
==== //depot/projects/vimage/src/sys/contrib/pf/net/pf.c#4 (text+ko) ====
@@ -141,12 +141,13 @@
extern int ip_optcopy(struct ip *, struct ip *);
#endif
-#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
+#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
/*
* Global variables
*/
+#ifndef VIMAGE
struct pf_anchor_global pf_anchors;
struct pf_ruleset pf_main_ruleset;
struct pf_altqqueue pf_altqs[2];
@@ -159,6 +160,7 @@
u_int32_t ticket_altqs_inactive;
int altqs_inactive_open;
u_int32_t ticket_pabuf;
+#endif
#ifdef __FreeBSD__
struct callout pf_expire_to; /* expire timeout */
@@ -385,10 +387,11 @@
static int pf_anchor_compare(struct pf_anchor *, struct pf_anchor *);
#endif
+#ifndef VIMAGE
struct pf_src_tree tree_src_tracking;
-
struct pf_state_tree_id tree_id;
struct pf_state_queue state_updates;
+#endif
RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
RB_GENERATE(pf_state_tree_lan_ext, pf_state,
@@ -646,16 +649,19 @@
struct pf_state *
pf_find_state_byid(struct pf_state *key)
{
- pf_status.fcounters[FCNT_STATE_SEARCH]++;
- return (RB_FIND(pf_state_tree_id, &tree_id, key));
+ INIT_VNET_PF(curvnet);
+
+ V_pf_status.fcounters[FCNT_STATE_SEARCH]++;
+ return (RB_FIND(pf_state_tree_id, &V_tree_id, key));
}
struct pf_state *
pf_find_state_recurse(struct pfi_kif *kif, struct pf_state *key, u_int8_t tree)
{
+ INIT_VNET_PF(curvnet);
struct pf_state *s;
- pf_status.fcounters[FCNT_STATE_SEARCH]++;
+ V_pf_status.fcounters[FCNT_STATE_SEARCH]++;
switch (tree) {
case PF_LAN_EXT:
@@ -682,14 +688,15 @@
struct pf_state *
pf_find_state_all(struct pf_state *key, u_int8_t tree, int *more)
{
+ INIT_VNET_PF(curvnet);
struct pf_state *s, *ss = NULL;
struct pfi_kif *kif;
- pf_status.fcounters[FCNT_STATE_SEARCH]++;
+ V_pf_status.fcounters[FCNT_STATE_SEARCH]++;
switch (tree) {
case PF_LAN_EXT:
- TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states) {
+ TAILQ_FOREACH(kif, &V_pfi_statehead, pfik_w_states) {
s = RB_FIND(pf_state_tree_lan_ext,
&kif->pfik_lan_ext, key);
if (s == NULL)
@@ -701,7 +708,7 @@
}
return (ss);
case PF_EXT_GWY:
- TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states) {
+ TAILQ_FOREACH(kif, &V_pfi_statehead, pfik_w_states) {
s = RB_FIND(pf_state_tree_ext_gwy,
&kif->pfik_ext_gwy, key);
if (s == NULL)
@@ -750,6 +757,7 @@
int
pf_src_connlimit(struct pf_state **state)
{
+ INIT_VNET_PF(curvnet);
struct pf_state *s;
int bad = 0;
@@ -762,13 +770,13 @@
if ((*state)->rule.ptr->max_src_conn &&
(*state)->rule.ptr->max_src_conn <
(*state)->src_node->conn) {
- pf_status.lcounters[LCNT_SRCCONN]++;
+ V_pf_status.lcounters[LCNT_SRCCONN]++;
bad++;
}
if ((*state)->rule.ptr->max_src_conn_rate.limit &&
pf_check_threshold(&(*state)->src_node->conn_rate)) {
- pf_status.lcounters[LCNT_SRCCONNRATE]++;
+ V_pf_status.lcounters[LCNT_SRCCONNRATE]++;
bad++;
}
@@ -779,8 +787,8 @@
struct pfr_addr p;
u_int32_t killed = 0;
- pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
- if (pf_status.debug >= PF_DEBUG_MISC) {
+ V_pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
+ if (V_pf_status.debug >= PF_DEBUG_MISC) {
printf("pf_src_connlimit: blocking address ");
pf_print_host(&(*state)->src_node->addr, 0,
(*state)->af);
@@ -808,9 +816,9 @@
/* kill existing states if that's required. */
if ((*state)->rule.ptr->flush) {
- pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
+ V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
- RB_FOREACH(s, pf_state_tree_id, &tree_id) {
+ RB_FOREACH(s, pf_state_tree_id, &V_tree_id) {
/*
* Kill states from this source. (Only those
* from the same rule if PF_FLUSH_GLOBAL is not
@@ -832,10 +840,10 @@
killed++;
}
}
- if (pf_status.debug >= PF_DEBUG_MISC)
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
printf(", %u states killed", killed);
}
- if (pf_status.debug >= PF_DEBUG_MISC)
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("\n");
}
@@ -849,6 +857,7 @@
pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
struct pf_addr *src, sa_family_t af)
{
+ INIT_VNET_PF(curvnet);
struct pf_src_node k;
if (*sn == NULL) {
@@ -859,15 +868,15 @@
k.rule.ptr = rule;
else
k.rule.ptr = NULL;
- pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
- *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
+ V_pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
+ *sn = RB_FIND(pf_src_tree, &V_tree_src_tracking, &k);
}
if (*sn == NULL) {
if (!rule->max_src_nodes ||
rule->src_nodes < rule->max_src_nodes)
(*sn) = pool_get(&pf_src_tree_pl, PR_NOWAIT);
else
- pf_status.lcounters[LCNT_SRCNODES]++;
+ V_pf_status.lcounters[LCNT_SRCNODES]++;
if ((*sn) == NULL)
return (-1);
bzero(*sn, sizeof(struct pf_src_node));
@@ -884,8 +893,8 @@
(*sn)->rule.ptr = NULL;
PF_ACPY(&(*sn)->addr, src, af);
if (RB_INSERT(pf_src_tree,
- &tree_src_tracking, *sn) != NULL) {
- if (pf_status.debug >= PF_DEBUG_MISC) {
+ &V_tree_src_tracking, *sn) != NULL) {
+ if (V_pf_status.debug >= PF_DEBUG_MISC) {
printf("pf: src_tree insert failed: ");
pf_print_host(&(*sn)->addr, 0, af);
printf("\n");
@@ -897,12 +906,12 @@
(*sn)->ruletype = rule->action;
if ((*sn)->rule.ptr != NULL)
(*sn)->rule.ptr->src_nodes++;
- pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
- pf_status.src_nodes++;
+ V_pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
+ V_pf_status.src_nodes++;
} else {
if (rule->max_src_states &&
(*sn)->states >= rule->max_src_states) {
- pf_status.lcounters[LCNT_SRCSTATES]++;
+ V_pf_status.lcounters[LCNT_SRCSTATES]++;
return (-1);
}
}
@@ -912,10 +921,12 @@
int
pf_insert_state(struct pfi_kif *kif, struct pf_state *state)
{
+ INIT_VNET_PF(curvnet);
+
/* Thou MUST NOT insert multiple duplicate keys */
state->u.s.kif = kif;
if (RB_INSERT(pf_state_tree_lan_ext, &kif->pfik_lan_ext, state)) {
- if (pf_status.debug >= PF_DEBUG_MISC) {
+ if (V_pf_status.debug >= PF_DEBUG_MISC) {
printf("pf: state insert failed: tree_lan_ext");
printf(" lan: ");
pf_print_host(&state->lan.addr, state->lan.port,
@@ -934,7 +945,7 @@
}
if (RB_INSERT(pf_state_tree_ext_gwy, &kif->pfik_ext_gwy, state)) {
- if (pf_status.debug >= PF_DEBUG_MISC) {
+ if (V_pf_status.debug >= PF_DEBUG_MISC) {
printf("pf: state insert failed: tree_ext_gwy");
printf(" lan: ");
pf_print_host(&state->lan.addr, state->lan.port,
@@ -954,11 +965,11 @@
}
if (state->id == 0 && state->creatorid == 0) {
- state->id = htobe64(pf_status.stateid++);
- state->creatorid = pf_status.hostid;
+ state->id = htobe64(V_pf_status.stateid++);
+ state->creatorid = V_pf_status.hostid;
}
- if (RB_INSERT(pf_state_tree_id, &tree_id, state) != NULL) {
- if (pf_status.debug >= PF_DEBUG_MISC) {
+ if (RB_INSERT(pf_state_tree_id, &V_tree_id, state) != NULL) {
+ if (V_pf_status.debug >= PF_DEBUG_MISC) {
#ifdef __FreeBSD__
printf("pf: state insert failed: "
"id: %016llx creatorid: %08x",
@@ -977,10 +988,10 @@
RB_REMOVE(pf_state_tree_ext_gwy, &kif->pfik_ext_gwy, state);
return (-1);
}
- TAILQ_INSERT_HEAD(&state_updates, state, u.s.entry_updates);
+ TAILQ_INSERT_HEAD(&V_state_updates, state, u.s.entry_updates);
- pf_status.fcounters[FCNT_STATE_INSERT]++;
- pf_status.states++;
+ V_pf_status.fcounters[FCNT_STATE_INSERT]++;
+ V_pf_status.states++;
pfi_attach_state(kif);
#if NPFSYNC
pfsync_insert_state(state);
@@ -991,9 +1002,9 @@
void
pf_purge_timeout(void *arg)
{
-#ifdef __FreeBSD__
- struct callout *to = arg;
-#else
+ CURVNET_SET((struct vnet *) arg);
+ INIT_VNET_PF((struct vnet *) arg);
+#ifndef __FreeBSD__
struct timeout *to = arg;
#endif
int s;
@@ -1011,16 +1022,19 @@
#endif
#ifdef __FreeBSD__
- callout_reset(to, pf_default_rule.timeout[PFTM_INTERVAL] * hz,
- pf_purge_timeout, to);
+ callout_reset(&V_pf_expire_to,
+ V_pf_default_rule.timeout[PFTM_INTERVAL] * hz,
+ pf_purge_timeout, arg);
#else
timeout_add(to, pf_default_rule.timeout[PFTM_INTERVAL] * hz);
#endif
+ CURVNET_RESTORE();
}
u_int32_t
pf_state_expires(const struct pf_state *state)
{
+ INIT_VNET_PF(curvnet);
u_int32_t timeout;
u_int32_t start;
u_int32_t end;
@@ -1039,15 +1053,15 @@
#endif
timeout = state->rule.ptr->timeout[state->timeout];
if (!timeout)
- timeout = pf_default_rule.timeout[state->timeout];
+ timeout = V_pf_default_rule.timeout[state->timeout];
start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
if (start) {
end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
states = state->rule.ptr->states;
} else {
- start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
- end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
- states = pf_status.states;
+ start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
+ end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
+ states = V_pf_status.states;
}
if (end && states > start && start < end) {
if (states < end)
@@ -1062,29 +1076,31 @@
void
pf_purge_expired_src_nodes(void)
{
- struct pf_src_node *cur, *next;
+ INIT_VNET_PF(curvnet);
+ struct pf_src_node *cur, *next;
- for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
- next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);
+ for (cur = RB_MIN(pf_src_tree, &V_tree_src_tracking); cur; cur = next) {
+ next = RB_NEXT(pf_src_tree, &V_tree_src_tracking, cur);
- if (cur->states <= 0 && cur->expire <= time_second) {
- if (cur->rule.ptr != NULL) {
- cur->rule.ptr->src_nodes--;
- if (cur->rule.ptr->states <= 0 &&
- cur->rule.ptr->max_src_nodes <= 0)
- pf_rm_rule(NULL, cur->rule.ptr);
- }
- RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
- pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
- pf_status.src_nodes--;
- pool_put(&pf_src_tree_pl, cur);
- }
- }
+ if (cur->states <= 0 && cur->expire <= time_second) {
+ if (cur->rule.ptr != NULL) {
+ cur->rule.ptr->src_nodes--;
+ if (cur->rule.ptr->states <= 0 &&
+ cur->rule.ptr->max_src_nodes <= 0)
+ pf_rm_rule(NULL, cur->rule.ptr);
+ }
+ RB_REMOVE(pf_src_tree, &V_tree_src_tracking, cur);
+ V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
+ V_pf_status.src_nodes--;
+ pool_put(&pf_src_tree_pl, cur);
+ }
+ }
}
void
pf_src_tree_remove_state(struct pf_state *s)
{
+ INIT_VNET_PF(curvnet);
u_int32_t timeout;
if (s->src_node != NULL) {
@@ -1101,7 +1117,7 @@
timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
if (!timeout)
timeout =
- pf_default_rule.timeout[PFTM_SRC_NODE];
+ V_pf_default_rule.timeout[PFTM_SRC_NODE];
s->src_node->expire = time_second + timeout;
}
}
@@ -1110,7 +1126,7 @@
timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
if (!timeout)
timeout =
- pf_default_rule.timeout[PFTM_SRC_NODE];
+ V_pf_default_rule.timeout[PFTM_SRC_NODE];
s->nat_src_node->expire = time_second + timeout;
}
}
@@ -1120,6 +1136,8 @@
void
pf_purge_expired_state(struct pf_state *cur)
{
+ INIT_VNET_PF(curvnet);
+
#ifdef __FreeBSD__
if (cur->local_flags & PFSTATE_EXPIRING)
return;
@@ -1139,7 +1157,7 @@
&cur->u.s.kif->pfik_ext_gwy, cur);
RB_REMOVE(pf_state_tree_lan_ext,
&cur->u.s.kif->pfik_lan_ext, cur);
- RB_REMOVE(pf_state_tree_id, &tree_id, cur);
+ RB_REMOVE(pf_state_tree_id, &V_tree_id, cur);
#if NPFSYNC
pfsync_delete_state(cur);
#endif
@@ -1156,22 +1174,23 @@
pf_rm_rule(NULL, cur->anchor.ptr);
pf_normalize_tcp_cleanup(cur);
pfi_detach_state(cur->u.s.kif);
- TAILQ_REMOVE(&state_updates, cur, u.s.entry_updates);
+ TAILQ_REMOVE(&V_state_updates, cur, u.s.entry_updates);
if (cur->tag)
pf_tag_unref(cur->tag);
pool_put(&pf_state_pl, cur);
- pf_status.fcounters[FCNT_STATE_REMOVALS]++;
- pf_status.states--;
+ V_pf_status.fcounters[FCNT_STATE_REMOVALS]++;
+ V_pf_status.states--;
}
void
pf_purge_expired_states(void)
{
+ INIT_VNET_PF(curvnet);
struct pf_state *cur, *next;
- for (cur = RB_MIN(pf_state_tree_id, &tree_id);
+ for (cur = RB_MIN(pf_state_tree_id, &V_tree_id);
cur; cur = next) {
- next = RB_NEXT(pf_state_tree_id, &tree_id, cur);
+ next = RB_NEXT(pf_state_tree_id, &V_tree_id, cur);
if (pf_state_expires(cur) <= time_second)
pf_purge_expired_state(cur);
}
@@ -2200,6 +2219,7 @@
pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn)
{
+ INIT_VNET_PF(curvnet);
unsigned char hash[16];
struct pf_pool *rpool = &r->rpool;
struct pf_addr *raddr = &rpool->cur->addr.v.a.addr;
@@ -2216,11 +2236,11 @@
k.rule.ptr = r;
else
k.rule.ptr = NULL;
- pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
- *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
+ V_pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
+ *sn = RB_FIND(pf_src_tree, &V_tree_src_tracking, &k);
if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) {
PF_ACPY(naddr, &(*sn)->raddr, af);
- if (pf_status.debug >= PF_DEBUG_MISC) {
+ if (V_pf_status.debug >= PF_DEBUG_MISC) {
printf("pf_map_addr: src tracking maps ");
pf_print_host(&k.addr, 0, af);
printf(" to ");
@@ -2367,7 +2387,7 @@
if (*sn != NULL)
PF_ACPY(&(*sn)->raddr, naddr, af);
- if (pf_status.debug >= PF_DEBUG_MISC &&
+ if (V_pf_status.debug >= PF_DEBUG_MISC &&
(rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
printf("pf_map_addr: selected address ");
pf_print_host(naddr, 0, af);
@@ -2473,13 +2493,14 @@
int direction, struct pfi_kif *kif, struct pf_addr *saddr, u_int16_t sport,
struct pf_addr *daddr, u_int16_t dport, int rs_num)
{
+ INIT_VNET_PF(curvnet);
struct pf_rule *r, *rm = NULL;
struct pf_ruleset *ruleset = NULL;
struct pf_tag *pftag = NULL;
int tag = -1;
int asd = 0;
- r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr);
+ r = TAILQ_FIRST(V_pf_main_ruleset.rules[rs_num].active.ptr);
while (r && rm == NULL) {
struct pf_rule_addr *src = NULL, *dst = NULL;
struct pf_addr_wrap *xdst = NULL;
@@ -2551,6 +2572,7 @@
struct pf_addr *daddr, u_int16_t dport,
struct pf_addr *naddr, u_int16_t *nport)
{
+ INIT_VNET_PF(curvnet);
struct pf_rule *r = NULL;
if (direction == PF_OUT) {
@@ -3019,6 +3041,7 @@
#endif
{
INIT_VNET_INET(curvnet);
+ INIT_VNET_PF(curvnet);
struct pf_rule *nr = NULL;
struct pf_addr *saddr = pd->src, *daddr = pd->dst;
struct tcphdr *th = pd->hdr.tcp;
@@ -3048,7 +3071,7 @@
PF_LOCK();
#endif
- r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
+ r = TAILQ_FIRST(V_pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
if (direction == PF_OUT) {
bport = nport = th->th_sport;
@@ -3222,7 +3245,7 @@
/* check maximums */
if (r->max_states && (r->states >= r->max_states)) {
- pf_status.lcounters[LCNT_STATES]++;
+ V_pf_status.lcounters[LCNT_STATES]++;
REASON_SET(&reason, PFRES_MAXSTATES);
goto cleanup;
}
@@ -3246,16 +3269,16 @@
REASON_SET(&reason, PFRES_MEMORY);
cleanup:
if (sn != NULL && sn->states == 0 && sn->expire == 0) {
- RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
- pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
- pf_status.src_nodes--;
+ RB_REMOVE(pf_src_tree, &V_tree_src_tracking, sn);
+ V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
+ V_pf_status.src_nodes--;
pool_put(&pf_src_tree_pl, sn);
}
if (nsn != sn && nsn != NULL && nsn->states == 0 &&
nsn->expire == 0) {
- RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
- pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
- pf_status.src_nodes--;
+ RB_REMOVE(pf_src_tree, &V_tree_src_tracking, nsn);
+ V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
+ V_pf_status.src_nodes--;
pool_put(&pf_src_tree_pl, nsn);
}
return (PF_DROP);
@@ -3422,6 +3445,7 @@
struct ifqueue *ifq)
#endif
{
+ INIT_VNET_PF(curvnet);
struct pf_rule *nr = NULL;
struct pf_addr *saddr = pd->src, *daddr = pd->dst;
struct udphdr *uh = pd->hdr.udp;
@@ -3450,7 +3474,7 @@
PF_LOCK();
#endif
- r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
+ r = TAILQ_FIRST(V_pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
if (direction == PF_OUT) {
bport = nport = uh->uh_sport;
@@ -3599,7 +3623,7 @@
/* check maximums */
if (r->max_states && (r->states >= r->max_states)) {
- pf_status.lcounters[LCNT_STATES]++;
+ V_pf_status.lcounters[LCNT_STATES]++;
REASON_SET(&reason, PFRES_MAXSTATES);
goto cleanup;
}
@@ -3623,16 +3647,16 @@
REASON_SET(&reason, PFRES_MEMORY);
cleanup:
if (sn != NULL && sn->states == 0 && sn->expire == 0) {
- RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
- pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
- pf_status.src_nodes--;
+ RB_REMOVE(pf_src_tree, &V_tree_src_tracking, sn);
+ V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
+ V_pf_status.src_nodes--;
pool_put(&pf_src_tree_pl, sn);
}
if (nsn != sn && nsn != NULL && nsn->states == 0 &&
nsn->expire == 0) {
- RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
- pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
- pf_status.src_nodes--;
+ RB_REMOVE(pf_src_tree, &V_tree_src_tracking, nsn);
+ V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
+ V_pf_status.src_nodes--;
pool_put(&pf_src_tree_pl, nsn);
}
return (PF_DROP);
@@ -3714,6 +3738,7 @@
struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
struct ifqueue *ifq)
{
+ INIT_VNET_PF(curvnet);
struct pf_rule *nr = NULL;
struct pf_addr *saddr = pd->src, *daddr = pd->dst;
struct pf_rule *r, *a = NULL;
@@ -3767,7 +3792,7 @@
#endif /* INET6 */
}
- r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
+ r = TAILQ_FIRST(V_pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
if (direction == PF_OUT) {
bport = nport = icmpid;
@@ -3905,7 +3930,7 @@
/* check maximums */
if (r->max_states && (r->states >= r->max_states)) {
- pf_status.lcounters[LCNT_STATES]++;
+ V_pf_status.lcounters[LCNT_STATES]++;
REASON_SET(&reason, PFRES_MAXSTATES);
goto cleanup;
}
@@ -3929,16 +3954,16 @@
REASON_SET(&reason, PFRES_MEMORY);
cleanup:
if (sn != NULL && sn->states == 0 && sn->expire == 0) {
- RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
- pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
- pf_status.src_nodes--;
+ RB_REMOVE(pf_src_tree, &V_tree_src_tracking, sn);
+ V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
+ V_pf_status.src_nodes--;
pool_put(&pf_src_tree_pl, sn);
}
if (nsn != sn && nsn != NULL && nsn->states == 0 &&
nsn->expire == 0) {
- RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
- pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
- pf_status.src_nodes--;
+ RB_REMOVE(pf_src_tree, &V_tree_src_tracking, nsn);
+ V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
+ V_pf_status.src_nodes--;
pool_put(&pf_src_tree_pl, nsn);
}
return (PF_DROP);
@@ -4020,6 +4045,7 @@
struct pfi_kif *kif, struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
struct pf_rule **am, struct pf_ruleset **rsm, struct ifqueue *ifq)
{
+ INIT_VNET_PF(curvnet);
struct pf_rule *nr = NULL;
struct pf_rule *r, *a = NULL;
struct pf_ruleset *ruleset = NULL;
@@ -4036,7 +4062,7 @@
return (PF_DROP);
}
- r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
+ r = TAILQ_FIRST(V_pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
if (direction == PF_OUT) {
/* check outgoing packet for BINAT/NAT */
@@ -4185,7 +4211,7 @@
/* check maximums */
if (r->max_states && (r->states >= r->max_states)) {
- pf_status.lcounters[LCNT_STATES]++;
+ V_pf_status.lcounters[LCNT_STATES]++;
REASON_SET(&reason, PFRES_MAXSTATES);
goto cleanup;
}
@@ -4209,16 +4235,16 @@
REASON_SET(&reason, PFRES_MEMORY);
cleanup:
if (sn != NULL && sn->states == 0 && sn->expire == 0) {
- RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
- pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
- pf_status.src_nodes--;
+ RB_REMOVE(pf_src_tree, &V_tree_src_tracking, sn);
+ V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
+ V_pf_status.src_nodes--;
pool_put(&pf_src_tree_pl, sn);
}
if (nsn != sn && nsn != NULL && nsn->states == 0 &&
nsn->expire == 0) {
- RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
- pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
- pf_status.src_nodes--;
+ RB_REMOVE(pf_src_tree, &V_tree_src_tracking, nsn);
+ V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
+ V_pf_status.src_nodes--;
pool_put(&pf_src_tree_pl, nsn);
}
return (PF_DROP);
@@ -4285,6 +4311,7 @@
struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
>>> TRUNCATED FOR MAIL (1000 lines) <<<