PERFORCE change 37405 for review
Sam Leffler
sam at FreeBSD.org
Tue Sep 2 16:27:09 PDT 2003
http://perforce.freebsd.org/chv.cgi?CH=37405
Change 37405 by sam at sam_ebb on 2003/09/02 16:26:47
push Giant up one level on the input path
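The idea is to mark the network software interrupt MPSAFE and to assert at
the top of the input routines that Giant is not held, taking Giant explicitly
only around the pieces that are not yet safe without it (legacy protocol
input routines, pfil hooks, and so on).  A minimal sketch of that idiom, with
a hypothetical handler name rather than code taken from this change:

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>
	#include <sys/mbuf.h>

	/* assumed legacy routine that still expects Giant */
	extern void legacy_proto_input(struct mbuf *);

	static void
	example_input(struct mbuf *m)
	{
		mtx_assert(&Giant, MA_NOTOWNED);	/* dispatched MPSAFE */

		/* ... MPSAFE header checks and statistics ... */

		mtx_lock(&Giant);	/* XXX until the callee is locked internally */
		legacy_proto_input(m);
		mtx_unlock(&Giant);
	}

Device polling, which previously piggybacked on Giant, instead gets a private
poll_mtx (see the kern_poll.c hunks below).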
Affected files ...
.. //depot/projects/netperf/sys/kern/kern_poll.c#2 edit
.. //depot/projects/netperf/sys/net/if_ppp.c#2 edit
.. //depot/projects/netperf/sys/net/netisr.c#2 edit
.. //depot/projects/netperf/sys/netatalk/aarp.c#2 edit
.. //depot/projects/netperf/sys/netatalk/ddp_input.c#2 edit
.. //depot/projects/netperf/sys/netatm/atm_subr.c#3 edit
.. //depot/projects/netperf/sys/netinet/if_ether.c#5 edit
.. //depot/projects/netperf/sys/netinet/ip_divert.c#3 edit
.. //depot/projects/netperf/sys/netinet/ip_dummynet.c#6 edit
.. //depot/projects/netperf/sys/netinet/ip_fw2.c#4 edit
.. //depot/projects/netperf/sys/netinet/ip_input.c#3 edit
.. //depot/projects/netperf/sys/netinet/ip_mroute.c#7 edit
.. //depot/projects/netperf/sys/netinet6/ip6_input.c#2 edit
.. //depot/projects/netperf/sys/netipx/ipx_input.c#2 edit
.. //depot/projects/netperf/sys/netnatm/natm.c#3 edit
Differences ...
==== //depot/projects/netperf/sys/kern/kern_poll.c#2 (text+ko) ====
@@ -182,11 +182,13 @@
};
static struct pollrec pr[POLL_LIST_LEN];
+static struct mtx poll_mtx;
static void
init_device_poll(void)
{
+ mtx_init(&poll_mtx, "device polling", NULL, MTX_DEF);
netisr_register(NETISR_POLL, (netisr_t *)netisr_poll, NULL);
netisr_register(NETISR_POLLMORE, (netisr_t *)netisr_pollmore, NULL);
}
@@ -223,6 +225,7 @@
else
prev_t = t;
+ mtx_lock(&poll_mtx);
if (pending_polls > 100) {
/*
* Too much, assume it has stalled (not always true
@@ -242,6 +245,7 @@
}
if (pending_polls++ > 0)
lost_polls++;
+ mtx_unlock(&poll_mtx);
}
/*
@@ -252,15 +256,16 @@
{
int i;
- mtx_lock(&Giant);
+ mtx_assert(&Giant, MA_NOTOWNED);
+ mtx_lock(&poll_mtx);
if (count > poll_each_burst)
count = poll_each_burst;
for (i = 0 ; i < poll_handlers ; i++)
if (pr[i].handler && (IFF_UP|IFF_RUNNING) ==
(pr[i].ifp->if_flags & (IFF_UP|IFF_RUNNING)) )
pr[i].handler(pr[i].ifp, 0, count); /* quick check */
- mtx_unlock(&Giant);
+ mtx_unlock(&poll_mtx);
}
/*
@@ -288,10 +293,14 @@
int kern_load;
/* XXX run at splhigh() or equivalent */
+ mtx_assert(&Giant, MA_NOTOWNED);
+
+ mtx_lock(&poll_mtx);
phase = 5;
if (residual_burst > 0) {
schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
/* will run immediately on return, followed by netisrs */
+ mtx_unlock(&poll_mtx);
return;
}
/* here we can account time spent in netisr's in this tick */
@@ -322,12 +331,12 @@
schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
phase = 6;
}
+ mtx_unlock(&poll_mtx);
}
/*
* netisr_poll is scheduled by schednetisr when appropriate, typically once
- * per tick. It is called at splnet() so first thing to do is to upgrade to
- * splimp(), and call all registered handlers.
+ * per tick.
*/
static void
netisr_poll(void)
@@ -335,8 +344,10 @@
static int reg_frac_count;
int i, cycles;
enum poll_cmd arg = POLL_ONLY;
- mtx_lock(&Giant);
+
+ mtx_assert(&Giant, MA_NOTOWNED);
+ mtx_lock(&poll_mtx);
phase = 3;
if (residual_burst == 0) { /* first call in this tick */
microuptime(&poll_start_t);
@@ -394,7 +405,7 @@
}
/* on -stable, schednetisr(NETISR_POLLMORE); */
phase = 4;
- mtx_unlock(&Giant);
+ mtx_unlock(&poll_mtx);
}
/*
@@ -408,8 +419,6 @@
int
ether_poll_register(poll_handler_t *h, struct ifnet *ifp)
{
- int s;
-
if (polling == 0) /* polling disabled, cannot register */
return 0;
if (h == NULL || ifp == NULL) /* bad arguments */
@@ -419,7 +428,7 @@
if (ifp->if_flags & IFF_POLLING) /* already polling */
return 0;
- s = splhigh();
+ mtx_lock(&poll_mtx);
if (poll_handlers >= POLL_LIST_LEN) {
/*
* List full, cannot register more entries.
@@ -428,8 +437,8 @@
* this at runtime is expensive, and won't solve the problem
* anyways, so just report a few times and then give up.
*/
- static int verbose = 10 ;
- splx(s);
+ static int verbose = 10;
+ mtx_unlock(&poll_mtx);
if (verbose >0) {
printf("poll handlers list full, "
"maybe a broken driver ?\n");
@@ -442,7 +451,7 @@
pr[poll_handlers].ifp = ifp;
poll_handlers++;
ifp->if_flags |= IFF_POLLING;
- splx(s);
+ mtx_unlock(&poll_mtx);
if (idlepoll_sleeping)
wakeup(&idlepoll_sleeping);
return 1; /* polling enabled in next call */
@@ -459,9 +468,9 @@
{
int i;
- mtx_lock(&Giant);
+ mtx_lock(&poll_mtx);
if ( !ifp || !(ifp->if_flags & IFF_POLLING) ) {
- mtx_unlock(&Giant);
+ mtx_unlock(&poll_mtx);
return 0;
}
for (i = 0 ; i < poll_handlers ; i++)
@@ -469,8 +478,8 @@
break;
ifp->if_flags &= ~IFF_POLLING; /* found or not... */
if (i == poll_handlers) {
- mtx_unlock(&Giant);
- printf("ether_poll_deregister: ifp not found!!!\n");
+ mtx_unlock(&poll_mtx);
+ printf("%s: ifp not found!!!\n", __func__);
return 0;
}
poll_handlers--;
@@ -478,7 +487,7 @@
pr[i].handler = pr[poll_handlers].handler;
pr[i].ifp = pr[poll_handlers].ifp;
}
- mtx_unlock(&Giant);
+ mtx_unlock(&poll_mtx);
return 1;
}
@@ -499,10 +508,7 @@
for (;;) {
if (poll_in_idle_loop && poll_handlers > 0) {
idlepoll_sleeping = 0;
- mtx_lock(&Giant);
ether_poll(poll_each_burst);
- mtx_unlock(&Giant);
- mtx_assert(&Giant, MA_NOTOWNED);
mtx_lock_spin(&sched_lock);
td->td_proc->p_stats->p_ru.ru_nvcsw++;
mi_switch();
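The kern_poll.c hunks above replace both Giant and the old splhigh()/splx()
protection with a leaf mutex dedicated to the polling state, and document the
new contract with mtx_assert(&Giant, MA_NOTOWNED) in the paths that now run
MPSAFE.  The conversion idiom, reduced to a self-contained sketch with
hypothetical names (foo_mtx, foo_handlers):

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>

	static struct mtx foo_mtx;	/* leaf lock for the state below */
	static int foo_handlers;	/* formerly guarded by splhigh()/Giant */

	static void
	foo_init(void)
	{
		mtx_init(&foo_mtx, "foo state", NULL, MTX_DEF);
	}

	static int
	foo_register(void)
	{
		mtx_assert(&Giant, MA_NOTOWNED);	/* callers are MPSAFE now */
		mtx_lock(&foo_mtx);
		foo_handlers++;
		mtx_unlock(&foo_mtx);
		return (1);
	}

One consequence visible above is that drivers' poll handlers are now invoked
with poll_mtx held rather than Giant, so they can no longer count on Giant
for their own serialization.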
==== //depot/projects/netperf/sys/net/if_ppp.c#2 (text+ko) ====
@@ -1131,6 +1131,7 @@
int s;
struct mbuf *m;
+ mtx_lock(&Giant);
LIST_FOREACH(sc, &ppp_softc_list, sc_list) {
s = splimp();
if (!(sc->sc_flags & SC_TBUSY)
@@ -1152,6 +1153,7 @@
ppp_inproc(sc, m);
}
}
+ mtx_unlock(&Giant);
}
#ifdef PPP_COMPRESS
==== //depot/projects/netperf/sys/net/netisr.c#2 (text+ko) ====
@@ -245,7 +245,7 @@
{
mtx_init(&netisr_mtx, "netisr lock", NULL, MTX_DEF);
- if (swi_add(NULL, "net", swi_net, NULL, SWI_NET, 0, &net_ih))
+ if (swi_add(NULL, "net", swi_net, NULL, SWI_NET, INTR_MPSAFE, &net_ih))
panic("start_netisr");
}
SYSINIT(start_netisr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_netisr, NULL)
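Creating the netisr software interrupt with INTR_MPSAFE means swi_net is
entered without Giant held, which is why the netisr handlers above and the
per-protocol input paths below now take Giant (or a private lock) themselves.
The registration idiom, shown with a hypothetical handler:

	/* hypothetical MPSAFE software interrupt; entered without Giant */
	static void *foo_ih;

	static void
	foo_swi(void *dummy)
	{
		mtx_assert(&Giant, MA_NOTOWNED);
		/* dispatch queued work; lock Giant only around legacy callees */
	}

	static void
	foo_start(void *dummy)
	{
		if (swi_add(NULL, "foo", foo_swi, NULL, SWI_NET, INTR_MPSAFE,
		    &foo_ih))
			panic("foo_start");
	}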
==== //depot/projects/netperf/sys/netatalk/aarp.c#2 (text+ko) ====
@@ -284,7 +284,9 @@
switch( ntohs( ar->ar_pro )) {
case ETHERTYPE_AT :
+ mtx_lock(&Giant);
at_aarpinput( ac, m );
+ mtx_unlock(&Giant);
return;
default:
==== //depot/projects/netperf/sys/netatalk/ddp_input.c#2 (text+ko) ====
@@ -43,7 +43,9 @@
/*
* Phase 2 packet handling
*/
+ mtx_lock(&Giant);
ddp_input(m, m->m_pkthdr.rcvif, NULL, 2);
+ mtx_unlock(&Giant);
return;
}
@@ -66,12 +68,14 @@
elhp = mtod(m, struct elaphdr *);
m_adj(m, SZ_ELAPHDR);
+ mtx_lock(&Giant);
if (elhp->el_type == ELAP_DDPEXTEND) {
ddp_input(m, m->m_pkthdr.rcvif, NULL, 1);
} else {
bcopy((caddr_t)elhp, (caddr_t)&elh, SZ_ELAPHDR);
ddp_input(m, m->m_pkthdr.rcvif, &elh, 1);
}
+ mtx_unlock(&Giant);
return;
}
==== //depot/projects/netperf/sys/netatm/atm_subr.c#3 (text+ko) ====
@@ -557,6 +557,7 @@
atm_intr_func_t func;
void *token;
+ mtx_lock(&Giant);
/*
* Get function to call and token value
*/
@@ -580,6 +581,7 @@
* Drain any deferred calls
*/
STACK_DRAIN();
+ mtx_unlock(&Giant);
}
/*
==== //depot/projects/netperf/sys/netinet/if_ether.c#5 (text+ko) ====
@@ -507,6 +507,7 @@
struct arphdr *ar;
if (!arpinit_done) {
+ /* NB: this race should not matter */
arpinit_done = 1;
callout_reset(&arp_callout, hz, arptimer, NULL);
}
==== //depot/projects/netperf/sys/netinet/ip_divert.c#3 (text+ko) ====
@@ -158,6 +158,8 @@
u_int16_t nport;
struct sockaddr_in divsrc;
+ mtx_assert(&Giant, MA_NOTOWNED);
+
/* Sanity check */
KASSERT(port != 0, ("%s: port=0", __func__));
==== //depot/projects/netperf/sys/netinet/ip_dummynet.c#6 (text+ko) ====
@@ -1122,6 +1122,8 @@
is_pipe = (fwa->rule->fw_flg & IP_FW_F_COMMAND) == IP_FW_F_PIPE;
#endif
+ mtx_assert(&Giant, MA_NOTOWNED);
+
pipe_nr &= 0xffff ;
DUMMYNET_LOCK();
==== //depot/projects/netperf/sys/netinet/ip_fw2.c#4 (text+ko) ====
@@ -1396,6 +1396,8 @@
ipfw_dyn_rule *q = NULL;
struct ip_fw_chain *chain = &layer3_chain;
+ mtx_assert(&Giant, MA_NOTOWNED);
+
if (m->m_flags & M_SKIP_FIREWALL)
return 0; /* accept */
/*
==== //depot/projects/netperf/sys/netinet/ip_input.c#3 (text+ko) ====
@@ -313,6 +313,8 @@
int s, error;
#endif /* FAST_IPSEC */
+ mtx_assert(&Giant, MA_NOTOWNED);
+
args.eh = NULL;
args.oif = NULL;
args.rule = NULL;
@@ -468,11 +470,14 @@
* in the list may have previously cleared it.
*/
m0 = m;
+ /* XXX locking */
pfh = pfil_hook_get(PFIL_IN, &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
for (; pfh; pfh = TAILQ_NEXT(pfh, pfil_link))
if (pfh->pfil_func) {
+ mtx_lock(&Giant); /* XXX */
rv = pfh->pfil_func(ip, hlen,
m->m_pkthdr.rcvif, 0, &m0);
+ mtx_unlock(&Giant); /* XXX */
if (rv)
return;
m = m0;
@@ -945,6 +950,7 @@
* Switch out to protocol's input routine.
*/
ipstat.ips_delivered++;
+ mtx_lock(&Giant); /* XXX */
if (args.next_hop && ip->ip_p == IPPROTO_TCP) {
/* TCP needs IPFORWARD info if available */
struct m_hdr tag;
@@ -958,6 +964,7 @@
(struct mbuf *)&tag, hlen);
} else
(*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen);
+ mtx_unlock(&Giant); /* XXX */
return;
bad:
m_freem(m);
==== //depot/projects/netperf/sys/netinet/ip_mroute.c#7 (text+ko) ====
@@ -1302,6 +1302,8 @@
int error;
vifi_t vifi;
+ mtx_assert(&Giant, MA_NOTOWNED);
+
if (mrtdebug & DEBUG_FORWARD)
log(LOG_DEBUG, "ip_mforward: src %lx, dst %lx, ifp %p\n",
(u_long)ntohl(ip->ip_src.s_addr), (u_long)ntohl(ip->ip_dst.s_addr),
==== //depot/projects/netperf/sys/netinet6/ip6_input.c#2 (text+ko) ====
@@ -247,6 +247,9 @@
int rv;
#endif /* PFIL_HOOKS */
+ mtx_assert(&Giant, MA_NOTOWNED);
+ mtx_lock(&Giant);
+
#ifdef IPSEC
/*
* should the inner packet be considered authentic?
@@ -326,6 +329,7 @@
if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == 0) {
ip6stat.ip6s_toosmall++;
in6_ifstat_inc(inifp, ifs6_in_hdrerr);
+ mtx_unlock(&Giant);
return;
}
}
@@ -352,8 +356,10 @@
if (pfh->pfil_func) {
rv = pfh->pfil_func(ip6, sizeof(*ip6),
m->m_pkthdr.rcvif, 0, &m0);
- if (rv)
+ if (rv) {
+ mtx_unlock(&Giant);
return;
+ }
m = m0;
if (m == NULL)
return;
@@ -374,8 +380,10 @@
m_freem(m);
m = NULL;
}
- if (!m)
+ if (!m) {
+ mtx_unlock(&Giant);
return;
+ }
}
/*
@@ -469,6 +477,7 @@
icmp6_error(m, ICMP6_DST_UNREACH,
ICMP6_DST_UNREACH_ADDR, 0);
/* m is already freed */
+ mtx_unlock(&Giant);
return;
}
@@ -671,6 +680,7 @@
#if 0 /*touches NULL pointer*/
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard);
#endif
+ mtx_unlock(&Giant);
return; /* m have already been freed */
}
@@ -694,6 +704,7 @@
icmp6_error(m, ICMP6_PARAM_PROB,
ICMP6_PARAMPROB_HEADER,
(caddr_t)&ip6->ip6_plen - (caddr_t)ip6);
+ mtx_unlock(&Giant);
return;
}
#ifndef PULLDOWN_TEST
@@ -704,6 +715,7 @@
sizeof(struct ip6_hbh));
if (hbh == NULL) {
ip6stat.ip6s_tooshort++;
+ mtx_unlock(&Giant);
return;
}
#endif
@@ -752,14 +764,17 @@
if (ip6_mrouter && ip6_mforward(ip6, m->m_pkthdr.rcvif, m)) {
ip6stat.ip6s_cantforward++;
m_freem(m);
+ mtx_unlock(&Giant);
return;
}
if (!ours) {
m_freem(m);
+ mtx_unlock(&Giant);
return;
}
} else if (!ours) {
ip6_forward(m, 0);
+ mtx_unlock(&Giant);
return;
}
@@ -835,9 +850,11 @@
nxt = (*inet6sw[ip6_protox[nxt]].pr_input)(&m, &off, nxt);
}
+ mtx_unlock(&Giant);
return;
bad:
m_freem(m);
+ mtx_unlock(&Giant);
}
/*
==== //depot/projects/netperf/sys/netipx/ipx_input.c#2 (text+ko) ====
@@ -133,6 +133,7 @@
struct ipx_ifaddr *ia;
int len;
+ mtx_lock(&Giant);
/*
* If no IPX addresses have been set yet but the interfaces
* are receiving, can't do anything with incoming packets yet.
@@ -191,6 +192,7 @@
if (ipx->ipx_pt == IPXPROTO_NETBIOS) {
if (ipxnetbios) {
ipx_output_type20(m);
+ mtx_unlock(&Giant);
return;
} else
goto bad;
@@ -226,6 +228,7 @@
*/
if (ipx->ipx_tc < IPX_MAXHOPS) {
ipx_forward(m);
+ mtx_unlock(&Giant);
return;
}
}
@@ -241,6 +244,7 @@
if (ia == NULL) {
ipx_forward(m);
+ mtx_unlock(&Giant);
return;
}
}
@@ -258,16 +262,19 @@
switch (ipx->ipx_pt) {
case IPXPROTO_SPX:
spx_input(m, ipxp);
+ mtx_unlock(&Giant);
return;
}
ipx_input(m, ipxp);
} else
goto bad;
+ mtx_unlock(&Giant);
return;
bad:
m_freem(m);
+ mtx_unlock(&Giant);
}
void
==== //depot/projects/netperf/sys/netnatm/natm.c#3 (text+ko) ====
@@ -685,6 +685,8 @@
struct socket *so;
struct natmpcb *npcb;
+ mtx_lock(&Giant);
+
#ifdef DIAGNOSTIC
M_ASSERTPKTHDR(m);
#endif
@@ -700,12 +702,12 @@
m_freem(m);
if (npcb->npcb_inq == 0)
FREE(npcb, M_PCB); /* done! */
- return;
+ goto done;
}
if (npcb->npcb_flags & NPCB_FREE) {
m_freem(m); /* drop */
- return;
+ goto done;
}
#ifdef NEED_TO_RESTORE_IFP
@@ -730,6 +732,8 @@
#endif
m_freem(m);
}
+done:
+ mtx_unlock(&Giant);
}
/*