svn commit: r192744 - head/sys/netipx
Robert Watson
rwatson at FreeBSD.org
Mon May 25 09:09:44 UTC 2009
Author: rwatson
Date: Mon May 25 09:09:43 2009
New Revision: 192744
URL: http://svn.freebsd.org/changeset/base/192744
Log:
Copy spx_usrreq.c to spx_reass.c in order to apply similar file layout
changes to IPX/SPX that were applied to TCP/IP in the creation of
tcp_reass.c.
MFC after: 1 month
Added:
head/sys/netipx/spx_reass.c (props changed)
- copied unchanged from r192743, head/sys/netipx/spx_usrreq.c
Copied: head/sys/netipx/spx_reass.c (from r192743, head/sys/netipx/spx_usrreq.c)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ head/sys/netipx/spx_reass.c Mon May 25 09:09:43 2009 (r192744, copy of r192743, head/sys/netipx/spx_usrreq.c)
@@ -0,0 +1,2132 @@
+/*-
+ * Copyright (c) 1984, 1985, 1986, 1987, 1993
+ * The Regents of the University of California.
+ * Copyright (c) 2004-2006 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright (c) 1995, Mike Mitchell
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)spx_usrreq.h
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/protosw.h>
+#include <sys/signalvar.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/sx.h>
+#include <sys/systm.h>
+
+#include <net/route.h>
+#include <netinet/tcp_fsm.h>
+
+#include <netipx/ipx.h>
+#include <netipx/ipx_pcb.h>
+#include <netipx/ipx_var.h>
+#include <netipx/spx.h>
+#include <netipx/spx_debug.h>
+#include <netipx/spx_timer.h>
+#include <netipx/spx_var.h>
+
+/*
+ * SPX protocol implementation.
+ */
+static struct mtx spx_mtx; /* Protects only spx_iss. */
+static u_short spx_iss;
+static u_short spx_newchecks[50];
+static int spx_hardnosed;
+static int spx_use_delack = 0;
+static int traceallspxs = 0;
+static struct spx_istat spx_istat;
+static int spxrexmtthresh = 3;
+
+#define SPX_LOCK_INIT() mtx_init(&spx_mtx, "spx_mtx", NULL, MTX_DEF)
+#define SPX_LOCK() mtx_lock(&spx_mtx)
+#define SPX_UNLOCK() mtx_unlock(&spx_mtx)
+
+/* Following was struct spxstat spxstat; */
+#ifndef spxstat
+#define spxstat spx_istat.newstats
+#endif
+
+static const int spx_backoff[SPX_MAXRXTSHIFT+1] =
+ { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
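The spx_backoff[] table above plays the same role as TCP's tcp_backoff[]: each consecutive retransmission scales the base retransmit timeout, doubling until it pins at 64. As a rough, self-contained sketch (the function name and the rto_base/rxtshift parameters are illustrative, not identifiers from this file), such a table is typically consumed like this when the retransmit timer fires:

/* Sketch only: exponential retransmit backoff driven by a table like
 * spx_backoff[].  rto_base is the current smoothed RTO estimate and
 * rxtshift counts consecutive retransmissions of the same data. */
static int
backoff_rto(int rto_base, int rxtshift)
{
        static const int backoff[] =
            { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
        int maxshift = (int)(sizeof(backoff) / sizeof(backoff[0])) - 1;

        if (rxtshift > maxshift)
                rxtshift = maxshift;    /* stop doubling past the cap */
        return (rto_base * backoff[rxtshift]);
}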
+
+static void spx_close(struct spxpcb *cb);
+static void spx_disconnect(struct spxpcb *cb);
+static void spx_drop(struct spxpcb *cb, int errno);
+static int spx_output(struct spxpcb *cb, struct mbuf *m0);
+static int spx_reass(struct spxpcb *cb, struct spx *si);
+static void spx_setpersist(struct spxpcb *cb);
+static void spx_template(struct spxpcb *cb);
+static void spx_timers(struct spxpcb *cb, int timer);
+static void spx_usrclosed(struct spxpcb *cb);
+
+static void spx_usr_abort(struct socket *so);
+static int spx_accept(struct socket *so, struct sockaddr **nam);
+static int spx_attach(struct socket *so, int proto, struct thread *td);
+static int spx_bind(struct socket *so, struct sockaddr *nam, struct thread *td);
+static void spx_usr_close(struct socket *so);
+static int spx_connect(struct socket *so, struct sockaddr *nam,
+ struct thread *td);
+static void spx_detach(struct socket *so);
+static void spx_pcbdetach(struct ipxpcb *ipxp);
+static int spx_usr_disconnect(struct socket *so);
+static int spx_listen(struct socket *so, int backlog, struct thread *td);
+static int spx_rcvd(struct socket *so, int flags);
+static int spx_rcvoob(struct socket *so, struct mbuf *m, int flags);
+static int spx_send(struct socket *so, int flags, struct mbuf *m,
+ struct sockaddr *addr, struct mbuf *control,
+ struct thread *td);
+static int spx_shutdown(struct socket *so);
+static int spx_sp_attach(struct socket *so, int proto, struct thread *td);
+
+struct pr_usrreqs spx_usrreqs = {
+ .pru_abort = spx_usr_abort,
+ .pru_accept = spx_accept,
+ .pru_attach = spx_attach,
+ .pru_bind = spx_bind,
+ .pru_connect = spx_connect,
+ .pru_control = ipx_control,
+ .pru_detach = spx_detach,
+ .pru_disconnect = spx_usr_disconnect,
+ .pru_listen = spx_listen,
+ .pru_peeraddr = ipx_peeraddr,
+ .pru_rcvd = spx_rcvd,
+ .pru_rcvoob = spx_rcvoob,
+ .pru_send = spx_send,
+ .pru_shutdown = spx_shutdown,
+ .pru_sockaddr = ipx_sockaddr,
+ .pru_close = spx_usr_close,
+};
+
+struct pr_usrreqs spx_usrreq_sps = {
+ .pru_abort = spx_usr_abort,
+ .pru_accept = spx_accept,
+ .pru_attach = spx_sp_attach,
+ .pru_bind = spx_bind,
+ .pru_connect = spx_connect,
+ .pru_control = ipx_control,
+ .pru_detach = spx_detach,
+ .pru_disconnect = spx_usr_disconnect,
+ .pru_listen = spx_listen,
+ .pru_peeraddr = ipx_peeraddr,
+ .pru_rcvd = spx_rcvd,
+ .pru_rcvoob = spx_rcvoob,
+ .pru_send = spx_send,
+ .pru_shutdown = spx_shutdown,
+ .pru_sockaddr = ipx_sockaddr,
+ .pru_close = spx_usr_close,
+};
+
+static __inline void
+spx_insque(struct spx_q *element, struct spx_q *head)
+{
+
+ element->si_next = head->si_next;
+ element->si_prev = head;
+ head->si_next = element;
+ element->si_next->si_prev = element;
+}
+
+static __inline void
+spx_remque(struct spx_q *element)
+{
+
+ element->si_next->si_prev = element->si_prev;
+ element->si_prev->si_next = element->si_next;
+ element->si_prev = NULL;
+}
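spx_insque() and spx_remque() above assume a circular doubly linked list whose head acts as a sentinel, so an empty queue points at itself; the initialization that establishes that invariant is not visible in this diff. A minimal standalone sketch, using illustrative names rather than the spx_q fields:

/* Sketch only: the sentinel-based circular queue these helpers expect. */
struct q_entry {
        struct q_entry *next, *prev;
};

static void
q_init(struct q_entry *head)
{
        head->next = head->prev = head;         /* empty: points at itself */
}

static int
q_empty(const struct q_entry *head)
{
        return (head->next == head);
}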
+
+void
+spx_init(void)
+{
+
+ SPX_LOCK_INIT();
+ spx_iss = 1; /* WRONG !! should fish it out of TODR */
+}
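The comment in spx_init() notes that a fixed initial send sequence of 1 is wrong and ought to be derived from the time-of-day clock (TODR). A hedged sketch of that idea in ordinary C, using time() rather than any kernel clock and an arbitrary scaling factor that is not part of this commit:

#include <time.h>

/* Sketch only: seed the initial send sequence from the clock so the
 * sequence space differs across reboots; never hand out zero. */
static unsigned short
iss_from_clock(void)
{
        unsigned short iss;

        iss = (unsigned short)(time(NULL) * 250);
        if (iss == 0)
                iss = 1;
        return (iss);
}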
+
+void
+spx_input(struct mbuf *m, struct ipxpcb *ipxp)
+{
+ struct spxpcb *cb;
+ struct spx *si = mtod(m, struct spx *);
+ struct socket *so;
+ struct spx spx_savesi;
+ int dropsocket = 0;
+ short ostate = 0;
+
+ spxstat.spxs_rcvtotal++;
+ KASSERT(ipxp != NULL, ("spx_input: ipxpcb == NULL"));
+
+ /*
+ * spx_input() assumes that the caller will hold both the pcb list
+ * lock and also the ipxp lock. spx_input() will release both before
+ * returning, and may in fact trade in the ipxp lock for another pcb
+ * lock following sonewconn().
+ */
+ IPX_LIST_LOCK_ASSERT();
+ IPX_LOCK_ASSERT(ipxp);
+
+ cb = ipxtospxpcb(ipxp);
+ KASSERT(cb != NULL, ("spx_input: cb == NULL"));
+
+ if (ipxp->ipxp_flags & IPXP_DROPPED)
+ goto drop;
+
+ if (m->m_len < sizeof(*si)) {
+ if ((m = m_pullup(m, sizeof(*si))) == NULL) {
+ IPX_UNLOCK(ipxp);
+ IPX_LIST_UNLOCK();
+ spxstat.spxs_rcvshort++;
+ return;
+ }
+ si = mtod(m, struct spx *);
+ }
+ si->si_seq = ntohs(si->si_seq);
+ si->si_ack = ntohs(si->si_ack);
+ si->si_alo = ntohs(si->si_alo);
+
+ so = ipxp->ipxp_socket;
+ KASSERT(so != NULL, ("spx_input: so == NULL"));
+
+ if (so->so_options & SO_DEBUG || traceallspxs) {
+ ostate = cb->s_state;
+ spx_savesi = *si;
+ }
+ if (so->so_options & SO_ACCEPTCONN) {
+ struct spxpcb *ocb = cb;
+
+ so = sonewconn(so, 0);
+ if (so == NULL)
+ goto drop;
+
+ /*
+ * This is ugly, but ....
+ *
+ * Mark socket as temporary until we're committed to keeping
+ * it. The code at ``drop'' and ``dropwithreset'' checks the
+ * flag dropsocket to see if the temporary socket created
+ * here should be discarded. We mark the socket as
+ * discardable until we're committed to it below in
+ * TCPS_LISTEN.
+ *
+ * XXXRW: In the new world order of real kernel parallelism,
+ * temporarily allocating the socket when we're "not sure"
+ * seems like a bad idea, as we might race to remove it if
+ * the listen socket is closed...?
+ *
+ * We drop the lock of the listen socket ipxp, and acquire
+ * the lock of the new socket ipxp.
+ */
+ dropsocket++;
+ IPX_UNLOCK(ipxp);
+ ipxp = (struct ipxpcb *)so->so_pcb;
+ IPX_LOCK(ipxp);
+ ipxp->ipxp_laddr = si->si_dna;
+ cb = ipxtospxpcb(ipxp);
+ cb->s_mtu = ocb->s_mtu; /* preserve sockopts */
+ cb->s_flags = ocb->s_flags; /* preserve sockopts */
+ cb->s_flags2 = ocb->s_flags2; /* preserve sockopts */
+ cb->s_state = TCPS_LISTEN;
+ }
+ IPX_LOCK_ASSERT(ipxp);
+
+ /*
+ * Packet received on connection. Reset idle time and keep-alive
+ * timer.
+ */
+ cb->s_idle = 0;
+ cb->s_timer[SPXT_KEEP] = SPXTV_KEEP;
+
+ switch (cb->s_state) {
+ case TCPS_LISTEN:{
+ struct sockaddr_ipx *sipx, ssipx;
+ struct ipx_addr laddr;
+
+ /*
+ * If somebody here was carrying on a conversation and went
+ * away, and his pen pal thinks he can still talk, we get the
+ * misdirected packet.
+ */
+ if (spx_hardnosed && (si->si_did != 0 || si->si_seq != 0)) {
+ spx_istat.gonawy++;
+ goto dropwithreset;
+ }
+ sipx = &ssipx;
+ bzero(sipx, sizeof *sipx);
+ sipx->sipx_len = sizeof(*sipx);
+ sipx->sipx_family = AF_IPX;
+ sipx->sipx_addr = si->si_sna;
+ laddr = ipxp->ipxp_laddr;
+ if (ipx_nullhost(laddr))
+ ipxp->ipxp_laddr = si->si_dna;
+ if (ipx_pcbconnect(ipxp, (struct sockaddr *)sipx, &thread0)) {
+ ipxp->ipxp_laddr = laddr;
+ spx_istat.noconn++;
+ goto drop;
+ }
+ spx_template(cb);
+ dropsocket = 0; /* committed to socket */
+ cb->s_did = si->si_sid;
+ cb->s_rack = si->si_ack;
+ cb->s_ralo = si->si_alo;
+#define THREEWAYSHAKE
+#ifdef THREEWAYSHAKE
+ cb->s_state = TCPS_SYN_RECEIVED;
+ cb->s_force = 1 + SPXT_KEEP;
+ spxstat.spxs_accepts++;
+ cb->s_timer[SPXT_KEEP] = SPXTV_KEEP;
+ }
+ break;
+
+ case TCPS_SYN_RECEIVED: {
+ /*
+ * This state means that we have heard a response to our
+ * acceptance of their connection. It is probably logically
+ * unnecessary in this implementation.
+ */
+ if (si->si_did != cb->s_sid) {
+ spx_istat.wrncon++;
+ goto drop;
+ }
+#endif
+ ipxp->ipxp_fport = si->si_sport;
+ cb->s_timer[SPXT_REXMT] = 0;
+ cb->s_timer[SPXT_KEEP] = SPXTV_KEEP;
+ soisconnected(so);
+ cb->s_state = TCPS_ESTABLISHED;
+ spxstat.spxs_accepts++;
+ }
+ break;
+
+ case TCPS_SYN_SENT:
+ /*
+ * This state means that we have gotten a response to our
+ * attempt to establish a connection. We fill in the data
+ * from the other side, telling us which port to respond to,
+ * instead of the well-known one we might have sent to in the
+ * first place. We also require that this is a response to
+ * our connection id.
+ */
+ if (si->si_did != cb->s_sid) {
+ spx_istat.notme++;
+ goto drop;
+ }
+ spxstat.spxs_connects++;
+ cb->s_did = si->si_sid;
+ cb->s_rack = si->si_ack;
+ cb->s_ralo = si->si_alo;
+ cb->s_dport = ipxp->ipxp_fport = si->si_sport;
+ cb->s_timer[SPXT_REXMT] = 0;
+ cb->s_flags |= SF_ACKNOW;
+ soisconnected(so);
+ cb->s_state = TCPS_ESTABLISHED;
+
+ /*
+ * Use roundtrip time of connection request for initial rtt.
+ */
+ if (cb->s_rtt) {
+ cb->s_srtt = cb->s_rtt << 3;
+ cb->s_rttvar = cb->s_rtt << 1;
+ SPXT_RANGESET(cb->s_rxtcur,
+ ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1,
+ SPXTV_MIN, SPXTV_REXMTMAX);
+ cb->s_rtt = 0;
+ }
+ }
+
+ if (so->so_options & SO_DEBUG || traceallspxs)
+ spx_trace(SA_INPUT, (u_char)ostate, cb, &spx_savesi, 0);
+
+ m->m_len -= sizeof(struct ipx);
+ m->m_pkthdr.len -= sizeof(struct ipx);
+ m->m_data += sizeof(struct ipx);
+
+ if (spx_reass(cb, si))
+ m_freem(m);
+ if (cb->s_force || (cb->s_flags & (SF_ACKNOW|SF_WIN|SF_RXT)))
+ spx_output(cb, NULL);
+ cb->s_flags &= ~(SF_WIN|SF_RXT);
+ IPX_UNLOCK(ipxp);
+ IPX_LIST_UNLOCK();
+ return;
+
+dropwithreset:
+ IPX_LOCK_ASSERT(ipxp);
+ if (cb == NULL || (cb->s_ipxpcb->ipxp_socket->so_options & SO_DEBUG ||
+ traceallspxs))
+ spx_trace(SA_DROP, (u_char)ostate, cb, &spx_savesi, 0);
+ IPX_UNLOCK(ipxp);
+ if (dropsocket) {
+ struct socket *head;
+ ACCEPT_LOCK();
+ KASSERT((so->so_qstate & SQ_INCOMP) != 0,
+ ("spx_input: nascent socket not SQ_INCOMP on soabort()"));
+ head = so->so_head;
+ TAILQ_REMOVE(&head->so_incomp, so, so_list);
+ head->so_incqlen--;
+ so->so_qstate &= ~SQ_INCOMP;
+ so->so_head = NULL;
+ ACCEPT_UNLOCK();
+ soabort(so);
+ }
+ IPX_LIST_UNLOCK();
+ m_freem(m);
+ return;
+
+drop:
+ IPX_LOCK_ASSERT(ipxp);
+ if (cb->s_ipxpcb->ipxp_socket->so_options & SO_DEBUG || traceallspxs)
+ spx_trace(SA_DROP, (u_char)ostate, cb, &spx_savesi, 0);
+ IPX_UNLOCK(ipxp);
+ IPX_LIST_UNLOCK();
+ m_freem(m);
+}
+
+/*
+ * This is structurally similar to the tcp reassembly routine but its
+ * function is somewhat different: it merely queues packets up, and
+ * suppresses duplicates.
+ */
+static int
+spx_reass(struct spxpcb *cb, struct spx *si)
+{
+ struct spx_q *q;
+ struct mbuf *m;
+ struct socket *so = cb->s_ipxpcb->ipxp_socket;
+ char packetp = cb->s_flags & SF_HI;
+ int incr;
+ char wakeup = 0;
+
+ IPX_LOCK_ASSERT(cb->s_ipxpcb);
+
+ if (si == SI(0))
+ goto present;
+
+ /*
+ * Update our news from them.
+ */
+ if (si->si_cc & SPX_SA)
+ cb->s_flags |= (spx_use_delack ? SF_DELACK : SF_ACKNOW);
+ if (SSEQ_GT(si->si_alo, cb->s_ralo))
+ cb->s_flags |= SF_WIN;
+ if (SSEQ_LEQ(si->si_ack, cb->s_rack)) {
+ if ((si->si_cc & SPX_SP) && cb->s_rack != (cb->s_smax + 1)) {
+ spxstat.spxs_rcvdupack++;
+
+ /*
+ * If this is a completely duplicate ack and other
+ * conditions hold, we assume a packet has been
+ * dropped and retransmit it exactly as in
+ * tcp_input().
+ */
+ if (si->si_ack != cb->s_rack ||
+ si->si_alo != cb->s_ralo)
+ cb->s_dupacks = 0;
+ else if (++cb->s_dupacks == spxrexmtthresh) {
+ u_short onxt = cb->s_snxt;
+ int cwnd = cb->s_cwnd;
+
+ cb->s_snxt = si->si_ack;
+ cb->s_cwnd = CUNIT;
+ cb->s_force = 1 + SPXT_REXMT;
+ spx_output(cb, NULL);
+ cb->s_timer[SPXT_REXMT] = cb->s_rxtcur;
+ cb->s_rtt = 0;
+ if (cwnd >= 4 * CUNIT)
+ cb->s_cwnd = cwnd / 2;
+ if (SSEQ_GT(onxt, cb->s_snxt))
+ cb->s_snxt = onxt;
+ return (1);
+ }
+ } else
+ cb->s_dupacks = 0;
+ goto update_window;
+ }
+ cb->s_dupacks = 0;
+
+ /*
+ * If our correspondent acknowledges data we haven't sent, TCP would
+ * drop the packet after acking. We'll be a little more permissive.
+ */
+ if (SSEQ_GT(si->si_ack, (cb->s_smax + 1))) {
+ spxstat.spxs_rcvacktoomuch++;
+ si->si_ack = cb->s_smax + 1;
+ }
+ spxstat.spxs_rcvackpack++;
+
+ /*
+ * If transmit timer is running and timed sequence number was acked,
+ * update smoothed round trip time. See discussion of algorithm in
+ * tcp_input.c
+ */
+ if (cb->s_rtt && SSEQ_GT(si->si_ack, cb->s_rtseq)) {
+ spxstat.spxs_rttupdated++;
+ if (cb->s_srtt != 0) {
+ short delta;
+ delta = cb->s_rtt - (cb->s_srtt >> 3);
+ if ((cb->s_srtt += delta) <= 0)
+ cb->s_srtt = 1;
+ if (delta < 0)
+ delta = -delta;
+ delta -= (cb->s_rttvar >> 2);
+ if ((cb->s_rttvar += delta) <= 0)
+ cb->s_rttvar = 1;
+ } else {
+ /*
+ * No rtt measurement yet.
+ */
+ cb->s_srtt = cb->s_rtt << 3;
+ cb->s_rttvar = cb->s_rtt << 1;
+ }
+ cb->s_rtt = 0;
+ cb->s_rxtshift = 0;
+ SPXT_RANGESET(cb->s_rxtcur,
+ ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1,
+ SPXTV_MIN, SPXTV_REXMTMAX);
+ }
+
+ /*
+ * If all outstanding data is acked, stop retransmit timer and
+ * remember to restart (more output or persist). If there is more
+ * data to be acked, restart retransmit timer, using current
+ * (possibly backed-off) value;
+ */
+ if (si->si_ack == cb->s_smax + 1) {
+ cb->s_timer[SPXT_REXMT] = 0;
+ cb->s_flags |= SF_RXT;
+ } else if (cb->s_timer[SPXT_PERSIST] == 0)
+ cb->s_timer[SPXT_REXMT] = cb->s_rxtcur;
+
+ /*
+ * When new data is acked, open the congestion window. If the window
+ * gives us less than ssthresh packets in flight, open exponentially
+ * (maxseg at a time). Otherwise open linearly (maxseg^2 / cwnd at a
+ * time).
+ */
+ incr = CUNIT;
+ if (cb->s_cwnd > cb->s_ssthresh)
+ incr = max(incr * incr / cb->s_cwnd, 1);
+ cb->s_cwnd = min(cb->s_cwnd + incr, cb->s_cwmx);
+
+ /*
+ * Trim Acked data from output queue.
+ */
+ SOCKBUF_LOCK(&so->so_snd);
+ while ((m = so->so_snd.sb_mb) != NULL) {
+ if (SSEQ_LT((mtod(m, struct spx *))->si_seq, si->si_ack))
+ sbdroprecord_locked(&so->so_snd);
+ else
+ break;
+ }
+ sowwakeup_locked(so);
+ cb->s_rack = si->si_ack;
+update_window:
+ if (SSEQ_LT(cb->s_snxt, cb->s_rack))
+ cb->s_snxt = cb->s_rack;
+ if (SSEQ_LT(cb->s_swl1, si->si_seq) || ((cb->s_swl1 == si->si_seq &&
+ (SSEQ_LT(cb->s_swl2, si->si_ack))) ||
+ (cb->s_swl2 == si->si_ack && SSEQ_LT(cb->s_ralo, si->si_alo)))) {
+ /* keep track of pure window updates */
+ if ((si->si_cc & SPX_SP) && cb->s_swl2 == si->si_ack
+ && SSEQ_LT(cb->s_ralo, si->si_alo)) {
+ spxstat.spxs_rcvwinupd++;
+ spxstat.spxs_rcvdupack--;
+ }
+ cb->s_ralo = si->si_alo;
+ cb->s_swl1 = si->si_seq;
+ cb->s_swl2 = si->si_ack;
+ cb->s_swnd = (1 + si->si_alo - si->si_ack);
+ if (cb->s_swnd > cb->s_smxw)
+ cb->s_smxw = cb->s_swnd;
+ cb->s_flags |= SF_WIN;
+ }
+
+ /*
+ * If this packet number is higher than that which we have allocated
+ * refuse it, unless urgent.
+ */
+ if (SSEQ_GT(si->si_seq, cb->s_alo)) {
+ if (si->si_cc & SPX_SP) {
+ spxstat.spxs_rcvwinprobe++;
+ return (1);
+ } else
+ spxstat.spxs_rcvpackafterwin++;
+ if (si->si_cc & SPX_OB) {
+ if (SSEQ_GT(si->si_seq, cb->s_alo + 60))
+ return (1); /* else queue this packet; */
+ } else {
+#ifdef BROKEN
+ /*
+ * XXXRW: This is broken on at least one count:
+ * spx_close() will free the ipxp and related parts,
+ * which are then touched by spx_input() after the
+ * return from spx_reass().
+ */
+ /*struct socket *so = cb->s_ipxpcb->ipxp_socket;
+ if (so->so_state && SS_NOFDREF) {
+ spx_close(cb);
+ } else
+ would crash system*/
+#endif
+ spx_istat.notyet++;
+ return (1);
+ }
+ }
+
+ /*
+ * If this is a system packet, we don't need to queue it up, and
+ * won't update acknowledge #.
+ */
+ if (si->si_cc & SPX_SP)
+ return (1);
+
+ /*
+ * We have already seen this packet, so drop.
+ */
+ if (SSEQ_LT(si->si_seq, cb->s_ack)) {
+ spx_istat.bdreas++;
+ spxstat.spxs_rcvduppack++;
+ if (si->si_seq == cb->s_ack - 1)
+ spx_istat.lstdup++;
+ return (1);
+ }
+
+ /*
+ * Loop through all packets queued up to insert in appropriate
+ * sequence.
+ */
+ for (q = cb->s_q.si_next; q != &cb->s_q; q = q->si_next) {
+ if (si->si_seq == SI(q)->si_seq) {
+ spxstat.spxs_rcvduppack++;
+ return (1);
+ }
+ if (SSEQ_LT(si->si_seq, SI(q)->si_seq)) {
+ spxstat.spxs_rcvoopack++;
+ break;
+ }
+ }
+ spx_insque((struct spx_q *)si, q->si_prev);
+
+ /*
+ * If this packet is urgent, inform process
+ */
+ if (si->si_cc & SPX_OB) {
+ cb->s_iobc = ((char *)si)[1 + sizeof(*si)];
+ sohasoutofband(so);
+ cb->s_oobflags |= SF_IOOB;
+ }
+present:
+#define SPINC sizeof(struct spxhdr)
+ SOCKBUF_LOCK(&so->so_rcv);
+
+ /*
+ * Loop through all packets queued up to update acknowledge number,
+ * and present all acknowledged data to user; if in packet interface
+ * mode, show packet headers.
+ */
+ for (q = cb->s_q.si_next; q != &cb->s_q; q = q->si_next) {
+ if (SI(q)->si_seq == cb->s_ack) {
+ cb->s_ack++;
+ m = dtom(q);
+ if (SI(q)->si_cc & SPX_OB) {
+ cb->s_oobflags &= ~SF_IOOB;
+ if (so->so_rcv.sb_cc)
+ so->so_oobmark = so->so_rcv.sb_cc;
+ else
+ so->so_rcv.sb_state |= SBS_RCVATMARK;
+ }
+ q = q->si_prev;
+ spx_remque(q->si_next);
+ wakeup = 1;
+ spxstat.spxs_rcvpack++;
+#ifdef SF_NEWCALL
+ if (cb->s_flags2 & SF_NEWCALL) {
+ struct spxhdr *sp = mtod(m, struct spxhdr *);
+ u_char dt = sp->spx_dt;
+ spx_newchecks[4]++;
+ if (dt != cb->s_rhdr.spx_dt) {
+ struct mbuf *mm =
+ m_getclr(M_DONTWAIT, MT_CONTROL);
+ spx_newchecks[0]++;
+ if (mm != NULL) {
+ u_short *s =
+ mtod(mm, u_short *);
+ cb->s_rhdr.spx_dt = dt;
+ mm->m_len = 5; /*XXX*/
+ s[0] = 5;
+ s[1] = 1;
+ *(u_char *)(&s[2]) = dt;
+ sbappend_locked(&so->so_rcv, mm);
+ }
+ }
+ if (sp->spx_cc & SPX_OB) {
+ MCHTYPE(m, MT_OOBDATA);
+ spx_newchecks[1]++;
+ so->so_oobmark = 0;
+ so->so_rcv.sb_state &= ~SBS_RCVATMARK;
+ }
+ if (packetp == 0) {
+ m->m_data += SPINC;
+ m->m_len -= SPINC;
+ m->m_pkthdr.len -= SPINC;
+ }
+ if ((sp->spx_cc & SPX_EM) || packetp) {
+ sbappendrecord_locked(&so->so_rcv, m);
+ spx_newchecks[9]++;
+ } else
+ sbappend_locked(&so->so_rcv, m);
+ } else
+#endif
+ if (packetp)
+ sbappendrecord_locked(&so->so_rcv, m);
+ else {
+ cb->s_rhdr = *mtod(m, struct spxhdr *);
+ m->m_data += SPINC;
+ m->m_len -= SPINC;
+ m->m_pkthdr.len -= SPINC;
+ sbappend_locked(&so->so_rcv, m);
+ }
+ } else
+ break;
+ }
+ if (wakeup)
+ sorwakeup_locked(so);
+ else
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ return (0);
+}
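The smoothed round-trip estimator in spx_reass() above follows the classic fixed-point scheme from tcp_input(): srtt is kept scaled by 8, rttvar by 4, and the resulting retransmit timeout is clamped between SPXTV_MIN and SPXTV_REXMTMAX. Restated as a standalone sketch with illustrative names (rto_min/rto_max stand in for the clamp bounds):

struct rtt_state {
        short   srtt;           /* smoothed RTT, scaled << 3 */
        short   rttvar;         /* smoothed mean deviation, scaled << 2 */
        short   rxtcur;         /* current retransmit timeout, in ticks */
};

/* Sketch only: same arithmetic as the update performed in spx_reass(). */
static void
rtt_update(struct rtt_state *rs, short rtt, short rto_min, short rto_max)
{
        short delta, rto;

        if (rs->srtt != 0) {
                delta = rtt - (rs->srtt >> 3);
                if ((rs->srtt += delta) <= 0)
                        rs->srtt = 1;
                if (delta < 0)
                        delta = -delta;
                delta -= (rs->rttvar >> 2);
                if ((rs->rttvar += delta) <= 0)
                        rs->rttvar = 1;
        } else {
                /* First measurement: no history yet. */
                rs->srtt = rtt << 3;
                rs->rttvar = rtt << 1;
        }
        rto = ((rs->srtt >> 2) + rs->rttvar) >> 1;
        if (rto < rto_min)
                rto = rto_min;
        else if (rto > rto_max)
                rto = rto_max;
        rs->rxtcur = rto;
}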
+
+void
+spx_ctlinput(int cmd, struct sockaddr *arg_as_sa, void *dummy)
+{
+
+ /* Currently, nothing. */
+}
+
+static int
+spx_output(struct spxpcb *cb, struct mbuf *m0)
+{
+ struct socket *so = cb->s_ipxpcb->ipxp_socket;
+ struct mbuf *m;
+ struct spx *si = NULL;
+ struct sockbuf *sb = &so->so_snd;
+ int len = 0, win, rcv_win;
+ short span, off, recordp = 0;
+ u_short alo;
+ int error = 0, sendalot;
+#ifdef notdef
+ int idle;
+#endif
+ struct mbuf *mprev;
+
+ IPX_LOCK_ASSERT(cb->s_ipxpcb);
+
+ if (m0 != NULL) {
+ int mtu = cb->s_mtu;
+ int datalen;
+
+ /*
+ * Make sure that packet isn't too big.
+ */
+ for (m = m0; m != NULL; m = m->m_next) {
+ mprev = m;
+ len += m->m_len;
+ if (m->m_flags & M_EOR)
+ recordp = 1;
+ }
+ datalen = (cb->s_flags & SF_HO) ?
+ len - sizeof(struct spxhdr) : len;
+ if (datalen > mtu) {
+ if (cb->s_flags & SF_PI) {
+ m_freem(m0);
+ return (EMSGSIZE);
+ } else {
+ int oldEM = cb->s_cc & SPX_EM;
+
+ cb->s_cc &= ~SPX_EM;
+ while (len > mtu) {
+ m = m_copym(m0, 0, mtu, M_DONTWAIT);
+ if (m == NULL) {
+ cb->s_cc |= oldEM;
+ m_freem(m0);
+ return (ENOBUFS);
+ }
+ if (cb->s_flags & SF_NEWCALL) {
+ struct mbuf *mm = m;
+ spx_newchecks[7]++;
+ while (mm != NULL) {
+ mm->m_flags &= ~M_EOR;
+ mm = mm->m_next;
+ }
+ }
+ error = spx_output(cb, m);
+ if (error) {
+ cb->s_cc |= oldEM;
+ m_freem(m0);
+ return (error);
+ }
+ m_adj(m0, mtu);
+ len -= mtu;
+ }
+ cb->s_cc |= oldEM;
+ }
+ }
+
+ /*
+ * Force length even, by adding a "garbage byte" if
+ * necessary.
+ */
+ if (len & 1) {
+ m = mprev;
+ if (M_TRAILINGSPACE(m) >= 1)
+ m->m_len++;
+ else {
+ struct mbuf *m1 = m_get(M_DONTWAIT, MT_DATA);
+
+ if (m1 == NULL) {
+ m_freem(m0);
+ return (ENOBUFS);
+ }
+ m1->m_len = 1;
+ *(mtod(m1, u_char *)) = 0;
+ m->m_next = m1;
+ }
+ }
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m == NULL) {
+ m_freem(m0);
+ return (ENOBUFS);
+ }
+
+ /*
+ * Fill in mbuf with extended SP header and addresses and
+ * length put into network format.
+ */
+ MH_ALIGN(m, sizeof(struct spx));
+ m->m_len = sizeof(struct spx);
+ m->m_next = m0;
+ si = mtod(m, struct spx *);
+ si->si_i = *cb->s_ipx;
+ si->si_s = cb->s_shdr;
+ if ((cb->s_flags & SF_PI) && (cb->s_flags & SF_HO)) {
+ struct spxhdr *sh;
+ if (m0->m_len < sizeof(*sh)) {
+ if((m0 = m_pullup(m0, sizeof(*sh))) == NULL) {
+ m_free(m);
+ m_freem(m0);
+ return (EINVAL);
+ }
+ m->m_next = m0;
+ }
+ sh = mtod(m0, struct spxhdr *);
+ si->si_dt = sh->spx_dt;
+ si->si_cc |= sh->spx_cc & SPX_EM;
+ m0->m_len -= sizeof(*sh);
+ m0->m_data += sizeof(*sh);
+ len -= sizeof(*sh);
+ }
+ len += sizeof(*si);
+ if ((cb->s_flags2 & SF_NEWCALL) && recordp) {
+ si->si_cc |= SPX_EM;
+ spx_newchecks[8]++;
+ }
+ if (cb->s_oobflags & SF_SOOB) {
+ /*
+ * Per jqj at cornell: Make sure OB packets convey
+ * exactly 1 byte. If the packet is 1 byte or
+ * larger, we have already guaranteed there to be at
+ * least one garbage byte for the checksum, and extra
+ * bytes shouldn't hurt!
+ */
+ if (len > sizeof(*si)) {
+ si->si_cc |= SPX_OB;
+ len = (1 + sizeof(*si));
+ }
+ }
+ si->si_len = htons((u_short)len);
+ m->m_pkthdr.len = ((len - 1) | 1) + 1;
+
+ /*
+ * Queue stuff up for output.
+ */
+ sbappendrecord(sb, m);
+ cb->s_seq++;
+ }
+#ifdef notdef
+ idle = (cb->s_smax == (cb->s_rack - 1));
+#endif
+again:
+ sendalot = 0;
+ off = cb->s_snxt - cb->s_rack;
+ win = min(cb->s_swnd, (cb->s_cwnd / CUNIT));
+
+ /*
+ * If in persist timeout with window of 0, send a probe. Otherwise,
+ * if window is small but non-zero and timer expired, send what we
+ * can and go into transmit state.
+ */
+ if (cb->s_force == 1 + SPXT_PERSIST) {
+ if (win != 0) {
+ cb->s_timer[SPXT_PERSIST] = 0;
+ cb->s_rxtshift = 0;
+ }
+ }
+ span = cb->s_seq - cb->s_rack;
+ len = min(span, win) - off;
+
+ if (len < 0) {
+ /*
+ * Window shrank after we went into it. If window shrank to
+ * 0, cancel pending retransmission and pull s_snxt back to
+ * (closed) window. We will enter persist state below. If
+ * the window didn't close completely, just wait for an ACK.
+ */
+ len = 0;
+ if (win == 0) {
+ cb->s_timer[SPXT_REXMT] = 0;
+ cb->s_snxt = cb->s_rack;
+ }
+ }
+ if (len > 1)
+ sendalot = 1;
+ rcv_win = sbspace(&so->so_rcv);
+
+ /*
+ * Send if we owe peer an ACK.
+ */
+ if (cb->s_oobflags & SF_SOOB) {
+ /*
+ * Must transmit this out of band packet.
+ */
+ cb->s_oobflags &= ~ SF_SOOB;
+ sendalot = 1;
+ spxstat.spxs_sndurg++;
+ goto found;
+ }
+ if (cb->s_flags & SF_ACKNOW)
+ goto send;
+ if (cb->s_state < TCPS_ESTABLISHED)
+ goto send;
+
+ /*
+ * Silly window can't happen in spx. Code from TCP deleted.
+ */
+ if (len)
+ goto send;
+
+ /*
+ * Compare available window to amount of window known to peer (as
+ * advertised window less next expected input.) If the difference is
+ * at least two packets or at least 35% of the maximum possible
+ * window, then we want to send a window update to the peer.
+ */
+ if (rcv_win > 0) {
+ u_short delta = 1 + cb->s_alo - cb->s_ack;
+ int adv = rcv_win - (delta * cb->s_mtu);
+
+ if ((so->so_rcv.sb_cc == 0 && adv >= (2 * cb->s_mtu)) ||
+ (100 * adv / so->so_rcv.sb_hiwat >= 35)) {
+ spxstat.spxs_sndwinup++;
+ cb->s_flags |= SF_ACKNOW;
+ goto send;
+ }
+
+ }
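The window-update test just above sends an update when the newly available receive space amounts to at least two packets (with nothing queued for the application) or to at least 35% of the maximum possible window. The same heuristic as a pure function, with illustrative parameter names in place of the socket-buffer and pcb fields:

/* Sketch only: rcv_space is free receive-buffer space, buffered the bytes
 * queued for the application, hiwat the buffer limit, and advertised the
 * number of packets already promised to the peer but not yet consumed. */
static int
want_window_update(int rcv_space, int buffered, int hiwat, int mtu,
    int advertised)
{
        int adv;

        if (rcv_space <= 0 || hiwat <= 0)
                return (0);
        adv = rcv_space - advertised * mtu;
        if (buffered == 0 && adv >= 2 * mtu)
                return (1);             /* room for at least two packets */
        if (100 * adv / hiwat >= 35)
                return (1);             /* >= 35% of the maximum window */
        return (0);
}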
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***