git: f983298883c1 - stable/13 - socket: Rename sb(un)lock() and interlock with listen(2)
Date: Thu, 07 Oct 2021 14:05:00 UTC
The branch stable/13 has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=f983298883c1d6c6b1970bb9e19a172932aed3d3

commit f983298883c1d6c6b1970bb9e19a172932aed3d3
Author:     Mark Johnston <markj@FreeBSD.org>
AuthorDate: 2021-09-07 18:49:31 +0000
Commit:     Mark Johnston <markj@FreeBSD.org>
CommitDate: 2021-10-07 13:56:47 +0000

    socket: Rename sb(un)lock() and interlock with listen(2)

    In preparation for moving sockbuf locks into the containing socket,
    provide alternative macros for the sockbuf I/O locks:
    SOCK_IO_SEND_(UN)LOCK() and SOCK_IO_RECV_(UN)LOCK().  These operate
    on a socket rather than a socket buffer.  Note that these locks are
    used only to prevent concurrent readers and writers from
    interleaving I/O.

    When locking for I/O, return an error if the socket is a listening
    socket.  Currently the check is racy since the sockbuf sx locks are
    destroyed during the transition to a listening socket, but that
    will no longer be true after some follow-up changes.

    Modify a few places to check for errors from
    sblock()/SOCK_IO_(SEND|RECV)_LOCK() where they were not before.  In
    particular, add checks to sendfile() and sorflush().

    Reviewed by:    tuexen, gallatin
    Sponsored by:   The FreeBSD Foundation

    (cherry picked from commit f94acf52a408316ed61ba94705d7ec0a69ba5ec8)
---
 sys/dev/cxgbe/tom/t4_cpl_io.c                  | 18 ++++-----
 sys/dev/hyperv/hvsock/hv_sock.c                | 31 ++++++---------
 sys/kern/kern_sendfile.c                       |  8 ++--
 sys/kern/uipc_ktls.c                           | 10 ++---
 sys/kern/uipc_sockbuf.c                        | 28 -------------
 sys/kern/uipc_socket.c                         | 55 +++++++++++++++++++++-----
 sys/netinet/sctputil.c                         | 19 +++++----
 sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c | 11 +++---
 sys/sys/sockbuf.h                              |  3 --
 sys/sys/socketvar.h                            | 14 ++++++-
 10 files changed, 104 insertions(+), 93 deletions(-)

diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index ee40d0646b71..2ffbbb042131 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -2064,14 +2064,14 @@ t4_aiotx_process_job(struct toepcb *toep, struct socket *so, struct kaiocb *job)
 /* Inline sosend_generic(). */
- error = sblock(sb, SBL_WAIT);
+ error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
 MPASS(error == 0);
 sendanother:
 SOCKBUF_LOCK(sb);
 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
 SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_SEND_UNLOCK(so);
 if ((so->so_options & SO_NOSIGPIPE) == 0) {
 PROC_LOCK(job->userproc);
 kern_psignal(job->userproc, SIGPIPE);
@@ -2084,12 +2084,12 @@ sendanother:
 error = so->so_error;
 so->so_error = 0;
 SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_SEND_UNLOCK(so);
 goto out;
 }
 if ((so->so_state & SS_ISCONNECTED) == 0) {
 SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_SEND_UNLOCK(so);
 error = ENOTCONN;
 goto out;
 }
@@ -2102,13 +2102,13 @@ sendanother:
 */
 if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
 SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_SEND_UNLOCK(so);
 error = ECANCELED;
 goto out;
 }
 TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
 SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_SEND_UNLOCK(so);
 goto out;
 }
@@ -2135,7 +2135,7 @@ sendanother:
 m = alloc_aiotx_mbuf(job, len);
 if (m == NULL) {
- sbunlock(sb);
+ SOCK_IO_SEND_UNLOCK(so);
 error = EFAULT;
 goto out;
 }
@@ -2146,7 +2146,7 @@ sendanother:
 INP_WLOCK(inp);
 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
 INP_WUNLOCK(inp);
- sbunlock(sb);
+ SOCK_IO_SEND_UNLOCK(so);
 error = ECONNRESET;
 goto out;
 }
@@ -2168,7 +2168,7 @@ sendanother:
 INP_WUNLOCK(inp);
 if (sendmore)
 goto sendanother;
- sbunlock(sb);
+ SOCK_IO_SEND_UNLOCK(so);
 if (error)
 goto out;
diff --git a/sys/dev/hyperv/hvsock/hv_sock.c b/sys/dev/hyperv/hvsock/hv_sock.c
index bdc8974afdbd..5ca00d9f242f 100644
--- a/sys/dev/hyperv/hvsock/hv_sock.c
+++ b/sys/dev/hyperv/hvsock/hv_sock.c
@@ -662,18 +662,17 @@ hvs_trans_soreceive(struct socket *so, struct sockaddr **paddr,
 if (uio->uio_resid == 0 || uio->uio_rw != UIO_READ)
 return (EINVAL);
- sb = &so->so_rcv;
-
 orig_resid = uio->uio_resid;
 /* Prevent other readers from entering the socket. */
- error = sblock(sb, SBLOCKWAIT(flags));
+ error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
 if (error) {
 HVSOCK_DBG(HVSOCK_DBG_ERR,
- "%s: sblock returned error = %d\n", __func__, error);
+ "%s: soiolock returned error = %d\n", __func__, error);
 return (error);
 }
+ sb = &so->so_rcv;
 SOCKBUF_LOCK(sb);
 cbarg.uio = uio;
@@ -777,8 +776,7 @@ hvs_trans_soreceive(struct socket *so, struct sockaddr **paddr,
 out:
 SOCKBUF_UNLOCK(sb);
-
- sbunlock(sb);
+ SOCK_IO_RECV_UNLOCK(so);
 /* We recieved a FIN in this call */
 if (so->so_error == ESHUTDOWN) {
@@ -821,18 +819,17 @@ hvs_trans_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
 if (uio->uio_resid == 0 || uio->uio_rw != UIO_WRITE)
 return (EINVAL);
- sb = &so->so_snd;
-
 orig_resid = uio->uio_resid;
 /* Prevent other writers from entering the socket. */
- error = sblock(sb, SBLOCKWAIT(flags));
+ error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
 if (error) {
 HVSOCK_DBG(HVSOCK_DBG_ERR,
- "%s: sblock returned error = %d\n", __func__, error);
+ "%s: soiolocak returned error = %d\n", __func__, error);
 return (error);
 }
+ sb = &so->so_snd;
 SOCKBUF_LOCK(sb);
 if ((sb->sb_state & SBS_CANTSENDMORE) ||
@@ -891,7 +888,7 @@ hvs_trans_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
 out:
 SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_SEND_UNLOCK(so);
 return (error);
 }
@@ -1678,7 +1675,7 @@ hvsock_detach(device_t dev)
 {
 struct hvsock_sc *sc = (struct hvsock_sc *)device_get_softc(dev);
 struct socket *so;
- int error, retry;
+ int retry;
 if (bootverbose)
 device_printf(dev, "hvsock_detach called.\n");
@@ -1707,8 +1704,7 @@ hvsock_detach(device_t dev)
 */
 if (so) {
 retry = 0;
- while ((error = sblock(&so->so_rcv, 0)) ==
- EWOULDBLOCK) {
+ while (SOCK_IO_RECV_LOCK(so, 0) == EWOULDBLOCK) {
 /*
 * Someone is reading, rx br is busy
 */
@@ -1719,8 +1715,7 @@ hvsock_detach(device_t dev)
 "retry = %d\n", retry++);
 }
 retry = 0;
- while ((error = sblock(&so->so_snd, 0)) ==
- EWOULDBLOCK) {
+ while (SOCK_IO_SEND_LOCK(so, 0) == EWOULDBLOCK) {
 /*
 * Someone is sending, tx br is busy
 */
@@ -1738,8 +1733,8 @@ hvsock_detach(device_t dev)
 sc->pcb = NULL;
 if (so) {
- sbunlock(&so->so_rcv);
- sbunlock(&so->so_snd);
+ SOCK_IO_RECV_UNLOCK(so);
+ SOCK_IO_SEND_UNLOCK(so);
 so->so_pcb = NULL;
 }
diff --git a/sys/kern/kern_sendfile.c b/sys/kern/kern_sendfile.c
index ac1072ca2406..d3043d16f4ec 100644
--- a/sys/kern/kern_sendfile.c
+++ b/sys/kern/kern_sendfile.c
@@ -741,7 +741,9 @@ vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
 * XXXRW: Historically this has assumed non-interruptibility, so now
 * we implement that, but possibly shouldn't.
 */
- (void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR);
+ error = SOCK_IO_SEND_LOCK(so, SBL_WAIT | SBL_NOINTR);
+ if (error != 0)
+ goto out;
 #ifdef KERN_TLS
 tls = ktls_hold(so->so_snd.sb_tls_info);
 #endif
@@ -1211,7 +1213,7 @@ prepend_header:
 * Send trailers. Wimp out and use writev(2).
 */
 if (trl_uio != NULL) {
- sbunlock(&so->so_snd);
+ SOCK_IO_SEND_UNLOCK(so);
 error = kern_writev(td, sockfd, trl_uio);
 if (error == 0)
 sbytes += td->td_retval[0];
@@ -1219,7 +1221,7 @@ prepend_header:
 }
 done:
- sbunlock(&so->so_snd);
+ SOCK_IO_SEND_UNLOCK(so);
 out:
 /*
 * If there was no error we have to clear td->td_retval[0]
diff --git a/sys/kern/uipc_ktls.c b/sys/kern/uipc_ktls.c
index 2605fb5b70b7..c6fc33b3109c 100644
--- a/sys/kern/uipc_ktls.c
+++ b/sys/kern/uipc_ktls.c
@@ -1108,7 +1108,7 @@ ktls_enable_tx(struct socket *so, struct tls_enable *en)
 return (error);
 }
- error = sblock(&so->so_snd, SBL_WAIT);
+ error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
 if (error) {
 ktls_cleanup(tls);
 return (error);
@@ -1128,7 +1128,7 @@ ktls_enable_tx(struct socket *so, struct tls_enable *en)
 so->so_snd.sb_flags |= SB_TLS_IFNET;
 SOCKBUF_UNLOCK(&so->so_snd);
 INP_WUNLOCK(inp);
- sbunlock(&so->so_snd);
+ SOCK_IO_SEND_UNLOCK(so);
 counter_u64_add(ktls_offload_total, 1);
@@ -1229,7 +1229,7 @@ ktls_set_tx_mode(struct socket *so, int mode)
 return (error);
 }
- error = sblock(&so->so_snd, SBL_WAIT);
+ error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
 if (error) {
 counter_u64_add(ktls_switch_failed, 1);
 ktls_free(tls_new);
@@ -1244,7 +1244,7 @@ ktls_set_tx_mode(struct socket *so, int mode)
 */
 if (tls != so->so_snd.sb_tls_info) {
 counter_u64_add(ktls_switch_failed, 1);
- sbunlock(&so->so_snd);
+ SOCK_IO_SEND_UNLOCK(so);
 ktls_free(tls_new);
 ktls_free(tls);
 INP_WLOCK(inp);
@@ -1256,7 +1256,7 @@ ktls_set_tx_mode(struct socket *so, int mode)
 if (tls_new->mode != TCP_TLS_MODE_SW)
 so->so_snd.sb_flags |= SB_TLS_IFNET;
 SOCKBUF_UNLOCK(&so->so_snd);
- sbunlock(&so->so_snd);
+ SOCK_IO_SEND_UNLOCK(so);
 /*
 * Drop two references on 'tls'. The first is for the
diff --git a/sys/kern/uipc_sockbuf.c b/sys/kern/uipc_sockbuf.c
index b2202fe15192..bb179043682e 100644
--- a/sys/kern/uipc_sockbuf.c
+++ b/sys/kern/uipc_sockbuf.c
@@ -475,34 +475,6 @@ sbwait(struct sockbuf *sb)
 sb->sb_timeo, 0, 0));
 }
-int
-sblock(struct sockbuf *sb, int flags)
-{
-
- KASSERT((flags & SBL_VALID) == flags,
- ("sblock: flags invalid (0x%x)", flags));
-
- if (flags & SBL_WAIT) {
- if ((sb->sb_flags & SB_NOINTR) ||
- (flags & SBL_NOINTR)) {
- sx_xlock(&sb->sb_sx);
- return (0);
- }
- return (sx_xlock_sig(&sb->sb_sx));
- } else {
- if (sx_try_xlock(&sb->sb_sx) == 0)
- return (EWOULDBLOCK);
- return (0);
- }
-}
-
-void
-sbunlock(struct sockbuf *sb)
-{
-
- sx_xunlock(&sb->sb_sx);
-}
-
 /*
 * Wakeup processes waiting on a socket buffer. Do asynchronous notification
 * via SIGIO if the socket has the SS_ASYNC flag set.
diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c
index 77c23859cf33..0ba8387763e1 100644
--- a/sys/kern/uipc_socket.c
+++ b/sys/kern/uipc_socket.c
@@ -1581,7 +1581,7 @@ sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
 if (control != NULL)
 clen = control->m_len;
- error = sblock(&so->so_snd, SBLOCKWAIT(flags));
+ error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
 if (error)
 goto out;
@@ -1779,7 +1779,7 @@ restart:
 } while (resid);
 release:
- sbunlock(&so->so_snd);
+ SOCK_IO_SEND_UNLOCK(so);
 out:
 #ifdef KERN_TLS
 if (tls != NULL)
@@ -1926,7 +1926,7 @@ soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
 (*pr->pr_usrreqs->pru_rcvd)(so, 0);
 }
- error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
+ error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
 if (error)
 return (error);
@@ -2380,7 +2380,7 @@ dontblock:
 if (flagsp != NULL)
 *flagsp |= flags;
 release:
- sbunlock(&so->so_rcv);
+ SOCK_IO_RECV_UNLOCK(so);
 return (error);
 }
@@ -2427,7 +2427,7 @@ soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
 #endif
 /* Prevent other readers from entering the socket. */
- error = sblock(sb, SBLOCKWAIT(flags));
+ error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
 if (error)
 return (error);
 SOCKBUF_LOCK(sb);
@@ -2435,7 +2435,7 @@ soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
 #ifdef KERN_TLS
 if (sb->sb_tls_info != NULL) {
 SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_RECV_UNLOCK(so);
 return (soreceive_generic(so, psa, uio, mp0, controlp, flagsp));
 }
@@ -2598,11 +2598,10 @@ deliver:
 if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
 goto restart;
 out:
- SOCKBUF_LOCK_ASSERT(sb);
 SBLASTRECORDCHK(sb);
 SBLASTMBUFCHK(sb);
 SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_RECV_UNLOCK(so);
 return (error);
 }
@@ -2869,6 +2868,7 @@ sorflush(struct socket *so)
 struct sockbuf *sb = &so->so_rcv;
 struct protosw *pr = so->so_proto;
 struct socket aso;
+ int error;
 VNET_SO_ASSERT(so);
@@ -2886,7 +2886,9 @@ sorflush(struct socket *so)
 * despite any existing socket disposition on interruptable waiting.
 */
 socantrcvmore(so);
- (void) sblock(sb, SBL_WAIT | SBL_NOINTR);
+ error = SOCK_IO_RECV_LOCK(so, SBL_WAIT | SBL_NOINTR);
+ KASSERT(error == 0, ("%s: cannot lock sock %p recv buffer",
+ __func__, so));
 /*
 * Invalidate/clear most of the sockbuf structure, but leave selinfo
@@ -2900,7 +2902,7 @@ sorflush(struct socket *so)
 bzero(&sb->sb_startzero,
 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
 SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_RECV_UNLOCK(so);
 /*
 * Dispose of special rights and flush the copied socket. Don't call
@@ -4093,6 +4095,39 @@ soisdisconnected(struct socket *so)
 wakeup(&so->so_timeo);
 }
+int
+soiolock(struct socket *so, struct sx *sx, int flags)
+{
+ int error;
+
+ KASSERT((flags & SBL_VALID) == flags,
+ ("soiolock: invalid flags %#x", flags));
+
+ if ((flags & SBL_WAIT) != 0) {
+ if ((flags & SBL_NOINTR) != 0) {
+ sx_xlock(sx);
+ } else {
+ error = sx_xlock_sig(sx);
+ if (error != 0)
+ return (error);
+ }
+ } else if (!sx_try_xlock(sx)) {
+ return (EWOULDBLOCK);
+ }
+
+ if (__predict_false(SOLISTENING(so))) {
+ sx_xunlock(sx);
+ return (ENOTCONN);
+ }
+ return (0);
+}
+
+void
+soiounlock(struct sx *sx)
+{
+ sx_xunlock(sx);
+}
+
 /*
 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
 */
diff --git a/sys/netinet/sctputil.c b/sys/netinet/sctputil.c
index b01ace4d1fb9..a598741f062f 100644
--- a/sys/netinet/sctputil.c
+++ b/sys/netinet/sctputil.c
@@ -4796,10 +4796,10 @@ sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
 old_so = old_inp->sctp_socket;
 new_so = new_inp->sctp_socket;
 TAILQ_INIT(&tmp_queue);
- error = sblock(&old_so->so_rcv, waitflags);
+ error = SOCK_IO_RECV_LOCK(old_so, waitflags);
 if (error) {
 /*
- * Gak, can't get sblock, we have a problem. data will be
+ * Gak, can't get I/O lock, we have a problem. data will be
 * left stranded.. and we don't dare look at it since the
 * other thread may be reading something. Oh well, its a
 * screwed up app that does a peeloff OR a accept while
@@ -4831,9 +4831,8 @@ sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
 }
 }
 SCTP_INP_READ_UNLOCK(old_inp);
- /* Remove the sb-lock on the old socket */
-
- sbunlock(&old_so->so_rcv);
+ /* Remove the recv-lock on the old socket */
+ SOCK_IO_RECV_UNLOCK(old_so);
 /* Now we move them over to the new socket buffer */
 SCTP_INP_READ_LOCK(new_inp);
 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
@@ -5586,7 +5585,7 @@ sctp_sorecvmsg(struct socket *so,
 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
 }
- error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
+ error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(in_flags));
 if (error) {
 goto release_unlocked;
 }
@@ -6234,8 +6233,8 @@ get_more_data:
 }
 /*
 * We need to wait for more data a few things: - We don't
- * sbunlock() so we don't get someone else reading. - We
- * must be sure to account for the case where what is added
+ * release the I/O lock so we don't get someone else reading.
+ * - We must be sure to account for the case where what is added
 * is NOT to our control when we wakeup.
 */
@@ -6383,7 +6382,7 @@ release:
 hold_sblock = 0;
 }
- sbunlock(&so->so_rcv);
+ SOCK_IO_RECV_UNLOCK(so);
 sockbuf_lock = 0;
 release_unlocked:
@@ -6418,7 +6417,7 @@ out:
 SOCKBUF_UNLOCK(&so->so_rcv);
 }
 if (sockbuf_lock) {
- sbunlock(&so->so_rcv);
+ SOCK_IO_RECV_UNLOCK(so);
 }
 if (freecnt_applied) {
diff --git a/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c b/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c
index 62e12f3b477c..973372b21761 100644
--- a/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c
+++ b/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c
@@ -1103,7 +1103,7 @@ sdp_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
 td->td_ru.ru_msgsnd++;
 ssk = sdp_sk(so);
- error = sblock(&so->so_snd, SBLOCKWAIT(flags));
+ error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
 if (error)
 goto out;
@@ -1194,7 +1194,7 @@ restart:
 } while (resid);
 release:
- sbunlock(&so->so_snd);
+ SOCK_IO_SEND_UNLOCK(so);
 out:
 if (top != NULL)
 m_freem(top);
@@ -1265,9 +1265,9 @@ sdp_sorecv(struct socket *so, struct sockaddr **psa, struct uio *uio,
 ssk = sdp_sk(so);
 /* Prevent other readers from entering the socket. */
- error = sblock(sb, SBLOCKWAIT(flags));
+ error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
 if (error)
- goto out;
+ return (error);
 SOCKBUF_LOCK(sb);
 /* Easy one, no space to copyout anything. */
@@ -1421,11 +1421,10 @@ deliver:
 if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
 goto restart;
 out:
- SOCKBUF_LOCK_ASSERT(sb);
 SBLASTRECORDCHK(sb);
 SBLASTMBUFCHK(sb);
 SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_RECV_UNLOCK(so);
 return (error);
 }
diff --git a/sys/sys/sockbuf.h b/sys/sys/sockbuf.h
index 4c56f4eaf234..a263555b3a21 100644
--- a/sys/sys/sockbuf.h
+++ b/sys/sys/sockbuf.h
@@ -78,7 +78,6 @@ struct selinfo;
 *
 * Locking key to struct sockbuf:
 * (a) locked by SOCKBUF_LOCK().
- * (b) locked by sblock()
 */
 struct sockbuf {
 struct mtx sb_mtx; /* sockbuf lock */
@@ -183,8 +182,6 @@ struct mbuf *
 struct mbuf *
 sbsndmbuf(struct sockbuf *sb, u_int off, u_int *moff);
 int sbwait(struct sockbuf *sb);
-int sblock(struct sockbuf *sb, int flags);
-void sbunlock(struct sockbuf *sb);
 void sballoc(struct sockbuf *, struct mbuf *);
 void sbfree(struct sockbuf *, struct mbuf *);
 void sballoc_ktls_rx(struct sockbuf *sb, struct mbuf *m);
diff --git a/sys/sys/socketvar.h b/sys/sys/socketvar.h
index 6452716d36de..e75251bc054b 100644
--- a/sys/sys/socketvar.h
+++ b/sys/sys/socketvar.h
@@ -249,14 +249,24 @@ struct socket {
 */
 /*
- * Flags to sblock().
+ * Flags to soiolock().
 */
 #define SBL_WAIT 0x00000001 /* Wait if not immediately available. */
 #define SBL_NOINTR 0x00000002 /* Force non-interruptible sleep. */
 #define SBL_VALID (SBL_WAIT | SBL_NOINTR)
+
 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
+#define SOCK_IO_SEND_LOCK(so, flags) \
+ soiolock((so), &(so)->so_snd.sb_sx, (flags))
+#define SOCK_IO_SEND_UNLOCK(so) \
+ soiounlock(&(so)->so_snd.sb_sx)
+#define SOCK_IO_RECV_LOCK(so, flags) \
+ soiolock((so), &(so)->so_rcv.sb_sx, (flags))
+#define SOCK_IO_RECV_UNLOCK(so) \
+ soiounlock(&(so)->so_rcv.sb_sx)
+
 /*
 * Do we need to notify the other side when I/O is possible?
 */
@@ -486,6 +496,8 @@ void socantsendmore(struct socket *so);
 void socantsendmore_locked(struct socket *so);
 void soroverflow(struct socket *so);
 void soroverflow_locked(struct socket *so);
+int soiolock(struct socket *so, struct sx *sx, int flags);
+void soiounlock(struct sx *sx);
 /*
 * Accept filter functions (duh).
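
To make the renamed interface described in the commit message concrete, here is a minimal, hypothetical sketch of how a protocol receive path takes and releases the per-socket I/O lock after this change. It is not part of the commit: the macros, flags, and error values are the ones introduced or retained by the patch, while the function name and the elided data-copy step are illustrative only.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/uio.h>

/* Hypothetical receive path; only the locking protocol is shown. */
static int
example_soreceive(struct socket *so, struct uio *uio, int flags)
{
	int error;

	/*
	 * Replaces sblock(&so->so_rcv, ...).  The lock call can now fail
	 * with EWOULDBLOCK (MSG_DONTWAIT), EINTR/ERESTART (interrupted
	 * sleep), or ENOTCONN if the socket has become a listening
	 * socket, so even callers that used to ignore sblock()'s return
	 * value must check it.
	 */
	error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
	if (error != 0)
		return (error);

	SOCKBUF_LOCK(&so->so_rcv);
	/* ... dequeue data from so->so_rcv and copy it out to uio ... */
	SOCKBUF_UNLOCK(&so->so_rcv);

	/* Replaces sbunlock(&so->so_rcv). */
	SOCK_IO_RECV_UNLOCK(so);
	return (0);
}

The send side is symmetric with SOCK_IO_SEND_LOCK()/SOCK_IO_SEND_UNLOCK(). The sorflush() and vn_sendfile() hunks above show the two ways a caller can react when the lock cannot be taken: assert success in the must-succeed SBL_WAIT | SBL_NOINTR case, or propagate the error as vn_sendfile() now does.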