svn commit: r367713 - head/sys/kern
Konstantin Belousov
kostikbel at gmail.com
Tue Nov 17 01:58:34 UTC 2020
On Mon, Nov 16, 2020 at 03:09:19AM +0000, Mateusz Guzik wrote:
> Author: mjg
> Date: Mon Nov 16 03:09:18 2020
> New Revision: 367713
> URL: https://svnweb.freebsd.org/changeset/base/367713
>
> Log:
> select: replace reference counting with memory barriers in selfd
>
> Refcounting was added to combat a race between selfdfree and doselwakeup,
> but it adds avoidable overhead.
>
> selfdfree detects that it can free the object by checking ->sf_si == NULL, so
> we can ensure that the condition only holds after all accesses have completed.
>
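To make the ordering concrete, here is a minimal userland sketch of the
handoff the log describes, with C11 atomics standing in for
atomic_store_rel_ptr()/atomic_load_acq_ptr(); the type and thread names
(obj, waker, freer) are invented for illustration, this is not the kernel
code:

/*
 * Sketch only, not the kernel code: the waker finishes all of its accesses
 * to the object and then publishes NULL with release semantics; the freer
 * may free the object only once it observes that NULL with acquire
 * semantics.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        _Atomic(void *) si;     /* stands in for sfp->sf_si */
        int payload;            /* stands in for the other selfd fields */
};

/* Plays the role of doselwakeup(). */
static void *
waker(void *arg)
{
        struct obj *o = arg;

        o->payload = 42;        /* last plain access to the object */
        /* Release: the access above is visible before the NULL is. */
        atomic_store_explicit(&o->si, NULL, memory_order_release);
        return (NULL);
}

/* Plays the role of selfdfree(). */
static void *
freer(void *arg)
{
        struct obj *o = arg;

        /* Acquire: observing NULL orders the waker's accesses before us. */
        while (atomic_load_explicit(&o->si, memory_order_acquire) != NULL)
                ;       /* the real code takes sf_mtx instead of spinning */
        printf("payload %d\n", o->payload);     /* guaranteed to see 42 */
        free(o);
        return (NULL);
}

int
main(void)
{
        struct obj *o = malloc(sizeof(*o));
        pthread_t t1, t2;

        o->payload = 0;
        atomic_init(&o->si, (void *)o); /* non-NULL: still in use */
        pthread_create(&t1, NULL, waker, o);
        pthread_create(&t2, NULL, freer, o);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return (0);
}

The only point is that the freer cannot free the object out from under the
waker's earlier accesses once it has observed the release store; the real
selfdfree() takes sf_mtx instead of spinning when it still sees a non-NULL
sf_si.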
> Modified:
> head/sys/kern/sys_generic.c
>
> Modified: head/sys/kern/sys_generic.c
> ==============================================================================
> --- head/sys/kern/sys_generic.c Sun Nov 15 22:49:28 2020 (r367712)
> +++ head/sys/kern/sys_generic.c Mon Nov 16 03:09:18 2020 (r367713)
> @@ -156,7 +156,6 @@ struct selfd {
>         struct mtx      *sf_mtx;        /* Pointer to selinfo mtx. */
>         struct seltd    *sf_td;         /* (k) owning seltd. */
>         void            *sf_cookie;     /* (k) fd or pollfd. */
> -        u_int           sf_refs;
> };
>
> MALLOC_DEFINE(M_SELFD, "selfd", "selfd");
> @@ -1704,16 +1703,17 @@ static void
> selfdfree(struct seltd *stp, struct selfd *sfp)
> {
>         STAILQ_REMOVE(&stp->st_selq, sfp, selfd, sf_link);
> -        if (sfp->sf_si != NULL) {
> +        /*
> +         * Paired with doselwakeup.
> +         */
> +        if (atomic_load_acq_ptr((uintptr_t *)&sfp->sf_si) != (uintptr_t)NULL) {
This could be != 0.
>                 mtx_lock(sfp->sf_mtx);
>                 if (sfp->sf_si != NULL) {
>                         TAILQ_REMOVE(&sfp->sf_si->si_tdlist, sfp, sf_threads);
> -                        refcount_release(&sfp->sf_refs);
>                 }
>                 mtx_unlock(sfp->sf_mtx);
>         }
> -        if (refcount_release(&sfp->sf_refs))
> -                free(sfp, M_SELFD);
> +        free(sfp, M_SELFD);
What guarantees that doselwakeup() has finished with sfp?
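For reference, the interleaving in question, as the patched code reads:

  1. A (doselwakeup): TAILQ_REMOVE(&sip->si_tdlist, sfp, sf_threads)
  2. A:               stp = sfp->sf_td
  3. A:               atomic_store_rel_ptr((uintptr_t *)&sfp->sf_si, (uintptr_t)NULL)
  4. B (selfdfree):   atomic_load_acq_ptr of sfp->sf_si observes NULL, skips sf_mtx
  5. B:               free(sfp, M_SELFD)
  6. A:               mtx_lock(&stp->st_mtx), cv_broadcastpri(&stp->st_wait, pri), ...

With release/acquire semantics, A's accesses to sfp before step 3 happen
before B's free at step 5; after step 3 doselwakeup() appears to touch only
stp, which it loaded at step 2.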
> }
>
> /* Drain the waiters tied to all the selfd belonging the specified selinfo. */
> @@ -1766,7 +1766,6 @@ selrecord(struct thread *selector, struct selinfo *sip
>          */
>         sfp->sf_si = sip;
>         sfp->sf_mtx = mtxp;
> -        refcount_init(&sfp->sf_refs, 2);
>         STAILQ_INSERT_TAIL(&stp->st_selq, sfp, sf_link);
>         /*
>          * Now that we've locked the sip, check for initialization.
> @@ -1820,14 +1819,15 @@ doselwakeup(struct selinfo *sip, int pri)
>                  * sf_si seltdclear will know to ignore this si.
>                  */
>                 TAILQ_REMOVE(&sip->si_tdlist, sfp, sf_threads);
> -                sfp->sf_si = NULL;
>                 stp = sfp->sf_td;
> +                /*
> +                 * Paired with selfdfree.
> +                 */
> +                atomic_store_rel_ptr((uintptr_t *)&sfp->sf_si, (uintptr_t)NULL);
>                 mtx_lock(&stp->st_mtx);
>                 stp->st_flags |= SELTD_PENDING;
>                 cv_broadcastpri(&stp->st_wait, pri);
>                 mtx_unlock(&stp->st_mtx);
> -                if (refcount_release(&sfp->sf_refs))
> -                        free(sfp, M_SELFD);
>         }
>         mtx_unlock(sip->si_mtx);
> }
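For contrast with the sketch further up, here is the same toy with the
reference-counted handoff this commit removes (again C11 and invented
names, and simplified relative to the deleted kernel logic): in this
variant both the wakeup side and the teardown side pay an atomic
read-modify-write on the counter, which is the overhead the log message
calls avoidable.

/*
 * Sketch only, not the removed kernel code: each side drops one reference;
 * whichever drop brings the count to zero frees the object, mirroring the
 * removed "if (refcount_release(&sfp->sf_refs)) free(sfp, M_SELFD)" lines.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
        atomic_uint refs;       /* stands in for sfp->sf_refs */
        int payload;            /* stands in for the other selfd fields */
};

static void
obj_release(struct obj *o)
{
        /* acq_rel: the last releaser sees all prior accesses and frees. */
        if (atomic_fetch_sub_explicit(&o->refs, 1, memory_order_acq_rel) == 1)
                free(o);
}

/* Plays the role of doselwakeup(). */
static void *
waker(void *arg)
{
        struct obj *o = arg;

        o->payload = 42;
        obj_release(o);
        return (NULL);
}

/* Plays the role of selfdfree(). */
static void *
freer(void *arg)
{
        obj_release(arg);
        return (NULL);
}

int
main(void)
{
        struct obj *o = malloc(sizeof(*o));
        pthread_t t1, t2;

        o->payload = 0;
        atomic_init(&o->refs, 2);       /* matches refcount_init(..., 2) */
        pthread_create(&t1, NULL, waker, o);
        pthread_create(&t2, NULL, freer, o);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return (0);
}

In the removed code the count likewise started at 2
(refcount_init(&sfp->sf_refs, 2) in selrecord()), and whichever
refcount_release() brought it to zero freed the selfd.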