svn commit: r238907 - projects/calloutng/sys/kern
Andre Oppermann
andre at freebsd.org
Mon Sep 17 07:25:53 UTC 2012
Hello Attilio,
Could you integrate and test this patch from Isilon as well:
Add INVARIANTS and WITNESS support to rm_lock locks and optimize the
synchronization path by replacing a LIST of active readers with a
TAILQ.
Obtained from: Isilon
Submitted by: mlaier
http://svn.freebsd.org/changeset/base/234648
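
A minimal, purely illustrative sketch of the LIST-to-TAILQ change described
above, using the <sys/queue.h> macros (the struct and field names here are
invented for the example and are not taken from r234648):

#include <sys/queue.h>

struct thread;

/* Hypothetical per-reader tracking record; names are illustrative only. */
struct reader_tracker {
	TAILQ_ENTRY(reader_tracker) rt_link;	/* was a LIST_ENTRY() */
	struct thread *rt_thread;
};

/* A TAILQ head also records the tail, so inserting at either end is O(1). */
TAILQ_HEAD(reader_head, reader_tracker);

static void
reader_add(struct reader_head *head, struct reader_tracker *rt)
{

	TAILQ_INSERT_TAIL(head, rt, rt_link);	/* was LIST_INSERT_HEAD() */
}

static void
reader_remove(struct reader_head *head, struct reader_tracker *rt)
{

	/* Unlike LIST_REMOVE(), TAILQ_REMOVE() also takes the head. */
	TAILQ_REMOVE(head, rt, rt_link);
}
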
You're far deeper into locking than I am.
Thanks
--
Andre
On 13.09.2012 03:36, Attilio Rao wrote:
> On Thu, Aug 2, 2012 at 10:07 PM, John Baldwin <jhb at freebsd.org> wrote:
>> On Thursday, August 02, 2012 4:56:03 pm Attilio Rao wrote:
>>> On 7/30/12, John Baldwin <jhb at freebsd.org> wrote:
>>>> --- //depot/projects/smpng/sys/kern/kern_rmlock.c 2012-03-25 18:45:29.000000000 0000
>>>> +++ //depot/user/jhb/lock/kern/kern_rmlock.c 2012-06-18 21:20:58.000000000 0000
>>>> @@ -70,6 +70,9 @@
>>>> }
>>>>
>>>> static void assert_rm(const struct lock_object *lock, int what);
>>>> +#ifdef DDB
>>>> +static void db_show_rm(const struct lock_object *lock);
>>>> +#endif
>>>> static void lock_rm(struct lock_object *lock, int how);
>>>> #ifdef KDTRACE_HOOKS
>>>> static int owner_rm(const struct lock_object *lock, struct thread **owner);
>>>
>>> While here, did you also consider:
>>> - Abstracting compiler_memory_barrier() into an MI, compiler-dependent function?
>>> - Possibly fixing rm_queue with DPCPU
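
As an aside on the first point: compiler_memory_barrier() is currently a
private helper in kern_rmlock.c. A machine-independent, compiler-dependent
home for it could look roughly like the sketch below (illustrative only;
compare the __compiler_membar() macro in <sys/cdefs.h>):

/* Sketch of an MI barrier macro guarded by the compiler in use. */
#if defined(__GNUC__) || defined(__clang__)
#define	compiler_memory_barrier()	__asm __volatile(" " : : : "memory")
#else
#error "compiler_memory_barrier() not implemented for this compiler"
#endif
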
>>
>> Mostly I just wanted to fill in missing functionality and fix up the
>> RM_SLEEPABLE bits a bit.
>
> So what do you think about the following patch? If you agree, I will
> send it to pho@ for testing in a batch with other patches.
>
> Thanks,
> Attilio
>
>
> Subject: [PATCH 7/7] Reimplement pc_rm_queue as a dynamic entry for per-cpu
> members.
>
> Sparse notes:
> o rm_tracker_remove() does not seem to need the pc argument, even
> in the current code
> o The stop pointer value changes from &pc->pc_rm_queue to NULL when
> scanning the per-CPU tracker list
>
> Signed-off-by: Attilio Rao <attilio at pcbsd-2488.(none)>
> ---
> sys/kern/kern_rmlock.c | 42 ++++++++++++++++++++++--------------------
> sys/kern/subr_pcpu.c | 2 --
> sys/sys/_rmlock.h | 10 +++++-----
> sys/sys/pcpu.h | 18 ------------------
> 4 files changed, 27 insertions(+), 45 deletions(-)
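
For readers unfamiliar with the dynamic per-CPU (DPCPU) machinery the patch
moves rm_queue onto, here is a minimal sketch of declaring and accessing a
dynamic per-CPU variable; the example_counter variable and the two helpers
are invented for illustration, while DPCPU_DEFINE(), DPCPU_PTR() and
DPCPU_ID_PTR() are the <sys/pcpu.h> interfaces the patch itself uses:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pcpu.h>

/* One instance of this counter exists for every CPU. */
static DPCPU_DEFINE(u_long, example_counter);

static void
example_counter_bump(void)
{

	critical_enter();			/* stay on the current CPU */
	(*DPCPU_PTR(example_counter))++;	/* this CPU's instance */
	critical_exit();
}

static u_long
example_counter_read(int cpu)
{

	return (*DPCPU_ID_PTR(cpu, example_counter));	/* another CPU's instance */
}

Because each CPU's rm_queue head now lives in the DPCPU area rather than in
struct pcpu, the per-CPU tracker list in the patch is terminated by NULL
instead of by the &pc->pc_rm_queue sentinel, which is what the second
sparse note above refers to.
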
>
> diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
> index ef1920b..c6ba65a 100644
> --- a/sys/kern/kern_rmlock.c
> +++ b/sys/kern/kern_rmlock.c
> @@ -76,6 +76,8 @@ static int owner_rm(const struct lock_object
> *lock, struct thread **owner);
> #endif
> static int unlock_rm(struct lock_object *lock);
>
> +static DPCPU_DEFINE(struct rm_queue, rm_queue);
> +
> struct lock_class lock_class_rm = {
> .lc_name = "rm",
> .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
> @@ -133,24 +135,24 @@ MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);
> * interrupt on the *local* cpu.
> */
> static void inline
> -rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
> +rm_tracker_add(struct rm_queue *pcpu_rm_queue, struct rm_priotracker *tracker)
> {
> struct rm_queue *next;
>
> /* Initialize all tracker pointers */
> - tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
> - next = pc->pc_rm_queue.rmq_next;
> + tracker->rmp_cpuQueue.rmq_prev = NULL;
> + next = pcpu_rm_queue->rmq_next;
> tracker->rmp_cpuQueue.rmq_next = next;
>
> /* rmq_prev is not used during froward traversal. */
> next->rmq_prev = &tracker->rmp_cpuQueue;
>
> /* Update pointer to first element. */
> - pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
> + pcpu_rm_queue->rmq_next = &tracker->rmp_cpuQueue;
> }
>
> static void inline
> -rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
> +rm_tracker_remove(struct rm_priotracker *tracker)
> {
> struct rm_queue *next, *prev;
>
> @@ -167,13 +169,12 @@ rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
> static void
> rm_cleanIPI(void *arg)
> {
> - struct pcpu *pc;
> struct rmlock *rm = arg;
> struct rm_priotracker *tracker;
> - struct rm_queue *queue;
> - pc = pcpu_find(curcpu);
> + struct rm_queue *queue, *pcpu_rm_queue;
> + pcpu_rm_queue = DPCPU_PTR(rm_queue);
>
> - for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
> + for (queue = pcpu_rm_queue->rmq_next; queue != NULL;
> queue = queue->rmq_next) {
> tracker = (struct rm_priotracker *)queue;
> if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
> @@ -256,11 +257,12 @@ static int
> _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
> {
> struct pcpu *pc;
> - struct rm_queue *queue;
> + struct rm_queue *queue, *pcpu_rm_queue;
> struct rm_priotracker *atracker;
>
> critical_enter();
> pc = pcpu_find(curcpu);
> + pcpu_rm_queue = DPCPU_PTR(rm_queue);
>
> /* Check if we just need to do a proper critical_exit. */
> if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
> @@ -269,12 +271,12 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
> }
>
> /* Remove our tracker from the per-cpu list. */
> - rm_tracker_remove(pc, tracker);
> + rm_tracker_remove(tracker);
>
> /* Check to see if the IPI granted us the lock after all. */
> if (tracker->rmp_flags) {
> /* Just add back tracker - we hold the lock. */
> - rm_tracker_add(pc, tracker);
> + rm_tracker_add(pcpu_rm_queue, tracker);
> critical_exit();
> return (1);
> }
> @@ -288,8 +290,8 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
> * Just grant the lock if this thread already has a tracker
> * for this lock on the per-cpu queue.
> */
> - for (queue = pc->pc_rm_queue.rmq_next;
> - queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
> + for (queue = pcpu_rm_queue->rmq_next; queue != NULL;
> + queue = queue->rmq_next) {
> atracker = (struct rm_priotracker *)queue;
> if ((atracker->rmp_rmlock == rm) &&
> (atracker->rmp_thread == tracker->rmp_thread)) {
> @@ -298,7 +300,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
> tracker, rmp_qentry);
> tracker->rmp_flags = RMPF_ONQUEUE;
> mtx_unlock_spin(&rm_spinlock);
> - rm_tracker_add(pc, tracker);
> + rm_tracker_add(pcpu_rm_queue, tracker);
> critical_exit();
> return (1);
> }
> @@ -326,7 +328,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
> critical_enter();
> pc = pcpu_find(curcpu);
> CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
> - rm_tracker_add(pc, tracker);
> + rm_tracker_add(pcpu_rm_queue, tracker);
> sched_pin();
> critical_exit();
>
> @@ -342,6 +344,7 @@ int
> _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
> {
> struct thread *td = curthread;
> + struct rm_queue *pcpu_rm_queue;
> struct pcpu *pc;
>
> if (SCHEDULER_STOPPED())
> @@ -356,8 +359,9 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
> compiler_memory_barrier();
>
> pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
> + pcpu_rm_queue = DPCPU_ID_PTR(td->td_oncpu, rm_queue);
>
> - rm_tracker_add(pc, tracker);
> + rm_tracker_add(pcpu_rm_queue, tracker);
>
> sched_pin();
>
> @@ -413,15 +417,13 @@ _rm_unlock_hard(struct thread *td,struct rm_priotracker *tracker)
> void
> _rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
> {
> - struct pcpu *pc;
> struct thread *td = tracker->rmp_thread;
>
> if (SCHEDULER_STOPPED())
> return;
>
> td->td_critnest++; /* critical_enter(); */
> - pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
> - rm_tracker_remove(pc, tracker);
> + rm_tracker_remove(tracker);
> td->td_critnest--;
> sched_unpin();
>
> diff --git a/sys/kern/subr_pcpu.c b/sys/kern/subr_pcpu.c
> index 505a4df..279295e 100644
> --- a/sys/kern/subr_pcpu.c
> +++ b/sys/kern/subr_pcpu.c
> @@ -90,8 +90,6 @@ pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
> cpuid_to_pcpu[cpuid] = pcpu;
> STAILQ_INSERT_TAIL(&cpuhead, pcpu, pc_allcpu);
> cpu_pcpu_init(pcpu, cpuid, size);
> - pcpu->pc_rm_queue.rmq_next = &pcpu->pc_rm_queue;
> - pcpu->pc_rm_queue.rmq_prev = &pcpu->pc_rm_queue;
> }
>
> void
> diff --git a/sys/sys/_rmlock.h b/sys/sys/_rmlock.h
> index 15d6c49..51bb03e 100644
> --- a/sys/sys/_rmlock.h
> +++ b/sys/sys/_rmlock.h
> @@ -32,11 +32,6 @@
> #ifndef _SYS__RMLOCK_H_
> #define _SYS__RMLOCK_H_
>
> -/*
> - * XXXUPS remove as soon as we have per cpu variable
> - * linker sets and can define rm_queue in _rm_lock.h
> -*/
> -#include <sys/pcpu.h>
> /*
> * Mostly reader/occasional writer lock.
> */
> @@ -55,6 +50,11 @@ struct rmlock {
> #define rm_lock_mtx _rm_lock._rm_lock_mtx
> #define rm_lock_sx _rm_lock._rm_lock_sx
>
> +struct rm_queue {
> + struct rm_queue *volatile rmq_next;
> + struct rm_queue *volatile rmq_prev;
> +};
> +
> struct rm_priotracker {
> struct rm_queue rmp_cpuQueue; /* Must be first */
> struct rmlock *rmp_rmlock;
> diff --git a/sys/sys/pcpu.h b/sys/sys/pcpu.h
> index 4a4ec00..78ba04a 100644
> --- a/sys/sys/pcpu.h
> +++ b/sys/sys/pcpu.h
> @@ -137,15 +137,6 @@ extern uintptr_t dpcpu_off[];
>
> #endif /* _KERNEL */
>
> -/*
> - * XXXUPS remove as soon as we have per cpu variable
> - * linker sets and can define rm_queue in _rm_lock.h
> - */
> -struct rm_queue {
> - struct rm_queue* volatile rmq_next;
> - struct rm_queue* volatile rmq_prev;
> -};
> -
> /*
> * This structure maps out the global data that needs to be kept on a
> * per-cpu basis. The members are accessed via the PCPU_GET/SET/PTR
> @@ -169,15 +160,6 @@ struct pcpu {
> void *pc_netisr; /* netisr SWI cookie */
> int pc_dnweight; /* vm_page_dontneed() */
> int pc_domain; /* Memory domain. */
> -
> - /*
> - * Stuff for read mostly lock
> - *
> - * XXXUPS remove as soon as we have per cpu variable
> - * linker sets.
> - */
> - struct rm_queue pc_rm_queue;
> -
> uintptr_t pc_dynamic; /* Dynamic per-cpu data area */
>
> /*
>