subr_turnstile.c

/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *      and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of turnstiles used to hold the queue of threads blocked on
 * non-sleepable locks.  Sleepable locks use condition variables to
 * implement their queues.  Turnstiles differ from sleep queues in that
 * turnstile queues are assigned to a lock held by an owning thread.  Thus,
 * when one thread is enqueued onto a turnstile, it can lend its priority
 * to the owning thread.
 *
 * We wish to avoid bloating locks with an embedded turnstile, and we do not
 * want to use back-pointers in the locks for the same reason.  Thus, we
 * use a similar approach to that of Solaris 7 as described in Solaris
 * Internals by Jim Mauro and Richard McDougall.  Turnstiles are looked up
 * in a hash table based on the address of the lock.  Each entry in the
 * hash table is a linked list of turnstiles and is called a turnstile
 * chain.  Each chain contains a spin mutex that protects all of the
 * turnstiles in the chain.
 *
 * Each time a thread is created, a turnstile is malloc'd and attached to
 * that thread.  When a thread blocks on a lock, if it is the first thread
 * to block, it lends its turnstile to the lock.  If the lock already has
 * a turnstile, then the thread donates its turnstile to that turnstile's
 * free list.  When a thread is woken up, it takes a turnstile from the
 * free list if there are any other waiters.  If it is the only thread
 * blocked on the lock, then it reclaims the turnstile associated with the
 * lock and removes it from the hash table.
 *
 * XXX: We should probably implement some sort of sleep queue that condition
 * variables and sleep queues share.  On Solaris condition variables are
 * implemented using a hash table of sleep queues similar to our current
 * sleep queues.  We might want to investigate doing that ourselves.
 */
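
/*
 * Illustrative example of priority lending (not part of the original
 * source): FreeBSD priorities are inverted, so a numerically lower
 * td_priority is more important.  Suppose thread A, running at priority
 * 100, owns mutex M, and thread B, at priority 60, blocks on M.
 * turnstile_wait() enqueues B on M's turnstile, and propagate_priority()
 * lowers A's td_priority from 100 to 60 so that A can run and release M.
 * When A releases M, turnstile_unpend() recomputes A's priority from any
 * contested locks A still owns, letting it fall back toward td_base_pri.
 */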

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/sys/kern/subr_turnstile.c,v 1.134.2.1 2003/12/11 20:01:52 jhb Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/turnstile.h>
#include <sys/sched.h>

/*
 * Constants for the hash table of turnstile chains.  TC_SHIFT is a magic
 * number chosen because the sleep queues use the same value for the
 * shift.  Basically, we ignore the lower 8 bits of the address.
 * TC_TABLESIZE must be a power of two for TC_MASK to work properly.
 */
#define TC_TABLESIZE    128                     /* Must be power of 2. */
#define TC_MASK         (TC_TABLESIZE - 1)
#define TC_SHIFT        8
#define TC_HASH(lock)   (((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
#define TC_LOOKUP(lock) &turnstile_chains[TC_HASH(lock)]
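
/*
 * Worked example (illustrative, not in the original source): with
 * TC_SHIFT == 8 and TC_MASK == 0x7f, a lock object at address 0xc15a2e40
 * hashes to ((0xc15a2e40 >> 8) & 0x7f) == 0x2e, so TC_LOOKUP() yields
 * &turnstile_chains[0x2e].  All locks in the same 256-byte region of
 * memory map to the same chain and thus contend on one chain spin lock.
 */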

/*
 * There are three different lists of turnstiles as follows.  The list
 * connected by ts_link entries is a per-thread list of all the turnstiles
 * attached to locks that we own.  This is used to fix up our priority when
 * a lock is released.  The other two lists use the ts_hash entries.  The
 * first of these two is the turnstile chain list that a turnstile is on
 * when it is attached to a lock.  The second list to use ts_hash is the
 * free list hung off a turnstile that is attached to a lock.
 *
 * Each turnstile contains two lists of threads.  The ts_blocked list is
 * a linked list of threads blocked on the turnstile's lock.  The
 * ts_pending list is a linked list of threads previously awakened by
 * turnstile_signal() or turnstile_wakeup() that are waiting to be put on
 * the run queue.
 *
 * Locking key:
 *  c - turnstile chain lock
 *  q - td_contested lock
 */
struct turnstile {
        TAILQ_HEAD(, thread) ts_blocked;        /* (c + q) Blocked threads. */
        TAILQ_HEAD(, thread) ts_pending;        /* (c) Pending threads. */
        LIST_ENTRY(turnstile) ts_hash;          /* (c) Chain and free list. */
        LIST_ENTRY(turnstile) ts_link;          /* (q) Contested locks. */
        LIST_HEAD(, turnstile) ts_free;         /* (c) Free turnstiles. */
        struct lock_object *ts_lockobj;         /* (c) Lock we reference. */
        struct thread *ts_owner;                /* (c + q) Who owns the lock. */
};
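
/*
 * Illustrative example (not in the original source): if three threads
 * block on the same mutex, the chain for that lock's address holds one
 * turnstile, donated by the first waiter.  Its ts_blocked list holds all
 * three threads in priority order, and its ts_free list holds the two
 * turnstiles donated by the later waiters, ready to be handed back one
 * per thread as the waiters are awakened.
 */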

struct turnstile_chain {
        LIST_HEAD(, turnstile) tc_turnstiles;   /* List of turnstiles. */
        struct mtx tc_lock;                     /* Spin lock for this chain. */
};

static struct mtx td_contested_lock;
static struct turnstile_chain turnstile_chains[TC_TABLESIZE];

MALLOC_DEFINE(M_TURNSTILE, "turnstiles", "turnstiles");

/*
 * Prototypes for non-exported routines.
 */
static void     init_turnstile0(void *dummy);
static void     propagate_priority(struct thread *);
static void     turnstile_setowner(struct turnstile *ts, struct thread *owner);

/*
 * Walks the chain of turnstiles and their owners to propagate the priority
 * of the thread being blocked to all the threads holding locks that have to
 * release their locks before this thread can run again.
 */
static void
propagate_priority(struct thread *td)
{
        struct turnstile_chain *tc;
        struct turnstile *ts;
        struct thread *td1;
        int pri;

        mtx_assert(&sched_lock, MA_OWNED);
        pri = td->td_priority;
        ts = td->td_blocked;
        for (;;) {
                td = ts->ts_owner;

                if (td == NULL) {
                        /*
                         * This really isn't quite right.  We really ought
                         * to bump the priority of the thread that next
                         * acquires the lock.
                         */
                        return;
                }

                MPASS(td->td_proc != NULL);
                MPASS(td->td_proc->p_magic == P_MAGIC);

                /*
                 * XXX: The owner of a turnstile can be stale if it is the
                 * first thread to grab a slock of a sx lock.  In that case
                 * it is possible for us to be at SSLEEP or some other
                 * weird state.  We should probably just return if the state
                 * isn't SRUN or SLOCK.
                 */
                KASSERT(!TD_IS_SLEEPING(td),
                    ("sleeping thread (pid %d) owns a non-sleepable lock",
                    td->td_proc->p_pid));

                /*
                 * If this thread already has higher priority than the
                 * thread that is being blocked, we are finished.
                 */
                if (td->td_priority <= pri)
                        return;

                /*
                 * If the lock holder is actually running, just bump its
                 * priority.
                 */
                if (TD_IS_RUNNING(td)) {
                        td->td_priority = pri;
                        return;
                }

#ifndef SMP
                /*
                 * For UP, we check to see if td is curthread (this should
                 * never happen, as it would mean we are deadlocked).
                 */
                KASSERT(td != curthread, ("Deadlock detected"));
#endif

                /*
                 * If on run queue, move to the new run queue and quit.
                 * XXXKSE this gets a lot more complicated under threads
                 * but try anyhow.
                 */
                if (TD_ON_RUNQ(td)) {
                        MPASS(td->td_blocked == NULL);
                        sched_prio(td, pri);
                        return;
                }

                /*
                 * Bump this thread's priority.
                 */
                td->td_priority = pri;

                /*
                 * If we aren't blocked on a lock, we should be.
                 */
                KASSERT(TD_ON_LOCK(td), (
                    "process %d(%s):%d holds %s but isn't blocked on a lock\n",
                    td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
                    ts->ts_lockobj->lo_name));

                /*
                 * Pick up the lock that td is blocked on.
                 */
                ts = td->td_blocked;
                MPASS(ts != NULL);
                tc = TC_LOOKUP(ts->ts_lockobj);
                mtx_lock_spin(&tc->tc_lock);

                /*
                 * This thread may not be blocked on this turnstile anymore
                 * but instead might already be woken up on another CPU
                 * that is waiting on sched_lock in turnstile_unpend() to
                 * finish waking this thread up.  We can detect this case
                 * by checking to see if this thread has been given a
                 * turnstile by either turnstile_signal() or
                 * turnstile_wakeup().  In this case, treat the thread as
                 * if it was already running.
                 */
                if (td->td_turnstile != NULL) {
                        mtx_unlock_spin(&tc->tc_lock);
                        return;
                }

                /*
                 * Check if the thread needs to be moved up on
                 * the blocked chain.  It doesn't need to be moved
                 * if it is already at the head of the list or if
                 * the item in front of it still has a higher priority.
                 */
                if (td == TAILQ_FIRST(&ts->ts_blocked)) {
                        mtx_unlock_spin(&tc->tc_lock);
                        continue;
                }

                td1 = TAILQ_PREV(td, threadqueue, td_lockq);
                if (td1->td_priority <= pri) {
                        mtx_unlock_spin(&tc->tc_lock);
                        continue;
                }

                /*
                 * Remove the thread from the blocked chain and determine
                 * where it should be moved up to.  Since we know that td1
                 * has a lower priority than td, we know that at least one
                 * thread in the chain has a lower priority and that td1
                 * will thus not be NULL after the loop.
                 */
                mtx_lock_spin(&td_contested_lock);
                TAILQ_REMOVE(&ts->ts_blocked, td, td_lockq);
                TAILQ_FOREACH(td1, &ts->ts_blocked, td_lockq) {
                        MPASS(td1->td_proc->p_magic == P_MAGIC);
                        if (td1->td_priority > pri)
                                break;
                }

                MPASS(td1 != NULL);
                TAILQ_INSERT_BEFORE(td1, td, td_lockq);
                mtx_unlock_spin(&td_contested_lock);
                CTR4(KTR_LOCK,
                    "propagate_priority: td %p moved before %p on [%p] %s",
                    td, td1, ts->ts_lockobj, ts->ts_lockobj->lo_name);
                mtx_unlock_spin(&tc->tc_lock);
        }
}

/*
 * Early initialization of turnstiles.  This is not done via a SYSINIT()
 * since this needs to be initialized very early when mutexes are first
 * initialized.
 */
void
init_turnstiles(void)
{
        int i;

        for (i = 0; i < TC_TABLESIZE; i++) {
                LIST_INIT(&turnstile_chains[i].tc_turnstiles);
                mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain",
                    NULL, MTX_SPIN);
        }
        mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN);
        thread0.td_turnstile = NULL;
}

static void
init_turnstile0(void *dummy)
{

        thread0.td_turnstile = turnstile_alloc();
}
SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);

/*
 * Set the owner of the lock this turnstile is attached to.
 */
static void
turnstile_setowner(struct turnstile *ts, struct thread *owner)
{

        mtx_assert(&td_contested_lock, MA_OWNED);
        MPASS(owner->td_proc->p_magic == P_MAGIC);
        MPASS(ts->ts_owner == NULL);
        ts->ts_owner = owner;
        LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
}

/*
 * Malloc a turnstile for a new thread, initialize it and return it.
 */
struct turnstile *
turnstile_alloc(void)
{
        struct turnstile *ts;

        ts = malloc(sizeof(struct turnstile), M_TURNSTILE, M_WAITOK | M_ZERO);
        TAILQ_INIT(&ts->ts_blocked);
        TAILQ_INIT(&ts->ts_pending);
        LIST_INIT(&ts->ts_free);
        return (ts);
}

/*
 * Free a turnstile when a thread is destroyed.
 */
void
turnstile_free(struct turnstile *ts)
{

        MPASS(ts != NULL);
        MPASS(TAILQ_EMPTY(&ts->ts_blocked));
        MPASS(TAILQ_EMPTY(&ts->ts_pending));
        free(ts, M_TURNSTILE);
}

/*
 * Look up the turnstile for a lock in the hash table, locking the
 * associated turnstile chain along the way.  Return with the turnstile
 * chain locked.  If no turnstile is found in the hash table, NULL is
 * returned.
 */
struct turnstile *
turnstile_lookup(struct lock_object *lock)
{
        struct turnstile_chain *tc;
        struct turnstile *ts;

        tc = TC_LOOKUP(lock);
        mtx_lock_spin(&tc->tc_lock);
        LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
                if (ts->ts_lockobj == lock)
                        return (ts);
        return (NULL);
}

/*
 * Unlock the turnstile chain associated with a given lock.
 */
void
turnstile_release(struct lock_object *lock)
{
        struct turnstile_chain *tc;

        tc = TC_LOOKUP(lock);
        mtx_unlock_spin(&tc->tc_lock);
}
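
/*
 * A minimal sketch (an assumption, loosely modeled on how a lock
 * implementation such as kern_mutex.c might drive this interface; the
 * atomic lock-word manipulation is elided and still_contested() is a
 * hypothetical helper) of the contested-acquire path for a sleep mutex
 * 'm' owned by 'owner':
 *
 *      ts = turnstile_lookup(&m->mtx_object);
 *      if (still_contested(m))
 *              turnstile_wait(ts, &m->mtx_object, owner);
 *      else
 *              turnstile_release(&m->mtx_object);
 *
 * turnstile_lookup() returns with the chain locked; turnstile_wait()
 * consumes that lock and blocks, while turnstile_release() simply drops
 * it when the race is lost and the acquire can be retried.
 */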

/*
 * Take ownership of a turnstile and adjust the priority of the new
 * owner appropriately.
 */
void
turnstile_claim(struct turnstile *ts)
{
        struct turnstile_chain *tc;
        struct thread *td, *owner;

        tc = TC_LOOKUP(ts->ts_lockobj);
        mtx_assert(&tc->tc_lock, MA_OWNED);

        owner = curthread;
        mtx_lock_spin(&td_contested_lock);
        turnstile_setowner(ts, owner);
        mtx_unlock_spin(&td_contested_lock);

        td = TAILQ_FIRST(&ts->ts_blocked);
        MPASS(td != NULL);
        MPASS(td->td_proc->p_magic == P_MAGIC);
        mtx_unlock_spin(&tc->tc_lock);

        /*
         * Update the priority of the new owner if needed.
         */
        mtx_lock_spin(&sched_lock);
        if (td->td_priority < owner->td_priority)
                owner->td_priority = td->td_priority;
        mtx_unlock_spin(&sched_lock);
}

/*
 * Block the current thread on the turnstile ts.  This function will context
 * switch and not return until this thread has been woken back up.  This
 * function must be called with the appropriate turnstile chain locked and
 * will return with it unlocked.
 */
void
turnstile_wait(struct turnstile *ts, struct lock_object *lock,
    struct thread *owner)
{
        struct turnstile_chain *tc;
        struct thread *td, *td1;

        td = curthread;
        tc = TC_LOOKUP(lock);
        mtx_assert(&tc->tc_lock, MA_OWNED);
        MPASS(td->td_turnstile != NULL);
        MPASS(owner != NULL);
        MPASS(owner->td_proc->p_magic == P_MAGIC);

        /* If the passed-in turnstile is NULL, use this thread's turnstile. */
        if (ts == NULL) {
                ts = td->td_turnstile;
                LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
                KASSERT(TAILQ_EMPTY(&ts->ts_pending),
                    ("thread's turnstile has pending threads"));
                KASSERT(TAILQ_EMPTY(&ts->ts_blocked),
                    ("thread's turnstile has a non-empty queue"));
                KASSERT(LIST_EMPTY(&ts->ts_free),
                    ("thread's turnstile has a non-empty free list"));
                KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
                ts->ts_lockobj = lock;
                mtx_lock_spin(&td_contested_lock);
                TAILQ_INSERT_TAIL(&ts->ts_blocked, td, td_lockq);
                turnstile_setowner(ts, owner);
                mtx_unlock_spin(&td_contested_lock);
        } else {
                TAILQ_FOREACH(td1, &ts->ts_blocked, td_lockq)
                        if (td1->td_priority > td->td_priority)
                                break;
                mtx_lock_spin(&td_contested_lock);
                if (td1 != NULL)
                        TAILQ_INSERT_BEFORE(td1, td, td_lockq);
                else
                        TAILQ_INSERT_TAIL(&ts->ts_blocked, td, td_lockq);
                mtx_unlock_spin(&td_contested_lock);
                MPASS(td->td_turnstile != NULL);
                LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
                MPASS(owner == ts->ts_owner);
        }
        td->td_turnstile = NULL;
        mtx_unlock_spin(&tc->tc_lock);

        mtx_lock_spin(&sched_lock);
        /*
         * Handle the race where a thread on another CPU that owns
         * lock 'lock' could have woken us up in between dropping the
         * turnstile chain lock and acquiring sched_lock.
         */
        if (td->td_flags & TDF_TSNOBLOCK) {
                td->td_flags &= ~TDF_TSNOBLOCK;
                mtx_unlock_spin(&sched_lock);
                return;
        }

#ifdef notyet
        /*
         * If we're borrowing an interrupted thread's VM context, we
         * must clean up before going to sleep.
         */
        if (td->td_ithd != NULL) {
                struct ithd *it = td->td_ithd;

                if (it->it_interrupted) {
                        if (LOCK_LOG_TEST(lock, 0))
                                CTR3(KTR_LOCK, "%s: %p interrupted %p",
                                    __func__, it, it->it_interrupted);
                        intr_thd_fixup(it);
                }
        }
#endif

        /* Save who we are blocked on and switch. */
        td->td_blocked = ts;
        td->td_lockname = lock->lo_name;
        TD_SET_LOCK(td);
        propagate_priority(td);

        if (LOCK_LOG_TEST(lock, 0))
                CTR4(KTR_LOCK, "%s: td %p blocked on [%p] %s", __func__, td,
                    lock, lock->lo_name);

        td->td_proc->p_stats->p_ru.ru_nvcsw++;
        mi_switch();

        if (LOCK_LOG_TEST(lock, 0))
                CTR4(KTR_LOCK, "%s: td %p free from blocked on [%p] %s",
                    __func__, td, lock, lock->lo_name);

        mtx_unlock_spin(&sched_lock);
}

/*
 * Pick the highest priority thread on this turnstile and put it on the
 * pending list.  This must be called with the turnstile chain locked.
 */
int
turnstile_signal(struct turnstile *ts)
{
        struct turnstile_chain *tc;
        struct thread *td;
        int empty;

        MPASS(ts != NULL);
        MPASS(curthread->td_proc->p_magic == P_MAGIC);
        MPASS(ts->ts_owner == curthread);
        tc = TC_LOOKUP(ts->ts_lockobj);
        mtx_assert(&tc->tc_lock, MA_OWNED);

        /*
         * Pick the highest priority thread blocked on this lock and
         * move it to the pending list.
         */
        td = TAILQ_FIRST(&ts->ts_blocked);
        MPASS(td->td_proc->p_magic == P_MAGIC);
        mtx_lock_spin(&td_contested_lock);
        TAILQ_REMOVE(&ts->ts_blocked, td, td_lockq);
        mtx_unlock_spin(&td_contested_lock);
        TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);

        /*
         * If the turnstile is now empty, remove it from its chain and
         * give it to the about-to-be-woken thread.  Otherwise take a
         * turnstile from the free list and give it to the thread.
         */
        empty = TAILQ_EMPTY(&ts->ts_blocked);
        if (empty)
                MPASS(LIST_EMPTY(&ts->ts_free));
        else
                ts = LIST_FIRST(&ts->ts_free);
        LIST_REMOVE(ts, ts_hash);
        td->td_turnstile = ts;

        return (empty);
}

/*
 * Put all blocked threads on the pending list.  This must be called with
 * the turnstile chain locked.
 */
void
turnstile_wakeup(struct turnstile *ts)
{
        struct turnstile_chain *tc;
        struct turnstile *ts1;
        struct thread *td;

        MPASS(ts != NULL);
        MPASS(curthread->td_proc->p_magic == P_MAGIC);
        MPASS(ts->ts_owner == curthread);
        tc = TC_LOOKUP(ts->ts_lockobj);
        mtx_assert(&tc->tc_lock, MA_OWNED);

        /*
         * Transfer the blocked list to the pending list.
         */
        mtx_lock_spin(&td_contested_lock);
        TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked, td_lockq);
        mtx_unlock_spin(&td_contested_lock);

        /*
         * Give a turnstile to each thread.  The last thread gets
         * this turnstile.
         */
        TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
                if (LIST_EMPTY(&ts->ts_free)) {
                        MPASS(TAILQ_NEXT(td, td_lockq) == NULL);
                        ts1 = ts;
                } else
                        ts1 = LIST_FIRST(&ts->ts_free);
                LIST_REMOVE(ts1, ts_hash);
                td->td_turnstile = ts1;
        }
}

/*
 * Wake up all threads on the pending list and adjust the priority of the
 * current thread appropriately.  This must be called with the turnstile
 * chain locked.
 */
void
turnstile_unpend(struct turnstile *ts)
{
        TAILQ_HEAD(, thread) pending_threads;
        struct turnstile_chain *tc;
        struct thread *td;
        int cp, pri;

        MPASS(ts != NULL);
        MPASS(ts->ts_owner == curthread);
        tc = TC_LOOKUP(ts->ts_lockobj);
        mtx_assert(&tc->tc_lock, MA_OWNED);
        MPASS(!TAILQ_EMPTY(&ts->ts_pending));

        /*
         * Move the list of pending threads out of the turnstile and
         * into a local variable.
         */
        TAILQ_INIT(&pending_threads);
        TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
#ifdef INVARIANTS
        if (TAILQ_EMPTY(&ts->ts_blocked))
                ts->ts_lockobj = NULL;
#endif

        /*
         * Remove the turnstile from this thread's list of contested locks
         * since this thread doesn't own it anymore.  New threads will
         * not be blocking on the turnstile until it is claimed by a new
         * owner.
         */
        mtx_lock_spin(&td_contested_lock);
        ts->ts_owner = NULL;
        LIST_REMOVE(ts, ts_link);
        mtx_unlock_spin(&td_contested_lock);
        mtx_unlock_spin(&tc->tc_lock);

        /*
         * Adjust the priority of curthread based on other contested
         * locks it owns.  Don't lower the priority below the base
         * priority, however.
         */
        td = curthread;
        pri = PRI_MAX;
        mtx_lock_spin(&sched_lock);
        mtx_lock_spin(&td_contested_lock);
        LIST_FOREACH(ts, &td->td_contested, ts_link) {
                cp = TAILQ_FIRST(&ts->ts_blocked)->td_priority;
                if (cp < pri)
                        pri = cp;
        }
        mtx_unlock_spin(&td_contested_lock);
        if (pri > td->td_base_pri)
                pri = td->td_base_pri;
        td->td_priority = pri;

        /*
         * Wake up all the pending threads.  If a thread is not blocked
         * on a lock, then it is currently executing on another CPU in
         * turnstile_wait() or sitting on a run queue waiting to resume
         * in turnstile_wait().  Set a flag to force it to try to acquire
         * the lock again instead of blocking.
         */
        while (!TAILQ_EMPTY(&pending_threads)) {
                td = TAILQ_FIRST(&pending_threads);
                TAILQ_REMOVE(&pending_threads, td, td_lockq);
                MPASS(td->td_proc->p_magic == P_MAGIC);
                if (TD_ON_LOCK(td)) {
                        td->td_blocked = NULL;
                        td->td_lockname = NULL;
                        TD_CLR_LOCK(td);
                        MPASS(TD_CAN_RUN(td));
                        setrunqueue(td);
                } else {
                        td->td_flags |= TDF_TSNOBLOCK;
                        MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td));
                }
        }
        mtx_unlock_spin(&sched_lock);
}
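
/*
 * A minimal sketch (an assumption, modeled loosely on a mutex unlock
 * path; the lock-word updates are elided) of how the wakeup side fits
 * together for a contested sleep mutex 'm':
 *
 *      ts = turnstile_lookup(&m->mtx_object);
 *      MPASS(ts != NULL);
 *      if (turnstile_signal(ts)) {
 *              // last waiter: the lock word may be marked uncontested
 *      }
 *      turnstile_unpend(ts);
 *
 * turnstile_signal() moves one waiter to ts_pending and reports whether
 * the turnstile is now empty; turnstile_unpend() then releases the chain
 * lock, drops this thread's lent priority, and makes the pending threads
 * runnable.  turnstile_wakeup() may be used in place of turnstile_signal()
 * to move every waiter to the pending list at once.
 */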

/*
 * Return the first thread in a turnstile.
 */
struct thread *
turnstile_head(struct turnstile *ts)
{
#ifdef INVARIANTS
        struct turnstile_chain *tc;

        MPASS(ts != NULL);
        tc = TC_LOOKUP(ts->ts_lockobj);
        mtx_assert(&tc->tc_lock, MA_OWNED);
#endif
        return (TAILQ_FIRST(&ts->ts_blocked));
}

/*
 * Returns true if a turnstile is empty.
 */
int
turnstile_empty(struct turnstile *ts)
{
#ifdef INVARIANTS
        struct turnstile_chain *tc;

        MPASS(ts != NULL);
        tc = TC_LOOKUP(ts->ts_lockobj);
        mtx_assert(&tc->tc_lock, MA_OWNED);
#endif
        return (TAILQ_EMPTY(&ts->ts_blocked));
}
