glibc 2.9
pthread_mutex_lock.c
/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <lowlevellock.h>

#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
  lll_robust_lock ((mutex)->__data.__lock, id, \
                   PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
#endif

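/* These defaults wire the generic locking code below to the futex-based
   lowlevellock primitives.  Other compilation units are expected to be
   able to define their own LLL_MUTEX_* macros, and possibly NO_INCR,
   before including this file so the same state machine can be reused
   with different low-level lock operations; that usage is assumed here,
   not shown in this file.  */
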
int
__pthread_mutex_lock (pthread_mutex_t *mutex)
{
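  /* Sanity check: the opaque __size array of the pthread_mutex_t union
     must be large enough to hold the internal __data structure.  */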
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  int retval = 0;
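  /* The __builtin_expect below predicts the plain (timed) mutex type as
     the common case, keeping the fast path cheap.  */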
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
      break;

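      /* Adaptive mutexes spin briefly in user space before blocking, on
         the theory that an owner running on another CPU will release
         the lock soon.  On a uniprocessor spinning cannot help, so the
         code falls back to the plain blocking path immediately.  */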
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);

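          /* Fold the observed spin count into __spins as an
             exponentially weighted moving average (weight 1/8), so the
             spin bound adapts to how contended this mutex tends to be.  */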
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
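      /* Publish the mutex in this thread's robust-list "op pending"
         slot before touching the lock word, so that if the thread dies
         at any point during the acquisition the kernel can find the
         mutex and mark it with FUTEX_OWNER_DIED for the next locker.  */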
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS);
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

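      /* Priority-inheritance mutexes hand contended acquisition to the
         kernel via FUTEX_LOCK_PI, which boosts the current owner's
         scheduling priority to that of the highest-priority waiter for
         as long as it holds the lock.  */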
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

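        /* Fast path: a single compare-and-swap from 0 (unlocked) to our
           TID acquires an uncontended PI mutex with no system call.  */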
        int newval = id;
#ifdef NO_INCR
        newval |= FUTEX_WAITERS;
#endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
            --mutex->__data.__nusers;
#endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;

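      /* Priority-protection (priority-ceiling) mutexes are handled in
         user space: before taking the lock the thread's priority is
         raised to the mutex's configured ceiling, so no thread that is
         allowed to contend the lock can preempt the owner.  */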
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

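        /* The lock word encodes the priority ceiling in the bits
           covered by PTHREAD_MUTEX_PRIO_CEILING_MASK; the low bits hold
           the lock state: ceilval means unlocked, ceilval | 1 locked,
           and ceilval | 2 locked with waiters to wake.  */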
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                                  PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return retval;
}
#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
strong_alias (__pthread_mutex_lock, __pthread_mutex_lock_internal)
#endif
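
/* Illustrative usage sketch (editor's addition, not part of glibc): a
   minimal caller handling the error codes this function can return.
   The mutex `m' and the helper `locked_work' are hypothetical names;
   EOWNERDEAD is only seen on mutexes initialized as robust, and
   pthread_mutex_consistent_np is the glibc-2.x name for what POSIX
   later standardized as pthread_mutex_consistent.  Guarded with
   `#if 0' so it does not affect compilation of this file.  */
#if 0
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

static void
locked_work (void)
{
  int err = pthread_mutex_lock (&m);
  switch (err)
    {
    case 0:
      /* Critical section; the mutex is held.  */
      pthread_mutex_unlock (&m);
      break;
    case EOWNERDEAD:
      /* A robust mutex whose previous owner died while holding it.
         Repair the protected state, mark the mutex consistent, and
         continue as the new owner.  */
      pthread_mutex_consistent_np (&m);
      pthread_mutex_unlock (&m);
      break;
    case EDEADLK:         /* Error-checking mutex already held by us.  */
    case EAGAIN:          /* Recursion counter would overflow.  */
    case EINVAL:          /* Bad type, or priority above a PP ceiling.  */
    case ENOTRECOVERABLE: /* Robust mutex marked unrecoverable.  */
    default:
      fprintf (stderr, "pthread_mutex_lock failed: %d\n", err);
      break;
    }
}
#endif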