
glibc 2.9
pthread_mutex_timedlock.c
/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>

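/* Lock MUTEX, giving up once the absolute deadline ABSTIME has passed.
   Each mutex type (normal, recursive, error-checking, adaptive,
   robust, priority-inheritance, priority-protection) gets its own
   locking strategy below.  */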
int
pthread_mutex_timedlock (pthread_mutex_t *mutex,
                         const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block,
     ABSTIME must not be checked for a valid value.  */

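  /* Dispatch on the mutex type.  PTHREAD_MUTEX_TIMED_NP is the
     default kind, so the branch hint marks it as the expected case.  */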
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;

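      /* Adaptive mutex: on an SMP machine, spin briefly in user space
         in the hope that the holder releases the lock soon, and only
         fall back to the (possibly sleeping) futex path afterwards.  */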
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

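          /* Adapt the spin limit: move __spins one eighth of the way
             toward the iteration count this acquisition needed, a
             cheap exponential moving average.  */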
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;

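      /* Robust mutexes: before touching the lock word, record the
         mutex in list_op_pending so that if this thread dies in the
         middle of the operation the kernel and the next locker can
         still recover the mutex.  */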
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fell
                 through to the end of the function, __nusers would be
                 incremented, which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
                                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }

          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

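      /* Acquired a consistent robust mutex: put it on this thread's
         robust list and clear the pending marker.  */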
      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

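      /* Priority-inheritance mutexes: contention is handled by the
         kernel (FUTEX_LOCK_PI), which boosts the owner's priority
         while higher-priority threads wait.  */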
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

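        /* Uncontended fast path: try to change the lock word from 0
           (free) to our TID with a single CAS, avoiding the kernel.  */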
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  FUTEX_LOCK_PI takes an absolute
               CLOCK_REALTIME timeout, so ABSTIME can be passed to the
               kernel unchanged.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fell
               through to the end of the function, __nusers would be
               incremented, which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;

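      /* Priority-protection ("ceiling") mutexes: the lock word holds
         the current ceiling in PTHREAD_MUTEX_PRIO_CEILING_MASK plus
         two low bits: ceilval means free, ceilval | 1 locked, and
         ceilval | 2 locked with possible waiters.  */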
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

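        /* To lock, first raise this thread's priority to the ceiling
           with __pthread_tpp_change_priority, then try to move the
           lock word from ceilval (free) to ceilval | 1 (locked).
           Retry if the ceiling changes underneath us.  */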
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) __gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
    }

 out:
  return result;
}
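
A minimal caller-side sketch (not part of this file): acquiring a mutex
with a two-second absolute deadline.  The mutex and function names are
illustrative; only standard POSIX calls are used, and
pthread_mutex_timedlock measures ABSTIME against CLOCK_REALTIME.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Try to take LOCK, giving up two seconds from now.  */
static int
lock_with_deadline (void)
{
  struct timespec abstime;

  /* The deadline is an absolute CLOCK_REALTIME timestamp, so start
     from the current time and add the relative budget.  */
  clock_gettime (CLOCK_REALTIME, &abstime);
  abstime.tv_sec += 2;

  int err = pthread_mutex_timedlock (&lock, &abstime);
  if (err == ETIMEDOUT)
    fprintf (stderr, "lock still contended after 2 seconds\n");
  else if (err == 0)
    pthread_mutex_unlock (&lock);
  return err;
}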