Source listing: pthread_mutex_trylock.c from glibc 2.9 (NPTL), as rendered
by the generated cross-reference documentation for that release.
00001 /* Copyright (C) 2002, 2003, 2005-2007, 2008 Free Software Foundation, Inc.
00002    This file is part of the GNU C Library.
00003    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
00004 
00005    The GNU C Library is free software; you can redistribute it and/or
00006    modify it under the terms of the GNU Lesser General Public
00007    License as published by the Free Software Foundation; either
00008    version 2.1 of the License, or (at your option) any later version.
00009 
00010    The GNU C Library is distributed in the hope that it will be useful,
00011    but WITHOUT ANY WARRANTY; without even the implied warranty of
00012    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00013    Lesser General Public License for more details.
00014 
00015    You should have received a copy of the GNU Lesser General Public
00016    License along with the GNU C Library; if not, write to the Free
00017    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
00018    02111-1307 USA.  */
00019 
00020 #include <assert.h>
00021 #include <errno.h>
00022 #include <stdlib.h>
00023 #include "pthreadP.h"
00024 #include <lowlevellock.h>
00025 
00026 
/* Try to acquire MUTEX without blocking.

   Return values visible in this implementation:
     0                 - the mutex was acquired (or recursively re-acquired).
     EBUSY             - the mutex is held by another thread.
     EAGAIN            - the recursion counter would overflow.
     EDEADLK           - an error-checking mutex is already held by the caller.
     EOWNERDEAD        - a robust mutex was acquired, but its previous owner
                         died; the state it protects may be inconsistent.
     ENOTRECOVERABLE   - a robust mutex was marked not recoverable.
     EINVAL            - unknown mutex type, or (for priority-protected
                         mutexes) the caller's priority exceeds the ceiling.

   Note the old-style (K&R) definition, kept for ABI/source compatibility
   within glibc.  */
int
__pthread_mutex_trylock (mutex)
     pthread_mutex_t *mutex;
{
  int oldval;
  /* Kernel thread id of the caller; stored in __owner to record
     ownership and compared against the futex TID field for robust/PI
     mutexes.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* Dispatch on the mutex type; plain timed mutexes are the expected
     common case.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

      /* Not held by us: attempt a non-blocking acquisition.  */
      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      /* Lock is held elsewhere; fall out to return EBUSY.  */
      break;

    case PTHREAD_MUTEX_ERRORCHECK_NP:
    case PTHREAD_MUTEX_TIMED_NP:
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      /* Normal mutex.  Note that for trylock the error-checking variant
         needs no deadlock test: if we already own the lock the trylock
         simply fails with EBUSY.  */
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      /* Publish this mutex as the target of a pending robust-list
         operation before touching the lock word, so the state can be
         recovered if this thread dies mid-acquisition (cleared again on
         every exit path below).  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.
                 Preserve any waiter bit while installing our TID.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  /* CAS lost a race; re-examine the fresh lock value.  */
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* Self-deadlock detected; clear the pending marker
                     before reporting it.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = lll_robust_trylock (mutex->__data.__lock, id);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              /* Held by a live owner: give up.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                /* We just acquired it above; release it again before
                   failing.  */
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      /* Loop again if the trylock observed a dead owner; the
         FUTEX_OWNER_DIED branch at the top will then claim the lock.  */
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      /* Acquired cleanly: link into the robust list and clear the
         pending marker, then record ownership.  */
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      /* Priority-inheritance mutexes; contended/dead-owner handling is
         delegated to the kernel via FUTEX_TRYLOCK_PI.  */
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        /* Fast path: install our TID if the lock word is 0 (free,
           no waiters, no dead owner).  */
        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                /* Held by a live owner.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            /* Only robust PI mutexes can carry FUTEX_OWNER_DIED.  */
            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                          private), 0, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
              {
                /* Kernel could not hand us the lock either.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            /* We now own the lock; drop the dead-owner flag from the
               lock word before reporting EOWNERDEAD.  */
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            /* Release the PI lock through the kernel before failing.  */
            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      /* Priority-protected (priority ceiling) mutexes.  The ceiling is
         encoded in the high bits of the lock word; bit 0 is the lock
         bit.  */
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            /* Extract the current ceiling from the lock word.  */
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                /* Caller's priority exceeds the ceiling: undo any
                   temporary boost and fail.  */
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            /* Boost this thread to the ceiling priority before trying
               to take the lock.  */
            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            /* Try to set the lock bit while the ceiling is unchanged.  */
            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        /* Retry only if the CAS failed because the ceiling changed
           under us (not because the lock bit was set).  */
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        if (oldval != ceilval)
          {
            /* Lock is held: revert the priority boost and report
               EBUSY via the fall-through at the end.  */
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Reached only via the `break's above: the mutex is busy.  */
  return EBUSY;
}
strong_alias (__pthread_mutex_trylock, pthread_mutex_trylock)