glibc 2.9 — aio_misc.c (source listing)
00001 /* Handle general operations.
00002    Copyright (C) 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2006, 2007
00003    Free Software Foundation, Inc.
00004    This file is part of the GNU C Library.
00005    Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
00006 
00007    The GNU C Library is free software; you can redistribute it and/or
00008    modify it under the terms of the GNU Lesser General Public
00009    License as published by the Free Software Foundation; either
00010    version 2.1 of the License, or (at your option) any later version.
00011 
00012    The GNU C Library is distributed in the hope that it will be useful,
00013    but WITHOUT ANY WARRANTY; without even the implied warranty of
00014    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00015    Lesser General Public License for more details.
00016 
00017    You should have received a copy of the GNU Lesser General Public
00018    License along with the GNU C Library; if not, write to the Free
00019    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
00020    02111-1307 USA.  */
00021 
00022 #include <aio.h>
00023 #include <assert.h>
00024 #include <errno.h>
00025 #include <limits.h>
00026 #include <pthread.h>
00027 #include <stdlib.h>
00028 #include <unistd.h>
00029 #include <sys/stat.h>
00030 #include <sys/time.h>
00031 #include <aio_misc.h>
00032 
#ifndef aio_create_helper_thread
# define aio_create_helper_thread __aio_create_helper_thread

/* Default way to start an AIO worker thread: create it detached so its
   resources are reclaimed automatically when it exits (the pool never
   joins its workers).  Returns 0 on success or the pthread_create error
   code.  A platform may override this by defining
   aio_create_helper_thread before this point.  */
extern inline int
__aio_create_helper_thread (pthread_t *threadp, void *(*tf) (void *), void *arg)
{
  pthread_attr_t attr;

  /* Make sure the thread is created detached.  */
  pthread_attr_init (&attr);
  pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);

  int ret = pthread_create (threadp, &attr, tf, arg);

  (void) pthread_attr_destroy (&attr);
  return ret;
}
#endif
00051 
/* Forward declaration: insert a request into the prioritized run queue.  */
static void add_request_to_runlist (struct requestlist *newrequest);

/* Pool of request list entries: a table of row pointers, each row
   holding many `struct requestlist' entries (allocated in get_elem).  */
static struct requestlist **pool;

/* Capacity of the row table, and the number of rows actually
   allocated so far.  */
static size_t pool_max_size;
static size_t pool_size;

/* We implement a two dimensional array but allocate each row separately.
   The macro below determines how many entries should be used per row.
   It should better be a power of two.  */
#define ENTRIES_PER_ROW     32

/* How many rows we allocate at once.  */
#define ROWS_STEP    8

/* List of available entries, linked through the next_prio field.  */
static struct requestlist *freelist;

/* List of requests waiting to be processed, ordered by priority.  */
static struct requestlist *runlist;

/* Structured list of all currently processed requests: one chain per
   file descriptor (linked via next_fd/last_fd, sorted by descriptor),
   each chain sorted by priority via next_prio.  */
static struct requestlist *requests;

/* Number of threads currently running.  */
static int nthreads;

/* Number of threads waiting for work to arrive.  */
static int idle_thread_count;


/* These are the values used to optimize the use of AIO.  The user can
   overwrite them by using the `aio_init' function.  */
static struct aioinit optim =
{
  20,  /* int aio_threads;  Maximal number of threads.  */
  64,  /* int aio_num;             Number of expected simultaneous requests. */
  0,
  0,
  0,
  0,
  1,   /* aio_idle_time: seconds an idle worker waits before exiting.  */
  0
};


/* Since the list is global we need a mutex protecting it.  */
pthread_mutex_t __aio_requests_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

/* When you add a request to the list and there are idle threads present,
   you signal this condition variable. When a thread finishes work, it waits
   on this condition variable for a time before it actually exits. */
pthread_cond_t __aio_new_request_notification = PTHREAD_COND_INITIALIZER;
00107 
00108 
00109 /* Functions to handle request list pool.  */
00110 static struct requestlist *
00111 get_elem (void)
00112 {
00113   struct requestlist *result;
00114 
00115   if (freelist == NULL)
00116     {
00117       struct requestlist *new_row;
00118       int cnt;
00119 
00120       assert (sizeof (struct aiocb) == sizeof (struct aiocb64));
00121 
00122       if (pool_size + 1 >= pool_max_size)
00123        {
00124          size_t new_max_size = pool_max_size + ROWS_STEP;
00125          struct requestlist **new_tab;
00126 
00127          new_tab = (struct requestlist **)
00128            realloc (pool, new_max_size * sizeof (struct requestlist *));
00129 
00130          if (new_tab == NULL)
00131            return NULL;
00132 
00133          pool_max_size = new_max_size;
00134          pool = new_tab;
00135        }
00136 
00137       /* Allocate the new row.  */
00138       cnt = pool_size == 0 ? optim.aio_num : ENTRIES_PER_ROW;
00139       new_row = (struct requestlist *) calloc (cnt,
00140                                           sizeof (struct requestlist));
00141       if (new_row == NULL)
00142        return NULL;
00143 
00144       pool[pool_size++] = new_row;
00145 
00146       /* Put all the new entries in the freelist.  */
00147       do
00148        {
00149          new_row->next_prio = freelist;
00150          freelist = new_row++;
00151        }
00152       while (--cnt > 0);
00153     }
00154 
00155   result = freelist;
00156   freelist = freelist->next_prio;
00157 
00158   return result;
00159 }
00160 
00161 
00162 void
00163 internal_function
00164 __aio_free_request (struct requestlist *elem)
00165 {
00166   elem->running = no;
00167   elem->next_prio = freelist;
00168   freelist = elem;
00169 }
00170 
00171 
00172 struct requestlist *
00173 internal_function
00174 __aio_find_req (aiocb_union *elem)
00175 {
00176   struct requestlist *runp = requests;
00177   int fildes = elem->aiocb.aio_fildes;
00178 
00179   while (runp != NULL && runp->aiocbp->aiocb.aio_fildes < fildes)
00180     runp = runp->next_fd;
00181 
00182   if (runp != NULL)
00183     {
00184       if (runp->aiocbp->aiocb.aio_fildes != fildes)
00185        runp = NULL;
00186       else
00187        while (runp != NULL && runp->aiocbp != elem)
00188          runp = runp->next_prio;
00189     }
00190 
00191   return runp;
00192 }
00193 
00194 
00195 struct requestlist *
00196 internal_function
00197 __aio_find_req_fd (int fildes)
00198 {
00199   struct requestlist *runp = requests;
00200 
00201   while (runp != NULL && runp->aiocbp->aiocb.aio_fildes < fildes)
00202     runp = runp->next_fd;
00203 
00204   return (runp != NULL && runp->aiocbp->aiocb.aio_fildes == fildes
00205          ? runp : NULL);
00206 }
00207 
00208 
/* Unlink REQ from the internal bookkeeping lists.  LAST is REQ's
   predecessor in its priority chain, or NULL when REQ heads that chain.
   When ALL is nonzero the whole priority chain hanging off REQ is cut
   off with it; otherwise only REQ is removed.  The caller must hold
   __aio_requests_mutex.  */
void
internal_function
__aio_remove_request (struct requestlist *last, struct requestlist *req,
		      int all)
{
  assert (req->running == yes || req->running == queued
	  || req->running == done);

  if (last != NULL)
    /* REQ sits in the middle of a priority chain: bypass it, or cut the
       rest of the chain off when ALL is set.  */
    last->next_prio = all ? NULL : req->next_prio;
  else
    {
      /* REQ heads the priority chain for its descriptor, so it is also
	 linked into the per-descriptor chain (next_fd/last_fd).  */
      if (all || req->next_prio == NULL)
	{
	  /* The whole descriptor entry disappears: splice REQ out of
	     the descriptor chain.  */
	  if (req->last_fd != NULL)
	    req->last_fd->next_fd = req->next_fd;
	  else
	    requests = req->next_fd;
	  if (req->next_fd != NULL)
	    req->next_fd->last_fd = req->last_fd;
	}
      else
	{
	  /* Promote REQ's priority successor to head the descriptor
	     entry, inheriting REQ's links in the descriptor chain.  */
	  if (req->last_fd != NULL)
	    req->last_fd->next_fd = req->next_prio;
	  else
	    requests = req->next_prio;

	  if (req->next_fd != NULL)
	    req->next_fd->last_fd = req->next_prio;

	  req->next_prio->last_fd = req->last_fd;
	  req->next_prio->next_fd = req->next_fd;

	  /* Mark this entry as runnable.  */
	  req->next_prio->running = yes;
	}

      if (req->running == yes)
	{
	  /* REQ is still waiting in the run queue: remove it from there
	     as well with a linear scan.  */
	  struct requestlist *runp = runlist;

	  last = NULL;
	  while (runp != NULL)
	    {
	      if (runp == req)
		{
		  if (last == NULL)
		    runlist = runp->next_run;
		  else
		    last->next_run = runp->next_run;
		  break;
		}
	      last = runp;
	      runp = runp->next_run;
	    }
	}
    }
}
00268 
00269 
00270 /* The thread handler.  */
00271 static void *handle_fildes_io (void *arg);
00272 
00273 
00274 /* User optimization.  */
00275 void
00276 __aio_init (const struct aioinit *init)
00277 {
00278   /* Get the mutex.  */
00279   pthread_mutex_lock (&__aio_requests_mutex);
00280 
00281   /* Only allow writing new values if the table is not yet allocated.  */
00282   if (pool == NULL)
00283     {
00284       optim.aio_threads = init->aio_threads < 1 ? 1 : init->aio_threads;
00285       optim.aio_num = (init->aio_num < ENTRIES_PER_ROW
00286                      ? ENTRIES_PER_ROW
00287                      : init->aio_num & ~ENTRIES_PER_ROW);
00288     }
00289 
00290   if (init->aio_idle_time != 0)
00291     optim.aio_idle_time = init->aio_idle_time;
00292 
00293   /* Release the mutex.  */
00294   pthread_mutex_unlock (&__aio_requests_mutex);
00295 }
00296 weak_alias (__aio_init, aio_init)
00297 
00298 
/* The main function of the async I/O handling.  It enqueues requests
   and if necessary starts and handles threads.  Returns the new request
   list entry, or NULL with errno set: EINVAL for an out-of-range
   priority, EAGAIN when no pool entry or no worker thread could be
   obtained.  */
struct requestlist *
internal_function
__aio_enqueue_request (aiocb_union *aiocbp, int operation)
{
  int result = 0;
  int policy, prio;
  struct sched_param param;
  struct requestlist *last, *runp, *newp;
  int running = no;

  if (operation == LIO_SYNC || operation == LIO_DSYNC)
    aiocbp->aiocb.aio_reqprio = 0;
  else if (aiocbp->aiocb.aio_reqprio < 0
	   || aiocbp->aiocb.aio_reqprio > AIO_PRIO_DELTA_MAX)
    {
      /* Invalid priority value.  */
      __set_errno (EINVAL);
      aiocbp->aiocb.__error_code = EINVAL;
      aiocbp->aiocb.__return_value = -1;
      return NULL;
    }

  /* Compute priority for this request: the caller's scheduling
     priority reduced by the requested delta.  */
  pthread_getschedparam (pthread_self (), &policy, &param);
  prio = param.sched_priority - aiocbp->aiocb.aio_reqprio;

  /* Get the mutex.  */
  pthread_mutex_lock (&__aio_requests_mutex);

  last = NULL;
  runp = requests;
  /* First look whether the current file descriptor is currently
     worked with; the `requests' list is sorted by descriptor.  */
  while (runp != NULL
	 && runp->aiocbp->aiocb.aio_fildes < aiocbp->aiocb.aio_fildes)
    {
      last = runp;
      runp = runp->next_fd;
    }

  /* Get a new element for the waiting list.  */
  newp = get_elem ();
  if (newp == NULL)
    {
      pthread_mutex_unlock (&__aio_requests_mutex);
      __set_errno (EAGAIN);
      return NULL;
    }
  newp->aiocbp = aiocbp;
#ifdef BROKEN_THREAD_SIGNALS
  /* Remember the requester's PID so the completion signal can be
     delivered to the right process where thread signals are broken.  */
  newp->caller_pid = (aiocbp->aiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL
		      ? getpid () : 0);
#endif
  newp->waiting = NULL;

  /* Record the effective priority/policy and mark the request as in
     progress before it becomes visible to workers.  */
  aiocbp->aiocb.__abs_prio = prio;
  aiocbp->aiocb.__policy = policy;
  aiocbp->aiocb.aio_lio_opcode = operation;
  aiocbp->aiocb.__error_code = EINPROGRESS;
  aiocbp->aiocb.__return_value = 0;

  if (runp != NULL
      && runp->aiocbp->aiocb.aio_fildes == aiocbp->aiocb.aio_fildes)
    {
      /* The current file descriptor is worked on.  It makes no sense
	 to start another thread since this new thread would fight
	 with the running thread for the resources.  But we also cannot
	 say that the thread processing this descriptor shall immediately
	 after finishing the current job process this request if there
	 are other threads in the running queue which have a higher
	 priority.  */

      /* Simply enqueue it after the running one according to the
	 priority.  */
      while (runp->next_prio != NULL
	     && runp->next_prio->aiocbp->aiocb.__abs_prio >= prio)
	runp = runp->next_prio;

      newp->next_prio = runp->next_prio;
      runp->next_prio = newp;

      running = queued;
    }
  else
    {
      running = yes;
      /* Enqueue this request for a new descriptor, keeping the
	 descriptor chain sorted.  */
      if (last == NULL)
	{
	  newp->last_fd = NULL;
	  newp->next_fd = requests;
	  if (requests != NULL)
	    requests->last_fd = newp;
	  requests = newp;
	}
      else
	{
	  newp->next_fd = last->next_fd;
	  newp->last_fd = last;
	  last->next_fd = newp;
	  if (newp->next_fd != NULL)
	    newp->next_fd->last_fd = newp;
	}

      newp->next_prio = NULL;
    }

  if (running == yes)
    {
      /* We try to create a new thread for this file descriptor.  The
	 function which gets called will handle all available requests
	 for this descriptor and when all are processed it will
	 terminate.

	 If no new thread can be created or if the specified limit of
	 threads for AIO is reached we queue the request.  */

      /* See if we need to and are able to create a thread.  */
      if (nthreads < optim.aio_threads && idle_thread_count == 0)
	{
	  pthread_t thid;

	  running = newp->running = allocated;

	  /* Now try to start a thread.  */
	  if (aio_create_helper_thread (&thid, handle_fildes_io, newp) == 0)
	    /* We managed to enqueue the request.  All errors which can
	       happen now can be recognized by calls to `aio_return' and
	       `aio_error'.  */
	    ++nthreads;
	  else
	    {
	      /* Reset the running flag.  The new request is not running.  */
	      running = newp->running = yes;

	      if (nthreads == 0)
		/* We cannot create a thread in the moment and there is
		   also no thread running.  This is a problem.  `errno' is
		   set to EAGAIN if this is only a temporary problem.  */
		result = -1;
	    }
	}
    }

  /* Enqueue the request in the run queue if it is not yet running.  */
  if (running == yes && result == 0)
    {
      add_request_to_runlist (newp);

      /* If there is a thread waiting for work, then let it know that we
	 have just given it something to do. */
      if (idle_thread_count > 0)
	pthread_cond_signal (&__aio_new_request_notification);
    }

  if (result == 0)
    newp->running = running;
  else
    {
      /* Something went wrong: undo the allocation and report failure.  */
      __aio_free_request (newp);
      newp = NULL;
    }

  /* Release the mutex.  */
  pthread_mutex_unlock (&__aio_requests_mutex);

  return newp;
}
00470 
00471 
/* The worker-thread body.  ARG is the request to process first, or NULL
   when the thread was started merely to drain the run queue.  The
   thread keeps pulling requests off the run queue; when the queue stays
   empty for optim.aio_idle_time seconds it decrements nthreads and
   exits.  */
static void *
handle_fildes_io (void *arg)
{
  pthread_t self = pthread_self ();
  struct sched_param param;
  struct requestlist *runp = (struct requestlist *) arg;
  aiocb_union *aiocbp;
  int policy;
  int fildes;

  pthread_getschedparam (self, &policy, &param);

  do
    {
      /* If runp is NULL, then we were created to service the work queue
	 in general, not to handle any particular request. In that case we
	 skip the "do work" stuff on the first pass, and go directly to the
	 "get work off the work queue" part of this loop, which is near the
	 end. */
      if (runp == NULL)
	pthread_mutex_lock (&__aio_requests_mutex);
      else
	{
	  /* Hopefully this request is marked as running.  */
	  assert (runp->running == allocated);

	  /* Update our variables.  */
	  aiocbp = runp->aiocbp;
	  fildes = aiocbp->aiocb.aio_fildes;

	  /* Change the priority to the requested value (if necessary).  */
	  if (aiocbp->aiocb.__abs_prio != param.sched_priority
	      || aiocbp->aiocb.__policy != policy)
	    {
	      param.sched_priority = aiocbp->aiocb.__abs_prio;
	      policy = aiocbp->aiocb.__policy;
	      pthread_setschedparam (self, policy, &param);
	    }

	  /* Process request pointed to by RUNP.  We must not be disturbed
	     by signals.  Bit 7 (value 128) of the opcode marks a 64-bit
	     (aiocb64) request.  */
	  if ((aiocbp->aiocb.aio_lio_opcode & 127) == LIO_READ)
	    {
	      if (sizeof (off_t) != sizeof (off64_t)
		  && aiocbp->aiocb.aio_lio_opcode & 128)
		aiocbp->aiocb.__return_value =
		  TEMP_FAILURE_RETRY (__pread64 (fildes, (void *)
						 aiocbp->aiocb64.aio_buf,
						 aiocbp->aiocb64.aio_nbytes,
						 aiocbp->aiocb64.aio_offset));
	      else
		aiocbp->aiocb.__return_value =
		  TEMP_FAILURE_RETRY (pread (fildes,
					     (void *) aiocbp->aiocb.aio_buf,
					     aiocbp->aiocb.aio_nbytes,
					     aiocbp->aiocb.aio_offset));

	      if (aiocbp->aiocb.__return_value == -1 && errno == ESPIPE)
		/* The Linux kernel is different from others.  It returns
		   ESPIPE if using pread on a socket.  Other platforms
		   simply ignore the offset parameter and behave like
		   read.  */
		aiocbp->aiocb.__return_value =
		  TEMP_FAILURE_RETRY (read (fildes,
					    (void *) aiocbp->aiocb64.aio_buf,
					    aiocbp->aiocb64.aio_nbytes));
	    }
	  else if ((aiocbp->aiocb.aio_lio_opcode & 127) == LIO_WRITE)
	    {
	      if (sizeof (off_t) != sizeof (off64_t)
		  && aiocbp->aiocb.aio_lio_opcode & 128)
		aiocbp->aiocb.__return_value =
		  TEMP_FAILURE_RETRY (__pwrite64 (fildes, (const void *)
						  aiocbp->aiocb64.aio_buf,
						  aiocbp->aiocb64.aio_nbytes,
						  aiocbp->aiocb64.aio_offset));
	      else
		aiocbp->aiocb.__return_value =
		  TEMP_FAILURE_RETRY (__libc_pwrite (fildes, (const void *)
						     aiocbp->aiocb.aio_buf,
						     aiocbp->aiocb.aio_nbytes,
						     aiocbp->aiocb.aio_offset));

	      if (aiocbp->aiocb.__return_value == -1 && errno == ESPIPE)
		/* The Linux kernel is different from others.  It returns
		   ESPIPE if using pwrite on a socket.  Other platforms
		   simply ignore the offset parameter and behave like
		   write.  */
		aiocbp->aiocb.__return_value =
		  TEMP_FAILURE_RETRY (write (fildes,
					     (void *) aiocbp->aiocb64.aio_buf,
					     aiocbp->aiocb64.aio_nbytes));
	    }
	  else if (aiocbp->aiocb.aio_lio_opcode == LIO_DSYNC)
	    aiocbp->aiocb.__return_value =
	      TEMP_FAILURE_RETRY (fdatasync (fildes));
	  else if (aiocbp->aiocb.aio_lio_opcode == LIO_SYNC)
	    aiocbp->aiocb.__return_value =
	      TEMP_FAILURE_RETRY (fsync (fildes));
	  else
	    {
	      /* This is an invalid opcode.  */
	      aiocbp->aiocb.__return_value = -1;
	      __set_errno (EINVAL);
	    }

	  /* Get the mutex.  */
	  pthread_mutex_lock (&__aio_requests_mutex);

	  /* In theory we would need here a write memory barrier since the
	     callers test using aio_error() whether the request finished
	     and once this value != EINPROGRESS the field __return_value
	     must be committed to memory.

	     But since the pthread_mutex_lock call involves write memory
	     barriers as well it is not necessary.  */

	  if (aiocbp->aiocb.__return_value == -1)
	    aiocbp->aiocb.__error_code = errno;
	  else
	    aiocbp->aiocb.__error_code = 0;

	  /* Send the signal to notify about finished processing of the
	     request.  */
	  __aio_notify (runp);

	  /* For debugging purposes we reset the running flag of the
	     finished request.  */
	  assert (runp->running == allocated);
	  runp->running = done;

	  /* Now dequeue the current request.  If it headed a priority
	     chain, its successor for the same descriptor becomes
	     runnable and goes on the run queue.  */
	  __aio_remove_request (NULL, runp, 0);
	  if (runp->next_prio != NULL)
	    add_request_to_runlist (runp->next_prio);

	  /* Free the old element.  */
	  __aio_free_request (runp);
	}

      runp = runlist;

      /* If the runlist is empty, then we sleep for a while, waiting for
	 something to arrive in it. */
      if (runp == NULL && optim.aio_idle_time >= 0)
	{
	  struct timeval now;
	  struct timespec wakeup_time;

	  ++idle_thread_count;
	  gettimeofday (&now, NULL);
	  wakeup_time.tv_sec = now.tv_sec + optim.aio_idle_time;
	  wakeup_time.tv_nsec = now.tv_usec * 1000;
	  /* NOTE(review): tv_usec * 1000 is at most 999999000, so this
	     normalization can never trigger as written; `>=' would be
	     the safer comparison if the computation ever changes --
	     confirm against the timespec contract.  */
	  if (wakeup_time.tv_nsec > 1000000000)
	    {
	      wakeup_time.tv_nsec -= 1000000000;
	      ++wakeup_time.tv_sec;
	    }
	  pthread_cond_timedwait (&__aio_new_request_notification,
				  &__aio_requests_mutex,
				  &wakeup_time);
	  --idle_thread_count;
	  runp = runlist;
	}

      if (runp == NULL)
	--nthreads;
      else
	{
	  assert (runp->running == yes);
	  runp->running = allocated;
	  runlist = runp->next_run;

	  /* If we have a request to process, and there's still another in
	     the run list, then we need to either wake up or create a new
	     thread to service the request that is still in the run list. */
	  if (runlist != NULL)
	    {
	      /* There are at least two items in the work queue to work on.
		 If there are other idle threads, then we should wake them
		 up for these other work elements; otherwise, we should try
		 to create a new thread. */
	      if (idle_thread_count > 0)
		pthread_cond_signal (&__aio_new_request_notification);
	      else if (nthreads < optim.aio_threads)
		{
		  pthread_t thid;
		  pthread_attr_t attr;

		  /* Make sure the thread is created detached.  */
		  pthread_attr_init (&attr);
		  pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);

		  /* Now try to start a thread. If we fail, no big deal,
		     because we know that there is at least one thread (us)
		     that is working on AIO operations. */
		  if (pthread_create (&thid, &attr, handle_fildes_io, NULL)
		      == 0)
		    ++nthreads;
		}
	    }
	}

      /* Release the mutex.  */
      pthread_mutex_unlock (&__aio_requests_mutex);
    }
  while (runp != NULL);

  return NULL;
}
00682 
00683 
00684 /* Free allocated resources.  */
00685 libc_freeres_fn (free_res)
00686 {
00687   size_t row;
00688 
00689   for (row = 0; row < pool_max_size; ++row)
00690     free (pool[row]);
00691 
00692   free (pool);
00693 }
00694 
00695 
00696 /* Add newrequest to the runlist. The __abs_prio flag of newrequest must
00697    be correctly set to do this. Also, you had better set newrequest's
00698    "running" flag to "yes" before you release your lock or you'll throw an
00699    assertion. */
00700 static void
00701 add_request_to_runlist (struct requestlist *newrequest)
00702 {
00703   int prio = newrequest->aiocbp->aiocb.__abs_prio;
00704   struct requestlist *runp;
00705 
00706   if (runlist == NULL || runlist->aiocbp->aiocb.__abs_prio < prio)
00707     {
00708       newrequest->next_run = runlist;
00709       runlist = newrequest;
00710     }
00711   else
00712     {
00713       runp = runlist;
00714 
00715       while (runp->next_run != NULL
00716             && runp->next_run->aiocbp->aiocb.__abs_prio >= prio)
00717        runp = runp->next_run;
00718 
00719       newrequest->next_run = runp->next_run;
00720       runp->next_run = newrequest;
00721     }
00722 }