
glibc 2.9
dl-close.c
00001 /* Close a shared object opened by `_dl_open'.
00002    Copyright (C) 1996-2005, 2006, 2007 Free Software Foundation, Inc.
00003    This file is part of the GNU C Library.
00004 
00005    The GNU C Library is free software; you can redistribute it and/or
00006    modify it under the terms of the GNU Lesser General Public
00007    License as published by the Free Software Foundation; either
00008    version 2.1 of the License, or (at your option) any later version.
00009 
00010    The GNU C Library is distributed in the hope that it will be useful,
00011    but WITHOUT ANY WARRANTY; without even the implied warranty of
00012    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00013    Lesser General Public License for more details.
00014 
00015    You should have received a copy of the GNU Lesser General Public
00016    License along with the GNU C Library; if not, write to the Free
00017    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
00018    02111-1307 USA.  */
00019 
00020 #include <assert.h>
00021 #include <dlfcn.h>
00022 #include <errno.h>
00023 #include <libintl.h>
00024 #include <stddef.h>
00025 #include <stdio.h>
00026 #include <stdlib.h>
00027 #include <string.h>
00028 #include <unistd.h>
00029 #include <bits/libc-lock.h>
00030 #include <ldsodefs.h>
00031 #include <sys/types.h>
00032 #include <sys/mman.h>
00033 #include <sysdep-cancel.h>
00034 #include <tls.h>
00035 
00036 
00037 /* Type of the destructor (fini) functions.  */
00038 typedef void (*fini_t) (void);
00039 
00040 
00041 /* Special l_idx value used to indicate which objects remain loaded.  */
00042 #define IDX_STILL_USED -1
00043 
00044 
00045 /* Returns true if a non-empty entry was found.  */
00046 static bool
00047 remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
00048                bool should_be_there)
00049 {
00050   if (idx - disp >= listp->len)
00051     {
00052       if (listp->next == NULL)
00053        {
00054          /* The index is not actually valid in the slotinfo list,
00055             because this object was closed before it was fully set
00056             up due to some error.  */
00057          assert (! should_be_there);
00058        }
00059       else
00060        {
00061          if (remove_slotinfo (idx, listp->next, disp + listp->len,
00062                             should_be_there))
00063            return true;
00064 
00065          /* No non-empty entry.  Search from the end of this element's
00066             slotinfo array.  */
00067          idx = disp + listp->len;
00068        }
00069     }
00070   else
00071     {
00072       struct link_map *old_map = listp->slotinfo[idx - disp].map;
00073 
00074       /* The entry might still be in its unused state if we are closing an
00075         object that wasn't fully set up.  */
00076       if (__builtin_expect (old_map != NULL, 1))
00077        {
00078          assert (old_map->l_tls_modid == idx);
00079 
00080          /* Mark the entry as unused. */
00081          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
00082          listp->slotinfo[idx - disp].map = NULL;
00083        }
00084 
00085       /* If this is not the last currently used entry no need to look
00086         further.  */
00087       if (idx != GL(dl_tls_max_dtv_idx))
00088        return true;
00089     }
00090 
00091   while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
00092     {
00093       --idx;
00094 
00095       if (listp->slotinfo[idx - disp].map != NULL)
00096        {
00097          /* Found a new last used index.  */
00098          GL(dl_tls_max_dtv_idx) = idx;
00099          return true;
00100        }
00101     }
00102 
00103   /* No non-empty entry in this list element.  */
00104   return false;
00105 }
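
For reference, remove_slotinfo walks the chain of dtv slotinfo blocks kept in
GL(dl_tls_dtv_slotinfo_list).  The declarations involved (from ldsodefs.h,
reproduced here only in rough, abbreviated form) look approximately like:

   struct dtv_slotinfo
   {
     size_t gen;               /* TLS generation this slot was filled in.  */
     struct link_map *map;     /* Module owning the slot; NULL if unused.  */
   };

   struct dtv_slotinfo_list
   {
     size_t len;                       /* Number of slots in this block.  */
     struct dtv_slotinfo_list *next;   /* Next block in the chain.  */
     struct dtv_slotinfo slotinfo[0];  /* The slots themselves.  */
   };

The DISP argument counts the slots covered by the blocks already skipped, so
IDX - DISP indexes the slotinfo array of the current block.
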
00106 
00107 
00108 void
00109 _dl_close_worker (struct link_map *map)
00110 {
00111   /* One less direct use.  */
00112   --map->l_direct_opencount;
00113 
00114   /* If _dl_close is called recursively (some destructor calls dlclose),
00115      just record that the parent _dl_close will need to do garbage collection
00116      again and return.  */
00117   static enum { not_pending, pending, rerun } dl_close_state;
00118 
00119   if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
00120       || dl_close_state != not_pending)
00121     {
00122       if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
00123        dl_close_state = rerun;
00124 
00125       /* There are still references to this object.  Do nothing more.  */
00126       if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
00127        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
00128                        map->l_name, map->l_direct_opencount);
00129 
00130       return;
00131     }
00132 
00133   Lmid_t nsid = map->l_ns;
00134   struct link_namespaces *ns = &GL(dl_ns)[nsid];
00135 
00136  retry:
00137   dl_close_state = pending;
00138 
00139   bool any_tls = false;
00140   const unsigned int nloaded = ns->_ns_nloaded;
00141   char used[nloaded];
00142   char done[nloaded];
00143   struct link_map *maps[nloaded];
00144 
00145   /* Run over the list and assign indexes to the link maps and enter
00146      them into the MAPS array.  */
00147   int idx = 0;
00148   for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
00149     {
00150       l->l_idx = idx;
00151       maps[idx] = l;
00152       ++idx;
00153     }
00154   assert (idx == nloaded);
00155 
00156   /* Prepare the bitmaps.  */
00157   memset (used, '\0', sizeof (used));
00158   memset (done, '\0', sizeof (done));
00159 
00160   /* Keep track of the lowest index link map we have covered already.  */
00161   int done_index = -1;
00162   while (++done_index < nloaded)
00163     {
00164       struct link_map *l = maps[done_index];
00165 
00166       if (done[done_index])
00167        /* Already handled.  */
00168        continue;
00169 
00170       /* Check whether this object is still used.  */
00171       if (l->l_type == lt_loaded
00172          && l->l_direct_opencount == 0
00173          && (l->l_flags_1 & DF_1_NODELETE) == 0
00174          && !used[done_index])
00175        continue;
00176 
00177       /* We need this object and we handle it now.  */
00178       done[done_index] = 1;
00179       used[done_index] = 1;
00180       /* Signal the object is still needed.  */
00181       l->l_idx = IDX_STILL_USED;
00182 
00183       /* Mark all dependencies as used.  */
00184       if (l->l_initfini != NULL)
00185        {
00186          struct link_map **lp = &l->l_initfini[1];
00187          while (*lp != NULL)
00188            {
00189              if ((*lp)->l_idx != IDX_STILL_USED)
00190               {
00191                 assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);
00192 
00193                 if (!used[(*lp)->l_idx])
00194                   {
00195                     used[(*lp)->l_idx] = 1;
00196                     if ((*lp)->l_idx - 1 < done_index)
00197                      done_index = (*lp)->l_idx - 1;
00198                   }
00199               }
00200 
00201              ++lp;
00202            }
00203        }
00204       /* And the same for relocation dependencies.  */
00205       if (l->l_reldeps != NULL)
00206        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
00207          {
00208            struct link_map *jmap = l->l_reldeps->list[j];
00209 
00210            if (jmap->l_idx != IDX_STILL_USED)
00211              {
00212               assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);
00213 
00214               if (!used[jmap->l_idx])
00215                 {
00216                   used[jmap->l_idx] = 1;
00217                   if (jmap->l_idx - 1 < done_index)
00218                     done_index = jmap->l_idx - 1;
00219                 }
00220              }
00221          }
00222     }
00223 
00224   /* Sort the entries.  */
00225   _dl_sort_fini (ns->_ns_loaded, maps, nloaded, used, nsid);
00226 
00227   /* Call all termination functions at once.  */
00228 #ifdef SHARED
00229   bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
00230 #endif
00231   bool unload_any = false;
00232   bool scope_mem_left = false;
00233   unsigned int unload_global = 0;
00234   unsigned int first_loaded = ~0;
00235   for (unsigned int i = 0; i < nloaded; ++i)
00236     {
00237       struct link_map *imap = maps[i];
00238 
00239       /* All elements must be in the same namespace.  */
00240       assert (imap->l_ns == nsid);
00241 
00242       if (!used[i])
00243        {
00244          assert (imap->l_type == lt_loaded
00245                 && (imap->l_flags_1 & DF_1_NODELETE) == 0);
00246 
00247          /* Call its termination function.  Do not do it for
00248             half-cooked objects.  */
00249          if (imap->l_init_called)
00250            {
00251              /* When debugging print a message first.  */
00252              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
00253                                 0))
00254               _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
00255                               imap->l_name, nsid);
00256 
00257              if (imap->l_info[DT_FINI_ARRAY] != NULL)
00258               {
00259                 ElfW(Addr) *array =
00260                   (ElfW(Addr) *) (imap->l_addr
00261                                 + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
00262                 unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
00263                                  / sizeof (ElfW(Addr)));
00264 
00265                 while (sz-- > 0)
00266                   ((fini_t) array[sz]) ();
00267               }
00268 
00269              /* Next try the old-style destructor.  */
00270              if (imap->l_info[DT_FINI] != NULL)
00271               (*(void (*) (void)) DL_DT_FINI_ADDRESS
00272                (imap, ((void *) imap->l_addr
00273                       + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
00274            }
00275 
00276 #ifdef SHARED
00277          /* Auditing checkpoint: we remove an object.  */
00278          if (__builtin_expect (do_audit, 0))
00279            {
00280              struct audit_ifaces *afct = GLRO(dl_audit);
00281              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
00282               {
00283                 if (afct->objclose != NULL)
00284                   /* Return value is ignored.  */
00285                   (void) afct->objclose (&imap->l_audit[cnt].cookie);
00286 
00287                 afct = afct->next;
00288               }
00289            }
00290 #endif
00291 
00292          /* This object must not be used anymore.  */
00293          imap->l_removed = 1;
00294 
00295          /* We indeed have an object to remove.  */
00296          unload_any = true;
00297 
00298          if (imap->l_global)
00299            ++unload_global;
00300 
00301          /* Remember where the first dynamically loaded object is.  */
00302          if (i < first_loaded)
00303            first_loaded = i;
00304        }
00305       /* Else used[i].  */
00306       else if (imap->l_type == lt_loaded)
00307        {
00308          struct r_scope_elem *new_list = NULL;
00309 
00310          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
00311            {
00312              /* The object is still used.  But one of the objects we are
00313                unloading right now is responsible for loading it.  If
00314                the current object does not have its own scope yet we
00315                have to create one.  This has to be done before running
00316                the finalizers.
00317 
00318                To do this count the number of dependencies.  */
00319              unsigned int cnt;
00320              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
00321               ;
00322 
00323              /* We simply reuse the l_initfini list.  */
00324              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
00325              imap->l_searchlist.r_nlist = cnt;
00326 
00327              new_list = &imap->l_searchlist;
00328            }
00329 
00330          /* Count the number of scopes which remain after the unload.
00331             When we add the local search list count it.  Always add
00332             one for the terminating NULL pointer.  */
00333          size_t remain = (new_list != NULL) + 1;
00334          bool removed_any = false;
00335          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
00336            /* This relies on l_scope[] entries being always set either
00337               to its own l_symbolic_searchlist address, or some map's
00338               l_searchlist address.  */
00339            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
00340              {
00341               struct link_map *tmap = (struct link_map *)
00342                 ((char *) imap->l_scope[cnt]
00343                  - offsetof (struct link_map, l_searchlist));
00344               assert (tmap->l_ns == nsid);
00345               if (tmap->l_idx == IDX_STILL_USED)
00346                 ++remain;
00347               else
00348                 removed_any = true;
00349              }
00350            else
00351              ++remain;
00352 
00353          if (removed_any)
00354            {
00355              /* Always allocate a new array for the scope.  This is
00356                necessary since we must be able to determine the last
00357                user of the current array.  If possible use the link map's
00358                memory.  */
00359              size_t new_size;
00360              struct r_scope_elem **newp;
00361 
00362 #define SCOPE_ELEMS(imap) \
00363   (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))
00364 
00365              if (imap->l_scope != imap->l_scope_mem
00366                 && remain < SCOPE_ELEMS (imap))
00367               {
00368                 new_size = SCOPE_ELEMS (imap);
00369                 newp = imap->l_scope_mem;
00370               }
00371              else
00372               {
00373                 new_size = imap->l_scope_max;
00374                 newp = (struct r_scope_elem **)
00375                   malloc (new_size * sizeof (struct r_scope_elem *));
00376                 if (newp == NULL)
00377                   _dl_signal_error (ENOMEM, "dlclose", NULL,
00378                                   N_("cannot create scope list"));
00379               }
00380 
00381              /* Copy over the remaining scope elements.  */
00382              remain = 0;
00383              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
00384               {
00385                 if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
00386                   {
00387                     struct link_map *tmap = (struct link_map *)
00388                      ((char *) imap->l_scope[cnt]
00389                       - offsetof (struct link_map, l_searchlist));
00390                     if (tmap->l_idx != IDX_STILL_USED)
00391                      {
00392                        /* Remove the scope.  Or replace with own map's
00393                           scope.  */
00394                        if (new_list != NULL)
00395                          {
00396                            newp[remain++] = new_list;
00397                            new_list = NULL;
00398                          }
00399                        continue;
00400                      }
00401                   }
00402 
00403                 newp[remain++] = imap->l_scope[cnt];
00404               }
00405              newp[remain] = NULL;
00406 
00407              struct r_scope_elem **old = imap->l_scope;
00408 
00409              imap->l_scope = newp;
00410 
00411              /* No user anymore, we can free it now.  */
00412              if (old != imap->l_scope_mem)
00413               {
00414                 if (_dl_scope_free (old))
00415                   /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
00416                      no need to repeat it.  */
00417                   scope_mem_left = false;
00418               }
00419              else
00420               scope_mem_left = true;
00421 
00422              imap->l_scope_max = new_size;
00423            }
00424 
00425          /* The loader is gone, so mark the object as not having one.
00426             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
00427          if (imap->l_loader != NULL
00428              && imap->l_loader->l_idx != IDX_STILL_USED)
00429            imap->l_loader = NULL;
00430 
00431          /* Remember where the first dynamically loaded object is.  */
00432          if (i < first_loaded)
00433            first_loaded = i;
00434        }
00435     }
00436 
00437   /* If there are no objects to unload, do nothing further.  */
00438   if (!unload_any)
00439     goto out;
00440 
00441 #ifdef SHARED
00442   /* Auditing checkpoint: we will start deleting objects.  */
00443   if (__builtin_expect (do_audit, 0))
00444     {
00445       struct link_map *head = ns->_ns_loaded;
00446       struct audit_ifaces *afct = GLRO(dl_audit);
00447       /* Do not call the functions for any auditing object.  */
00448       if (head->l_auditing == 0)
00449        {
00450          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
00451            {
00452              if (afct->activity != NULL)
00453               afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);
00454 
00455              afct = afct->next;
00456            }
00457        }
00458     }
00459 #endif
00460 
00461   /* Notify the debugger we are about to remove some loaded objects.  */
00462   struct r_debug *r = _dl_debug_initialize (0, nsid);
00463   r->r_state = RT_DELETE;
00464   _dl_debug_state ();
00465 
00466   if (unload_global)
00467     {
00468       /* Some objects are in the global scope list.  Remove them.  */
00469       struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
00470       unsigned int i;
00471       unsigned int j = 0;
00472       unsigned int cnt = ns_msl->r_nlist;
00473 
00474       while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
00475        --cnt;
00476 
00477       if (cnt + unload_global == ns_msl->r_nlist)
00478        /* Speed up removing most recently added objects.  */
00479        j = cnt;
00480       else
00481        for (i = 0; i < cnt; i++)
00482          if (ns_msl->r_list[i]->l_removed == 0)
00483            {
00484              if (i != j)
00485               ns_msl->r_list[j] = ns_msl->r_list[i];
00486              j++;
00487            }
00488       ns_msl->r_nlist = j;
00489     }
00490 
00491   if (!RTLD_SINGLE_THREAD_P
00492       && (unload_global
00493          || scope_mem_left
00494          || (GL(dl_scope_free_list) != NULL
00495              && GL(dl_scope_free_list)->count)))
00496     {
00497       THREAD_GSCOPE_WAIT ();
00498 
00499       /* Now we can free any queued old scopes.  */
00500       struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
00501       if (fsl != NULL)
00502        while (fsl->count > 0)
00503          free (fsl->list[--fsl->count]);
00504     }
00505 
00506   size_t tls_free_start;
00507   size_t tls_free_end;
00508   tls_free_start = tls_free_end = NO_TLS_OFFSET;
00509 
00510   /* Check each element of the search list to see if all references to
00511      it are gone.  */
00512   for (unsigned int i = first_loaded; i < nloaded; ++i)
00513     {
00514       struct link_map *imap = maps[i];
00515       if (!used[i])
00516        {
00517          assert (imap->l_type == lt_loaded);
00518 
00519          /* That was the last reference, and this was a dlopen-loaded
00520             object.  We can unmap it.  */
00521 
00522          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
00523          if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
00524            {
00525              any_tls = true;
00526 
00527              if (GL(dl_tls_dtv_slotinfo_list) != NULL
00528                 && ! remove_slotinfo (imap->l_tls_modid,
00529                                    GL(dl_tls_dtv_slotinfo_list), 0,
00530                                    imap->l_init_called))
00531               /* All dynamically loaded modules with TLS are unloaded.  */
00532               GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);
00533 
00534              if (imap->l_tls_offset != NO_TLS_OFFSET
00535                 && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
00536               {
00537                 /* Collect a contiguous chunk built from the objects in
00538                    this search list, going in either direction.  When the
00539                    whole chunk is at the end of the used area then we can
00540                    reclaim it.  */
00541 #if TLS_TCB_AT_TP
00542                 if (tls_free_start == NO_TLS_OFFSET
00543                     || (size_t) imap->l_tls_offset == tls_free_start)
00544                   {
00545                     /* Extend the contiguous chunk being reclaimed.  */
00546                     tls_free_start
00547                      = imap->l_tls_offset - imap->l_tls_blocksize;
00548 
00549                     if (tls_free_end == NO_TLS_OFFSET)
00550                      tls_free_end = imap->l_tls_offset;
00551                   }
00552                 else if (imap->l_tls_offset - imap->l_tls_blocksize
00553                         == tls_free_end)
00554                   /* Extend the chunk backwards.  */
00555                   tls_free_end = imap->l_tls_offset;
00556                 else
00557                   {
00558                     /* This isn't contiguous with the last chunk freed.
00559                       One of them will be leaked unless we can free
00560                       one block right away.  */
00561                     if (tls_free_end == GL(dl_tls_static_used))
00562                      {
00563                        GL(dl_tls_static_used) = tls_free_start;
00564                        tls_free_end = imap->l_tls_offset;
00565                        tls_free_start
00566                          = tls_free_end - imap->l_tls_blocksize;
00567                      }
00568                     else if ((size_t) imap->l_tls_offset
00569                             == GL(dl_tls_static_used))
00570                      GL(dl_tls_static_used)
00571                        = imap->l_tls_offset - imap->l_tls_blocksize;
00572                     else if (tls_free_end < (size_t) imap->l_tls_offset)
00573                      {
00574                        /* We pick the later block.  It has a chance to
00575                           be freed.  */
00576                        tls_free_end = imap->l_tls_offset;
00577                        tls_free_start
00578                          = tls_free_end - imap->l_tls_blocksize;
00579                      }
00580                   }
00581 #elif TLS_DTV_AT_TP
00582                 if ((size_t) imap->l_tls_offset == tls_free_end)
00583                   /* Extend the contiguous chunk being reclaimed.  */
00584                   tls_free_end -= imap->l_tls_blocksize;
00585                 else if (imap->l_tls_offset + imap->l_tls_blocksize
00586                         == tls_free_start)
00587                   /* Extend the chunk backwards.  */
00588                   tls_free_start = imap->l_tls_offset;
00589                 else
00590                   {
00591                     /* This isn't contiguous with the last chunk freed.
00592                       One of them will be leaked.  */
00593                     if (tls_free_end == GL(dl_tls_static_used))
00594                      GL(dl_tls_static_used) = tls_free_start;
00595                     tls_free_start = imap->l_tls_offset;
00596                     tls_free_end = tls_free_start + imap->l_tls_blocksize;
00597                   }
00598 #else
00599 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
00600 #endif
00601               }
00602            }
00603 
00604          /* We can unmap all the maps at once.  We determined the
00605             start address and length when we loaded the object and
00606             the `munmap' call does the rest.  */
00607          DL_UNMAP (imap);
00608 
00609          /* Finally, unlink the data structure and free it.  */
00610          if (imap->l_prev != NULL)
00611            imap->l_prev->l_next = imap->l_next;
00612          else
00613            {
00614 #ifdef SHARED
00615              assert (nsid != LM_ID_BASE);
00616 #endif
00617              ns->_ns_loaded = imap->l_next;
00618            }
00619 
00620          --ns->_ns_nloaded;
00621          if (imap->l_next != NULL)
00622            imap->l_next->l_prev = imap->l_prev;
00623 
00624          free (imap->l_versions);
00625          if (imap->l_origin != (char *) -1)
00626            free ((char *) imap->l_origin);
00627 
00628          free (imap->l_reldeps);
00629 
00630          /* Print debugging message.  */
00631          if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
00632            _dl_debug_printf ("\nfile=%s [%lu];  destroying link map\n",
00633                            imap->l_name, imap->l_ns);
00634 
00635          /* This name is always allocated.  */
00636          free (imap->l_name);
00637          /* Remove the list with all the names of the shared object.  */
00638 
00639          struct libname_list *lnp = imap->l_libname;
00640          do
00641            {
00642              struct libname_list *this = lnp;
00643              lnp = lnp->next;
00644              if (!this->dont_free)
00645               free (this);
00646            }
00647          while (lnp != NULL);
00648 
00649          /* Remove the searchlists.  */
00650          free (imap->l_initfini);
00651 
00652          /* Remove the scope array if we allocated it.  */
00653          if (imap->l_scope != imap->l_scope_mem)
00654            free (imap->l_scope);
00655 
00656          if (imap->l_phdr_allocated)
00657            free ((void *) imap->l_phdr);
00658 
00659          if (imap->l_rpath_dirs.dirs != (void *) -1)
00660            free (imap->l_rpath_dirs.dirs);
00661          if (imap->l_runpath_dirs.dirs != (void *) -1)
00662            free (imap->l_runpath_dirs.dirs);
00663 
00664          free (imap);
00665        }
00666     }
00667 
00668   /* If we removed any object which uses TLS bump the generation counter.  */
00669   if (any_tls)
00670     {
00671       if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
00672        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in <http://www.gnu.org/software/libc/bugs.html>.\n");
00673 
00674       if (tls_free_end == GL(dl_tls_static_used))
00675        GL(dl_tls_static_used) = tls_free_start;
00676     }
00677 
00678 #ifdef SHARED
00679   /* Auditing checkpoint: we have deleted all objects.  */
00680   if (__builtin_expect (do_audit, 0))
00681     {
00682       struct link_map *head = ns->_ns_loaded;
00683       /* Do not call the functions for any auditing object.  */
00684       if (head->l_auditing == 0)
00685        {
00686          struct audit_ifaces *afct = GLRO(dl_audit);
00687          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
00688            {
00689              if (afct->activity != NULL)
00690               afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);
00691 
00692              afct = afct->next;
00693            }
00694        }
00695     }
00696 #endif
00697 
00698   /* Notify the debugger that those objects are finalized and gone.  */
00699   r->r_state = RT_CONSISTENT;
00700   _dl_debug_state ();
00701 
00702   /* Recheck if we need to retry, release the lock.  */
00703  out:
00704   if (dl_close_state == rerun)
00705     goto retry;
00706 
00707   dl_close_state = not_pending;
00708 }
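
The dl_close_state logic above exists because a destructor run from inside
_dl_close_worker may itself call dlclose.  A hypothetical plugin exercising
that path (the names plugin.c and libhelper.so are purely illustrative) might
look like this; the inner dlclose only records `rerun' and the outer call
repeats its garbage collection via the retry label:

   /* plugin.c -- build e.g. with: gcc -shared -fPIC plugin.c -o libplugin.so -ldl  */
   #include <dlfcn.h>
   #include <stddef.h>

   static void *helper;

   __attribute__ ((constructor))
   static void
   plugin_init (void)
   {
     helper = dlopen ("libhelper.so", RTLD_NOW);
   }

   __attribute__ ((destructor))
   static void
   plugin_fini (void)
   {
     /* Runs while the dlclose of libplugin.so is still inside
        _dl_close_worker; the recursive call returns early after
        setting dl_close_state to rerun.  */
     if (helper != NULL)
       dlclose (helper);
   }
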
00709 
00710 
00711 void
00712 _dl_close (void *_map)
00713 {
00714   struct link_map *map = _map;
00715 
00716   /* First see whether we can remove the object at all.  */
00717   if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0))
00718     {
00719       assert (map->l_init_called);
00720       /* Nope.  Do nothing.  */
00721       return;
00722     }
00723 
00724   if (__builtin_expect (map->l_direct_opencount, 1) == 0)
00725     GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));
00726 
00727   /* Acquire the lock.  */
00728   __rtld_lock_lock_recursive (GL(dl_load_lock));
00729 
00730   _dl_close_worker (map);
00731 
00732   __rtld_lock_unlock_recursive (GL(dl_load_lock));
00733 }
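
_dl_close is the backend reached from the public dlclose interface declared in
<dlfcn.h>.  A minimal user-level sketch of the reference-count behaviour it
implements (libm.so.6 is just an illustrative soname; link with -ldl):

   #include <dlfcn.h>
   #include <stdio.h>

   int
   main (void)
   {
     /* Opening the same object twice only bumps l_direct_opencount.  */
     void *h1 = dlopen ("libm.so.6", RTLD_NOW);
     void *h2 = dlopen ("libm.so.6", RTLD_NOW);
     if (h1 == NULL || h2 == NULL)
       {
         fprintf (stderr, "dlopen: %s\n", dlerror ());
         return 1;
       }

     dlclose (h1);   /* Count drops to 1; nothing is unmapped.  */
     dlclose (h2);   /* Count reaches 0; _dl_close_worker may now run the
                        destructors and unmap the object.  */
     return 0;
   }
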
00734 
00735 
00736 static bool __libc_freeres_fn_section
00737 free_slotinfo (struct dtv_slotinfo_list **elemp)
00738 {
00739   size_t cnt;
00740 
00741   if (*elemp == NULL)
00742     /* Nothing here, all is removed (or there never was anything).  */
00743     return true;
00744 
00745   if (!free_slotinfo (&(*elemp)->next))
00746     /* We cannot free the entry.  */
00747     return false;
00748 
00749   /* That cleared our next pointer for us.  */
00750 
00751   for (cnt = 0; cnt < (*elemp)->len; ++cnt)
00752     if ((*elemp)->slotinfo[cnt].map != NULL)
00753       /* Still used.  */
00754       return false;
00755 
00756   /* We can remove the list element.  */
00757   free (*elemp);
00758   *elemp = NULL;
00759 
00760   return true;
00761 }
00762 
00763 
00764 libc_freeres_fn (free_mem)
00765 {
00766   for (Lmid_t nsid = 0; nsid < DL_NNS; ++nsid)
00767     if (__builtin_expect (GL(dl_ns)[nsid]._ns_global_scope_alloc, 0) != 0
00768        && (GL(dl_ns)[nsid]._ns_main_searchlist->r_nlist
00769            // XXX Check whether we need NS-specific initial_searchlist
00770            == GLRO(dl_initial_searchlist).r_nlist))
00771       {
00772        /* All objects dynamically loaded by the program are unloaded.  Free
00773           the memory allocated for the global scope variable.  */
00774        struct link_map **old = GL(dl_ns)[nsid]._ns_main_searchlist->r_list;
00775 
00776        /* Put the old map in.  */
00777        GL(dl_ns)[nsid]._ns_main_searchlist->r_list
00778          // XXX Check whether we need NS-specific initial_searchlist
00779          = GLRO(dl_initial_searchlist).r_list;
00780        /* Signal that the original map is used.  */
00781        GL(dl_ns)[nsid]._ns_global_scope_alloc = 0;
00782 
00783        /* Now free the old map.  */
00784        free (old);
00785       }
00786 
00787   if (USE___THREAD || GL(dl_tls_dtv_slotinfo_list) != NULL)
00788     {
00789       /* Free the memory allocated for the dtv slotinfo array.  We can do
00790         this only if all modules which used this memory are unloaded.  */
00791 #ifdef SHARED
00792       if (GL(dl_initial_dtv) == NULL)
00793        /* There was no initial TLS setup; it was set up later
00794           using the normal malloc.  */
00795        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list));
00796       else
00797 #endif
00798        /* The first element of the list does not have to be deallocated.
00799           It was allocated in the dynamic linker (i.e., with a different
00800           malloc), and in the static library it's in .bss space.  */
00801        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
00802     }
00803 
00804   void *scope_free_list = GL(dl_scope_free_list);
00805   GL(dl_scope_free_list) = NULL;
00806   free (scope_free_list);
00807 }