
plt-scheme  4.2.1
thread.c
00001 /*
00002   MzScheme
00003   Copyright (c) 2004-2009 PLT Scheme Inc.
00004   Copyright (c) 1995-2001 Matthew Flatt
00005  
00006     This library is free software; you can redistribute it and/or
00007     modify it under the terms of the GNU Library General Public
00008     License as published by the Free Software Foundation; either
00009     version 2 of the License, or (at your option) any later version.
00010 
00011     This library is distributed in the hope that it will be useful,
00012     but WITHOUT ANY WARRANTY; without even the implied warranty of
00013     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00014     Library General Public License for more details.
00015 
00016     You should have received a copy of the GNU Library General Public
00017     License along with this library; if not, write to the Free
00018     Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
00019     Boston, MA 02110-1301 USA.
00020 */
00021 
00022 /* This file implements MzScheme threads.
00023 
00024    Usually, MzScheme threads are implemented by copying the stack.
00025    The scheme_thread_block() function is called occasionally by the
00026    evaluator so that the current thread can be swapped out.
00027    do_swap_thread() performs the actual swap. Threads can also be
00028    implemented by the OS; the bottom part of this file contains
00029    OS-specific thread code.
00030 
00031    Much of the work in thread management is knowing when to go to
00032    sleep, to be nice to the OS outside of MzScheme. The rest of the
00033    work is implementing custodians (called "custodians" in the code),
00034    parameters, and wills. */
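/* A minimal sketch (not from the original source) of how long-running C
   code cooperates with this scheduler: it periodically calls the
   scheme_thread_block() entry point mentioned above so the current thread
   can be swapped out. A sleep time of 0.0 yields without putting the
   thread to sleep. The work function here is hypothetical. */
#if 0
static void long_running_loop(int n)
{
  int i;
  for (i = 0; i < n; i++) {
    do_one_unit_of_work(i);      /* hypothetical unit of work */
    if (!(i & 1023))
      scheme_thread_block(0.0);  /* give other MzScheme threads a chance to run */
  }
}
#endif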
00035 
00036 /* Some compilers don't like re-def of GC_malloc in schemef.h: */
00037 #ifndef MZ_PRECISE_GC
00038 # define SCHEME_NO_GC_PROTO
00039 #endif
00040 
00041 #include "schpriv.h"
00042 #include "schmach.h"
00043 #include "schgc.h"
00044 #ifndef PALMOS_STUFF
00045 # include <time.h>
00046 #endif
00047 #ifdef FILES_HAVE_FDS
00048 # include <sys/types.h>
00049 # include <sys/time.h>
00050 # ifdef SELECT_INCLUDE
00051 #  include <sys/select.h>
00052 # endif
00053 # ifdef USE_BEOS_SOCKET_INCLUDE
00054 #  include <be/net/socket.h>
00055 # endif
00056 #endif
00057 #ifdef USE_ITIMER
00058 # include <sys/types.h>
00059 # include <sys/time.h>
00060 # include <signal.h>
00061 #endif
00062 #ifdef USE_WINSOCK_TCP
00063 # ifdef USE_TCP
00064 #  include <winsock.h>
00065 # endif
00066 #endif
00067 #ifdef USE_BEOS_PORT_THREADS
00068 # include <be/net/socket.h>
00069 #endif
00070 #ifdef USE_STACKAVAIL
00071 # include <malloc.h>
00072 #endif
00073 #ifdef UNISTD_INCLUDE
00074 # include <unistd.h>
00075 #endif
00076 
00077 #ifndef SIGMZTHREAD
00078 # define SIGMZTHREAD SIGUSR2
00079 #endif
00080 
00081 #if defined(WINDOWS_PROCESSES) || defined(WINDOWS_FILE_HANDLES)
00082 # include <windows.h>
00083 extern HANDLE scheme_break_semaphore;
00084 #endif
00085 
00086 #if defined(FILES_HAVE_FDS) \
00087      || defined(USE_BEOS_PORT_THREADS) \
00088      || (defined(USE_WINSOCK_TCP) && defined(USE_TCP)) \
00089      || (defined(WINDOWS_PROCESSES) || defined(WINDOWS_FILE_HANDLES))
00090 # define USING_FDS
00091 # if (!defined(USE_WINSOCK_TCP) || !defined(USE_TCP)) && !defined(FILES_HAVE_FDS)
00092 #  include <sys/types.h>
00093 # endif
00094 #endif
00095 
00096 #include "schfd.h"
00097 
00098 #define DEFAULT_INIT_STACK_SIZE 1000
00099 #define MAX_INIT_STACK_SIZE 100000
00100 
00101 #ifdef SGC_STD_DEBUGGING
00102 # define SENORA_GC_NO_FREE
00103 #endif
00104 
00105 /* If a finalization callback in MrEd invokes Scheme code,
00106    we can end up with a thread swap in the middle of a thread
00107    swap (where the outer swap was interrupted by GC). The
00108    following is a debugging flag to help detect and fix
00109    such problems. */
00110 #define WATCH_FOR_NESTED_SWAPS 0
00111 
00112 #if WATCH_FOR_NESTED_SWAPS
00113 static int swapping = 0;
00114 #endif
00115 
00116 extern void scheme_gmp_tls_init(long *s);
00117 extern void *scheme_gmp_tls_load(long *s);
00118 extern void scheme_gmp_tls_unload(long *s, void *p);
00119 extern void scheme_gmp_tls_snapshot(long *s, long *save);
00120 extern void scheme_gmp_tls_restore_snapshot(long *s, void *data, long *save, int do_free);
00121 
00122 static void check_ready_break();
00123 
00124 extern int scheme_num_read_syntax_objects;
00125 extern long scheme_hash_request_count;
00126 extern long scheme_hash_iteration_count;
00127 #ifdef MZ_USE_JIT
00128 extern int scheme_jit_malloced;
00129 #else
00130 # define scheme_jit_malloced 0
00131 #endif
00132 
00133 /*========================================================================*/
00134 /*                    local variables and prototypes                      */
00135 /*========================================================================*/
00136 
00137 #define INIT_TB_SIZE  20
00138 
00139 #ifndef MZ_THREAD_QUANTUM_USEC
00140 # define MZ_THREAD_QUANTUM_USEC 10000
00141 #endif
00142 
00143 static int buffer_init_size = INIT_TB_SIZE;
00144 
00145 THREAD_LOCAL Scheme_Thread *scheme_current_thread = NULL;
00146 THREAD_LOCAL Scheme_Thread *scheme_main_thread = NULL;
00147 THREAD_LOCAL Scheme_Thread *scheme_first_thread = NULL;
00148 
00149 Scheme_Thread *scheme_get_current_thread() { return scheme_current_thread; }
00150 long scheme_get_multiple_count() { return scheme_current_thread->ku.multiple.count; }
00151 Scheme_Object **scheme_get_multiple_array() { return scheme_current_thread->ku.multiple.array; }
00152 void scheme_set_current_thread_ran_some() { scheme_current_thread->ran_some = 1; }
00153 
00154 THREAD_LOCAL Scheme_Thread_Set *scheme_thread_set_top;
00155 
00156 static int num_running_threads = 1;
00157 
00158 #ifdef LINK_EXTENSIONS_BY_TABLE
00159 Scheme_Thread **scheme_current_thread_ptr;
00160 volatile int *scheme_fuel_counter_ptr;
00161 #endif
00162 static int swap_no_setjmp = 0;
00163 
00164 static int thread_swap_count;
00165 static int did_gc_count;
00166 
00167 static int init_load_on_demand = 1;
00168 
00169 #ifdef RUNSTACK_IS_GLOBAL
00170 THREAD_LOCAL Scheme_Object **scheme_current_runstack_start;
00171 THREAD_LOCAL Scheme_Object **scheme_current_runstack;
00172 THREAD_LOCAL MZ_MARK_STACK_TYPE scheme_current_cont_mark_stack;
00173 THREAD_LOCAL MZ_MARK_POS_TYPE scheme_current_cont_mark_pos;
00174 #endif
00175 
00176 static THREAD_LOCAL Scheme_Custodian *main_custodian;
00177 static THREAD_LOCAL Scheme_Custodian *last_custodian;
00178 static THREAD_LOCAL Scheme_Hash_Table *limited_custodians = NULL;
00179 
00180 static Scheme_Object *initial_inspector;
00181 
00182 #ifndef MZ_PRECISE_GC
00183 static int cust_box_count, cust_box_alloc;
00184 static Scheme_Custodian_Box **cust_boxes;
00185 # ifndef USE_SENORA_GC
00186 extern int GC_is_marked(void *);
00187 # endif
00188 #endif
00189 
00190 /* On swap, put target in a static variable, instead of on the stack,
00191    so that the swapped-out thread is less likely to have a pointer
00192    to the target thread. */
00193 static THREAD_LOCAL Scheme_Thread *swap_target;
00194 
00195 static Scheme_Object *scheduled_kills;
00196 
00197 Scheme_Object *scheme_parameterization_key;
00198 Scheme_Object *scheme_exn_handler_key;
00199 Scheme_Object *scheme_break_enabled_key;
00200 
00201 long scheme_total_gc_time;
00202 static long start_this_gc_time, end_this_gc_time;
00203 static void get_ready_for_GC(void);
00204 static void done_with_GC(void);
00205 #ifdef MZ_PRECISE_GC
00206 static void inform_GC(int major_gc, long pre_used, long post_used);
00207 #endif
00208 
00209 static volatile short delayed_break_ready = 0;
00210 static Scheme_Thread *main_break_target_thread;
00211 
00212 void (*scheme_sleep)(float seconds, void *fds);
00213 void (*scheme_notify_multithread)(int on);
00214 void (*scheme_wakeup_on_input)(void *fds);
00215 int (*scheme_check_for_break)(void);
00216 void (*scheme_on_atomic_timeout)(void);
00217 
00218 static int do_atomic = 0;
00219 static int missed_context_switch = 0;
00220 static int have_activity = 0;
00221 int scheme_active_but_sleeping = 0;
00222 static int thread_ended_with_activity;
00223 THREAD_LOCAL int scheme_no_stack_overflow;
00224 
00225 static int needs_sleep_cancelled;
00226 
00227 static int tls_pos = 0;
00228 
00229 #ifdef MZ_PRECISE_GC
00230 extern long GC_get_memory_use(void *c);
00231 #else
00232 extern MZ_DLLIMPORT long GC_get_memory_use();
00233 #endif
00234 
00235 typedef struct Thread_Cell {
00236   Scheme_Object so;
00237   char inherited, assigned;
00238   Scheme_Object *def_val;
00239   /* A thread's thread_cell table maps cells to keys weakly.
00240      This table maps keys to values weakly. The two weak
00241      levels ensure that thread cells are properly GCed
00242      when the value of a thread cell references the thread
00243      cell. */
00244   Scheme_Bucket_Table *vals;
00245 } Thread_Cell;
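/* An illustrative sketch (not from the original source) of the lookup path
   implied by the comment above: the thread's table maps cell -> key weakly,
   and the cell's `vals' table maps key -> value weakly. The accessor
   lookup_weak() and the field name p->cell_values are assumptions here. */
#if 0
static Scheme_Object *sketch_thread_cell_ref(Thread_Cell *cell, Scheme_Thread *p)
{
  Scheme_Object *key, *val;
  key = lookup_weak(p->cell_values, (Scheme_Object *)cell);  /* cell -> key */
  if (key) {
    val = lookup_weak(cell->vals, key);                      /* key -> value */
    if (val)
      return val;
  }
  return cell->def_val;  /* either weak link collected: use the default */
}
#endif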
00246 
00247 static Scheme_Object *read_symbol, *write_symbol, *execute_symbol, *delete_symbol, *exists_symbol;
00248 static Scheme_Object *client_symbol, *server_symbol;
00249 
00250 static Scheme_Object *nested_exn_handler;
00251 
00252 static Scheme_Object *closers;
00253 
00254 static Scheme_Object *thread_swap_callbacks,  *thread_swap_out_callbacks;
00255 
00256 static Scheme_Object *recycle_cell;
00257 static Scheme_Object *maybe_recycle_cell;
00258 static int recycle_cc_count;
00259        
00260 static mz_jmp_buf main_init_error_buf;
00261        
00262 #ifdef MZ_PRECISE_GC
00263 /* This is a trick to get the types right. Note that 
00264    the layout of the weak box is defined by the
00265    GC spec. */
00266 typedef struct {
00267   short type;
00268   short hash_key;
00269   Scheme_Custodian *val;
00270 } Scheme_Custodian_Weak_Box;
00271 
00272 # define MALLOC_MREF() (Scheme_Custodian_Reference *)scheme_make_weak_box(NULL)
00273 # define CUSTODIAN_FAM(x) ((Scheme_Custodian_Weak_Box *)x)->val
00274 # define xCUSTODIAN_FAM(x) SCHEME_BOX_VAL(x)
00275 #else
00276 # define MALLOC_MREF() MALLOC_ONE_WEAK(Scheme_Custodian_Reference)
00277 # define CUSTODIAN_FAM(x) (*(x))
00278 # define xCUSTODIAN_FAM(x) (*(x))
00279 #endif
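/* An illustrative sketch (not from the original source) of how the macros
   above are used: MALLOC_MREF() allocates a weak reference cell, and
   CUSTODIAN_FAM() reads or writes the custodian it designates. Once the
   custodian is collected or the reference is cleared, CUSTODIAN_FAM()
   yields NULL. */
#if 0
static void sketch_mref_use(Scheme_Custodian *c)
{
  Scheme_Custodian_Reference *mr;
  mr = MALLOC_MREF();
  CUSTODIAN_FAM(mr) = c;        /* point (weakly) at the custodian */
  if (CUSTODIAN_FAM(mr)) {
    /* custodian is still reachable here */
  }
  CUSTODIAN_FAM(mr) = NULL;     /* drop the reference */
}
#endif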
00280 
00281 #ifdef MZ_PRECISE_GC
00282 static void register_traversers(void);
00283 #endif
00284 
00285 static void prepare_this_thread_for_GC(Scheme_Thread *t);
00286 
00287 static Scheme_Object *custodian_require_mem(int argc, Scheme_Object *args[]);
00288 static Scheme_Object *custodian_limit_mem(int argc, Scheme_Object *args[]);
00289 static Scheme_Object *custodian_can_mem(int argc, Scheme_Object *args[]);
00290 static Scheme_Object *new_tracking_fun(int argc, Scheme_Object *args[]);
00291 static Scheme_Object *union_tracking_val(int argc, Scheme_Object *args[]);
00292 
00293 static Scheme_Object *collect_garbage(int argc, Scheme_Object *args[]);
00294 static Scheme_Object *current_memory_use(int argc, Scheme_Object *args[]);
00295 
00296 static Scheme_Object *sch_thread(int argc, Scheme_Object *args[]);
00297 static Scheme_Object *sch_thread_nokill(int argc, Scheme_Object *args[]);
00298 static Scheme_Object *sch_sleep(int argc, Scheme_Object *args[]);
00299 static Scheme_Object *thread_p(int argc, Scheme_Object *args[]);
00300 static Scheme_Object *thread_running_p(int argc, Scheme_Object *args[]);
00301 static Scheme_Object *thread_dead_p(int argc, Scheme_Object *args[]);
00302 static Scheme_Object *thread_wait(int argc, Scheme_Object *args[]);
00303 static Scheme_Object *sch_current(int argc, Scheme_Object *args[]);
00304 static Scheme_Object *kill_thread(int argc, Scheme_Object *args[]);
00305 static Scheme_Object *break_thread(int argc, Scheme_Object *args[]);
00306 static Scheme_Object *thread_suspend(int argc, Scheme_Object *args[]);
00307 static Scheme_Object *thread_resume(int argc, Scheme_Object *args[]);
00308 static Scheme_Object *make_thread_suspend(int argc, Scheme_Object *args[]);
00309 static Scheme_Object *make_thread_resume(int argc, Scheme_Object *args[]);
00310 static Scheme_Object *make_thread_dead(int argc, Scheme_Object *args[]);
00311 static void register_thread_sync();
00312 
00313 static Scheme_Object *sch_sync(int argc, Scheme_Object *args[]);
00314 static Scheme_Object *sch_sync_timeout(int argc, Scheme_Object *args[]);
00315 static Scheme_Object *sch_sync_enable_break(int argc, Scheme_Object *args[]);
00316 static Scheme_Object *sch_sync_timeout_enable_break(int argc, Scheme_Object *args[]);
00317 static Scheme_Object *evt_p(int argc, Scheme_Object *args[]);
00318 static Scheme_Object *evts_to_evt(int argc, Scheme_Object *args[]);
00319 
00320 static Scheme_Object *make_custodian(int argc, Scheme_Object *argv[]);
00321 static Scheme_Object *custodian_p(int argc, Scheme_Object *argv[]);
00322 static Scheme_Object *custodian_close_all(int argc, Scheme_Object *argv[]);
00323 static Scheme_Object *custodian_to_list(int argc, Scheme_Object *argv[]);
00324 static Scheme_Object *current_custodian(int argc, Scheme_Object *argv[]);
00325 static Scheme_Object *make_custodian_box(int argc, Scheme_Object *argv[]);
00326 static Scheme_Object *custodian_box_value(int argc, Scheme_Object *argv[]);
00327 static Scheme_Object *custodian_box_p(int argc, Scheme_Object *argv[]);
00328 static Scheme_Object *call_as_nested_thread(int argc, Scheme_Object *argv[]);
00329 
00330 static Scheme_Object *current_namespace(int argc, Scheme_Object *args[]);
00331 static Scheme_Object *namespace_p(int argc, Scheme_Object *args[]);
00332 
00333 static Scheme_Object *parameter_p(int argc, Scheme_Object *args[]);
00334 static Scheme_Object *parameter_procedure_eq(int argc, Scheme_Object *args[]);
00335 static Scheme_Object *make_parameter(int argc, Scheme_Object *args[]);
00336 static Scheme_Object *make_derived_parameter(int argc, Scheme_Object *args[]);
00337 static Scheme_Object *extend_parameterization(int argc, Scheme_Object *args[]);
00338 static Scheme_Object *parameterization_p(int argc, Scheme_Object *args[]);
00339 
00340 static Scheme_Object *make_thread_cell(int argc, Scheme_Object *args[]);
00341 static Scheme_Object *thread_cell_p(int argc, Scheme_Object *args[]);
00342 static Scheme_Object *thread_cell_get(int argc, Scheme_Object *args[]);
00343 static Scheme_Object *thread_cell_set(int argc, Scheme_Object *args[]);
00344 static Scheme_Object *thread_cell_values(int argc, Scheme_Object *args[]);
00345 
00346 static Scheme_Object *make_security_guard(int argc, Scheme_Object *argv[]);
00347 static Scheme_Object *security_guard_p(int argc, Scheme_Object *argv[]);
00348 static Scheme_Object *current_security_guard(int argc, Scheme_Object *argv[]);
00349 
00350 static Scheme_Object *make_thread_set(int argc, Scheme_Object *argv[]);
00351 static Scheme_Object *thread_set_p(int argc, Scheme_Object *argv[]);
00352 static Scheme_Object *current_thread_set(int argc, Scheme_Object *argv[]);
00353 
00354 static Scheme_Object *current_thread_initial_stack_size(int argc, Scheme_Object *argv[]);
00355 
00356 static void adjust_custodian_family(void *pr, void *ignored);
00357 
00358 static Scheme_Object *make_will_executor(int argc, Scheme_Object *args[]);
00359 static Scheme_Object *will_executor_p(int argc, Scheme_Object *args[]);
00360 static Scheme_Object *register_will(int argc, Scheme_Object *args[]);
00361 static Scheme_Object *will_executor_try(int argc, Scheme_Object *args[]);
00362 static Scheme_Object *will_executor_go(int argc, Scheme_Object *args[]);
00363 static Scheme_Object *will_executor_sema(Scheme_Object *w, int *repost);
00364 
00365 static Scheme_Object *check_break_now(int argc, Scheme_Object *args[]);
00366 static int syncing_ready(Scheme_Object *s, Scheme_Schedule_Info *sinfo);
00367 
00368 static void make_initial_config(Scheme_Thread *p);
00369 
00370 static int do_kill_thread(Scheme_Thread *p);
00371 static void suspend_thread(Scheme_Thread *p);
00372 static void wait_until_suspend_ok();
00373 
00374 static int check_sleep(int need_activity, int sleep_now);
00375 
00376 static void remove_thread(Scheme_Thread *r);
00377 static void exit_or_escape(Scheme_Thread *p);
00378 
00379 static int resume_suspend_ready(Scheme_Object *o, Scheme_Schedule_Info *sinfo);
00380 static int dead_ready(Scheme_Object *o, Scheme_Schedule_Info *sinfo);
00381 
00382 static int can_break_param(Scheme_Thread *p);
00383 
00384 static int post_system_idle();
00385 
00386 static Scheme_Object *current_stats(int argc, Scheme_Object *args[]);
00387 
00388 static Scheme_Object **config_map;
00389 Scheme_Object *mtrace_cmark_key = NULL;
00390 
00391 typedef struct {
00392   MZTAG_IF_REQUIRED
00393   short is_derived;
00394   Scheme_Object *key;
00395   Scheme_Object *guard;
00396   Scheme_Object *extract_guard;
00397   Scheme_Object *defcell;
00398 } ParamData;
00399 
00400 enum {
00401   CONFIG_DIRECT,
00402   CONFIG_INDIRECT
00403 };
00404 
00405 typedef struct Scheme_Thread_Custodian_Hop {
00406   Scheme_Object so;
00407   Scheme_Thread *p; /* really an indirection with precise gc */
00408 } Scheme_Thread_Custodian_Hop;
00409 
00410 static Scheme_Custodian_Extractor *extractors;
00411 
00412 typedef struct {
00413   MZTAG_IF_REQUIRED
00414   Scheme_Object *key;
00415   void (*f)(Scheme_Env *);
00416 } Scheme_NSO;
00417 static int num_nsos = 0;
00418 static Scheme_NSO *namespace_options = NULL;
00419 
00420 #define SETJMP(p) scheme_setjmpup(&p->jmpup_buf, p, p->stack_start)
00421 #define LONGJMP(p) scheme_longjmpup(&p->jmpup_buf)
00422 #define RESETJMP(p) scheme_reset_jmpup_buf(&p->jmpup_buf)
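
/* A minimal sketch (not from the original source) of the copying-swap
   pattern these macros support, as described in the overview at the top of
   the file: SETJMP() captures the current thread's stack and returns 0,
   while LONGJMP() restores another thread's captured stack, making that
   thread's earlier SETJMP() appear to return non-zero. GC preparation and
   swap callbacks are omitted; see do_swap_thread() for the real mechanism. */
#if 0
static void sketch_swap(Scheme_Thread *target)
{
  if (SETJMP(scheme_current_thread)) {
    /* We're back: another thread longjmped to us. */
  } else {
    scheme_current_thread = target;
    LONGJMP(target);  /* does not return here */
  }
}
#endif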
00423 
00424 #ifdef WIN32_THREADS
00425 /* Only set up for Boehm GC that thinks it's a DLL: */
00426 # define GC_THINKS_ITS_A_DLL_BUT_ISNT
00427 
00428 # ifdef GC_THINKS_ITS_A_DLL_BUT_ISNT
00429 extern BOOL WINAPI DllMain(HINSTANCE inst, ULONG reason, LPVOID reserved);
00430 # endif
00431 #endif
00432 
00433 #ifndef MZ_PRECISE_GC
00434 # define scheme_thread_hop_type scheme_thread_type
00435 #endif
00436 
00437 #ifdef MZ_PRECISE_GC
00438 unsigned long scheme_get_current_thread_stack_start(void);
00439 #endif
00440 
00441 /*========================================================================*/
00442 /*                             initialization                             */
00443 /*========================================================================*/
00444 
00445 void scheme_init_thread(Scheme_Env *env)
00446 {
00447   scheme_add_global_constant("dump-memory-stats",
00448                           scheme_make_prim_w_arity(scheme_dump_gc_stats,
00449                                                 "dump-memory-stats",
00450                                                 0, -1), 
00451                           env);
00452 
00453   scheme_add_global_constant("vector-set-performance-stats!",
00454                           scheme_make_prim_w_arity(current_stats,
00455                                                 "vector-set-performance-stats!",
00456                                                 1, 2),
00457                           env);
00458 
00459 
00460 
00461   scheme_add_global_constant("make-empty-namespace",
00462                           scheme_make_prim_w_arity(scheme_make_namespace,
00463                                                 "make-empty-namespace",
00464                                                 0, 0),
00465                           env);
00466 
00467   scheme_add_global_constant("thread",
00468                           scheme_make_prim_w_arity(sch_thread,
00469                                                 "thread",
00470                                                 1, 1),
00471                           env);
00472   scheme_add_global_constant("thread/suspend-to-kill",
00473                           scheme_make_prim_w_arity(sch_thread_nokill,
00474                                                 "thread/suspend-to-kill",
00475                                                 1, 1),
00476                           env);
00477   
00478   scheme_add_global_constant("sleep",
00479                           scheme_make_prim_w_arity(sch_sleep,
00480                                                 "sleep",
00481                                                 0, 1),
00482                           env);
00483 
00484   scheme_add_global_constant("thread?",
00485                           scheme_make_folding_prim(thread_p,
00486                                                 "thread?",
00487                                                 1, 1, 1),
00488                           env);
00489   scheme_add_global_constant("thread-running?",
00490                           scheme_make_prim_w_arity(thread_running_p,
00491                                                 "thread-running?",
00492                                                 1, 1),
00493                           env);
00494   scheme_add_global_constant("thread-dead?",
00495                           scheme_make_prim_w_arity(thread_dead_p,
00496                                                 "thread-dead?",
00497                                                 1, 1),
00498                           env);
00499   scheme_add_global_constant("thread-wait",
00500                           scheme_make_prim_w_arity(thread_wait,
00501                                                 "thread-wait",
00502                                                 1, 1),
00503                           env);
00504 
00505   scheme_add_global_constant("current-thread", 
00506                           scheme_make_prim_w_arity(sch_current,
00507                                                 "current-thread", 
00508                                                 0, 0), 
00509                           env);
00510 
00511   scheme_add_global_constant("kill-thread", 
00512                           scheme_make_prim_w_arity(kill_thread,
00513                                                 "kill-thread", 
00514                                                 1, 1), 
00515                           env);
00516   scheme_add_global_constant("break-thread", 
00517                           scheme_make_prim_w_arity(break_thread,
00518                                                 "break-thread", 
00519                                                 1, 1), 
00520                           env);
00521 
00522   scheme_add_global_constant("thread-suspend", 
00523                           scheme_make_prim_w_arity(thread_suspend,
00524                                                 "thread-suspend", 
00525                                                 1, 1), 
00526                           env);
00527   scheme_add_global_constant("thread-resume", 
00528                           scheme_make_prim_w_arity(thread_resume,
00529                                                 "thread-resume", 
00530                                                 1, 2), 
00531                           env);
00532 
00533   scheme_add_global_constant("thread-resume-evt", 
00534                           scheme_make_prim_w_arity(make_thread_resume,
00535                                                 "thread-resume-evt", 
00536                                                 1, 1), 
00537                           env);
00538   scheme_add_global_constant("thread-suspend-evt", 
00539                           scheme_make_prim_w_arity(make_thread_suspend,
00540                                                 "thread-suspend-evt", 
00541                                                 1, 1), 
00542                           env);
00543   scheme_add_global_constant("thread-dead-evt", 
00544                           scheme_make_prim_w_arity(make_thread_dead,
00545                                                 "thread-dead-evt", 
00546                                                 1, 1), 
00547                           env);
00548 
00549   register_thread_sync();
00550   scheme_add_evt(scheme_thread_suspend_type, (Scheme_Ready_Fun)resume_suspend_ready, NULL, NULL, 1);
00551   scheme_add_evt(scheme_thread_resume_type, (Scheme_Ready_Fun)resume_suspend_ready, NULL, NULL, 1);
00552   scheme_add_evt(scheme_thread_dead_type, (Scheme_Ready_Fun)dead_ready, NULL, NULL, 1);
00553 
00554   scheme_add_global_constant("make-custodian",
00555                           scheme_make_prim_w_arity(make_custodian,
00556                                                 "make-custodian",
00557                                                 0, 1),
00558                           env);
00559   scheme_add_global_constant("custodian?",
00560                           scheme_make_folding_prim(custodian_p,
00561                                                 "custodian?",
00562                                                 1, 1, 1),
00563                           env);
00564   scheme_add_global_constant("custodian-shutdown-all",
00565                           scheme_make_prim_w_arity(custodian_close_all,
00566                                                 "custodian-shutdown-all",
00567                                                 1, 1),
00568                           env);
00569   scheme_add_global_constant("custodian-managed-list",
00570                           scheme_make_prim_w_arity(custodian_to_list,
00571                                                 "custodian-managed-list",
00572                                                 2, 2),
00573                           env);
00574   scheme_add_global_constant("current-custodian", 
00575                           scheme_register_parameter(current_custodian,
00576                                                  "current-custodian",
00577                                                  MZCONFIG_CUSTODIAN),
00578                           env);
00579   scheme_add_global_constant("make-custodian-box",
00580                           scheme_make_prim_w_arity(make_custodian_box,
00581                                                 "make-custodian-box",
00582                                                 2, 2),
00583                           env);
00584   scheme_add_global_constant("custodian-box-value",
00585                           scheme_make_prim_w_arity(custodian_box_value,
00586                                                 "custodian-box-value",
00587                                                 1, 1),
00588                           env);
00589   scheme_add_global_constant("custodian-box?",
00590                           scheme_make_folding_prim(custodian_box_p,
00591                                                       "custodian-box?",
00592                                                       1, 1, 1),
00593                           env);
00594   scheme_add_global_constant("call-in-nested-thread",
00595                           scheme_make_prim_w_arity(call_as_nested_thread,
00596                                                 "call-in-nested-thread",
00597                                                 1, 2),
00598                           env);
00599 
00600   scheme_add_global_constant("current-namespace", 
00601                           scheme_register_parameter(current_namespace,
00602                                                  "current-namespace",
00603                                                  MZCONFIG_ENV),
00604                           env);
00605 
00606   scheme_add_global_constant("namespace?", 
00607                           scheme_make_prim_w_arity(namespace_p,
00608                                                 "namespace?", 
00609                                                 1, 1), 
00610                           env);
00611 
00612   scheme_add_global_constant("security-guard?", 
00613                           scheme_make_prim_w_arity(security_guard_p,
00614                                                 "security-guard?", 
00615                                                 1, 1), 
00616                           env);
00617   scheme_add_global_constant("make-security-guard", 
00618                           scheme_make_prim_w_arity(make_security_guard,
00619                                                 "make-security-guard", 
00620                                                 3, 4), 
00621                           env);
00622   scheme_add_global_constant("current-security-guard", 
00623                           scheme_register_parameter(current_security_guard,
00624                                                  "current-security-guard",
00625                                                  MZCONFIG_SECURITY_GUARD),
00626                           env);
00627 
00628   scheme_add_global_constant("thread-group?", 
00629                           scheme_make_prim_w_arity(thread_set_p,
00630                                                 "thread-group?", 
00631                                                 1, 1), 
00632                           env);
00633   scheme_add_global_constant("make-thread-group", 
00634                           scheme_make_prim_w_arity(make_thread_set,
00635                                                 "make-thread-group", 
00636                                                 0, 1), 
00637                           env);
00638   scheme_add_global_constant("current-thread-group", 
00639                           scheme_register_parameter(current_thread_set,
00640                                                  "current-thread-group",
00641                                                  MZCONFIG_THREAD_SET),
00642                           env);
00643 
00644   scheme_add_global_constant("parameter?", 
00645                           scheme_make_prim_w_arity(parameter_p,
00646                                                 "parameter?", 
00647                                                 1, 1), 
00648                           env);
00649   scheme_add_global_constant("make-parameter", 
00650                           scheme_make_prim_w_arity(make_parameter,
00651                                                 "make-parameter", 
00652                                                 1, 2), 
00653                           env);
00654   scheme_add_global_constant("make-derived-parameter", 
00655                           scheme_make_prim_w_arity(make_derived_parameter,
00656                                                 "make-derived-parameter", 
00657                                                 3, 3), 
00658                           env);
00659   scheme_add_global_constant("parameter-procedure=?", 
00660                           scheme_make_prim_w_arity(parameter_procedure_eq,
00661                                                 "parameter-procedure=?", 
00662                                                 2, 2), 
00663                           env);
00664   scheme_add_global_constant("parameterization?", 
00665                           scheme_make_prim_w_arity(parameterization_p,
00666                                                 "parameterization?", 
00667                                                 1, 1), 
00668                           env);
00669 
00670   scheme_add_global_constant("thread-cell?", 
00671                           scheme_make_prim_w_arity(thread_cell_p,
00672                                                 "thread-cell?", 
00673                                                 1, 1), 
00674                           env);
00675   scheme_add_global_constant("make-thread-cell", 
00676                           scheme_make_prim_w_arity(make_thread_cell,
00677                                                 "make-thread-cell", 
00678                                                 1, 2), 
00679                           env);
00680   scheme_add_global_constant("thread-cell-ref", 
00681                           scheme_make_prim_w_arity(thread_cell_get,
00682                                                 "thread-cell-ref", 
00683                                                 1, 1), 
00684                           env);
00685   scheme_add_global_constant("thread-cell-set!", 
00686                           scheme_make_prim_w_arity(thread_cell_set,
00687                                                 "thread-cell-set!", 
00688                                                 2, 2), 
00689                           env);
00690   scheme_add_global_constant("current-preserved-thread-cell-values", 
00691                           scheme_make_prim_w_arity(thread_cell_values,
00692                                                 "current-preserved-thread-cell-values", 
00693                                                 0, 1), 
00694                           env);
00695 
00696   
00697   scheme_add_global_constant("make-will-executor", 
00698                           scheme_make_prim_w_arity(make_will_executor,
00699                                                 "make-will-executor", 
00700                                                 0, 0), 
00701                           env);
00702   scheme_add_global_constant("will-executor?", 
00703                           scheme_make_prim_w_arity(will_executor_p,
00704                                                 "will-executor?", 
00705                                                 1, 1), 
00706                           env);
00707   scheme_add_global_constant("will-register", 
00708                           scheme_make_prim_w_arity(register_will,
00709                                                 "will-register", 
00710                                                 3, 3), 
00711                           env);
00712   scheme_add_global_constant("will-try-execute", 
00713                           scheme_make_prim_w_arity(will_executor_try,
00714                                                 "will-try-execute", 
00715                                                 1, 1), 
00716                           env);
00717   scheme_add_global_constant("will-execute", 
00718                           scheme_make_prim_w_arity(will_executor_go,
00719                                                 "will-execute", 
00720                                                 1, 1), 
00721                           env);
00722   
00723   scheme_add_evt_through_sema(scheme_will_executor_type, will_executor_sema, NULL);
00724 
00725 
00726   scheme_add_global_constant("collect-garbage", 
00727                           scheme_make_prim_w_arity(collect_garbage, 
00728                                                 "collect-garbage",
00729                                                 0, 0), 
00730                           env);
00731   scheme_add_global_constant("current-memory-use", 
00732                           scheme_make_prim_w_arity(current_memory_use, 
00733                                                 "current-memory-use",
00734                                                 0, 1),
00735                           env);
00736 
00737   scheme_add_global_constant("custodian-require-memory",
00738                           scheme_make_prim_w_arity(custodian_require_mem,
00739                                                 "custodian-require-memory",
00740                                                 3, 3),
00741                           env);
00742   scheme_add_global_constant("custodian-limit-memory",
00743                           scheme_make_prim_w_arity(custodian_limit_mem,
00744                                                 "custodian-limit-memory",
00745                                                 2, 3),
00746                           env);
00747   scheme_add_global_constant("custodian-memory-accounting-available?",
00748                              scheme_make_prim_w_arity(custodian_can_mem,
00749                                                       "custodian-memory-accounting-available?",
00750                                                 0, 0),
00751                           env);
00752   
00753 
00754   scheme_add_global_constant("evt?", 
00755                           scheme_make_folding_prim(evt_p,
00756                                                 "evt?", 
00757                                                 1, 1, 1), 
00758                           env);
00759   scheme_add_global_constant("sync", 
00760                           scheme_make_prim_w_arity2(sch_sync,
00761                                                  "sync", 
00762                                                  1, -1,
00763                                                  0, -1), 
00764                           env);
00765   scheme_add_global_constant("sync/timeout", 
00766                           scheme_make_prim_w_arity2(sch_sync_timeout,
00767                                                  "sync/timeout", 
00768                                                  2, -1,
00769                                                  0, -1), 
00770                           env);
00771   scheme_add_global_constant("sync/enable-break", 
00772                           scheme_make_prim_w_arity2(sch_sync_enable_break,
00773                                                  "sync/enable-break", 
00774                                                  1, -1,
00775                                                  0, -1),
00776                           env);
00777   scheme_add_global_constant("sync/timeout/enable-break", 
00778                           scheme_make_prim_w_arity2(sch_sync_timeout_enable_break,
00779                                                  "sync/timeout/enable-break", 
00780                                                  2, -1,
00781                                                  0, -1),
00782                           env);
00783   scheme_add_global_constant("choice-evt", 
00784                           scheme_make_prim_w_arity(evts_to_evt,
00785                                                 "choice-evt", 
00786                                                 0, -1), 
00787                           env);
00788                              
00789   scheme_add_global_constant("current-thread-initial-stack-size", 
00790                           scheme_register_parameter(current_thread_initial_stack_size,
00791                                                  "current-thread-initial-stack-size",
00792                                                  MZCONFIG_THREAD_INIT_STACK_SIZE),
00793                           env);
00794 
00795 
00796   REGISTER_SO(namespace_options);
00797 }
00798 
00799 void scheme_init_memtrace(Scheme_Env *env)
00800 {
00801   Scheme_Object *v;
00802   Scheme_Env *newenv;
00803 
00804   v = scheme_intern_symbol("#%memtrace");
00805   newenv = scheme_primitive_module(v, env);
00806     
00807   mtrace_cmark_key = scheme_make_symbol("memory-trace-continuation-mark");
00808   scheme_add_global("memory-trace-continuation-mark", mtrace_cmark_key, 
00809                   newenv);
00810   v = scheme_make_prim_w_arity(new_tracking_fun, 
00811                             "new-memtrace-tracking-function", 1, 1);
00812   scheme_add_global("new-memtrace-tracking-function", v, newenv);
00813   v = scheme_make_prim_w_arity(union_tracking_val, 
00814                             "unioned-memtrace-tracking-value", 1, 1);
00815   scheme_add_global("unioned-memtrace-tracking-value", v, newenv);
00816   scheme_finish_primitive_module(newenv);
00817 }
00818 
00819 void scheme_init_parameterization_readonly_globals()
00820 {
00821   REGISTER_SO(scheme_exn_handler_key);
00822   REGISTER_SO(scheme_parameterization_key);
00823   REGISTER_SO(scheme_break_enabled_key);
00824   scheme_exn_handler_key = scheme_make_symbol("exnh");
00825   scheme_parameterization_key = scheme_make_symbol("paramz");
00826   scheme_break_enabled_key = scheme_make_symbol("break-on?");
00827 }
00828   
00829 void scheme_init_parameterization(Scheme_Env *env)
00830 {
00831   Scheme_Object *v;
00832   Scheme_Env *newenv;
00833 
00834   REGISTER_SO(recycle_cell);
00835   REGISTER_SO(maybe_recycle_cell);
00836 
00837   v = scheme_intern_symbol("#%paramz");
00838   newenv = scheme_primitive_module(v, env);
00839   
00840   scheme_add_global_constant("exception-handler-key", 
00841                           scheme_exn_handler_key,
00842                           newenv);
00843   scheme_add_global_constant("parameterization-key", 
00844                           scheme_parameterization_key,
00845                           newenv);
00846   scheme_add_global_constant("break-enabled-key", 
00847                           scheme_break_enabled_key,
00848                           newenv);
00849 
00850   scheme_add_global_constant("extend-parameterization", 
00851                           scheme_make_prim_w_arity(extend_parameterization,
00852                                                 "extend-parameterization", 
00853                                                 1, -1), 
00854                           newenv);
00855 
00856   scheme_add_global_constant("check-for-break", 
00857                           scheme_make_prim_w_arity(check_break_now,
00858                                                 "check-for-break", 
00859                                                 0, 0), 
00860                           newenv);
00861 
00862 
00863   scheme_finish_primitive_module(newenv);
00864   scheme_protect_primitive_provide(newenv, NULL);
00865 }
00866 
00867 static Scheme_Object *collect_garbage(int c, Scheme_Object *p[])
00868 {
00869   scheme_collect_garbage();
00870 
00871   return scheme_void;
00872 }
00873 
00874 static Scheme_Object *current_memory_use(int argc, Scheme_Object *args[])
00875 {
00876   Scheme_Object *arg = NULL;
00877   long retval = 0;
00878 
00879   if (argc) {
00880     if(SAME_TYPE(SCHEME_TYPE(args[0]), scheme_custodian_type)) {
00881       arg = args[0];
00882     } else if(SCHEME_PROCP(args[0])) {
00883       arg = args[0];
00884     } else {
00885       scheme_wrong_type("current-memory-use", 
00886                      "custodian or memory-trace-function", 
00887                      0, argc, args);
00888     }
00889   }
00890 
00891 #ifdef MZ_PRECISE_GC
00892   retval = GC_get_memory_use(arg);
00893 #else
00894   retval = GC_get_memory_use();
00895 #endif
00896   
00897   return scheme_make_integer_value(retval);
00898 }
00899 
00900 
00901 /*========================================================================*/
00902 /*                              custodians                                */
00903 /*========================================================================*/
00904 
00905 static void adjust_limit_table(Scheme_Custodian *c)
00906 {
00907   /* If a custodian has a limit and any objects or children, then it
00908      must not be collected and merged with its parent. To prevent
00909      collection, we register the custodian in the `limited_custodians'
00910      table. */
00911   if (c->has_limit) {
00912     if (c->elems || CUSTODIAN_FAM(c->children)) {
00913       if (!c->recorded) {
00914         c->recorded = 1;
00915         if (!limited_custodians)
00916           limited_custodians = scheme_make_hash_table(SCHEME_hash_ptr);
00917         scheme_hash_set(limited_custodians, (Scheme_Object *)c, scheme_true);
00918       }
00919     } else if (c->recorded) {
00920       c->recorded = 0;
00921       if (limited_custodians)
00922         scheme_hash_set(limited_custodians, (Scheme_Object *)c, NULL);
00923     }
00924   }
00925 }
00926 
00927 static Scheme_Object *custodian_require_mem(int argc, Scheme_Object *args[])
00928 {
00929   long lim;
00930   Scheme_Custodian *c1, *c2, *cx;
00931 
00932   if(NOT_SAME_TYPE(SCHEME_TYPE(args[0]), scheme_custodian_type)) {
00933     scheme_wrong_type("custodian-require-memory", "custodian", 0, argc, args);
00934     return NULL;
00935   }
00936 
00937   if (SCHEME_INTP(args[1]) && (SCHEME_INT_VAL(args[1]) > 0)) {
00938     lim = SCHEME_INT_VAL(args[1]);
00939   } else if (SCHEME_BIGNUMP(args[1]) && SCHEME_BIGPOS(args[1])) {
00940     lim = 0x3fffffff; /* more memory than we actually have */
00941   } else {
00942     scheme_wrong_type("custodian-require-memory", "positive exact integer", 1, argc, args);
00943     return NULL;
00944   }
00945 
00946   if(NOT_SAME_TYPE(SCHEME_TYPE(args[2]), scheme_custodian_type)) {
00947     scheme_wrong_type("custodian-require-memory", "custodian", 2, argc, args);
00948     return NULL;
00949   }
00950 
00951   c1 = (Scheme_Custodian *)args[0];
00952   c2 = (Scheme_Custodian *)args[2];
00953 
00954   /* Check whether c1 is a proper ancestor of c2: */
00955   if (c1 == c2) {
00956     cx = NULL;
00957   } else {
00958     for (cx = c2; cx && NOT_SAME_OBJ(cx, c1); ) {
00959       cx = CUSTODIAN_FAM(cx->parent);
00960     }
00961   }
00962   if (!cx) {
00963     scheme_raise_exn(MZEXN_FAIL_CONTRACT,
00964                      "custodian-require-memory: second custodian is not a sub-custodian of the first custodian");
00965   }
00966 
00967 #ifdef NEWGC_BTC_ACCOUNT
00968   if (GC_set_account_hook(MZACCT_REQUIRE, c1, lim, c2))
00969     return scheme_void;
00970 #endif
00971 
00972   scheme_raise_exn(MZEXN_FAIL_UNSUPPORTED,
00973                  "custodian-require-memory: not supported");
00974   return NULL; /* doesn't get here */
00975 }
00976 
00977 static Scheme_Object *custodian_limit_mem(int argc, Scheme_Object *args[])
00978 {
00979   long lim;
00980   
00981   if (NOT_SAME_TYPE(SCHEME_TYPE(args[0]), scheme_custodian_type)) {
00982     scheme_wrong_type("custodian-limit-memory", "custodian", 0, argc, args);
00983     return NULL;
00984   }
00985 
00986   if (SCHEME_INTP(args[1]) && (SCHEME_INT_VAL(args[1]) > 0)) {
00987     lim = SCHEME_INT_VAL(args[1]);
00988   } else if (SCHEME_BIGNUMP(args[1]) && SCHEME_BIGPOS(args[1])) {
00989     lim = 0x3fffffff; /* more memory than we actually have */
00990   } else {
00991     scheme_wrong_type("custodian-limit-memory", "positive exact integer", 1, argc, args);
00992     return NULL;
00993   }
00994 
00995   if (argc > 2) {
00996     if (NOT_SAME_TYPE(SCHEME_TYPE(args[2]), scheme_custodian_type)) {
00997       scheme_wrong_type("custodian-limit-memory", "custodian", 2, argc, args);
00998       return NULL;
00999     }
01000   }
01001 
01002   ((Scheme_Custodian *)args[0])->has_limit = 1;
01003   adjust_limit_table((Scheme_Custodian *)args[0]);
01004   if (argc > 2) {
01005     ((Scheme_Custodian *)args[2])->has_limit = 1;
01006     adjust_limit_table((Scheme_Custodian *)args[2]);
01007   }
01008 
01009 #ifdef NEWGC_BTC_ACCOUNT
01010   if (GC_set_account_hook(MZACCT_LIMIT, args[0], lim, (argc > 2) ? args[2] : args[0]))
01011     return scheme_void;
01012 #endif
01013 
01014   scheme_raise_exn(MZEXN_FAIL_UNSUPPORTED,
01015                  "custodian-limit-memory: not supported");
01016   return NULL; /* doesn't get here */
01017 }
01018 
01019 static Scheme_Object *custodian_can_mem(int argc, Scheme_Object *args[])
01020 {
01021 #ifdef NEWGC_BTC_ACCOUNT
01022   return scheme_true;
01023 #else
01024   return scheme_false;
01025 #endif
01026 }
01027 
01028 static Scheme_Object *new_tracking_fun(int argc, Scheme_Object *args[])
01029 {
01030   int retval = 0;
01031 
01032 #ifdef MZ_PRECISE_GC
01033   retval = GC_mtrace_new_id(args[0]);
01034 #endif
01035 
01036   return scheme_make_integer(retval);
01037 }
01038 
01039 static Scheme_Object *union_tracking_val(int argc, Scheme_Object *args[])
01040 {
01041   int retval = 0;
01042 
01043 #ifdef MZ_PRECISE_GC
01044   retval = GC_mtrace_union_current_with(SCHEME_INT_VAL(args[0]));
01045 #endif
01046 
01047   return scheme_make_integer(retval);
01048 }
01049 
01050 static void ensure_custodian_space(Scheme_Custodian *m, int k)
01051 {
01052   int i;
01053 
01054   if (m->count + k >= m->alloc) {
01055     Scheme_Object ***naya_boxes;
01056     Scheme_Custodian_Reference **naya_mrefs;
01057     Scheme_Close_Custodian_Client **naya_closers;
01058     void **naya_data;
01059 
01060     m->alloc = (m->alloc ? (2 * m->alloc) : 4);
01061     if (m->alloc < k)
01062       m->alloc += k;
01063     
01064     naya_boxes = MALLOC_N(Scheme_Object**, m->alloc);
01065     naya_closers = MALLOC_N(Scheme_Close_Custodian_Client*, m->alloc);
01066     naya_data = MALLOC_N(void*, m->alloc);
01067     naya_mrefs = MALLOC_N(Scheme_Custodian_Reference*, m->alloc);
01068 
01069     for (i = m->count; i--; ) {
01070       naya_boxes[i] = m->boxes[i];
01071       m->boxes[i] = NULL;
01072       naya_closers[i] = m->closers[i];
01073       m->closers[i] = NULL;
01074       naya_data[i] = m->data[i];
01075       m->data[i] = NULL;
01076       naya_mrefs[i] = m->mrefs[i];
01077       m->mrefs[i] = NULL;
01078     }
01079 
01080     m->boxes = naya_boxes;
01081     m->closers = naya_closers;
01082     m->data = naya_data;
01083     m->mrefs = naya_mrefs;
01084   }
01085 }
01086 
01087 static void add_managed_box(Scheme_Custodian *m, 
01088                          Scheme_Object **box, Scheme_Custodian_Reference *mref,
01089                          Scheme_Close_Custodian_Client *f, void *data)
01090 {
01091   int i;
01092 
01093   for (i = m->count; i--; ) {
01094     if (!m->boxes[i]) {
01095       m->boxes[i] = box;
01096       m->closers[i] = f;
01097       m->data[i] = data;
01098       m->mrefs[i] = mref;
01099 
01100       m->elems++;
01101       adjust_limit_table(m);
01102 
01103       return;
01104     }
01105   }
01106 
01107   ensure_custodian_space(m, 1);
01108 
01109   m->boxes[m->count] = box;
01110   m->closers[m->count] = f;
01111   m->data[m->count] = data;
01112   m->mrefs[m->count] = mref;
01113 
01114   m->elems++;
01115   adjust_limit_table(m);
01116 
01117   m->count++;
01118 }
01119 
01120 static void remove_managed(Scheme_Custodian_Reference *mr, Scheme_Object *o,
01121                         Scheme_Close_Custodian_Client **old_f, void **old_data)
01122 {
01123   Scheme_Custodian *m;
01124   int i;
01125 
01126   if (!mr)
01127     return;
01128   m = CUSTODIAN_FAM(mr);
01129   if (!m)
01130     return;
01131 
01132   for (i = m->count; i--; ) {
01133     if (m->boxes[i] && SAME_OBJ((xCUSTODIAN_FAM(m->boxes[i])),  o)) {
01134       xCUSTODIAN_FAM(m->boxes[i]) = 0;
01135       m->boxes[i] = NULL;
01136       CUSTODIAN_FAM(m->mrefs[i]) = 0;
01137       m->mrefs[i] = NULL;
01138       if (old_f)
01139        *old_f = m->closers[i];
01140       if (old_data)
01141        *old_data = m->data[i];
01142       m->data[i] = NULL;
01143       --m->elems;
01144       adjust_limit_table(m);
01145       break;
01146     }
01147   }
01148 
01149   while (m->count && !m->boxes[m->count - 1]) {
01150     --m->count;
01151   }
01152 }
01153 
01154 static void adjust_custodian_family(void *mgr, void *skip_move)
01155 {
01156   /* Threads note: because this function is only called as a
01157      finalization callback, it is automatically synchronized by the GC
01158      locks. And it is synchronized against all finalizations, so a
01159      managee can't try to unregister while we're shuffling its
01160      custodian. */
01161   Scheme_Custodian *r = (Scheme_Custodian *)mgr, *parent, *m;
01162   int i;
01163 
01164   parent = CUSTODIAN_FAM(r->parent);
01165 
01166   if (parent) {
01167     /* Remove from parent's list of children: */
01168     if (CUSTODIAN_FAM(parent->children) == r) {
01169       CUSTODIAN_FAM(parent->children) = CUSTODIAN_FAM(r->sibling);
01170     } else {
01171       m = CUSTODIAN_FAM(parent->children);
01172       while (m && CUSTODIAN_FAM(m->sibling) != r) {
01173        m = CUSTODIAN_FAM(m->sibling);
01174       }
01175       if (m)
01176        CUSTODIAN_FAM(m->sibling) = CUSTODIAN_FAM(r->sibling);
01177     }
01178 
01179     /* Remove from global list: */
01180     if (CUSTODIAN_FAM(r->global_next))
01181       CUSTODIAN_FAM(CUSTODIAN_FAM(r->global_next)->global_prev) = CUSTODIAN_FAM(r->global_prev);
01182     else
01183       last_custodian = CUSTODIAN_FAM(r->global_prev);
01184     CUSTODIAN_FAM(CUSTODIAN_FAM(r->global_prev)->global_next) = CUSTODIAN_FAM(r->global_next);
01185     
01186     /* Add children to parent's list: */
01187     for (m = CUSTODIAN_FAM(r->children); m; ) {
01188       Scheme_Custodian *next = CUSTODIAN_FAM(m->sibling);
01189       
01190       CUSTODIAN_FAM(m->parent) = parent;
01191       CUSTODIAN_FAM(m->sibling) = CUSTODIAN_FAM(parent->children);
01192       CUSTODIAN_FAM(parent->children) = m;
01193 
01194       m = next;
01195     }
01196 
01197     adjust_limit_table(parent);
01198 
01199     /* Add remaining managed items to parent: */
01200     if (!skip_move) {
01201       for (i = 0; i < r->count; i++) {
01202        if (r->boxes[i]) {
01203          CUSTODIAN_FAM(r->mrefs[i]) = parent;
01204          add_managed_box(parent, r->boxes[i], r->mrefs[i], r->closers[i], r->data[i]);
01205 #ifdef MZ_PRECISE_GC
01206          {
01207            Scheme_Object *o;
01208            o = xCUSTODIAN_FAM(r->boxes[i]);
01209            if (SAME_TYPE(SCHEME_TYPE(o), scheme_thread_hop_type)) {
01210              o = WEAKIFIED(((Scheme_Thread_Custodian_Hop *)o)->p);
01211              if (o)
01212               GC_register_thread(o, parent);
01213            }
01214          }
01215 #endif
01216        }
01217       }
01218     }
01219   }
01220 
01221   CUSTODIAN_FAM(r->parent) = NULL;
01222   CUSTODIAN_FAM(r->sibling) = NULL;
01223   if (!skip_move)
01224     CUSTODIAN_FAM(r->children) = NULL;
01225   CUSTODIAN_FAM(r->global_prev) = NULL;
01226   CUSTODIAN_FAM(r->global_next) = NULL;
01227 }
01228 
01229 void insert_custodian(Scheme_Custodian *m, Scheme_Custodian *parent)
01230 {
01231   /* insert into parent's list: */
01232   CUSTODIAN_FAM(m->parent) = parent;
01233   if (parent) {
01234     CUSTODIAN_FAM(m->sibling) = CUSTODIAN_FAM(parent->children);
01235     CUSTODIAN_FAM(parent->children) = m;
01236   } else
01237     CUSTODIAN_FAM(m->sibling) = NULL;
01238 
01239   /* Insert into global chain. A custodian is always inserted
01240      directly after its parent, so families stay together, and
01241      the local list stays in the same order as the sibling list. */
01242   if (parent) {
01243     Scheme_Custodian *next;
01244     next = CUSTODIAN_FAM(parent->global_next);
01245     CUSTODIAN_FAM(m->global_next) = next;
01246     CUSTODIAN_FAM(m->global_prev) = parent;
01247     CUSTODIAN_FAM(parent->global_next) = m;
01248     if (next)
01249       CUSTODIAN_FAM(next->global_prev) = m;
01250     else
01251       last_custodian = m;
01252   } else {
01253     CUSTODIAN_FAM(m->global_next) = NULL;
01254     CUSTODIAN_FAM(m->global_prev) = NULL;
01255   }
01256 
01257   if (parent)
01258     adjust_limit_table(parent);
01259 }
01260 
01261 Scheme_Custodian *scheme_make_custodian(Scheme_Custodian *parent) 
01262 {
01263   Scheme_Custodian *m;
01264   Scheme_Custodian_Reference *mw;
01265 
01266   if (!parent)
01267     parent = main_custodian; /* still NULL if we're creating main; that's ok */
01268   
01269   m = MALLOC_ONE_TAGGED(Scheme_Custodian);
01270 
01271   m->so.type = scheme_custodian_type;
01272 
01273   m->alloc = m->count = 0;
01274 
01275   mw = MALLOC_MREF();
01276   m->parent = mw;
01277   mw = MALLOC_MREF();
01278   m->children = mw;
01279   mw = MALLOC_MREF();
01280   m->sibling = mw;
01281   mw = MALLOC_MREF();
01282   m->global_next = mw;
01283   mw = MALLOC_MREF();
01284   m->global_prev = mw;
01285 
01286   CUSTODIAN_FAM(m->children) = NULL;
01287 
01288   insert_custodian(m, parent);
01289 
01290   scheme_add_finalizer(m, adjust_custodian_family, NULL);
01291 
01292   return m;
01293 }
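
/* A minimal usage sketch (not from the original source): creating a
   subordinate custodian and later shutting it down. Passing NULL makes the
   new custodian a child of the main custodian, as in the code above; the
   shutdown call assumes the usual scheme_close_managed() entry point, which
   drives scheme_do_close_managed() below. */
#if 0
static void sketch_custodian_lifetime(void)
{
  Scheme_Custodian *c;
  c = scheme_make_custodian(NULL);
  /* ... register resources under `c' via scheme_add_managed() ... */
  scheme_close_managed(c);  /* closes everything registered under `c' */
}
#endif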
01294 
01295 static void rebox_willdone_object(void *o, void *mr)
01296 {
01297   Scheme_Custodian *m = CUSTODIAN_FAM((Scheme_Custodian_Reference *)mr);
01298   Scheme_Close_Custodian_Client *f;
01299   void *data;
01300 
01301   /* Still needs management? */
01302   if (m) {
01303 #ifdef MZ_PRECISE_GC
01304     Scheme_Object *b;
01305 #else
01306     Scheme_Object **b;
01307 #endif
01308 
01309     remove_managed(mr, o, &f, &data);
01310 
01311 #ifdef MZ_PRECISE_GC
01312     b = scheme_box(NULL);
01313 #else
01314     b = MALLOC_ONE(Scheme_Object*); /* not atomic this time */
01315 #endif
01316     xCUSTODIAN_FAM(b) = o;
01317     
01318     /* Put the custodian back: */
01319     CUSTODIAN_FAM((Scheme_Custodian_Reference *)mr) = m;
01320 
01321     add_managed_box(m, (Scheme_Object **)b, (Scheme_Custodian_Reference *)mr, f, data);
01322   }
01323 }
01324 
01325 static void managed_object_gone(void *o, void *mr)
01326 {
01327   Scheme_Custodian *m = CUSTODIAN_FAM((Scheme_Custodian_Reference *)mr);
01328 
01329   /* Still has management? */
01330   if (m)
01331     remove_managed(mr, o, NULL, NULL);
01332 }
01333 
01334 int scheme_custodian_is_available(Scheme_Custodian *m)
01335 {
01336   if (m->shut_down)
01337     return 0;
01338   return 1;
01339 }
01340 
01341 void scheme_custodian_check_available(Scheme_Custodian *m, const char *who, const char *what)
01342 {
01343   if (!m)
01344     m = (Scheme_Custodian *)scheme_get_param(scheme_current_config(), MZCONFIG_CUSTODIAN);
01345   
01346   if (!scheme_custodian_is_available(m))
01347     scheme_arg_mismatch(who, "the custodian has been shut down: ",
01348                         (Scheme_Object *)m);
01349 }
01350 
01351 Scheme_Custodian_Reference *scheme_add_managed(Scheme_Custodian *m, Scheme_Object *o, 
01352                                           Scheme_Close_Custodian_Client *f, void *data, 
01353                                           int must_close)
01354 {
01355 #ifdef MZ_PRECISE_GC
01356     Scheme_Object *b;
01357 #else
01358     Scheme_Object **b;
01359 #endif
01360   Scheme_Custodian_Reference *mr;
01361 
01362   if (!m)
01363     m = (Scheme_Custodian *)scheme_get_param(scheme_current_config(), MZCONFIG_CUSTODIAN);
01364   
01365   if (m->shut_down) {
01366     /* The custodian was shut down in the time that it took
01367        to allocate o. This situation should be avoided if at
01368        all possible, but here's the fail-safe. */
01369     if (f)
01370       f(o, data);
01371     return NULL;
01372   }
01373 
01374 #ifdef MZ_PRECISE_GC
01375   b = scheme_make_weak_box(NULL);
01376 #else
01377   b = MALLOC_ONE_WEAK(Scheme_Object*);
01378 #endif
01379   xCUSTODIAN_FAM(b) = o;
01380 
01381   mr = MALLOC_MREF();
01382 
01383   CUSTODIAN_FAM(mr) = m;
01384 
01385   /* The atomic link via the box `b' allows the execution of wills for
01386      o. After this, we should either drop the object or we have to
01387      hold on to the object strongly (for when custodian-close-all is
01388      called). */
01389   if (must_close)
01390     scheme_add_finalizer(o, rebox_willdone_object, mr);
01391   else
01392     scheme_add_finalizer(o, managed_object_gone, mr);
01393 
01394   add_managed_box(m, (Scheme_Object **)b, mr, f, data);
01395 
01396   return mr;
01397 }
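
/* Illustrative sketch (not part of the original thread.c): how embedding
   code might use scheme_add_managed()/scheme_remove_managed() above to tie
   a resource to the current custodian.  `My_Res', `my_res_release', and
   `res' are hypothetical names; only the scheme_* calls are taken from this
   file.  Passing NULL as the custodian selects the current custodian
   parameter, and must_close = 1 re-registers the object strongly once its
   wills have run, so a later custodian shutdown still calls the closer
   (see the comment above).

     static void my_res_close(Scheme_Object *o, void *data)
     {
       my_res_release((My_Res *)o);            called on custodian shutdown
     }

     ...
     Scheme_Custodian_Reference *mref;
     mref = scheme_add_managed(NULL, (Scheme_Object *)res,
                               my_res_close, NULL, 1);
     ...
     scheme_remove_managed(mref, (Scheme_Object *)res);   after a normal close
*/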
01398 
01399 void scheme_remove_managed(Scheme_Custodian_Reference *mr, Scheme_Object *o)
01400 {
01401   /* Is this a good idea? I'm not sure: */
01402   scheme_subtract_finalizer(o, managed_object_gone, mr);
01403   scheme_subtract_finalizer(o, rebox_willdone_object, mr);
01404 
01405   remove_managed(mr, o, NULL, NULL);
01406 }
01407 
01408 Scheme_Thread *scheme_do_close_managed(Scheme_Custodian *m, Scheme_Exit_Closer_Func cf)
01409 {
01410   Scheme_Thread *kill_self = NULL;
01411   Scheme_Custodian *c, *start, *next_m;
01412   int i, is_thread;
01413   Scheme_Thread *the_thread;
01414   Scheme_Object *o;
01415   Scheme_Close_Custodian_Client *f;
01416   void *data;
01417 
01418   if (!m)
01419     m = main_custodian;
01420 
01421   if (m->shut_down)
01422     return NULL;
01423 
01424   m->shut_down = 1;
01425 
01426   /* Need to kill children first, transitively, so find the
01427      last descendant. The family will be the global-list from
01428      m to this last descendant, inclusive. */
01429   for (c = m; CUSTODIAN_FAM(c->children); ) {
01430     for (c = CUSTODIAN_FAM(c->children); CUSTODIAN_FAM(c->sibling); ) {
01431       c = CUSTODIAN_FAM(c->sibling);
01432     }
01433   }
01434 
01435   start = m;
01436   m = c;
01437   while (1) {
01438     /* It matters that this loop starts at the top. See
01439        the m->count = i assignment below. */
01440     for (i = m->count; i--; ) {
01441       if (m->boxes[i]) {
01442 
01443        o = xCUSTODIAN_FAM(m->boxes[i]);
01444 
01445        f = m->closers[i];
01446        data = m->data[i];
01447 
01448        if (!cf && (SAME_TYPE(SCHEME_TYPE(o), scheme_thread_hop_type))) {
01449          /* We've added an indirection and made it weak. See mr_hop note above. */
01450          is_thread = 1;
01451          the_thread = (Scheme_Thread *)WEAKIFIED(((Scheme_Thread_Custodian_Hop *)o)->p);
01452        } else {
01453          is_thread = 0;
01454          the_thread = NULL;
01455        }
01456 
01457        xCUSTODIAN_FAM(m->boxes[i]) = NULL;
01458        CUSTODIAN_FAM(m->mrefs[i]) = NULL;
01459        
01460        /* Set m->count to i in case a GC happens while
01461           the closer is running. If there's a GC, then
01462           for_each_managed will be called. */
01463        m->count = i;
01464 
01465        if (is_thread && !the_thread) {
01466          /* Thread is already collected, so skip */
01467        } else if (cf) {
01468          cf(o, f, data);
01469        } else {
01470          if (is_thread) {
01471            if (the_thread) {
01472              /* Only kill the thread if it has no other custodians */
01473              if (SCHEME_NULLP(the_thread->extra_mrefs)) {
01474               if (do_kill_thread(the_thread))
01475                 kill_self = the_thread;
01476              } else {
01477               Scheme_Custodian_Reference *mref;
01478 
01479               mref = m->mrefs[i];
01480               if (mref == the_thread->mref) {
01481                 /* Designate a new main custodian for the thread */
01482                 mref = (Scheme_Custodian_Reference *)SCHEME_CAR(the_thread->extra_mrefs);
01483                 the_thread->mref = mref;
01484                 the_thread->extra_mrefs = SCHEME_CDR(the_thread->extra_mrefs);
01485 #ifdef MZ_PRECISE_GC
01486                 GC_register_thread(the_thread, CUSTODIAN_FAM(mref));
01487 #endif
01488               } else {
01489                 /* Just remove mref from the list of extras */
01490                 Scheme_Object *l, *prev = NULL;
01491                 for (l = the_thread->extra_mrefs; 1; l = SCHEME_CDR(l)) {
01492                   if (SAME_OBJ(SCHEME_CAR(l), (Scheme_Object *)mref)) {
01493                     if (prev)
01494                      SCHEME_CDR(prev) = SCHEME_CDR(l);
01495                     else
01496                      the_thread->extra_mrefs = SCHEME_CDR(l);
01497                     break;
01498                   }
01499                   prev = l;
01500                 }
01501               }
01502              }
01503            }
01504          } else {
01505            f(o, data);
01506          }
01507        }
01508       }
01509     }
01510 
01511 #ifdef MZ_PRECISE_GC
01512     {
01513       Scheme_Object *pr = m->cust_boxes, *wb;
01514       Scheme_Custodian_Box *cb;
01515       while (pr) {
01516         wb = SCHEME_CAR(pr);
01517         cb = (Scheme_Custodian_Box *)SCHEME_BOX_VAL(wb);
01518         if (cb) cb->v = NULL;
01519         pr = SCHEME_CDR(pr);
01520       }
01521       m->cust_boxes = NULL;
01522     }
01523 #endif
01524 
01525     m->count = 0;
01526     m->alloc = 0;
01527     m->elems = 0;
01528     m->boxes = NULL;
01529     m->closers = NULL;
01530     m->data = NULL;
01531     m->mrefs = NULL;
01532     m->shut_down = 1;
01533     
01534     if (SAME_OBJ(m, start))
01535       break;
01536     next_m = CUSTODIAN_FAM(m->global_prev);
01537 
01538     /* Remove this custodian from its parent */
01539     adjust_custodian_family(m, m);
01540 
01541     adjust_limit_table(m);
01542     
01543     m = next_m;
01544   }
01545 
01546   return kill_self;
01547 }
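
/* Shutdown order, illustrated (not part of the original source): for a
   custodian m with children A (created first) and B (created second), the
   global chain reads m <-> B <-> A (see insert_custodian() above).  The
   loops above first follow children/sibling links from m to the last
   descendant (A), then run closers while walking global_prev links back
   toward m, so A is emptied, then B, and m itself is handled last; every
   child custodian is therefore shut down before its parent. */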
01548 
01549 #ifdef MZ_XFORM
01550 START_XFORM_SKIP;
01551 #endif
01552 
01553 typedef void (*Scheme_For_Each_Func)(Scheme_Object *);
01554 
01555 static void for_each_managed(Scheme_Type type, Scheme_For_Each_Func cf)
01556      /* This function must not allocate. */
01557 {
01558   Scheme_Custodian *m;
01559   int i;
01560 
01561   if (SAME_TYPE(type, scheme_thread_type))
01562     type = scheme_thread_hop_type;
01563 
01564   /* back to front so children are first: */
01565   m = last_custodian;
01566 
01567   while (m) {
01568     for (i = m->count; i--; ) {
01569       if (m->boxes[i]) {
01570        Scheme_Object *o;
01571 
01572        o = xCUSTODIAN_FAM(m->boxes[i]);
01573       
01574        if (SAME_TYPE(SCHEME_TYPE(o), type)) {
01575          if (SAME_TYPE(type, scheme_thread_hop_type)) {
01576            /* We've added an indirection and made it weak. See mr_hop note above. */
01577            Scheme_Thread *t;
01578            t = (Scheme_Thread *)WEAKIFIED(((Scheme_Thread_Custodian_Hop *)o)->p);
01579            if (!t) {
01580              /* The thread is already collected */
01581              continue;
01582            } else if (SAME_OBJ(t->mref, m->mrefs[i]))
01583              o = (Scheme_Object *)t;
01584            else {
01585              /* The main custodian for this thread is someone else */
01586              continue;
01587            }
01588          }
01589 
01590          cf(o);
01591        }
01592       }
01593     }
01594 
01595     m = CUSTODIAN_FAM(m->global_prev);
01596   }
01597 }
01598 
01599 #ifdef MZ_XFORM
01600 END_XFORM_SKIP;
01601 #endif
01602 
01603 void scheme_close_managed(Scheme_Custodian *m)
01604 /* The trick is that we may need to kill the thread
01605    that is running us. If so, delay it to the very
01606    end. */
01607 {
01608   if (scheme_do_close_managed(m, NULL)) {
01609     /* Kill/suspend self */
01610     if (scheme_current_thread->suspend_to_kill)
01611       suspend_thread(scheme_current_thread);
01612     else
01613       scheme_thread_block(0.0);
01614   }
01615 
01616   /* Give killed threads time to die: */
01617   scheme_thread_block(0);
01618   scheme_current_thread->ran_some = 1;
01619 }
01620 
01621 static Scheme_Object *make_custodian(int argc, Scheme_Object *argv[])
01622 {
01623   Scheme_Custodian *m;
01624 
01625   if (argc) {
01626     if (!SCHEME_CUSTODIANP(argv[0]))
01627       scheme_wrong_type("make-custodian", "custodian", 0, argc, argv);
01628     m = (Scheme_Custodian *)argv[0];
01629   } else
01630     m = (Scheme_Custodian *)scheme_get_param(scheme_current_config(), MZCONFIG_CUSTODIAN);
01631 
01632   if (m->shut_down)
01633     scheme_arg_mismatch("make-custodian", 
01634                      "the custodian has been shut down: ", 
01635                      (Scheme_Object *)m);
01636 
01637   return (Scheme_Object *)scheme_make_custodian(m);
01638 }
01639 
01640 static Scheme_Object *custodian_p(int argc, Scheme_Object *argv[])
01641 {
01642   return SCHEME_CUSTODIANP(argv[0]) ? scheme_true : scheme_false;
01643 }
01644 
01645 static Scheme_Object *custodian_close_all(int argc, Scheme_Object *argv[])
01646 {
01647   if (!SCHEME_CUSTODIANP(argv[0]))
01648     scheme_wrong_type("custodian-shutdown-all", "custodian", 0, argc, argv);
01649 
01650   scheme_close_managed((Scheme_Custodian *)argv[0]);
01651 
01652   return scheme_void;
01653 }
01654 
01655 
01656 static Scheme_Object *extract_thread(Scheme_Object *o)
01657 {
01658   return (Scheme_Object *)WEAKIFIED(((Scheme_Thread_Custodian_Hop *)o)->p);
01659 }
01660 
01661 void scheme_add_custodian_extractor(Scheme_Type t, Scheme_Custodian_Extractor e)
01662 {
01663   if (!extractors) {
01664     int n;
01665     n = scheme_num_types();
01666     REGISTER_SO(extractors);
01667     extractors = MALLOC_N_ATOMIC(Scheme_Custodian_Extractor, n);
01668     memset(extractors, 0, sizeof(Scheme_Custodian_Extractor) * n);
01669     extractors[scheme_thread_hop_type] = extract_thread;
01670   }
01671 
01672   if (t) {
01673     extractors[t] = e;
01674   }
01675 }
01676 
01677 static Scheme_Object *custodian_to_list(int argc, Scheme_Object *argv[])
01678 {
01679   Scheme_Custodian *m, *m2, *c;
01680   Scheme_Object **hold, *o;
01681   int i, j, cnt, kids;
01682   Scheme_Type type;
01683   Scheme_Custodian_Extractor ex;
01684 
01685   if (!SCHEME_CUSTODIANP(argv[0]))
01686     scheme_wrong_type("custodian-managed-list", "custodian", 0, argc, argv);
01687   if (!SCHEME_CUSTODIANP(argv[1]))
01688     scheme_wrong_type("custodian-managed-list", "custodian", 1, argc, argv);
01689 
01690   m = (Scheme_Custodian *)argv[0];
01691   m2 = (Scheme_Custodian *)argv[1];
01692 
01693   /* Check that the second manages the first: */
01694   c = CUSTODIAN_FAM(m->parent);
01695   while (c && NOT_SAME_OBJ(m2, c)) {
01696     c = CUSTODIAN_FAM(c->parent);
01697   }
01698   if (!c) {
01699     scheme_arg_mismatch("custodian-managed-list",
01700                      "the second custodian does not "
01701                      "manage the first custodian: ",
01702                      argv[0]);
01703   }
01704 
01705   /* Init extractors: */
01706   scheme_add_custodian_extractor(0, NULL);
01707 
01708   /* Count children: */
01709   kids = 0;
01710   for (c = CUSTODIAN_FAM(m->children); c; c = CUSTODIAN_FAM(c->sibling)) {
01711     kids++;
01712   }
01713 
01714   /* Do all allocation first, since custodian links are weak.
01715      Furthermore, allocation may trigger collection of an otherwise
01716      unreferenced custodian, folding its items into this one,
01717      so loop until we've allocated enough. */
01718   do {
01719     cnt = m->count;
01720     hold = MALLOC_N(Scheme_Object *, cnt + kids);
01721   } while (cnt < m->count);
01722   
01723   /* Put managed items into hold array: */
01724   for (i = m->count, j = 0; i--; ) {
01725     if (m->boxes[i]) {
01726       o = xCUSTODIAN_FAM(m->boxes[i]);
01727       
01728       type = SCHEME_TYPE(o);
01729       ex = extractors[type];
01730       if (ex) {
01731        o = ex(o);
01732       }
01733 
01734       if (o) {
01735        hold[j] = o;
01736        j++;
01737       }
01738     }
01739   }
01740   /* Add kids: */
01741   for (c = CUSTODIAN_FAM(m->children); c; c = CUSTODIAN_FAM(c->sibling)) {
01742     hold[j] = (Scheme_Object *)c;
01743     j++;
01744   }
01745 
01746   /* Convert the array to a list: */
01747   return scheme_build_list(j, hold);
01748 }
01749 
01750 static Scheme_Object *current_custodian(int argc, Scheme_Object *argv[])
01751 {
01752   return scheme_param_config("current-custodian", 
01753                           scheme_make_integer(MZCONFIG_CUSTODIAN),
01754                           argc, argv,
01755                           -1, custodian_p, "custodian", 0);
01756 }
01757 
01758 static Scheme_Object *make_custodian_box(int argc, Scheme_Object *argv[])
01759 {
01760   Scheme_Custodian_Box *cb;
01761 
01762   if (!SCHEME_CUSTODIANP(argv[0]))
01763     scheme_wrong_type("make-custodian-box", "custodian", 0, argc, argv);
01764 
01765   cb = MALLOC_ONE_TAGGED(Scheme_Custodian_Box);
01766   cb->so.type = scheme_cust_box_type;
01767   cb->cust = (Scheme_Custodian *)argv[0];
01768   cb->v = argv[1];
01769 
01770 #ifdef MZ_PRECISE_GC
01771   /* 3m  */
01772   {
01773     Scheme_Object *wb, *pr, *prev;
01774     wb = GC_malloc_weak_box(cb, NULL, 0);
01775     pr = scheme_make_raw_pair(wb, cb->cust->cust_boxes);
01776     cb->cust->cust_boxes = pr;
01777     cb->cust->num_cust_boxes++;
01778     
01779     /* The GC prunes the list of custodian boxes in accounting mode,
01780        but prune here in case accounting is never triggered. */
01781     if (cb->cust->num_cust_boxes > 2 * cb->cust->checked_cust_boxes) {
01782       prev = pr;
01783       pr = SCHEME_CDR(pr);
01784       while (pr) {
01785         wb = SCHEME_CAR(pr);
01786         if (!SCHEME_BOX_VAL(wb)) { /* the weak box's content was collected */
01787           SCHEME_CDR(prev) = SCHEME_CDR(pr);
01788           --cb->cust->num_cust_boxes;
01789         } else {
01790           prev = pr;
01791         }
01792         pr = SCHEME_CDR(pr);
01793       } 
01794       cb->cust->checked_cust_boxes = cb->cust->num_cust_boxes;
01795     }
01796   }
01797 #else
01798   /* CGC */
01799   if (cust_box_count >= cust_box_alloc) {
01800     Scheme_Custodian_Box **cbs;
01801     if (!cust_box_alloc) {
01802       cust_box_alloc = 16;
01803       REGISTER_SO(cust_boxes);
01804     } else {
01805       cust_box_alloc = 2 * cust_box_alloc;
01806     }
01807     cbs = (Scheme_Custodian_Box **)scheme_malloc_atomic(cust_box_alloc * sizeof(Scheme_Custodian_Box *));
01808     memcpy(cbs, cust_boxes, cust_box_count * sizeof(Scheme_Custodian_Box *));
01809     cust_boxes = cbs;
01810   }
01811   cust_boxes[cust_box_count++] = cb;
01812 #endif
01813 
01814   return (Scheme_Object *)cb;
01815 }
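
/* The pruning heuristic above, worked through (illustrative): with
   checked_cust_boxes at 8, the list is left alone until num_cust_boxes
   exceeds 16; one sweep then drops entries whose weak box has been cleared
   and resets checked_cust_boxes to the surviving count.  Each sweep is thus
   paid for by at least a doubling of the list since the previous sweep,
   keeping the amortized cost per make-custodian-box call constant. */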
01816 
01817 static Scheme_Object *custodian_box_value(int argc, Scheme_Object *argv[])
01818 {
01819   Scheme_Custodian_Box *cb;
01820 
01821   if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_cust_box_type))
01822     scheme_wrong_type("custodian-box-value", "custodian-box", 0, argc, argv);
01823 
01824   cb = (Scheme_Custodian_Box *)argv[0];
01825   if (cb->cust->shut_down)
01826     return scheme_false;
01827 
01828   return cb->v;
01829 }
01830 
01831 static Scheme_Object *custodian_box_p(int argc, Scheme_Object *argv[])
01832 {
01833   if (SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_cust_box_type))
01834     return scheme_true;
01835   else
01836     return scheme_false;
01837 }
01838 
01839 #ifndef MZ_PRECISE_GC
01840 void scheme_clean_cust_box_list(void)
01841 {
01842   int src = 0, dest = 0;
01843   Scheme_Custodian_Box *cb;
01844   void *b;
01845 
01846   while (src < cust_box_count) {
01847     cb = cust_boxes[src];
01848     b = GC_base(cb);
01849     if (b 
01850 #ifndef USE_SENORA_GC
01851         && GC_is_marked(b)
01852 #endif
01853         ) {
01854       cust_boxes[dest++] = cb;
01855       if (cb->v) {
01856         if (cb->cust->shut_down) {
01857           cb->v = NULL;
01858         }
01859       }
01860     }
01861     src++;
01862   }
01863   cust_box_count = dest;
01864 }
01865 
01866 static void shrink_cust_box_array(void)
01867 {
01868   /* Call this function periodically to clean up. */
01869   if (cust_box_alloc > 128 && (cust_box_count * 4 < cust_box_alloc)) {
01870     Scheme_Custodian_Box **cbs;
01871     cust_box_alloc = cust_box_count * 2;
01872     cbs = (Scheme_Custodian_Box **)scheme_malloc_atomic(cust_box_alloc * sizeof(Scheme_Custodian_Box *));
01873     memcpy(cbs, cust_boxes, cust_box_count * sizeof(Scheme_Custodian_Box *));
01874     cust_boxes = cbs;
01875   }
01876 }
01877 #else
01878 # define shrink_cust_box_array() /* empty */
01879 # define clean_cust_box_list()   /* empty */
01880 #endif
01881 
01882 static void run_closers(Scheme_Object *o, Scheme_Close_Custodian_Client *f, void *data)
01883 {
01884   Scheme_Object *l;
01885 
01886   for (l = closers; SCHEME_RPAIRP(l); l = SCHEME_CDR(l)) {
01887     Scheme_Exit_Closer_Func cf;
01888     cf = (Scheme_Exit_Closer_Func)SCHEME_CAR(l);
01889     cf(o, f, data);
01890   }
01891 }
01892 
01893 static void run_atexit_closers(void)
01894 {
01895   mz_jmp_buf newbuf, *savebuf;
01896 
01897   /* scheme_start_atomic(); */
01898   /* Atomic would be needed if this was run to implement
01899      a custodian shutdown, but an actual custodian shutdown
01900      will have terminated everything else anyway. For a
01901      polite exit, other threads can run. */
01902 
01903   savebuf = scheme_current_thread->error_buf;
01904   scheme_current_thread->error_buf = &newbuf;
01905   if (!scheme_setjmp(newbuf)) {  
01906     scheme_do_close_managed(NULL, run_closers);
01907   }
01908   scheme_current_thread->error_buf = savebuf;
01909 }
01910 
01911 void scheme_add_atexit_closer(Scheme_Exit_Closer_Func f)
01912 {
01913   if (!closers) {
01914 #ifdef USE_ON_EXIT_FOR_ATEXIT
01915     on_exit(run_atexit_closers, NULL);
01916 #else
01917     atexit(run_atexit_closers);
01918 #endif
01919 
01920     REGISTER_SO(closers);
01921     closers = scheme_null;
01922   }
01923 
01924   closers = scheme_make_raw_pair((Scheme_Object *)f, closers);
01925 }
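
/* Illustrative sketch (not part of the original thread.c): registering an
   exit closer with scheme_add_atexit_closer() above.  At a polite exit,
   run_atexit_closers() calls scheme_do_close_managed(NULL, run_closers),
   and run_closers() hands every still-managed object to each registered
   Scheme_Exit_Closer_Func along with that object's custodian closer and
   data.  `note_leftover' and `my_log_unclosed' are hypothetical embedder
   names.

     static void note_leftover(Scheme_Object *o,
                               Scheme_Close_Custodian_Client *f, void *data)
     {
       my_log_unclosed(o);          hypothetical logging hook
     }

     ...
     scheme_add_atexit_closer(note_leftover);
*/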
01926 
01927 void scheme_schedule_custodian_close(Scheme_Custodian *c)
01928 {
01929   /* This procedure might be called by a garbage collector to register
01930      a resource-based kill. */
01931 
01932   if (!scheduled_kills) {
01933     REGISTER_SO(scheduled_kills);
01934     scheduled_kills = scheme_null;
01935   }
01936 
01937   scheduled_kills = scheme_make_pair((Scheme_Object *)c, scheduled_kills);
01938   scheme_fuel_counter = 0;
01939   scheme_jit_stack_boundary = (unsigned long)-1;
01940 }
01941 
01942 static void check_scheduled_kills()
01943 {
01944   while (scheduled_kills && !SCHEME_NULLP(scheduled_kills)) {
01945     Scheme_Object *k;
01946     k = SCHEME_CAR(scheduled_kills);
01947     scheduled_kills = SCHEME_CDR(scheduled_kills);
01948     scheme_close_managed((Scheme_Custodian *)k);
01949   }
01950 }
01951 
01952 static void check_current_custodian_allows(const char *who, Scheme_Thread *p)
01953 {
01954   Scheme_Object *l;
01955   Scheme_Custodian_Reference *mref;
01956   Scheme_Custodian *m, *current;
01957 
01958   /* Check management of the thread: */
01959   current = (Scheme_Custodian *)scheme_get_param(scheme_current_config(), MZCONFIG_CUSTODIAN);
01960 
01961   for (l = p->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
01962     mref = (Scheme_Custodian_Reference *)SCHEME_CAR(l);
01963     m = CUSTODIAN_FAM(mref);
01964     while (NOT_SAME_OBJ(m, current)) {
01965       m = CUSTODIAN_FAM(m->parent);
01966       if (!m)
01967        goto bad;
01968     }
01969   }
01970 
01971   mref = p->mref;
01972   if (!mref)
01973     return;
01974   m = CUSTODIAN_FAM(mref);
01975   if (!m)
01976     return;
01977 
01978   while (NOT_SAME_OBJ(m, current)) {
01979     m = CUSTODIAN_FAM(m->parent);
01980     if (!m)
01981       goto bad;
01982   }
01983 
01984   return;
01985 
01986  bad:
01987   scheme_arg_mismatch(who,
01988                     "the current custodian does not "
01989                     "solely manage the specified thread: ",
01990                     (Scheme_Object *)p);  
01991 }
01992 
01993 void scheme_free_all(void)
01994 {
01995   scheme_do_close_managed(NULL, NULL);
01996   scheme_free_dynamic_extensions();
01997 #ifdef MZ_PRECISE_GC
01998   GC_free_all();
01999 #endif
02000 }
02001 
02002 /*========================================================================*/
02003 /*                             thread sets                                */
02004 /*========================================================================*/
02005 
02006 #define TSET_IL MZ_INLINE
02007 
02008 static Scheme_Thread_Set *create_thread_set(Scheme_Thread_Set *parent)
02009 {
02010   Scheme_Thread_Set *t_set;
02011 
02012   t_set = MALLOC_ONE_TAGGED(Scheme_Thread_Set);
02013   t_set->so.type = scheme_thread_set_type;
02014 
02015   t_set->parent = parent;
02016 
02017   /* Everything in t_set is zeroed */
02018 
02019   return t_set;
02020 }
02021 
02022 static Scheme_Object *make_thread_set(int argc, Scheme_Object *argv[])
02023 {
02024   Scheme_Thread_Set *parent;
02025 
02026   if (argc) {
02027     if (!(SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_set_type)))
02028       scheme_wrong_type("make-thread-group", "thread-group", 0, argc, argv);
02029     parent = (Scheme_Thread_Set *)argv[0];
02030   } else
02031     parent = (Scheme_Thread_Set *)scheme_get_param(scheme_current_config(), MZCONFIG_THREAD_SET);
02032 
02033   return (Scheme_Object *)create_thread_set(parent);
02034 }
02035 
02036 static Scheme_Object *thread_set_p(int argc, Scheme_Object *argv[])
02037 {
02038   return ((SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_set_type)) 
02039          ? scheme_true 
02040          : scheme_false);
02041 }
02042 
02043 static Scheme_Object *current_thread_set(int argc, Scheme_Object *argv[])
02044 {
02045   return scheme_param_config("current-thread-group", 
02046                           scheme_make_integer(MZCONFIG_THREAD_SET),
02047                           argc, argv,
02048                           -1, thread_set_p, "thread-group", 0);
02049 }
02050 
02051 static TSET_IL void set_t_set_next(Scheme_Object *o, Scheme_Object *n)
02052 {
02053   if (SCHEME_THREADP(o))
02054     ((Scheme_Thread *)o)->t_set_next = n;
02055   else
02056     ((Scheme_Thread_Set *)o)->next = n;
02057 }
02058 
02059 static TSET_IL void set_t_set_prev(Scheme_Object *o, Scheme_Object *n)
02060 {
02061   if (SCHEME_THREADP(o))
02062     ((Scheme_Thread *)o)->t_set_prev = n;
02063   else
02064     ((Scheme_Thread_Set *)o)->prev = n;
02065 }
02066 
02067 static TSET_IL Scheme_Object *get_t_set_next(Scheme_Object *o)
02068 {
02069   if (SCHEME_THREADP(o))
02070     return ((Scheme_Thread *)o)->t_set_next;
02071   else
02072     return ((Scheme_Thread_Set *)o)->next;
02073 }
02074 
02075 static TSET_IL Scheme_Object *get_t_set_prev(Scheme_Object *o)
02076 {
02077   if (SCHEME_THREADP(o))
02078     return ((Scheme_Thread *)o)->t_set_prev;
02079   else
02080     return ((Scheme_Thread_Set *)o)->prev;
02081 }
02082 
02083 static void schedule_in_set(Scheme_Object *s, Scheme_Thread_Set *t_set)
02084 {
02085   num_running_threads += 1;
02086 
02087   while (1) {
02088     set_t_set_next(s, t_set->first);
02089     if (t_set->first)
02090       set_t_set_prev(t_set->first, s);
02091     t_set->first = s;
02092     if (t_set->current)
02093       break;
02094 
02095     t_set->current = s;
02096 
02097     s = (Scheme_Object *)t_set;
02098     t_set = t_set->parent;
02099   }
02100 }
02101 
02102 static void unschedule_in_set(Scheme_Object *s, Scheme_Thread_Set *t_set)
02103 {
02104   Scheme_Object *prev;
02105   Scheme_Object *next;
02106 
02107   num_running_threads -= 1;
02108 
02109   while (1) {
02110     prev = get_t_set_prev(s);
02111     next = get_t_set_next(s);
02112 
02113     if (!prev)
02114       t_set->first = next;
02115     else
02116       set_t_set_next(prev, next);
02117     if (next)
02118       set_t_set_prev(next, prev);
02119     set_t_set_prev(s, NULL);
02120     set_t_set_next(s, NULL);
02121     
02122     if (t_set->current == s) {
02123       if (next) {
02124        t_set->current = next;
02125       } else {
02126        t_set->current = t_set->first;
02127       }
02128     }
02129     
02130     if (t_set->current)
02131       break;
02132     
02133     s = (Scheme_Object *)t_set;
02134     t_set = t_set->parent;
02135   }
02136 }
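
/* How the two helpers above cooperate, illustrated (not part of the
   original source): thread groups form a tree whose leaves are threads,
   for example

       top set
        |-- set A
        |     |-- thread T1
        |     `-- thread T2
        `-- thread T3

   schedule_in_set() pushes an item onto its parent's `first' list and then
   walks upward, installing it as `current' only in ancestors that had no
   current item, stopping at the first that does; unschedule_in_set()
   unlinks the item and keeps walking upward while a set is left with
   nothing current.  select_thread() below then picks the next thread by
   following current/next pointers from scheme_thread_set_top down to a
   leaf. */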
02137 
02138 /*========================================================================*/
02139 /*                      thread record creation                            */
02140 /*========================================================================*/
02141 
02142 static Scheme_Thread *make_thread(Scheme_Config *config, 
02143                               Scheme_Thread_Cell_Table *cells,
02144                               Scheme_Object *init_break_cell,
02145                               Scheme_Custodian *mgr,
02146           void *stack_base)
02147 {
02148   Scheme_Thread *process;
02149   int prefix = 0;
02150 
02151   process = MALLOC_ONE_TAGGED(Scheme_Thread);
02152 
02153   process->so.type = scheme_thread_type;
02154 
02155   if (!scheme_main_thread) {
02156     /* Creating the first thread... */
02157 #ifdef MZ_PRECISE_GC
02158     register_traversers();
02159 #endif
02160     REGISTER_SO(scheme_current_thread);
02161     REGISTER_SO(scheme_main_thread);
02162     REGISTER_SO(scheme_first_thread);
02163     REGISTER_SO(thread_swap_callbacks);
02164     REGISTER_SO(thread_swap_out_callbacks);
02165     REGISTER_SO(swap_target);
02166 
02167     scheme_current_thread = process;
02168     scheme_first_thread = scheme_main_thread = process;
02169     process->prev = NULL;
02170     process->next = NULL;
02171 
02172     process->suspend_break = 1; /* until start-up finished */
02173 
02174     process->error_buf = &main_init_error_buf;
02175 
02176     thread_swap_callbacks = scheme_null;
02177     thread_swap_out_callbacks = scheme_null;
02178 
02179     GC_set_collect_start_callback(get_ready_for_GC);
02180     GC_set_collect_end_callback(done_with_GC);
02181 #ifdef MZ_PRECISE_GC
02182     GC_set_collect_inform_callback(inform_GC);
02183 #endif
02184 
02185 #ifdef LINK_EXTENSIONS_BY_TABLE
02186     scheme_current_thread_ptr = &scheme_current_thread;
02187     scheme_fuel_counter_ptr = &scheme_fuel_counter;
02188 #endif
02189     
02190 #if defined(MZ_PRECISE_GC)
02191     GC_set_get_thread_stack_base(scheme_get_current_thread_stack_start);
02192 #endif
02193     process->stack_start = stack_base;
02194 
02195   } else {
02196     prefix = 1;
02197   }
02198 
02199   process->engine_weight = 10000;
02200 
02201   process->cont_mark_pos = (MZ_MARK_POS_TYPE)1;
02202   process->cont_mark_stack = 0;
02203   process->cont_mark_stack_segments = NULL;
02204   process->cont_mark_seg_count = 0;
02205 
02206   if (!config) {
02207     make_initial_config(process);
02208     config = process->init_config;
02209   } else {
02210     process->init_config = config;
02211     process->cell_values = cells;
02212   }
02213 
02214   if (init_break_cell) {
02215     process->init_break_cell = init_break_cell;
02216   } else {
02217     Scheme_Object *v;
02218     v = scheme_make_thread_cell(scheme_false, 1);
02219     process->init_break_cell = v;
02220   }
02221 
02222   if (!mgr)
02223     mgr = (Scheme_Custodian *)scheme_get_param(config, MZCONFIG_CUSTODIAN);
02224 
02225 #ifdef MZ_PRECISE_GC
02226   GC_register_new_thread(process, mgr);
02227 #endif
02228 
02229   {
02230     Scheme_Object *t_set;
02231     t_set = scheme_get_param(config, MZCONFIG_THREAD_SET);
02232     process->t_set_parent = (Scheme_Thread_Set *)t_set;
02233   }
02234   
02235   if (SAME_OBJ(process, scheme_first_thread)) {
02236     REGISTER_SO(scheme_thread_set_top);
02237     scheme_thread_set_top = process->t_set_parent;
02238     scheme_thread_set_top->first = (Scheme_Object *)process;
02239     scheme_thread_set_top->current = (Scheme_Object *)process;
02240   } else
02241     schedule_in_set((Scheme_Object *)process, process->t_set_parent);
02242     
02243   scheme_init_jmpup_buf(&process->jmpup_buf);
02244 
02245   process->running = MZTHREAD_RUNNING;
02246 
02247   process->dw = NULL;
02248 
02249   process->block_descriptor = NOT_BLOCKED;
02250   process->block_check = NULL;
02251   process->block_needs_wakeup = NULL;
02252   process->sleep_end = 0;
02253 
02254   process->current_local_env = NULL;
02255 
02256   process->external_break = 0;
02257 
02258   process->ran_some = 1;
02259 
02260   process->list_stack = NULL;
02261 
02262   scheme_gmp_tls_init(process->gmp_tls);
02263   
02264   if (prefix) {
02265     process->next = scheme_first_thread;
02266     process->prev = NULL;
02267     process->next->prev = process;
02268     scheme_first_thread = process;
02269   }
02270 
02271   {
02272     Scheme_Object **tb;
02273     tb = MALLOC_N(Scheme_Object *, buffer_init_size);
02274     process->tail_buffer = tb;
02275   }
02276   process->tail_buffer_size = buffer_init_size;
02277  
02278   {
02279     int init_stack_size;
02280     Scheme_Object *iss;
02281 
02282     iss = scheme_get_thread_param(config, cells, MZCONFIG_THREAD_INIT_STACK_SIZE);
02283     if (SCHEME_INTP(iss))
02284       init_stack_size = SCHEME_INT_VAL(iss);
02285     else if (SCHEME_BIGNUMP(iss))
02286       init_stack_size = 0x7FFFFFFF;
02287     else
02288       init_stack_size = DEFAULT_INIT_STACK_SIZE;
02289     
02290     /* A too-large stack size won't help performance.
02291        A too-small stack size is unsafe for certain kinds of
02292        tail calls. */
02293     if (init_stack_size > MAX_INIT_STACK_SIZE)
02294       init_stack_size = MAX_INIT_STACK_SIZE;
02295     if (init_stack_size < SCHEME_TAIL_COPY_THRESHOLD)
02296       init_stack_size = SCHEME_TAIL_COPY_THRESHOLD;
02297 
02298     process->runstack_size = init_stack_size;
02299     {
02300       Scheme_Object **sa;
02301       sa = scheme_alloc_runstack(init_stack_size);
02302       process->runstack_start = sa;
02303     }
02304     process->runstack = process->runstack_start + init_stack_size;
02305   }
02306   
02307   process->runstack_saved = NULL;
02308 
02309 #ifdef RUNSTACK_IS_GLOBAL
02310   if (!prefix) {
02311 # ifndef MZ_PRECISE_GC
02312     /* Precise GC: we intentionally don't register MZ_RUNSTACK. See done_with_GC() */
02313     REGISTER_SO(MZ_RUNSTACK);
02314 # endif
02315     REGISTER_SO(MZ_RUNSTACK_START);
02316 
02317     MZ_RUNSTACK = process->runstack;
02318     MZ_RUNSTACK_START = process->runstack_start;
02319     MZ_CONT_MARK_STACK = process->cont_mark_stack;
02320     MZ_CONT_MARK_POS = process->cont_mark_pos;
02321   }
02322 #endif
02323 
02324   process->on_kill = NULL;
02325 
02326   process->user_tls = NULL;
02327   process->user_tls_size = 0;
02328   
02329   process->nester = process->nestee = NULL;
02330 
02331   process->mbox_first = NULL;
02332   process->mbox_last = NULL;
02333   process->mbox_sema = NULL;
02334 
02335   process->mref = NULL;
02336   process->extra_mrefs = NULL;
02337 
02338     
02339 
02340   /* A thread points to a lot of stuff, so it's bad to put a finalization
02341      on it, which is what registering with a custodian does. Instead, we
02342      register a weak indirection with the custodian. That way, the thread
02343      (and anything it points to) can be collected one GC cycle earlier. 
02344 
02345      It's possible that the thread will be collected before the indirection
02346      record, so when we use the indirection (e.g., in custodian traversals),
02347      we'll need to check for NULL. */
02348   {
02349     Scheme_Thread_Custodian_Hop *hop;
02350     Scheme_Custodian_Reference *mref;
02351     hop = MALLOC_ONE_WEAK_RT(Scheme_Thread_Custodian_Hop);
02352     process->mr_hop = hop;
02353     hop->so.type = scheme_thread_hop_type;
02354     {
02355       Scheme_Thread *wp;
02356       wp = (Scheme_Thread *)WEAKIFY((Scheme_Object *)process);
02357       hop->p = wp;
02358     }
02359 
02360     mref = scheme_add_managed(mgr, (Scheme_Object *)hop, NULL, NULL, 0);
02361     process->mref = mref;
02362     process->extra_mrefs = scheme_null;
02363 
02364 #ifndef MZ_PRECISE_GC
02365     scheme_weak_reference((void **)(void *)&hop->p);
02366 #endif
02367   }
02368 
02369   return process;
02370 }
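
/* The custodian-to-thread indirection built above, pictured (illustrative):

       m->boxes[i] ..(weak box)..> hop (Scheme_Thread_Custodian_Hop)
                                    |
                                    +..(weak hop->p)..> the thread itself

   The custodian reaches the thread only through two weak links, so an
   otherwise-unreferenced thread, and everything it points to, can be
   collected one GC cycle earlier than a finalized thread could be;
   traversals such as for_each_managed() and scheme_do_close_managed()
   therefore check WEAKIFIED(hop->p) for NULL before using it. */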
02371 
02372 Scheme_Thread *scheme_make_thread(void *stack_base)
02373 {
02374   /* Makes the initial process. */
02375   return make_thread(NULL, NULL, NULL, NULL, stack_base);
02376 }
02377 
02378 static void scheme_check_tail_buffer_size(Scheme_Thread *p)
02379 {
02380   if (p->tail_buffer_size < buffer_init_size) {
02381     Scheme_Object **tb;
02382     tb = MALLOC_N(Scheme_Object *, buffer_init_size);
02383     p->tail_buffer = tb;
02384     p->tail_buffer_size = buffer_init_size;
02385   }
02386 }
02387 
02388 void scheme_set_tail_buffer_size(int s)
02389 {
02390   if (s > buffer_init_size) {
02391     Scheme_Thread *p;
02392 
02393     buffer_init_size = s;
02394 
02395     for (p = scheme_first_thread; p; p = p->next) {
02396       scheme_check_tail_buffer_size(p);
02397     }
02398   }
02399 }
02400 
02401 int scheme_tls_allocate()
02402 {
02403   return tls_pos++;
02404 }
02405 
02406 void scheme_tls_set(int pos, void *v)
02407 {
02408   Scheme_Thread *p = scheme_current_thread;
02409 
02410   if (p->user_tls_size <= pos) {
02411     int oldc = p->user_tls_size;
02412     void **old_tls = p->user_tls, **va;
02413 
02414     p->user_tls_size = tls_pos;
02415     va = MALLOC_N(void*, tls_pos);
02416     p->user_tls = va;
02417     while (oldc--) {
02418       p->user_tls[oldc] = old_tls[oldc];
02419     }
02420   }
02421 
02422   p->user_tls[pos] = v;
02423 }
02424 
02425 void *scheme_tls_get(int pos)
02426 {
02427   Scheme_Thread *p = scheme_current_thread;
02428 
02429   if (p->user_tls_size <= pos)
02430     return NULL;
02431   else
02432     return p->user_tls[pos];
02433 }
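
/* Illustrative sketch (not part of the original thread.c) of the
   per-thread TLS API defined above, as an embedding might use it;
   `my_tls_pos', `my_init', and the state being stored are hypothetical.

     static int my_tls_pos;

     void my_init(void)
     {
       my_tls_pos = scheme_tls_allocate();     once, at startup
     }

     void my_remember(void *state)
     {
       scheme_tls_set(my_tls_pos, state);      stored per MzScheme thread
     }

     void *my_recall(void)
     {
       return scheme_tls_get(my_tls_pos);      NULL if never set in this thread
     }
*/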
02434 
02435 #ifdef MZ_XFORM
02436 START_XFORM_SKIP;
02437 #endif
02438 
02439 Scheme_Object **scheme_alloc_runstack(long len)
02440 {
02441 #ifdef MZ_PRECISE_GC
02442   long sz;
02443   void **p;
02444   sz = sizeof(Scheme_Object*) * (len + 4);
02445   p = (void **)GC_malloc_tagged_allow_interior(sz);
02446   *(Scheme_Type *)(void *)p = scheme_rt_runstack;
02447   ((long *)(void *)p)[1] = gcBYTES_TO_WORDS(sz);
02448   ((long *)(void *)p)[2] = 0;
02449   ((long *)(void *)p)[3] = len;
02450   return (Scheme_Object **)(p + 4);
02451 #else
02452   return (Scheme_Object **)scheme_malloc_allow_interior(sizeof(Scheme_Object*) * len);
02453 #endif
02454 }
02455 
02456 void scheme_set_runstack_limits(Scheme_Object **rs, long len, long start, long end)
02457 /* With 3m, we can tell the GC not to scan the unused parts, and we
02458    can have the fixup function zero out the unused parts; that avoids
02459    writing and scanning pages that could be skipped for a minor
02460    GC. For CGC, we have to just clear out the unused part. */
02461 {
02462 #ifdef MZ_PRECISE_GC
02463   if (((long *)(void *)rs)[-2] != start)
02464     ((long *)(void *)rs)[-2] = start;
02465   if (((long *)(void *)rs)[-1] != end)
02466     ((long *)(void *)rs)[-1] = end;
02467 #else
02468   memset(rs, 0, start * sizeof(Scheme_Object *));
02469   memset(rs + end, 0, (len - end) * sizeof(Scheme_Object *));
02470 #endif
02471 }
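
/* Layout implied by the two functions above under MZ_PRECISE_GC
   (illustrative): scheme_alloc_runstack() prefixes the runstack with a
   four-word header and returns a pointer just past it,

       word 0: scheme_rt_runstack type tag
       word 1: object size in words
       word 2: start of the live region (initially 0)
       word 3: end of the live region (initially len)

   so scheme_set_runstack_limits() can shrink the scanned window simply by
   writing rs[-2] and rs[-1], and the GC scans (and its fixup pass zeroes)
   only start..end.  Under CGC there is no header, and the unused parts are
   just memset to zero instead. */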
02472 
02473 #ifdef MZ_XFORM
02474 END_XFORM_SKIP;
02475 #endif
02476 
02477 /*========================================================================*/
02478 /*                     thread creation and swapping                       */
02479 /*========================================================================*/
02480 
02481 int scheme_in_main_thread(void)
02482 {
02483   return !scheme_current_thread->next;
02484 }
02485 
02486 static void stash_current_marks()
02487 {
02488   Scheme_Object *m;
02489   m = scheme_current_continuation_marks(scheme_current_thread->returned_marks);
02490   scheme_current_thread->returned_marks = m;
02491   swap_target = scheme_current_thread->return_marks_to;
02492   scheme_current_thread->return_marks_to = NULL;
02493 }
02494 
02495 static void do_swap_thread()
02496 {
02497  start:
02498 
02499   scheme_zero_unneeded_rands(scheme_current_thread);
02500 
02501 #if WATCH_FOR_NESTED_SWAPS
02502   if (swapping)
02503     printf("death\n");
02504   swapping = 1;
02505 #endif
02506 
02507   if (!swap_no_setjmp && SETJMP(scheme_current_thread)) {
02508     /* We're back! */
02509     /* See also initial swap in in start_child() */
02510     thread_swap_count++;
02511 #ifdef RUNSTACK_IS_GLOBAL
02512     MZ_RUNSTACK = scheme_current_thread->runstack;
02513     MZ_RUNSTACK_START = scheme_current_thread->runstack_start;
02514     MZ_CONT_MARK_STACK = scheme_current_thread->cont_mark_stack;
02515     MZ_CONT_MARK_POS = scheme_current_thread->cont_mark_pos;
02516 #endif
02517     RESETJMP(scheme_current_thread);
02518 #if WATCH_FOR_NESTED_SWAPS
02519     swapping = 0;
02520 #endif
02521     scheme_gmp_tls_unload(scheme_current_thread->gmp_tls, scheme_current_thread->gmp_tls_data);
02522     scheme_current_thread->gmp_tls_data = NULL;
02523 
02524     {
02525       Scheme_Object *l, *o;
02526       Scheme_Closure_Func f;
02527       for (l = thread_swap_callbacks; SCHEME_RPAIRP(l); l = SCHEME_CDR(l)) {
02528        o = SCHEME_CAR(l);
02529        f = SCHEME_CLOS_FUNC(o);
02530        o = SCHEME_CLOS_DATA(o);
02531        f(o);
02532       }
02533     }
02534     if ((scheme_current_thread->runstack_owner
02535         && ((*scheme_current_thread->runstack_owner) != scheme_current_thread))
02536        || (scheme_current_thread->cont_mark_stack_owner
02537            && ((*scheme_current_thread->cont_mark_stack_owner) != scheme_current_thread))) {
02538       scheme_takeover_stacks(scheme_current_thread);
02539     }
02540 
02541     {
02542       long cpm;
02543       cpm = scheme_get_process_milliseconds();
02544       scheme_current_thread->current_start_process_msec = cpm;
02545     }
02546 
02547     if (scheme_current_thread->return_marks_to) {
02548       stash_current_marks();
02549       goto start;
02550     }
02551   } else {
02552     Scheme_Thread *new_thread = swap_target;
02553 
02554     {
02555       long cpm;
02556       cpm = scheme_get_process_milliseconds();
02557       scheme_current_thread->accum_process_msec += (cpm - scheme_current_thread->current_start_process_msec);
02558     }
02559 
02560     swap_target = NULL;
02561 
02562     swap_no_setjmp = 0;
02563 
02564     /* We're leaving... */
02565 
02566     {
02567       Scheme_Object *l, *o;
02568       Scheme_Closure_Func f;
02569       for (l = thread_swap_out_callbacks; SCHEME_RPAIRP(l); l = SCHEME_CDR(l)) {
02570        o = SCHEME_CAR(l);
02571        f = SCHEME_CLOS_FUNC(o);
02572        o = SCHEME_CLOS_DATA(o);
02573        f(o);
02574       }
02575     }
02576 
02577     if (scheme_current_thread->init_break_cell) {
02578       int cb;
02579       cb = can_break_param(scheme_current_thread);
02580       scheme_current_thread->can_break_at_swap = cb;
02581     }
02582     {
02583       GC_CAN_IGNORE void *data;
02584       data = scheme_gmp_tls_load(scheme_current_thread->gmp_tls);
02585       scheme_current_thread->gmp_tls_data = data;
02586     }
02587 #ifdef RUNSTACK_IS_GLOBAL
02588     scheme_current_thread->runstack = MZ_RUNSTACK;
02589     scheme_current_thread->runstack_start = MZ_RUNSTACK_START;
02590     scheme_current_thread->cont_mark_stack = MZ_CONT_MARK_STACK;
02591     scheme_current_thread->cont_mark_pos = MZ_CONT_MARK_POS;
02592 #endif
02593     scheme_current_thread = new_thread;
02594 
02595     /* Fixup current pointers in thread sets */
02596     if (!scheme_current_thread->return_marks_to) {
02597       Scheme_Thread_Set *t_set = new_thread->t_set_parent;
02598       t_set->current = (Scheme_Object *)new_thread;
02599       while (t_set->parent) {
02600        t_set->parent->current = (Scheme_Object *)t_set;
02601        t_set = t_set->parent;
02602       }
02603     }
02604 
02605     LONGJMP(scheme_current_thread);
02606   }
02607 }
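
/* The swap protocol above, summarized (illustrative): the outgoing thread
   saves its runstack and continuation-mark registers plus its gmp state,
   SETJMPs, and LONGJMPs into `swap_target'.  When a later swap LONGJMPs
   back, control resumes in the "We're back!" branch, which restores the
   registers, runs the thread_swap_callbacks, reclaims any shared stacks
   via scheme_takeover_stacks(), and restarts process-time accounting.
   swap_no_setjmp is set by remove_thread() so that a dead thread can be
   jumped away from without saving a context it will never resume. */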
02608 
02609 void scheme_swap_thread(Scheme_Thread *new_thread)
02610 {
02611   swap_target = new_thread;
02612   new_thread = NULL;
02613   do_swap_thread();
02614 }
02615 
02616 static void select_thread()
02617 {
02618   Scheme_Thread *new_thread;
02619   Scheme_Object *o;
02620   Scheme_Thread_Set *t_set;
02621 
02622   /* Try to pick a next thread to avoid DOS attacks
02623      through whatever kinds of things call select_thread() */
02624   o = (Scheme_Object *)scheme_thread_set_top;
02625   while (!SCHEME_THREADP(o)) {
02626     t_set = (Scheme_Thread_Set *)o;
02627     o = get_t_set_next(t_set->current);
02628     if (!o)
02629       o = t_set->first;
02630   }
02631   /* It's possible that o won't work out. So o is a suggestion for the
02632      new thread, but the loop below will pick a definitely suitable
02633      thread. */
02634   
02635   new_thread = (Scheme_Thread *)o;
02636   do {
02637     if (!new_thread)
02638       new_thread = scheme_first_thread;
02639     
02640     /* Can't swap in a thread with a nestee: */
02641     while (new_thread 
02642           && (new_thread->nestee
02643               || (new_thread->running & MZTHREAD_SUSPENDED)
02644               /* USER_SUSPENDED should only happen if new_thread is the main thread
02645                 or if the thread has MZTHREAD_NEED_SUSPEND_CLEANUP */
02646               || ((new_thread->running & MZTHREAD_USER_SUSPENDED)
02647                  && !(new_thread->running & MZTHREAD_NEED_SUSPEND_CLEANUP)))) {
02648       new_thread = new_thread->next;
02649     }
02650 
02651     if (!new_thread && !o) {
02652       /* The main thread must be blocked on a nestee, and everything
02653         else is suspended. But we have to go somewhere.  Weakly
02654         resume the main thread's innermost nestee. If it's
02655          suspended by the user, then we've deadlocked. */
02656       new_thread = scheme_main_thread;
02657       while (new_thread->nestee) {
02658        new_thread = new_thread->nestee;
02659       }
02660       if ((new_thread->running & MZTHREAD_USER_SUSPENDED)
02661          && !(new_thread->running & MZTHREAD_NEED_SUSPEND_CLEANUP)) {
02662         if (post_system_idle()) {
02663           /* Aha! Someone was waiting for us to do nothing. Try again... */
02664         } else {
02665           scheme_console_printf("unbreakable deadlock\n");
02666           if (scheme_exit)
02667             scheme_exit(1);
02668           /* We really have to exit: */
02669           exit(1);
02670         }
02671       } else {
02672        scheme_weak_resume_thread(new_thread);
02673       }
02674       break;
02675     }
02676     o = NULL;
02677   } while (!new_thread);
02678 
02679   swap_target = new_thread;
02680   new_thread = NULL;
02681   o = NULL;
02682   t_set = NULL;
02683   do_swap_thread();
02684 }
02685 
02686 static void thread_is_dead(Scheme_Thread *r)
02687 {
02688   if (r->dead_box) {
02689     Scheme_Object *o;
02690     o = SCHEME_PTR_VAL(r->dead_box);
02691     scheme_post_sema_all(o);
02692   }
02693   if (r->running_box) {
02694     SCHEME_PTR_VAL(r->running_box) = NULL;
02695     r->running_box = NULL;
02696   }
02697   r->suspended_box = NULL;
02698   r->resumed_box = NULL;
02699   
02700   r->list_stack = NULL;
02701 
02702   r->dw = NULL;
02703   r->init_config = NULL;
02704   r->cell_values = NULL;
02705   r->init_break_cell = NULL;
02706   r->cont_mark_stack_segments = NULL;
02707   r->overflow = NULL;
02708 
02709   r->blocker = NULL;
02710 
02711   r->transitive_resumes = NULL;
02712   
02713   r->error_buf = NULL;
02714 
02715   r->spare_runstack = NULL;
02716 
02717   r->mbox_first = NULL;
02718   r->mbox_last = NULL;
02719   r->mbox_sema = NULL;
02720 }
02721 
02722 static void remove_thread(Scheme_Thread *r)
02723 {
02724   Scheme_Saved_Stack *saved;
02725   Scheme_Object *l;
02726 
02727   r->running = 0;
02728 
02729   if (r->prev) {
02730     r->prev->next = r->next;
02731     r->next->prev = r->prev;
02732   } else if (r->next) {
02733     r->next->prev = NULL;
02734     scheme_first_thread = r->next;
02735   }
02736   r->next = r->prev = NULL;
02737 
02738   unschedule_in_set((Scheme_Object *)r, r->t_set_parent);
02739   
02740 #ifdef RUNSTACK_IS_GLOBAL
02741   if (r == scheme_current_thread) {
02742     r->runstack = MZ_RUNSTACK;
02743     MZ_RUNSTACK = NULL;
02744     r->runstack_start = MZ_RUNSTACK_START;
02745     MZ_RUNSTACK_START = NULL;
02746     r->cont_mark_stack = MZ_CONT_MARK_STACK;
02747     r->cont_mark_pos = MZ_CONT_MARK_POS;
02748   }
02749 #endif
02750 
02751   if (r->runstack_owner) {
02752     /* Drop ownership, if active, and clear the stack */
02753     if (r == *(r->runstack_owner)) {
02754       if (r->runstack_start) {
02755         scheme_set_runstack_limits(r->runstack_start, r->runstack_size, 0, 0);
02756         r->runstack_start = NULL;
02757       }
02758       for (saved = r->runstack_saved; saved; saved = saved->prev) {
02759         scheme_set_runstack_limits(saved->runstack_start, saved->runstack_size, 0, 0);
02760       }
02761       r->runstack_saved = NULL;
02762       *(r->runstack_owner) = NULL;
02763       r->runstack_owner = NULL;
02764     }
02765   } else {
02766     /* Only this thread used the runstack, so clear/free it
02767        as aggressively as possible */
02768 #if defined(SENORA_GC_NO_FREE) || defined(MZ_PRECISE_GC)
02769     memset(r->runstack_start, 0, r->runstack_size * sizeof(Scheme_Object*));
02770 #else
02771     GC_free(r->runstack_start);
02772 #endif
02773     r->runstack_start = NULL;
02774     for (saved = r->runstack_saved; saved; saved = saved->prev) {
02775 #if defined(SENORA_GC_NO_FREE) || defined(MZ_PRECISE_GC)
02776       memset(saved->runstack_start, 0, saved->runstack_size * sizeof(Scheme_Object*));
02777 #else
02778       GC_free(saved->runstack_start);
02779 #endif
02780       saved->runstack_start = NULL;
02781     }
02782   }
02783 
02784   r->runstack = NULL;
02785   r->runstack_swapped = NULL;
02786 
02787   if (r->cont_mark_stack_owner
02788       && ((*r->cont_mark_stack_owner) == r)) {
02789     *r->cont_mark_stack_owner = NULL;
02790   }
02791 
02792   r->cont_mark_stack = 0;
02793   r->cont_mark_stack_owner = NULL;
02794   r->cont_mark_stack_swapped = NULL;
02795 
02796   r->ku.apply.tail_rator = NULL;
02797   r->ku.apply.tail_rands = NULL;
02798   r->tail_buffer = NULL;
02799   r->ku.multiple.array = NULL;
02800   r->values_buffer = NULL;
02801 
02802 #ifndef SENORA_GC_NO_FREE
02803   if (r->list_stack)
02804     GC_free(r->list_stack);
02805 #endif
02806 
02807   thread_is_dead(r);
02808 
02809   /* In case we kill a thread while in a bignum operation: */
02810   scheme_gmp_tls_restore_snapshot(r->gmp_tls, r->gmp_tls_data, 
02811                                   NULL, ((r == scheme_current_thread) ? 1 : 2));
02812 
02813   if (r == scheme_current_thread) {
02814     /* We're going to be swapped out immediately. */
02815     swap_no_setjmp = 1;
02816   } else
02817     RESETJMP(r);
02818 
02819   scheme_remove_managed(r->mref, (Scheme_Object *)r->mr_hop);
02820   for (l = r->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
02821     scheme_remove_managed((Scheme_Custodian_Reference *)SCHEME_CAR(l), (Scheme_Object *)r->mr_hop);
02822   }
02823   r->extra_mrefs = scheme_null;
02824 }
02825 
02826 void scheme_end_current_thread(void)
02827 {
02828   remove_thread(scheme_current_thread);
02829   
02830   thread_ended_with_activity = 1;
02831   
02832   if (scheme_notify_multithread && !scheme_first_thread->next) {
02833     scheme_notify_multithread(0);
02834     have_activity = 0;
02835   }
02836   
02837   select_thread();
02838 }
02839 
02840 static void start_child(Scheme_Thread * volatile child,
02841                      Scheme_Object * volatile child_eval)
02842 {
02843   if (SETJMP(child)) {
02844     /* Initial swap in: */
02845     Scheme_Object * volatile result = NULL;
02846 
02847     thread_swap_count++;
02848 #ifdef RUNSTACK_IS_GLOBAL
02849     MZ_RUNSTACK = scheme_current_thread->runstack;
02850     MZ_RUNSTACK_START = scheme_current_thread->runstack_start;
02851     MZ_CONT_MARK_STACK = scheme_current_thread->cont_mark_stack;
02852     MZ_CONT_MARK_POS = scheme_current_thread->cont_mark_pos;
02853 #endif
02854     scheme_gmp_tls_unload(scheme_current_thread->gmp_tls, scheme_current_thread->gmp_tls_data);
02855     scheme_current_thread->gmp_tls_data = NULL;
02856     {
02857       Scheme_Object *l, *o;
02858       Scheme_Closure_Func f;
02859       for (l = thread_swap_callbacks; SCHEME_RPAIRP(l); l = SCHEME_CDR(l)) {
02860        o = SCHEME_CAR(l);
02861        f = SCHEME_CLOS_FUNC(o);
02862        o = SCHEME_CLOS_DATA(o);
02863        f(o);
02864       }
02865     }
02866 
02867     {
02868       long cpm;
02869       cpm = scheme_get_process_milliseconds();
02870       scheme_current_thread->current_start_process_msec = cpm;
02871     }
02872 
02873     RESETJMP(child);
02874 
02875 #if WATCH_FOR_NESTED_SWAPS
02876     swapping = 0;
02877 #endif
02878 
02879     if (scheme_current_thread->running & MZTHREAD_KILLED) {
02880       /* This thread is dead! Give up now. */
02881       exit_or_escape(scheme_current_thread);
02882     }
02883 
02884     if (scheme_current_thread->return_marks_to) {
02885       stash_current_marks();
02886       do_swap_thread();
02887     }
02888 
02889     {
02890       mz_jmp_buf newbuf;
02891       scheme_current_thread->error_buf = &newbuf;
02892       if (!scheme_setjmp(newbuf)) {
02893        /* Run the main thunk: */
02894        /* (checks for break before doing anything else) */
02895        result = scheme_apply_thread_thunk(child_eval);
02896       }
02897     }
02898 
02899     /* !! At this point, scheme_current_thread can turn out to be a
02900        different thread, which invoked the original thread's
02901        continuation. */
02902 
02903     /* If we still have a meta continuation, then it means we
02904        should be resuming at a prompt, not exiting. */
02905     while (scheme_current_thread->meta_continuation) {
02906       Scheme_Thread *p = scheme_current_thread;
02907       Scheme_Overflow *oflow;
02908 
02909       p->cjs.val = result;
02910 
02911       if (!SAME_OBJ(p->meta_continuation->prompt_tag, scheme_default_prompt_tag)) {
02912         scheme_signal_error("thread ended with meta continuation that isn't for the default prompt");
02913       } else {
02914         Scheme_Meta_Continuation *mc;
02915         mc = p->meta_continuation;
02916         oflow = mc->overflow;
02917         p->meta_continuation = mc->next;
02918         if (!oflow->eot) {
02919           p->stack_start = oflow->stack_start;
02920           p->decompose_mc = mc;
02921           scheme_longjmpup(&oflow->jmp->cont);
02922         }
02923       }
02924     }
02925 
02926     scheme_end_current_thread();
02927 
02928     /* Shouldn't get here! */
02929     scheme_signal_error("bad thread switch");
02930   }
02931 }
02932 
02933 static Scheme_Object *make_subprocess(Scheme_Object *child_thunk,
02934                                   void *child_start, 
02935                                   Scheme_Config *config,
02936                                   Scheme_Thread_Cell_Table *cells,
02937                                   Scheme_Object *break_cell,
02938                                   Scheme_Custodian *mgr,
02939                                   int normal_kill)
02940 {
02941   Scheme_Thread *child;
02942   int turn_on_multi;
02943  
02944   turn_on_multi = !scheme_first_thread->next;
02945   
02946   if (!config)
02947     config = scheme_current_config();
02948   if (!cells)
02949     cells = scheme_inherit_cells(NULL);
02950   if (!break_cell) {
02951     break_cell = scheme_current_break_cell();
02952     if (SAME_OBJ(break_cell, maybe_recycle_cell))
02953       maybe_recycle_cell = NULL;
02954   }
02955 
02956   child = make_thread(config, cells, break_cell, mgr, child_start);
02957 
02958   /* Use child_thunk name, if any, for the thread name: */
02959   {
02960     Scheme_Object *sym;
02961     const char *s;
02962     int len;
02963     
02964     s = scheme_get_proc_name(child_thunk, &len, -1);
02965     if (s)  {
02966       if (len < 0)
02967        sym = (Scheme_Object *)s;
02968       else
02969        sym = scheme_intern_exact_symbol(s, len);
02970       child->name = sym;
02971     }
02972   }
02973 
02974   {
02975     Scheme_Object *v;
02976     v = scheme_thread_cell_get(break_cell, cells);
02977     child->can_break_at_swap = SCHEME_TRUEP(v);
02978   }
02979 
02980   if (!normal_kill)
02981     child->suspend_to_kill = 1;
02982 
02983   child->stack_start = child_start;
02984 
02985   /* Sets the child's jmpbuf for swapping in later: */
02986   start_child(child, child_thunk);
02987 
02988   if (scheme_notify_multithread && turn_on_multi) {
02989     scheme_notify_multithread(1);
02990     have_activity = 1;
02991   }
02992 
02993   SCHEME_USE_FUEL(1000);
02994   
02995   return (Scheme_Object *)child;
02996 }
02997 
02998 Scheme_Object *scheme_thread(Scheme_Object *thunk)
02999 {
03000   return scheme_thread_w_details(thunk, NULL, NULL, NULL, NULL, 0);
03001 }
03002 
03003 static Scheme_Object *sch_thread(int argc, Scheme_Object *args[])
03004 {
03005   scheme_check_proc_arity("thread", 0, 0, argc, args);
03006   scheme_custodian_check_available(NULL, "thread", "thread");
03007 
03008   return scheme_thread(args[0]);
03009 }
03010 
03011 static Scheme_Object *sch_thread_nokill(int argc, Scheme_Object *args[])
03012 {
03013   scheme_check_proc_arity("thread/suspend-to-kill", 0, 0, argc, args);
03014   scheme_custodian_check_available(NULL, "thread/suspend-to-kill", "thread");
03015 
03016   return scheme_thread_w_details(args[0], NULL, NULL, NULL, NULL, 1);
03017 }
03018 
03019 static Scheme_Object *sch_current(int argc, Scheme_Object *args[])
03020 {
03021   return (Scheme_Object *)scheme_current_thread;
03022 }
03023 
03024 static Scheme_Object *thread_p(int argc, Scheme_Object *args[])
03025 {
03026   return SCHEME_THREADP(args[0]) ? scheme_true : scheme_false;
03027 }
03028 
03029 static Scheme_Object *thread_running_p(int argc, Scheme_Object *args[])
03030 {
03031   int running;
03032 
03033   if (!SCHEME_THREADP(args[0]))
03034     scheme_wrong_type("thread-running?", "thread", 0, argc, args);
03035 
03036   running = ((Scheme_Thread *)args[0])->running;
03037 
03038   return ((MZTHREAD_STILL_RUNNING(running) && !(running & MZTHREAD_USER_SUSPENDED))
03039          ? scheme_true 
03040          : scheme_false);
03041 }
03042 
03043 static Scheme_Object *thread_dead_p(int argc, Scheme_Object *args[])
03044 {
03045   int running;
03046 
03047   if (!SCHEME_THREADP(args[0]))
03048     scheme_wrong_type("thread-dead?", "thread", 0, argc, args);
03049 
03050   running = ((Scheme_Thread *)args[0])->running;
03051 
03052   return MZTHREAD_STILL_RUNNING(running) ? scheme_false : scheme_true;
03053 }
03054 
03055 static int thread_wait_done(Scheme_Object *p, Scheme_Schedule_Info *sinfo)
03056 {
03057   int running = ((Scheme_Thread *)p)->running;
03058   if (MZTHREAD_STILL_RUNNING(running)) {
03059     /* Replace the direct thread reference with an event, so that
03060        the blocking thread can be dequeued: */
03061     Scheme_Object *evt;
03062     evt = scheme_get_thread_dead((Scheme_Thread *)p);
03063     scheme_set_sync_target(sinfo, evt, p, NULL, 0, 0, NULL);
03064     return 0;
03065   } else
03066     return 1;
03067 }
03068 
03069 static Scheme_Object *thread_wait(int argc, Scheme_Object *args[])
03070 {
03071   Scheme_Thread *p;
03072 
03073   if (!SCHEME_THREADP(args[0]))
03074     scheme_wrong_type("thread-wait", "thread", 0, argc, args);
03075 
03076   p = (Scheme_Thread *)args[0];
03077 
03078   if (MZTHREAD_STILL_RUNNING(p->running)) {
03079     sch_sync(1, args);
03080   }
03081 
03082   return scheme_void;
03083 }
03084 
03085 static void register_thread_sync()
03086 {
03087   scheme_add_evt(scheme_thread_type, 
03088                  (Scheme_Ready_Fun)thread_wait_done, 
03089                  NULL, NULL, 0);
03090 }
03091 
03092 void scheme_add_swap_callback(Scheme_Closure_Func f, Scheme_Object *data)
03093 {
03094   Scheme_Object *p;
03095 
03096   p = scheme_make_raw_pair((Scheme_Object *)f, data);
03097   thread_swap_callbacks = scheme_make_pair(p, thread_swap_callbacks);
03098 }
03099 
03100 void scheme_add_swap_out_callback(Scheme_Closure_Func f, Scheme_Object *data)
03101 {
03102   Scheme_Object *p;
03103 
03104   p = scheme_make_raw_pair((Scheme_Object *)f, data);
03105   thread_swap_out_callbacks = scheme_make_pair(p, thread_swap_out_callbacks);
03106 }
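
/* --- Editor's sketch (assumption-laden; not part of the original source) ---
   Registering swap callbacks. This assumes a Scheme_Closure_Func receives
   the `data' object supplied at registration time and that its result is
   ignored by the swap machinery; `note_swap' and `install_swap_hooks' are
   hypothetical. */

static Scheme_Object *note_swap(Scheme_Object *data)
{
  /* e.g., save or restore per-thread C state associated with `data' */
  return scheme_void;
}

static void install_swap_hooks(Scheme_Object *data)
{
  scheme_add_swap_callback((Scheme_Closure_Func)note_swap, data);      /* when a thread is swapped in */
  scheme_add_swap_out_callback((Scheme_Closure_Func)note_swap, data);  /* when a thread is swapped out */
}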
03107 
03108 /**************************************************************************/
03109 /* Ensure that a new thread has a reasonable starting stack */
03110 
03111 #ifdef DO_STACK_CHECK
03112 # define THREAD_STACK_SPACE (STACK_SAFETY_MARGIN / 2)
03113 void scheme_check_stack_ok(char *s); /* prototype, needed for PalmOS */
03114 
03115 void scheme_check_stack_ok(char *s) {
03116 # include "mzstkchk.h"
03117   {
03118     s[THREAD_STACK_SPACE] = 1;
03119   } else {
03120     s[THREAD_STACK_SPACE] = 0;
03121   }
03122 }
03123 
03124 static int is_stack_too_shallow2(void)
03125 {
03126   char s[THREAD_STACK_SPACE+1];
03127   
03128   scheme_check_stack_ok(s);
03129   return s[THREAD_STACK_SPACE];
03130 }
03131 
03132 int scheme_is_stack_too_shallow(void)
03133 /* Make sure this function isn't inlined, mainly because
03134    is_stack_too_shallow2() can get inlined, and it adds a lot
03135    to the stack. */
03136 {
03137 #  include "mzstkchk.h"
03138   {
03139     return 1;
03140   }
03141   return is_stack_too_shallow2();
03142 }
03143 
03144 static Scheme_Object *thread_k(void)
03145 {
03146   Scheme_Thread *p = scheme_current_thread;
03147   Scheme_Object *thunk, *result, *break_cell;
03148   Scheme_Config *config;
03149   Scheme_Custodian *mgr;
03150   Scheme_Thread_Cell_Table *cells;
03151   int suspend_to_kill = p->ku.k.i1;
03152   
03153   thunk = (Scheme_Object *)p->ku.k.p1;
03154   config = (Scheme_Config *)p->ku.k.p2;
03155   mgr = (Scheme_Custodian *)p->ku.k.p3;
03156   cells = (Scheme_Thread_Cell_Table *)SCHEME_CAR((Scheme_Object *)p->ku.k.p4);
03157   break_cell = SCHEME_CDR((Scheme_Object *)p->ku.k.p4);
03158 
03159   p->ku.k.p1 = NULL;
03160   p->ku.k.p2 = NULL;
03161   p->ku.k.p3 = NULL;
03162   p->ku.k.p4 = NULL;
03163   
03164   result = make_subprocess(thunk, PROMPT_STACK(result),
03165                         config, cells, break_cell, mgr, !suspend_to_kill);
03166 
03167   /* Don't get rid of `result'; it keeps the
03168      Precise GC xformer from "optimizing" away
03169      the __gc_var_stack__ frame. */
03170   return result;
03171 }
03172 
03173 #endif /* DO_STACK_CHECK */
03174 
03175 Scheme_Object *scheme_thread_w_details(Scheme_Object *thunk, 
03176                                    Scheme_Config *config, 
03177                                    Scheme_Thread_Cell_Table *cells,
03178                                    Scheme_Object *break_cell,
03179                                    Scheme_Custodian *mgr, 
03180                                    int suspend_to_kill)
03181 {
03182   Scheme_Object *result;
03183   void *stack_marker;
03184 
03185 #ifdef DO_STACK_CHECK
03186   /* Make sure the thread starts out with a reasonable stack size, so
03187      it doesn't thrash right away: */
03188   if (scheme_is_stack_too_shallow()) {
03189     Scheme_Thread *p = scheme_current_thread;
03190 
03191     /* Don't mangle the stack if we're in atomic mode, because that
03192        probably means a MrEd HET trampoline, etc. */
03193     wait_until_suspend_ok();
03194 
03195     p->ku.k.p1 = thunk;
03196     p->ku.k.p2 = config;
03197     p->ku.k.p3 = mgr;
03198     result = scheme_make_pair((Scheme_Object *)cells, break_cell);
03199     p->ku.k.p4 = result;
03200     p->ku.k.i1 = suspend_to_kill;
03201 
03202     return scheme_handle_stack_overflow(thread_k);
03203   }
03204 #endif
03205 
03206   result = make_subprocess(thunk, PROMPT_STACK(stack_marker),
03207                         config, cells, break_cell, mgr, !suspend_to_kill);
03208 
03209   /* Don't get rid of `result'; it keeps the
03210      Precise GC xformer from "optimizing" away
03211      the __gc_var_stack__ frame. */
03212   return result;
03213 }
03214 
03215 /**************************************************************************/
03216 /* Nested threads */
03217 
03218 static Scheme_Object *def_nested_exn_handler(int argc, Scheme_Object *argv[])
03219 {
03220   if (scheme_current_thread->nester) {
03221     Scheme_Thread *p = scheme_current_thread;
03222     p->cjs.jumping_to_continuation = (Scheme_Object *)scheme_current_thread;
03223     p->cjs.val = argv[0];
03224     p->cjs.is_kill = 0;
03225     scheme_longjmp(*p->error_buf, 1);
03226   }
03227 
03228   return scheme_void; /* misuse of exception handler (wrong kind of thread or under prompt) */
03229 }
03230 
03231 MZ_DO_NOT_INLINE(Scheme_Object *scheme_call_as_nested_thread(int argc, Scheme_Object *argv[], void *max_bottom));
03232 
03233 Scheme_Object *scheme_call_as_nested_thread(int argc, Scheme_Object *argv[], void *max_bottom)
03234 {
03235   Scheme_Thread *p = scheme_current_thread;
03236   Scheme_Thread * volatile np;
03237   Scheme_Custodian *mgr;
03238   Scheme_Object * volatile v;
03239   mz_jmp_buf newbuf;
03240   volatile int failure;
03241 
03242   scheme_check_proc_arity("call-in-nested-thread", 0, 0, argc, argv);
03243   if (argc > 1) {
03244     if (SCHEME_CUSTODIANP(argv[1]))
03245       mgr = (Scheme_Custodian *)argv[1];
03246     else {
03247       scheme_wrong_type("call-in-nested-thread", "custodian", 1, argc, argv);
03248       return NULL;
03249     }
03250   } else
03251     mgr = (Scheme_Custodian *)scheme_get_param(scheme_current_config(), MZCONFIG_CUSTODIAN);
03252 
03253   scheme_custodian_check_available(mgr, "call-in-nested-thread", "thread");
03254 
03255   SCHEME_USE_FUEL(25);
03256 
03257   wait_until_suspend_ok();
03258 
03259   np = MALLOC_ONE_TAGGED(Scheme_Thread);
03260   np->so.type = scheme_thread_type;
03261 #ifdef MZ_PRECISE_GC
03262   GC_register_new_thread(np, mgr);
03263 #endif
03264   np->running = MZTHREAD_RUNNING;
03265   np->ran_some = 1;
03266 
03267 #ifdef RUNSTACK_IS_GLOBAL
03268   p->runstack = MZ_RUNSTACK;
03269   p->runstack_start = MZ_RUNSTACK_START;
03270   p->cont_mark_stack = MZ_CONT_MARK_STACK;
03271   p->cont_mark_pos = MZ_CONT_MARK_POS;
03272 #endif
03273 
03274   /* zero out anything we need now, because nestee disables
03275      GC cleaning for this thread: */
03276   prepare_this_thread_for_GC(p);
03277 
03278   if (!p->runstack_owner) {
03279     Scheme_Thread **owner;
03280     owner = MALLOC_N(Scheme_Thread *, 1);
03281     p->runstack_owner = owner;
03282     *owner = p;
03283   }
03284 
03285   np->runstack = p->runstack;
03286   np->runstack_start = p->runstack_start;
03287   np->runstack_size = p->runstack_size;
03288   np->runstack_saved = p->runstack_saved;
03289   np->runstack_owner = p->runstack_owner;
03290   *np->runstack_owner = np;
03291   np->stack_start = p->stack_start;
03292   np->engine_weight = p->engine_weight;
03293   {
03294     Scheme_Object **tb;
03295     tb = MALLOC_N(Scheme_Object *, p->tail_buffer_size);
03296     np->tail_buffer = tb;
03297   }
03298   np->tail_buffer_size = p->tail_buffer_size;
03299 
03300   np->list_stack = p->list_stack;
03301   np->list_stack_pos = p->list_stack_pos;
03302 
03303   scheme_gmp_tls_init(np->gmp_tls);
03304 
03305   /* np->prev = NULL; - 0ed by allocation */
03306   np->next = scheme_first_thread;
03307   scheme_first_thread->prev = np;
03308   scheme_first_thread = np;
03309 
03310   np->t_set_parent = p->t_set_parent;
03311   schedule_in_set((Scheme_Object *)np, np->t_set_parent);
03312 
03313   {
03314     Scheme_Thread_Cell_Table *cells;
03315     cells = scheme_inherit_cells(p->cell_values);
03316     np->cell_values = cells;
03317   }
03318   {
03319     Scheme_Config *config;
03320     config = scheme_current_config();
03321     np->init_config = config;
03322   }
03323   {
03324     int cb;
03325     Scheme_Object *bc;
03326     cb = scheme_can_break(p);
03327     p->can_break_at_swap = cb;
03328     bc = scheme_current_break_cell();
03329     np->init_break_cell = bc;
03330     if (SAME_OBJ(bc, maybe_recycle_cell))
03331       maybe_recycle_cell = NULL;
03332   }
03333   np->cont_mark_pos = (MZ_MARK_POS_TYPE)1;
03334   /* others 0ed already by allocation */
03335 
03336   check_ready_break();
03337 
03338   np->nester = p;
03339   p->nestee = np;
03340   np->external_break = p->external_break;
03341   p->external_break = 0;
03342 
03343   {
03344     Scheme_Thread_Custodian_Hop *hop;
03345     Scheme_Custodian_Reference *mref;
03346     hop = MALLOC_ONE_WEAK_RT(Scheme_Thread_Custodian_Hop);
03347     np->mr_hop = hop;
03348     hop->so.type = scheme_thread_hop_type;
03349     {
03350       Scheme_Thread *wp;
03351       wp = (Scheme_Thread *)WEAKIFY((Scheme_Object *)np);
03352       hop->p = wp;
03353     }
03354     mref = scheme_add_managed(mgr, (Scheme_Object *)hop, NULL, NULL, 0);
03355     np->mref = mref;
03356     np->extra_mrefs = scheme_null;
03357 #ifndef MZ_PRECISE_GC
03358     scheme_weak_reference((void **)(void *)&hop->p);
03359 #endif
03360   }
03361 
03362 #ifdef RUNSTACK_IS_GLOBAL
03363   MZ_CONT_MARK_STACK = np->cont_mark_stack;
03364   MZ_CONT_MARK_POS = np->cont_mark_pos;
03365 #endif
03366 
03367   scheme_current_thread = np;
03368 
03369   if (p != scheme_main_thread)
03370     scheme_weak_suspend_thread(p);
03371 
03372   if (!nested_exn_handler) {
03373     REGISTER_SO(nested_exn_handler);
03374     nested_exn_handler = scheme_make_prim_w_arity(def_nested_exn_handler,
03375                                                   "nested-thread-exception-handler",
03376                                                   1, 1);
03377   }
03378   scheme_set_cont_mark(scheme_exn_handler_key, nested_exn_handler);
03379 
03380   /* Call thunk, catch escape: */
03381   np->error_buf = &newbuf;
03382   if (scheme_setjmp(newbuf)) {
03383     if (!np->cjs.is_kill)
03384       v = np->cjs.val;
03385     else
03386       v = NULL;
03387     failure = 1;
03388   } else {
03389     v = scheme_apply(argv[0], 0, NULL);
03390     failure = 0;
03391   }
03392 
03393   scheme_remove_managed(np->mref, (Scheme_Object *)np->mr_hop);
03394   {
03395     Scheme_Object *l;
03396     for (l = np->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
03397       scheme_remove_managed((Scheme_Custodian_Reference *)SCHEME_CAR(l), 
03398                          (Scheme_Object *)np->mr_hop);
03399     }
03400   }
03401   np->extra_mrefs = scheme_null;
03402 #ifdef MZ_PRECISE_GC
03403   WEAKIFIED(np->mr_hop->p) = NULL;
03404 #else
03405   scheme_unweak_reference((void **)(void *)&np->mr_hop->p);
03406 #endif
03407   scheme_remove_all_finalization(np->mr_hop);
03408 
03409   if (np->prev)
03410     np->prev->next = np->next;
03411   else
03412     scheme_first_thread = np->next;
03413   np->next->prev = np->prev;
03414 
03415   np->next = NULL;
03416   np->prev = NULL;
03417 
03418   unschedule_in_set((Scheme_Object *)np, np->t_set_parent);
03419 
03420   np->running = 0;
03421 
03422   *p->runstack_owner = p;
03423 
03424   p->external_break = np->external_break;
03425   p->nestee = NULL;
03426   np->nester = NULL;
03427 
03428   thread_is_dead(np);
03429 
03430   scheme_current_thread = p;
03431 
03432   if (p != scheme_main_thread)
03433     scheme_weak_resume_thread(p);
03434 
03435 #ifdef RUNSTACK_IS_GLOBAL
03436   MZ_CONT_MARK_STACK = p->cont_mark_stack;
03437   MZ_CONT_MARK_POS = p->cont_mark_pos;
03438 #endif
03439 
03440   if ((p->running & MZTHREAD_KILLED)
03441       || (p->running & MZTHREAD_USER_SUSPENDED))
03442     scheme_thread_block(0.0);
03443 
03444   if (failure) {
03445     if (!v)
03446       scheme_raise_exn(MZEXN_FAIL, 
03447                      "call-in-nested-thread: the thread was killed, or it exited via the default error escape handler");
03448     else
03449       scheme_raise(v);
03450   }
03451 
03452   /* May have just moved a break to a breakable thread: */
03453   /* Check for external break again after swap or sleep */
03454   scheme_check_break_now();
03455 
03456   return v;
03457 }
03458 
03459 static Scheme_Object *call_as_nested_thread(int argc, Scheme_Object *argv[])
03460 {
03461   Scheme_Object *result;
03462   result = scheme_call_as_nested_thread(argc, argv, PROMPT_STACK(result));
03463   return result;
03464 }
03465 
03466 /*========================================================================*/
03467 /*                     thread scheduling and termination                  */
03468 /*========================================================================*/
03469 
03470 static int check_sleep(int need_activity, int sleep_now)
03471 /* Signals should be suspended */
03472 {
03473   Scheme_Thread *p, *p2;
03474   int end_with_act;
03475 
03476 #if defined(USING_FDS)
03477   DECL_FDSET(set, 3);
03478   fd_set *set1, *set2;
03479 #endif
03480   void *fds;
03481 
03482   if (scheme_no_stack_overflow)
03483     return 0;
03484   
03485   /* Is everything blocked? */
03486   if (!do_atomic) {
03487     p = scheme_first_thread;
03488     while (p) {
03489       if (!p->nestee
03490           && (p->ran_some || p->block_descriptor == NOT_BLOCKED)
03491           && (p->next || !(p->running & MZTHREAD_USER_SUSPENDED)))
03492        break;
03493       p = p->next;
03494     }
03495   } else
03496     p = NULL;
03497   
03498   p2 = scheme_first_thread;
03499   while (p2) {
03500     if (p2->ran_some) {
03501       scheme_notify_sleep_progress();
03502       p2->ran_some = 0;
03503     }
03504     p2 = p2->next;
03505   }
03506   
03507   end_with_act = thread_ended_with_activity;
03508   thread_ended_with_activity = 0;
03509   
03510   if (need_activity 
03511       && !end_with_act 
03512       && (do_atomic 
03513          || (!p && ((!sleep_now && scheme_wakeup_on_input)
03514                    || (sleep_now && scheme_sleep))))) {
03515     double max_sleep_time = 0;
03516 
03517     /* Poll from top-level process, and all subprocesses are blocked. */
03518     /* So, everything is blocked pending external input. */
03519     /* Build a list of file descriptors that we're waiting on */
03520     /* and turn off polling. */
03521     if (have_activity)
03522       scheme_active_but_sleeping = 1;
03523     if (have_activity && scheme_notify_multithread)
03524       scheme_notify_multithread(0);
03525     
03526 #if defined(USING_FDS)
03527     INIT_DECL_FDSET(set, 3);
03528     set1 = (fd_set *) MZ_GET_FDSET(set, 1);
03529     set2 = (fd_set *) MZ_GET_FDSET(set, 2);
03530 
03531     fds = (void *)set;
03532     MZ_FD_ZERO(set);
03533     MZ_FD_ZERO(set1);
03534     MZ_FD_ZERO(set2);
03535 #else
03536     fds = NULL;
03537 #endif
03538     
03539     needs_sleep_cancelled = 0;
03540 
03541     p = scheme_first_thread;
03542     while (p) {
03543       int merge_time = 0;
03544 
03545       if (p->nestee) {
03546        /* nothing */
03547       } else if (p->block_descriptor == GENERIC_BLOCKED) {
03548        if (p->block_needs_wakeup) {
03549          Scheme_Needs_Wakeup_Fun f = p->block_needs_wakeup;
03550          f(p->blocker, fds);
03551        }
03552        merge_time = (p->sleep_end > 0.0);
03553       } else if (p->block_descriptor == SLEEP_BLOCKED) {
03554        merge_time = 1;
03555       }
03556 
03557       if (merge_time) {
03558        double d = p->sleep_end;
03559        double t;
03560 
03561        d = (d - scheme_get_inexact_milliseconds());
03562 
03563        t = (d / 1000);
03564        if (t <= 0) {
03565          t = (float)0.00001;
03566          needs_sleep_cancelled = 1;
03567        }
03568        if (!max_sleep_time || (t < max_sleep_time))
03569          max_sleep_time = t;
03570       } 
03571       p = p->next;
03572     }
03573   
03574     if (needs_sleep_cancelled)
03575       return 0;
03576 
03577     if (post_system_idle()) {
03578       return 0;
03579     }
03580   
03581     if (sleep_now) {
03582       float mst = (float)max_sleep_time;
03583 
03584       /* Make sure that mst didn't go to infinity: */
03585       if ((double)mst > (2 * max_sleep_time)) {
03586        mst = 100000000.0;
03587       }
03588 
03589       scheme_sleep(mst, fds);
03590     } else if (scheme_wakeup_on_input)
03591       scheme_wakeup_on_input(fds);
03592 
03593     return 1;
03594   }
03595 
03596   return 0;
03597 }
03598 
03599 static int post_system_idle()
03600 {
03601   return scheme_try_channel_get(scheme_system_idle_channel);
03602 }
03603 
03604 void scheme_cancel_sleep()
03605 {
03606   needs_sleep_cancelled = 1;
03607 }
03608 
03609 void scheme_check_threads(void)
03610 /* Signals should be suspended. */
03611 {
03612   scheme_current_thread->suspend_break++;
03613   scheme_thread_block((float)0);
03614   --scheme_current_thread->suspend_break;
03615 
03616   check_sleep(have_activity, 0);
03617 }
03618 
03619 void scheme_wake_up(void)
03620 {
03621   scheme_active_but_sleeping = 0;
03622   if (have_activity && scheme_notify_multithread)
03623     scheme_notify_multithread(1);
03624 }
03625 
03626 void scheme_out_of_fuel(void)
03627 {
03628   scheme_thread_block((float)0);
03629   scheme_current_thread->ran_some = 1;
03630 }
03631 
03632 #ifdef USE_ITIMER
03633 static int itimer_handler_installed = 0;
03634 
03635 #ifdef MZ_XFORM
03636 START_XFORM_SKIP;
03637 #endif
03638 
03639 static void timer_expired(int ignored)
03640 {
03641   scheme_fuel_counter = 0;
03642   scheme_jit_stack_boundary = (unsigned long)-1;
03643 #  ifdef SIGSET_NEEDS_REINSTALL
03644   MZ_SIGSET(SIGPROF, timer_expired);
03645 #  endif
03646 }
03647 
03648 #ifdef MZ_XFORM
03649 END_XFORM_SKIP;
03650 #endif
03651 
03652 #endif
03653 
03654 static void init_schedule_info(Scheme_Schedule_Info *sinfo, Scheme_Thread *false_pos_ok, 
03655                             double sleep_end)
03656 {
03657   sinfo->false_positive_ok = false_pos_ok;
03658   sinfo->potentially_false_positive = 0;
03659   sinfo->current_syncing = NULL;
03660   sinfo->spin = 0;
03661   sinfo->is_poll = 0;
03662   sinfo->sleep_end = sleep_end;
03663 }
03664 
03665 Scheme_Object *scheme_current_break_cell()
03666 {
03667   return scheme_extract_one_cc_mark(NULL, scheme_break_enabled_key);
03668 }
03669 
03670 static int can_break_param(Scheme_Thread *p)
03671 {
03672   if (p == scheme_current_thread) {
03673     Scheme_Object *v;
03674     
03675     v = scheme_extract_one_cc_mark(NULL, scheme_break_enabled_key);
03676     
03677     v = scheme_thread_cell_get(v, p->cell_values);
03678     
03679     return SCHEME_TRUEP(v);
03680   } else
03681     return p->can_break_at_swap;
03682 }
03683 
03684 int scheme_can_break(Scheme_Thread *p)
03685 {
03686   if (!p->suspend_break && !scheme_no_stack_overflow) {
03687     return can_break_param(p);
03688   } else
03689     return 0;
03690 }
03691 
03692 void scheme_set_can_break(int on)
03693 {
03694   Scheme_Object *v;
03695       
03696   v = scheme_extract_one_cc_mark(NULL, scheme_break_enabled_key);
03697 
03698   scheme_thread_cell_set(v, scheme_current_thread->cell_values, 
03699                       (on ? scheme_true : scheme_false));
03700 
03701   if (SAME_OBJ(v, maybe_recycle_cell))
03702     maybe_recycle_cell = NULL;
03703 }
03704 
03705 void scheme_check_break_now(void)
03706 {
03707   Scheme_Thread *p = scheme_current_thread;
03708 
03709   check_ready_break();
03710 
03711   if (p->external_break && scheme_can_break(p)) {
03712     scheme_thread_block_w_thread(0.0, p);
03713     p->ran_some = 1;
03714   }
03715 }
03716 
03717 static Scheme_Object *check_break_now(int argc, Scheme_Object *args[])
03718 {
03719   scheme_check_break_now();
03720   return scheme_void;
03721 }
03722 
03723 void scheme_push_break_enable(Scheme_Cont_Frame_Data *cframe, int on, int post_check)
03724 {
03725   Scheme_Object *v = NULL;
03726 
03727   if (recycle_cell) {
03728     if (!SCHEME_TRUEP(((Thread_Cell *)recycle_cell)->def_val) == !on) {
03729       v = recycle_cell;
03730       recycle_cell = NULL;
03731     }
03732   }
03733 
03734   if (!v)
03735     v = scheme_make_thread_cell(on ? scheme_true : scheme_false, 1);
03736   scheme_push_continuation_frame(cframe);
03737   scheme_set_cont_mark(scheme_break_enabled_key, v);
03738   if (post_check)
03739     scheme_check_break_now();    
03740 
03741   cframe->cache = v;
03742   maybe_recycle_cell = v;
03743   recycle_cc_count = scheme_cont_capture_count;
03744 }
03745 
03746 void scheme_pop_break_enable(Scheme_Cont_Frame_Data *cframe, int post_check)
03747 {
03748   scheme_pop_continuation_frame(cframe);
03749   if (post_check)
03750     scheme_check_break_now();    
03751 
03752   if (cframe->cache == maybe_recycle_cell) {
03753     if (recycle_cc_count == scheme_cont_capture_count) {
03754       recycle_cell = maybe_recycle_cell;
03755     }
03756     maybe_recycle_cell = NULL;
03757   }
03758 }
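
/* --- Editor's sketch (not part of the original source) ---
   The push/pop pattern for temporarily enabling breaks around blocking
   work, as used by scheme_block_until_enable_break() and
   scheme_thread_block_enable_break() later in this file. The helper
   `do_blocking_work' is hypothetical. */

static void do_blocking_work(void); /* hypothetical */

static void with_breaks_enabled(void)
{
  Scheme_Cont_Frame_Data cframe;

  scheme_push_break_enable(&cframe, 1, 1);  /* enable breaks; check for a pending one now */
  do_blocking_work();                       /* may call scheme_thread_block() */
  scheme_pop_break_enable(&cframe, 0);      /* restore; skip the post-check on exit */
}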
03759 
03760 static Scheme_Object *raise_user_break(int argc, Scheme_Object ** volatile argv)
03761 {
03762   /* The main action here is buried in code to free temporary bignum
03763      space on escapes. Aside from a thread kill, this is the only
03764      place where we have to worry about freeing bignum space, because
03765      kill and escape are the only possible actions within a bignum
03766      calculation. It is possible to have nested bignum calculations,
03767      though (if the break handler performs bignum arithmetic), so
03768      that's why we save and restore an old snapshot. */
03769   mz_jmp_buf *savebuf, newbuf;
03770   long save[4];
03771 
03772   savebuf = scheme_current_thread->error_buf;
03773   scheme_current_thread->error_buf = &newbuf;
03774   scheme_gmp_tls_snapshot(scheme_current_thread->gmp_tls, save);
03775 
03776   if (!scheme_setjmp(newbuf)) {
03777     /* >>>> This is the main action <<<< */
03778     scheme_raise_exn(MZEXN_BREAK, argv[0], "user break");
03779     /* will definitely escape (or thread will die) */
03780   } else {
03781     /* As expected, we're escaping. Unless we're continuing, then
03782        reset temporary bignum memory. */
03783     int cont;
03784     cont = SAME_OBJ((Scheme_Object *)scheme_jumping_to_continuation,
03785                   argv[0]);
03786     scheme_gmp_tls_restore_snapshot(scheme_current_thread->gmp_tls, NULL, save, !cont);
03787     scheme_longjmp(*savebuf, 1);
03788   }
03789 
03790   /* Can't get here */
03791   return NULL;
03792 }
03793 
03794 static void raise_break(Scheme_Thread *p)
03795 {
03796   int block_descriptor;
03797   Scheme_Object *blocker; /* semaphore or port */
03798   Scheme_Ready_Fun block_check;
03799   Scheme_Needs_Wakeup_Fun block_needs_wakeup;
03800   Scheme_Object *a[1];
03801   Scheme_Cont_Frame_Data cframe;
03802 
03803   p->external_break = 0;
03804 
03805   if (p->blocker && (p->block_check == (Scheme_Ready_Fun)syncing_ready)) {
03806     /* Get out of lines for channels, etc., before calling a break exn handler. */
03807     scheme_post_syncing_nacks((Syncing *)p->blocker);
03808   }
03809 
03810   block_descriptor = p->block_descriptor;
03811   blocker = p->blocker;
03812   block_check = p->block_check;
03813   block_needs_wakeup = p->block_needs_wakeup;
03814   
03815   p->block_descriptor = NOT_BLOCKED;
03816   p->blocker = NULL;
03817   p->block_check = NULL;
03818   p->block_needs_wakeup = NULL;
03819   p->ran_some = 1;
03820   
03821   a[0] = scheme_make_prim((Scheme_Prim *)raise_user_break);
03822 
03823   /* Continuation frame ensures that this doesn't
03824      look like it's in tail position with respect to
03825      an existing escape continuation */
03826   scheme_push_continuation_frame(&cframe);
03827 
03828   scheme_call_ec(1, a);
03829 
03830   scheme_pop_continuation_frame(&cframe);
03831 
03832   /* Continue from break... */
03833   p->block_descriptor = block_descriptor;
03834   p->blocker = blocker;
03835   p->block_check = block_check;
03836   p->block_needs_wakeup = block_needs_wakeup;
03837 }
03838 
03839 static void exit_or_escape(Scheme_Thread *p)
03840 {
03841   /* Maybe this killed thread is nested: */
03842   if (p->nester) {
03843     if (p->running & MZTHREAD_KILLED)
03844       p->running -= MZTHREAD_KILLED;
03845     p->cjs.jumping_to_continuation = (Scheme_Object *)p;
03846     p->cjs.is_kill = 1;
03847     scheme_longjmp(*p->error_buf, 1);
03848   }
03849 
03850   if (SAME_OBJ(p, scheme_main_thread)) {
03851     /* Hard exit: */
03852     if (scheme_exit)
03853       scheme_exit(0);
03854     
03855     /* We really have to exit: */
03856     exit(0);
03857   }
03858 
03859   remove_thread(p);
03860   select_thread();
03861 }
03862 
03863 void scheme_break_main_thread()
03864 /* This function can be called from an interrupt handler. 
03865    On some platforms, it will even be called from multiple
03866    OS threads. In the case of multiple threads, there's a
03867    tiny chance that a single Ctrl-C will trigger multiple
03868    break exceptions. */
03869 {
03870   delayed_break_ready = 1;
03871 }
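
/* --- Editor's sketch (assumes a Unix-style embedding; not from the
   original source) --- Hooking Ctrl-C to the break mechanism. As the
   comment above notes, scheme_break_main_thread() is safe to call from
   an interrupt handler. */

#include <signal.h>

static void handle_sigint(int sig)
{
  scheme_break_main_thread();
}

static void install_break_handler(void)
{
  signal(SIGINT, handle_sigint);
}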
03872 
03873 void scheme_set_break_main_target(Scheme_Thread *p)
03874 {
03875   if (!main_break_target_thread) {
03876     REGISTER_SO(main_break_target_thread);
03877   }
03878   main_break_target_thread = p;
03879 }
03880 
03881 static void check_ready_break()
03882 {
03883   if (delayed_break_ready) {
03884     if (scheme_main_thread) {
03885       delayed_break_ready = 0;
03886       scheme_break_thread(main_break_target_thread);
03887     }
03888   }
03889 }
03890 
03891 void scheme_break_thread(Scheme_Thread *p)
03892 {
03893   if (!p) {
03894     p = scheme_main_thread;
03895     if (!p)
03896       return;
03897   }
03898 
03899   /* Propagate breaks: */
03900   while (p->nestee) {
03901     p = p->nestee;
03902   }
03903 
03904   p->external_break = 1;
03905 
03906   if (p == scheme_current_thread) {
03907     if (scheme_can_break(p)) {
03908       scheme_fuel_counter = 0;
03909       scheme_jit_stack_boundary = (unsigned long)-1;
03910     }
03911   }
03912   scheme_weak_resume_thread(p);
03913 # if defined(WINDOWS_PROCESSES) || defined(WINDOWS_FILE_HANDLES)
03914   if (SAME_OBJ(p, scheme_main_thread))
03915     ReleaseSemaphore(scheme_break_semaphore, 1, NULL);
03916 # endif
03917 }
03918 
03919 static void find_next_thread(Scheme_Thread **return_arg) {
03920   Scheme_Thread *next;
03921   Scheme_Thread *p = scheme_current_thread;
03922   Scheme_Object *next_in_set;
03923   Scheme_Thread_Set *t_set;
03924 
03925   double msecs = 0.0;
03926 
03927   /* Find the next process. Skip processes that are definitely
03928      blocked. */
03929 
03930   /* Start from the root */
03931   next_in_set = (Scheme_Object *)scheme_thread_set_top;
03932   t_set = NULL; /* this will get set at the beginning of the loop */
03933 
03934   /* Each thread may or may not be available. If it's not available,
03935      we search thread by thread to find something that is available. */
03936   while (1) {
03937     /* next_in_set is the thread or set to try... */
03938 
03939     /* While it's a set, go down into the set, choosing the next
03940        item after the set's current. For each set, remember where we
03941        started searching for something to run, so we'll know when
03942        we've tried everything in the set. */
03943     while (!SCHEME_THREADP(next_in_set)) {
03944       t_set = (Scheme_Thread_Set *)next_in_set;
03945       next_in_set = get_t_set_next(t_set->current);
03946       if (!next_in_set)
03947         next_in_set = t_set->first;
03948       t_set->current = next_in_set;
03949       t_set->search_start = next_in_set;
03950     }
03951 
03952     /* Now `t_set' is the set we're trying, and `next' will be the
03953        thread to try: */
03954     next = (Scheme_Thread *)next_in_set;
03955 
03956     /* If we get back to the current thread, then
03957        no other thread was ready. */
03958     if (SAME_PTR(next, p)) {
03959       next = NULL;
03960       break;
03961     }
03962 
03963     /* Check whether `next' is ready... */
03964 
03965     if (next->nestee) {
03966       /* Blocked on nestee */
03967     } else if (next->running & MZTHREAD_USER_SUSPENDED) {
03968       if (next->next || (next->running & MZTHREAD_NEED_SUSPEND_CLEANUP)) {
03969         /* If a non-main thread is still in the queue, 
03970            it needs to be swapped in so it can clean up
03971            and suspend itself. */
03972         break;
03973       }
03974     } else if (next->running & MZTHREAD_KILLED) {
03975       /* This one has been terminated. */
03976       if ((next->running & MZTHREAD_NEED_KILL_CLEANUP) 
03977           || next->nester
03978           || !next->next) {
03979         /* The thread needs to clean up. Swap it in so it can die. */
03980         break;
03981       } else
03982         remove_thread(next);
03983       break;
03984     } else if (next->external_break && scheme_can_break(next)) {
03985       break;
03986     } else {
03987       if (next->block_descriptor == GENERIC_BLOCKED) {
03988         if (next->block_check) {
03989           Scheme_Ready_Fun_FPC f = (Scheme_Ready_Fun_FPC)next->block_check;
03990           Scheme_Schedule_Info sinfo;
03991           init_schedule_info(&sinfo, next, next->sleep_end);
03992           if (f(next->blocker, &sinfo))
03993             break;
03994           next->sleep_end = sinfo.sleep_end;
03995           msecs = 0.0; /* that could have taken a while */
03996         }
03997       } else if (next->block_descriptor == SLEEP_BLOCKED) {
03998         if (!msecs)
03999           msecs = scheme_get_inexact_milliseconds();
04000         if (next->sleep_end <= msecs)
04001           break;
04002       } else
04003         break;
04004     }
04005 
04006     /* Look for the next thread/set in this set */
04007     if (next->t_set_next)
04008       next_in_set = next->t_set_next;
04009     else
04010       next_in_set = t_set->first;
04011 
04012     /* If we run out of things to try in this set,
04013        go up to find the next set. */
04014     if (SAME_OBJ(next_in_set, t_set->search_start)) {
04015       /* Loop to go up past exhausted sets, clearing search_start
04016          from each exhausted set. */
04017       while (1) {
04018         t_set->search_start = NULL;
04019         t_set = t_set->parent;
04020 
04021         if (t_set) {
04022           next_in_set = get_t_set_next(t_set->current);
04023           if (!next_in_set)
04024             next_in_set = t_set->first;
04025 
04026           if (SAME_OBJ(next_in_set, t_set->search_start)) {
04027             t_set->search_start = NULL;
04028             /* continue going up */
04029           } else {
04030             t_set->current = next_in_set;
04031             break;
04032           }
04033         } else
04034           break;
04035       }
04036 
04037       if (!t_set) {
04038         /* We ran out of things to try. If we
04039            start again with the top, we should
04040            land back at p. */
04041         next = NULL;
04042         break;
04043       }
04044     } else {
04045       /* Set current... */
04046       t_set->current = next_in_set;
04047     } 
04048     /* As we go back to the top of the loop, we'll check whether
04049        next_in_set is a thread or set, etc. */
04050   }
04051 
04052   p           = NULL;
04053   next_in_set = NULL;
04054   t_set       = NULL;
04055   *return_arg = next;
04056   next        = NULL;
04057 }
04058 
04059 void scheme_thread_block(float sleep_time)
04060      /* If we're blocked, `sleep_time' is a max sleep time,
04061        not a min sleep time. Otherwise, it's a min & max sleep time.
04062        This proc auto-resets p's blocking info if an escape occurs. */
04063 {
04064   double sleep_end;
04065   Scheme_Thread *next;
04066   Scheme_Thread *p = scheme_current_thread;
04067 
04068   if (p->return_marks_to) /* just in case we get here */
04069     return;
04070 
04071   if (p->running & MZTHREAD_KILLED) {
04072     /* This thread is dead! Give up now. */
04073     if (!do_atomic)
04074       exit_or_escape(p);
04075   }
04076 
04077   if ((p->running & MZTHREAD_USER_SUSPENDED)
04078       && !(p->running & MZTHREAD_NEED_SUSPEND_CLEANUP)) {
04079     /* This thread was suspended. */
04080     wait_until_suspend_ok();
04081     if (!p->next) {
04082       /* Suspending the main thread... */
04083       select_thread();
04084     } else
04085       scheme_weak_suspend_thread(p);
04086   }
04087 
04088   /* Check scheduled_kills early and often. */
04089   check_scheduled_kills();
04090 
04091   shrink_cust_box_array();
04092 
04093   if (scheme_active_but_sleeping)
04094     scheme_wake_up();
04095 
04096   if (sleep_time > 0) {
04097     sleep_end = scheme_get_inexact_milliseconds();
04098     sleep_end += (sleep_time * 1000.0);
04099   } else
04100     sleep_end = 0;
04101 
04102  start_sleep_check:
04103 
04104   check_ready_break();
04105 
04106   if (!p->external_break && !p->next && scheme_check_for_break && scheme_check_for_break())
04107     p->external_break = 1;
04108 
04109   if (p->external_break && !p->suspend_break && scheme_can_break(p)) {
04110     raise_break(p);
04111     goto start_sleep_check;
04112   }
04113   
04114  swap_or_sleep:
04115 
04116 #ifdef USE_OSKIT_CONSOLE
04117   scheme_check_keyboard_input();
04118 #endif
04119 
04120   /* Check scheduled_kills early and often. */
04121   check_scheduled_kills();
04122 
04123   if (!do_atomic && (sleep_end >= 0.0)) {
04124     find_next_thread(&next);
04125   } else
04126     next = NULL;
04127   
04128   if (next) {
04129     /* Clear out search_start fields */
04130     Scheme_Thread_Set *t_set;
04131     t_set = next->t_set_parent;
04132     while (t_set) {
04133       t_set->search_start = NULL;
04134       t_set = t_set->parent;
04135     }
04136     t_set = NULL;
04137   }
04138 
04139   if ((sleep_end > 0.0) && (p->block_descriptor == NOT_BLOCKED)) {
04140     p->block_descriptor = SLEEP_BLOCKED;
04141     p->sleep_end = sleep_end;
04142   } else if ((sleep_end > 0.0) && (p->block_descriptor == GENERIC_BLOCKED)) {
04143     p->sleep_end = sleep_end;
04144   }
04145 
04146   if (next && (!next->running || (next->running & MZTHREAD_SUSPENDED))) {
04147     /* In the process of selecting another thread, it was suspended or
04148        removed. Very unusual, but possible if a block checker does
04149        strange things??? */
04150     next = NULL;
04151   }
04152 
04153 #if 0
04154   /* Debugging: next must be in the chain of processes */
04155   if (next) {
04156     Scheme_Thread *p = scheme_first_thread;
04157     while (p != next) {
04158       p = p->next;
04159       if (!p) {
04160        printf("error: tried to switch to bad thread\n");
04161        exit(1);
04162       }
04163     }
04164   }
04165 #endif
04166 
04167   if (next) {
04168     /* Swap in `next', but first clear references to other threads. */
04169     swap_target = next;
04170     next = NULL;
04171     do_swap_thread();
04172   } else if (do_atomic && scheme_on_atomic_timeout) {
04173     scheme_on_atomic_timeout();
04174   } else {
04175     /* If all processes are blocked, check for total process sleeping: */
04176     if (p->block_descriptor != NOT_BLOCKED) {
04177       check_sleep(1, 1);
04178     }
04179   }
04180 
04181   if (p->block_descriptor == SLEEP_BLOCKED) {
04182     p->block_descriptor = NOT_BLOCKED;
04183   }
04184   p->sleep_end = 0.0;
04185 
04186   /* Killed while I was asleep? */
04187   if (p->running & MZTHREAD_KILLED) {
04188     /* This thread is dead! Give up now. */
04189     if (p->running & MZTHREAD_NEED_KILL_CLEANUP) {
04190       /* The thread needs to clean up. It will block immediately to die. */
04191       return;
04192     } else {
04193       if (!do_atomic)
04194        exit_or_escape(p);
04195     }
04196   }
04197 
04198   /* Suspended while I was asleep? */
04199   if ((p->running & MZTHREAD_USER_SUSPENDED)
04200       && !(p->running & MZTHREAD_NEED_SUSPEND_CLEANUP)) {
04201     wait_until_suspend_ok();
04202     if (!p->next)
04203       scheme_thread_block(0.0); /* main thread handled at top of this function */
04204     else
04205       scheme_weak_suspend_thread(p);
04206   }
04207 
04208   /* Check for external break again after swap or sleep */
04209   check_ready_break();
04210   if (p->external_break && !p->suspend_break && scheme_can_break(p)) {
04211     raise_break(p);
04212   }
04213   
04214   if (sleep_end > 0) {
04215     if (sleep_end > scheme_get_inexact_milliseconds()) {
04216       /* Still have time to sleep if necessary, but make sure we're
04217         not ready (because maybe that's why we were swapped back in!) */
04218       if (p->block_descriptor == GENERIC_BLOCKED) {
04219        if (p->block_check) {
04220          Scheme_Ready_Fun_FPC f = (Scheme_Ready_Fun_FPC)p->block_check;
04221          Scheme_Schedule_Info sinfo;
04222          init_schedule_info(&sinfo, p, sleep_end);
04223          if (f(p->blocker, &sinfo)) {
04224            sleep_end = 0;
04225          } else {
04226            sleep_end = sinfo.sleep_end;
04227          }
04228        }
04229       }
04230 
04231       if (sleep_end > 0)
04232        goto swap_or_sleep;
04233     }
04234   }
04235 
04236   if (do_atomic)
04237     missed_context_switch = 1;
04238 
04239   scheme_fuel_counter = p->engine_weight;
04240   scheme_jit_stack_boundary = scheme_stack_boundary;
04241 
04242 #ifdef USE_ITIMER
04243   {
04244     struct itimerval t, old;
04245 
04246     if (!itimer_handler_installed) {
04247       itimer_handler_installed = 1;
04248       MZ_SIGSET(SIGPROF, timer_expired);
04249     }
04250 
04251     t.it_value.tv_sec = 0;
04252     t.it_value.tv_usec = MZ_THREAD_QUANTUM_USEC;
04253     t.it_interval.tv_sec = 0;
04254     t.it_interval.tv_usec = 0;
04255 
04256     setitimer(ITIMER_PROF, &t, &old);
04257   }
04258 #endif
04259 #if defined(USE_WIN32_THREAD_TIMER) || defined(USE_PTHREAD_THREAD_TIMER)
04260   scheme_start_itimer_thread(MZ_THREAD_QUANTUM_USEC);
04261 #endif
04262 
04263   /* Check scheduled_kills early and often. */
04264   check_scheduled_kills();
04265 }
04266 
04267 void scheme_making_progress()
04268 {
04269   scheme_current_thread->ran_some = 1;
04270 }
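
/* --- Editor's sketch (not from the original source) ---
   A long-running C primitive cooperating with the scheduler: block for
   zero seconds between bounded chunks of work (as scheme_out_of_fuel()
   does above) and report progress via scheme_making_progress(). The
   helpers `work_remains' and `do_bounded_chunk' are hypothetical. */

static int work_remains(void);       /* hypothetical */
static void do_bounded_chunk(void);  /* hypothetical */

static void crunch(void)
{
  while (work_remains()) {
    do_bounded_chunk();
    scheme_thread_block(0.0);    /* give other threads a chance to run */
    scheme_making_progress();    /* tell the scheduler this thread ran */
  }
}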
04271 
04272 int scheme_block_until(Scheme_Ready_Fun _f, Scheme_Needs_Wakeup_Fun fdf,
04273                      Scheme_Object *data, float delay)
04274 {
04275   int result;
04276   Scheme_Thread *p = scheme_current_thread;
04277   Scheme_Ready_Fun_FPC f = (Scheme_Ready_Fun_FPC)_f;
04278   Scheme_Schedule_Info sinfo;
04279   double sleep_end;
04280 
04281   if (!delay)
04282     sleep_end = 0.0;
04283   else {
04284     sleep_end = scheme_get_inexact_milliseconds();
04285     sleep_end += (delay * 1000.0);    
04286   }
04287 
04288   /* We make an sinfo to be polite, but we also assume
04289      that f will not generate any redirections! */
04290   init_schedule_info(&sinfo, NULL, sleep_end);
04291 
04292   while (!(result = f((Scheme_Object *)data, &sinfo))) {
04293     sleep_end = sinfo.sleep_end;
04294     if (sinfo.spin) {
04295       init_schedule_info(&sinfo, NULL, 0.0);
04296       scheme_thread_block(0.0);
04297       scheme_current_thread->ran_some = 1;
04298     } else {
04299       if (sleep_end) {
04300        delay = (float)(sleep_end - scheme_get_inexact_milliseconds());
04301        delay /= 1000.0;
04302        if (delay < 0)
04303          delay = (float)0.00001;
04304       } else
04305        delay = 0.0;
04306 
04307       p->block_descriptor = GENERIC_BLOCKED;
04308       p->blocker = (Scheme_Object *)data;
04309       p->block_check = (Scheme_Ready_Fun)f;
04310       p->block_needs_wakeup = fdf;
04311       
04312       scheme_thread_block(delay);
04313       
04314       p->block_descriptor = NOT_BLOCKED;
04315       p->blocker = NULL;
04316       p->block_check = NULL;
04317       p->block_needs_wakeup = NULL;
04318     }
04319   }
04320   p->ran_some = 1;
04321 
04322   return result;
04323 }
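
/* --- Editor's sketch (not from the original source) ---
   A blocking primitive built on scheme_block_until(): park the current
   thread until an externally set flag becomes non-zero. `Flag_Record',
   `flag_ready', and `wait_for_flag' are hypothetical, and for brevity the
   record is simply cast to Scheme_Object *; a real primitive would pass a
   properly allocated, GC-registered object as the blocker. */

typedef struct Flag_Record { int set; } Flag_Record;

static int flag_ready(Scheme_Object *data)
{
  return ((Flag_Record *)data)->set;   /* non-zero means "stop blocking" */
}

static void wait_for_flag(Flag_Record *fr)
{
  /* No file descriptors to register, so the wakeup function is NULL;
     a delay of 0.0 means "no timeout". */
  scheme_block_until(flag_ready, NULL, (Scheme_Object *)fr, 0.0);
}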
04324 
04325 int scheme_block_until_enable_break(Scheme_Ready_Fun _f, Scheme_Needs_Wakeup_Fun fdf,
04326                                 Scheme_Object *data, float delay, int enable_break)
04327 {
04328   if (enable_break) {
04329     int v;
04330     Scheme_Cont_Frame_Data cframe;
04331 
04332     scheme_push_break_enable(&cframe, 1, 1);
04333     v = scheme_block_until(_f, fdf, data, delay);
04334     scheme_pop_break_enable(&cframe, 0);
04335 
04336     return v;
04337   } else
04338     return scheme_block_until(_f, fdf, data, delay);
04339 }
04340 
04341 static int ready_unless(Scheme_Object *o)
04342 {
04343   Scheme_Object *unless_evt, *data;
04344   Scheme_Ready_Fun f;
04345 
04346   data = (Scheme_Object *)((void **)o)[0];
04347   unless_evt = (Scheme_Object *)((void **)o)[1];
04348   f = (Scheme_Ready_Fun)((void **)o)[2];
04349 
04350   return f(data);
04351 }
04352 
04353 static void needs_wakeup_unless(Scheme_Object *o, void *fds)
04354 {
04355   Scheme_Object *data;
04356   Scheme_Needs_Wakeup_Fun fdf;
04357 
04358   data = (Scheme_Object *)((void **)o)[0];
04359   fdf = (Scheme_Needs_Wakeup_Fun)((void **)o)[3];
04360 
04361   fdf(data, fds);
04362 }
04363 
04364 
04365 int scheme_block_until_unless(Scheme_Ready_Fun f, Scheme_Needs_Wakeup_Fun fdf,
04366                            Scheme_Object *data, float delay, 
04367                            Scheme_Object *unless,
04368                            int enable_break)
04369 {
04370   if (unless) {
04371     void **a;
04372     a = MALLOC_N(void *, 4);
04373     a[0] = data;
04374     a[1] = unless;
04375     a[2] = f;
04376     a[3] = fdf;
04377 
04378     data = (Scheme_Object *) mzALIAS a;
04379     f = ready_unless;
04380     if (fdf)
04381       fdf = needs_wakeup_unless;
04382   }
04383    
04384   return scheme_block_until_enable_break(f, fdf, data, delay, enable_break);
04385 }
04386 
04387 void scheme_thread_block_enable_break(float sleep_time, int enable_break)
04388 {
04389   if (enable_break) {
04390     Scheme_Cont_Frame_Data cframe;
04391     
04392     scheme_push_break_enable(&cframe, 1, 1);
04393     scheme_thread_block(sleep_time);
04394     scheme_pop_break_enable(&cframe, 0);
04395   } else
04396     scheme_thread_block(sleep_time);
04397 }
04398 
04399 void scheme_start_atomic(void)
04400 {
04401   if (!do_atomic)
04402     missed_context_switch = 0;
04403   do_atomic++;
04404 }
04405 
04406 void scheme_end_atomic_no_swap(void)
04407 {
04408   --do_atomic;
04409 }
04410 
04411 void scheme_start_in_scheduler(void)
04412 {
04413   do_atomic++;
04414   scheme_no_stack_overflow++;
04415 }
04416       
04417 void scheme_end_in_scheduler(void)
04418 {
04419   --do_atomic;
04420   --scheme_no_stack_overflow;
04421 }
04422 
04423 void scheme_end_atomic(void)
04424 {
04425   scheme_end_atomic_no_swap();
04426   if (!do_atomic && missed_context_switch) {
04427     scheme_thread_block(0.0);
04428     scheme_current_thread->ran_some = 1;    
04429   }
04430 }
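
/* --- Editor's sketch (not from the original source) ---
   Bracketing a short critical section so that no thread swap can occur in
   the middle. The body must not block; `update_shared_c_state' is
   hypothetical. */

static void update_shared_c_state(void); /* hypothetical */

static void atomically_update(void)
{
  scheme_start_atomic();
  update_shared_c_state();   /* no scheme_thread_block() allowed in here */
  scheme_end_atomic();       /* swaps immediately if a context switch was missed */
}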
04431 
04432 static void wait_until_suspend_ok()
04433 {
04434   while (do_atomic && scheme_on_atomic_timeout) {
04435     scheme_on_atomic_timeout();
04436   }
04437 }
04438 
04439 void scheme_weak_suspend_thread(Scheme_Thread *r)
04440 {
04441   if (r->running & MZTHREAD_SUSPENDED)
04442     return;
04443 
04444   if (r == scheme_current_thread) {
04445     wait_until_suspend_ok();
04446   }
04447   
04448   if (r->prev) {
04449     r->prev->next = r->next;
04450     r->next->prev = r->prev;
04451   } else {
04452     r->next->prev = NULL;
04453     scheme_first_thread = r->next;
04454   }
04455 
04456   r->next = r->prev = NULL;
04457   unschedule_in_set((Scheme_Object *)r, r->t_set_parent);
04458 
04459   r->running |= MZTHREAD_SUSPENDED;
04460 
04461   prepare_this_thread_for_GC(r);
04462 
04463   if (r == scheme_current_thread) {
04464     select_thread();
04465 
04466     /* Killed while suspended? */
04467     if ((r->running & MZTHREAD_KILLED) && !(r->running & MZTHREAD_NEED_KILL_CLEANUP))
04468       scheme_thread_block(0);
04469   }
04470 }
04471 
04472 void scheme_weak_resume_thread(Scheme_Thread *r)
04473      /* This function can be called from an interrupt handler, but
04474        only for the main thread, which is never suspended. */
04475 {
04476   if (!(r->running & MZTHREAD_USER_SUSPENDED)) {
04477     if (r->running & MZTHREAD_SUSPENDED) {
04478       r->running -= MZTHREAD_SUSPENDED;
04479       r->next = scheme_first_thread;
04480       r->prev = NULL;
04481       scheme_first_thread = r;
04482       r->next->prev = r;
04483       r->ran_some = 1;
04484       schedule_in_set((Scheme_Object *)r, r->t_set_parent);
04485       scheme_check_tail_buffer_size(r);
04486     }
04487   }
04488 }
04489 
04490 void scheme_about_to_move_C_stack(void)
04491 {
04492   wait_until_suspend_ok();
04493 }
04494 
04495 static Scheme_Object *
04496 sch_sleep(int argc, Scheme_Object *args[])
04497 {
04498   float t;
04499 
04500   if (argc && !SCHEME_REALP(args[0]))
04501     scheme_wrong_type("sleep", "non-negative real number", 0, argc, args);
04502 
04503   if (argc) {
04504     t = (float)scheme_real_to_double(args[0]);
04505     if (t < 0)
04506       scheme_wrong_type("sleep", "non-negative real number", 0, argc, args);
04507   } else
04508     t = 0;
04509 
04510   scheme_thread_block(t);
04511   scheme_current_thread->ran_some = 1;
04512 
04513   return scheme_void;
04514 }
04515 
04516 static Scheme_Object *break_thread(int argc, Scheme_Object *args[])
04517 {
04518   Scheme_Thread *p;
04519 
04520   if (!SAME_TYPE(SCHEME_TYPE(args[0]), scheme_thread_type))
04521     scheme_wrong_type("break-thread", "thread", 0, argc, args);
04522 
04523   p = (Scheme_Thread *)args[0];
04524 
04525   scheme_break_thread(p);
04526 
04527   /* In case p == scheme_current_thread */
04528   if (!scheme_fuel_counter) {
04529     scheme_thread_block(0.0);
04530     scheme_current_thread->ran_some = 1;
04531   }
04532 
04533   return scheme_void;
04534 }
04535 
04536 static int do_kill_thread(Scheme_Thread *p)
04537 {
04538   int kill_self = 0;
04539 
04540   if (!MZTHREAD_STILL_RUNNING(p->running)) {
04541     return 0;
04542   }
04543 
04544   if (p->suspend_to_kill) {
04545     if (p == scheme_current_thread)
04546       return 1; /* suspend in caller */
04547     suspend_thread(p);
04548     return 0;
04549   }
04550 
04551   if (p->nestee)
04552     scheme_break_thread(p->nestee);
04553 
04554   while (p->private_on_kill) {
04555     p->private_on_kill(p->private_kill_data);
04556     if (p->private_kill_next) {
04557       p->private_on_kill = (Scheme_Kill_Action_Func)p->private_kill_next[0];
04558       p->private_kill_data = p->private_kill_next[1];
04559       p->private_kill_next = (void **)p->private_kill_next[2];
04560     } else {
04561       p->private_on_kill = NULL;
04562       p->private_kill_data = NULL;
04563     }
04564   }
04565 
04566   if (p->on_kill)
04567     p->on_kill(p);
04568 
04569   scheme_remove_managed(p->mref, (Scheme_Object *)p->mr_hop);
04570   {
04571     Scheme_Object *l;
04572     for (l = p->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
04573       scheme_remove_managed((Scheme_Custodian_Reference *)SCHEME_CAR(l), 
04574                          (Scheme_Object *)p->mr_hop);
04575     }
04576   }
04577 
04578   if (p->running) {
04579     if (p->running & MZTHREAD_USER_SUSPENDED) {
04580       /* end user suspension, because we need to kill the thread */
04581       p->running -= MZTHREAD_USER_SUSPENDED;
04582     }
04583 
04584     p->running |= MZTHREAD_KILLED;
04585     if ((p->running & MZTHREAD_NEED_KILL_CLEANUP)
04586        || p->nester)
04587       scheme_weak_resume_thread(p);
04588     else if (p != scheme_current_thread) {
04589       /* Do kill stuff... */
04590       if (p->next)
04591        remove_thread(p);
04592     }
04593   }
04594   if (p == scheme_current_thread)
04595     kill_self = 1;
04596 
04597   return kill_self;
04598 }
04599 
04600 void scheme_kill_thread(Scheme_Thread *p)
04601 {
04602   if (do_kill_thread(p)) {
04603     /* Suspend/kill self: */
04604     wait_until_suspend_ok();
04605     if (p->suspend_to_kill)
04606       suspend_thread(p);
04607     else
04608       scheme_thread_block(0.0);
04609   }
04610 
04611   /* Give killed threads time to die: */
04612   scheme_thread_block(0.0);
04613   scheme_current_thread->ran_some = 1;
04614 }
04615 
04616 static Scheme_Object *kill_thread(int argc, Scheme_Object *argv[])
04617 {
04618   Scheme_Thread *p = (Scheme_Thread *)argv[0];
04619 
04620   if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_type))
04621     scheme_wrong_type("kill-thread", "thread", 0, argc, argv);
04622 
04623   if (!MZTHREAD_STILL_RUNNING(p->running))
04624     return scheme_void;
04625 
04626   check_current_custodian_allows("kill-thread", p);
04627 
04628   scheme_kill_thread(p);
04629 
04630   return scheme_void;
04631 }
04632 
04633 void scheme_push_kill_action(Scheme_Kill_Action_Func f, void *d)
04634 {
04635   Scheme_Thread *p = scheme_current_thread;
04636 
04637   if (p->private_on_kill) {
04638     /* Pretty unlikely that these get nested. An exception handler
04639        would have to block on and within operations that need special
04640        kill handling. But it could happen. */
04641     void **next;
04642     next = MALLOC_N(void *, 3);
04643     next[0] = (void *)p->private_on_kill;
04644     next[1] = p->private_kill_data;
04645     next[2] = (void *)p->private_kill_next;
04646     p->private_kill_next = next;
04647   }
04648 
04649   p->private_on_kill = f;
04650   p->private_kill_data = d;
04651 }
04652 
04653 void scheme_pop_kill_action()
04654 {
04655   Scheme_Thread *p = scheme_current_thread;
04656 
04657   if (p->private_kill_next) {
04658     p->private_on_kill = (Scheme_Kill_Action_Func)p->private_kill_next[0];
04659     p->private_kill_data = p->private_kill_next[1];
04660     p->private_kill_next = (void **)p->private_kill_next[2];
04661   } else {
04662     p->private_on_kill = NULL;
04663     p->private_kill_data = NULL;
04664   }
04665 }
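
/* --- Editor's sketch (not from the original source) ---
   Protecting a C resource across operations that might kill the current
   thread: register a kill action before blocking, pop it afterwards. The
   names `close_handle' and `do_blocking_io' are hypothetical. */

static void close_handle(void *h);    /* hypothetical */
static void do_blocking_io(void *h);  /* hypothetical */

static void with_handle(void *h)
{
  scheme_push_kill_action((Scheme_Kill_Action_Func)close_handle, h);
  do_blocking_io(h);           /* may block; if the thread is killed, close_handle(h) runs */
  scheme_pop_kill_action();
  close_handle(h);             /* normal path: release the resource ourselves */
}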
04666 
04667 /*========================================================================*/
04668 /*                      suspend/resume and evts                          */
04669 /*========================================================================*/
04670 
04671 /* Forward decl: */
04672 static void transitive_resume(Scheme_Object *resumes);
04673 static void transitive_promote(Scheme_Thread *p, Scheme_Custodian *c);
04674 static void promote_thread(Scheme_Thread *p, Scheme_Custodian *to_c);
04675 
04676 static Scheme_Object *thread_suspend(int argc, Scheme_Object *argv[])
04677 {
04678   Scheme_Thread *p;
04679 
04680   if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_type))
04681     scheme_wrong_type("thread-suspend", "thread", 0, argc, argv);
04682 
04683   p = (Scheme_Thread *)argv[0];
04684 
04685   check_current_custodian_allows("thread-suspend", p);
04686 
04687   suspend_thread(p);
04688 
04689   return scheme_void;
04690 }
04691 
04692 static void suspend_thread(Scheme_Thread *p)
04693 {
04694   int running;
04695 
04696   if (!MZTHREAD_STILL_RUNNING(p->running))
04697     return;
04698   
04699   if (p->running & MZTHREAD_USER_SUSPENDED)
04700     return;
04701 
04702   /* Get running now, just in case the thread is waiting on its own
04703      suspend event (in which case posting to the sema will unsuspend
04704      the thread) */
04705   running = p->running;
04706 
04707   p->resumed_box = NULL;
04708   if (p->suspended_box) {
04709     SCHEME_PTR2_VAL(p->suspended_box) = (Scheme_Object *)p;
04710     scheme_post_sema_all(SCHEME_PTR1_VAL(p->suspended_box));
04711   }
04712 
04713   if (SAME_OBJ(p, scheme_main_thread)) {
04714     /* p is the main thread, which we're not allowed to
04715        suspend in the normal way. */
04716     p->running |= MZTHREAD_USER_SUSPENDED;
04717     scheme_main_was_once_suspended = 1;
04718     if (p == scheme_current_thread) {
04719       scheme_thread_block(0.0);
04720       p->ran_some = 1;
04721     }
04722   } else if ((running & (MZTHREAD_NEED_KILL_CLEANUP
04723                       | MZTHREAD_NEED_SUSPEND_CLEANUP))
04724             && (running & MZTHREAD_SUSPENDED)) {
04725     /* p probably needs to get out of semaphore-wait lines, etc. */
04726     scheme_weak_resume_thread(p);
04727     p->running |= MZTHREAD_USER_SUSPENDED;
04728   } else {
04729     if (p == scheme_current_thread) {
04730       wait_until_suspend_ok();
04731     }
04732     p->running |= MZTHREAD_USER_SUSPENDED;
04733     scheme_weak_suspend_thread(p); /* ok if p is scheme_current_thread */
04734     if (p == scheme_current_thread) {
04735       /* Need to check for breaks */
04736       scheme_check_break_now();
04737     }
04738   }
04739 }
04740 
04741 static void add_transitive_resume(Scheme_Thread *promote_to, Scheme_Thread *p)
04742 {
04743   Scheme_Object *running_box;
04744   Scheme_Hash_Table *ht;
04745 
04746   if (!p->running_box) {
04747     Scheme_Object *b;
04748     b = scheme_alloc_small_object();
04749     b->type = scheme_thread_dead_type;
04750     SCHEME_PTR_VAL(b) = (Scheme_Object *)p;
04751     p->running_box = b;
04752   }
04753   running_box = p->running_box;
04754 
04755   if (!promote_to->transitive_resumes) {
04756     /* Create table */
04757     ht = scheme_make_hash_table(SCHEME_hash_ptr);
04758     promote_to->transitive_resumes = (Scheme_Object *)ht;
04759   } else {
04760     /* Purge ht entries for threads that are now dead: */
04761     Scheme_Hash_Table *gone= NULL;
04762     int i;
04763 
04764     ht = (Scheme_Hash_Table *)promote_to->transitive_resumes;
04765     for (i = ht->size; i--; ) {
04766       if (ht->vals[i]) {
04767        if (!SCHEME_PTR_VAL(ht->keys[i])) {
04768          /* This one is dead */
04769          if (!gone)
04770            gone = scheme_make_hash_table(SCHEME_hash_ptr);
04771          scheme_hash_set(gone, ht->keys[i], scheme_true);
04772        }
04773       }
04774     }
04775 
04776     if (gone) {
04777       /* Remove dead ones: */
04778       for (i = gone->size; i--; ) {
04779        if (gone->vals[i]) {
04780          scheme_hash_set(ht, gone->keys[i], NULL);
04781        }
04782       }
04783     }
04784   }
04785 
04786   scheme_hash_set(ht, running_box, scheme_true);
04787 }
04788 
04789 static Scheme_Object *transitive_resume_k(void)
04790 {
04791   Scheme_Thread *p = scheme_current_thread;
04792   Scheme_Object *r = (Scheme_Object *)p->ku.k.p1;
04793   
04794   p->ku.k.p1 = NULL;
04795 
04796   transitive_resume(r);
04797 
04798   return scheme_true;
04799 }
04800 
04801 static void transitive_resume(Scheme_Object *resumes)
04802 {
04803   Scheme_Hash_Table *ht;
04804   Scheme_Object *a[1];
04805   int i;
04806 
04807 #ifdef DO_STACK_CHECK
04808 #include "mzstkchk.h"
04809   {
04810     Scheme_Thread *p = scheme_current_thread;
04811     
04812     p->ku.k.p1 = resumes;
04813 
04814     p->suspend_break++;
04815     scheme_start_atomic();
04816     scheme_handle_stack_overflow(transitive_resume_k);
04817     scheme_end_atomic_no_swap();
04818     --p->suspend_break;
04819 
04820     return;
04821   }
04822 #endif
04823 
04824   ht = (Scheme_Hash_Table *)resumes;
04825   
04826   for (i = ht->size; i--; ) {
04827     if (ht->vals[i]) {
04828       a[0] = SCHEME_PTR_VAL(ht->keys[i]);
04829       if (a[0])
04830        thread_resume(1, a);
04831     }
04832   }
04833 }
04834 
04835 static Scheme_Object *transitive_promote_k(void)
04836 {
04837   Scheme_Thread *p = scheme_current_thread;
04838   Scheme_Thread *pp = (Scheme_Thread *)p->ku.k.p1;
04839   Scheme_Custodian *c = (Scheme_Custodian *)p->ku.k.p2;
04840   
04841   p->ku.k.p1 = NULL;
04842   p->ku.k.p2 = NULL;
04843 
04844   transitive_promote(pp, c);
04845 
04846   return scheme_true;
04847 }
04848 
04849 static void transitive_promote(Scheme_Thread *p, Scheme_Custodian *c)
04850 {
04851   Scheme_Hash_Table *ht;
04852   Scheme_Object *t;
04853   int i;
04854 
04855 #ifdef DO_STACK_CHECK
04856 #include "mzstkchk.h"
04857   {
04858     Scheme_Thread *pp = scheme_current_thread;
04859     
04860     pp->ku.k.p1 = p;
04861     pp->ku.k.p2 = c;
04862 
04863     pp->suspend_break++;
04864     scheme_start_atomic();
04865     scheme_handle_stack_overflow(transitive_promote_k);
04866     scheme_end_atomic_no_swap();
04867     --pp->suspend_break;
04868 
04869     return;
04870   }
04871 #endif
04872 
04873   if (!p->transitive_resumes)
04874     return;
04875 
04876   ht = (Scheme_Hash_Table *)p->transitive_resumes;
04877   
04878   for (i = ht->size; i--; ) {
04879     if (ht->vals[i]) {
04880       t = SCHEME_PTR_VAL(ht->keys[i]);
04881       if (t)
04882        promote_thread((Scheme_Thread *)t, c);
04883     }
04884   }
04885 }
04886 
04887 static void promote_thread(Scheme_Thread *p, Scheme_Custodian *to_c)
04888 {
04889   Scheme_Custodian *c, *cx;
04890   Scheme_Custodian_Reference *mref;  
04891   Scheme_Object *l;
04892 
04893   /* This function also handles transitive promotion. Every transitive
04894      target for p always has at least the custodians of p, so if we don't
04895      add a custodian to p, we don't need to check the rest. */
04896   
04897   if (!p->mref || !CUSTODIAN_FAM(p->mref)) {
04898     /* The thread has no running custodian, so fall through to
04899        just use to_c */
04900   } else {
04901     c = CUSTODIAN_FAM(p->mref);
04902 
04903     /* Check whether c is an ancestor of to_c (in which case we do nothing) */
04904     for (cx = to_c; cx && NOT_SAME_OBJ(cx, c); ) {
04905       cx = CUSTODIAN_FAM(cx->parent);
04906     }
04907     if (cx) return;
04908 
04909     /* Check whether any of the extras are super of to_c.
04910        If so, do nothing. */
04911     for (l = p->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
04912       mref = (Scheme_Custodian_Reference *)SCHEME_CAR(l);
04913       c = CUSTODIAN_FAM(mref);
04914       
04915       for (cx = to_c; cx && NOT_SAME_OBJ(cx, c); ) {
04916        cx = CUSTODIAN_FAM(cx->parent);
04917       }
04918       if (cx) return;
04919     }
04920 
04921     /* Check whether to_c is super of c: */
04922     for (cx = c; cx && NOT_SAME_OBJ(cx, to_c); ) {
04923       cx = CUSTODIAN_FAM(cx->parent);
04924     }
04925     
04926     /* If cx, fall through to replace the main custodian with to_c, 
04927        because it's an ancestor of the current one. Otherwise, they're
04928        unrelated. */
04929     if (!cx) {
04930       /* Check whether any of the extras should be replaced by to_c */
04931       for (l = p->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
04932        /* Is to_c super of c? */
04933        for (cx = c; cx && NOT_SAME_OBJ(cx, to_c); ) {
04934          cx = CUSTODIAN_FAM(cx->parent);
04935        }
04936        if (cx) {
04937          /* Replace this custodian with to_c */
04938          mref = (Scheme_Custodian_Reference *)SCHEME_CAR(l);
04939          scheme_remove_managed(mref, (Scheme_Object *)p->mr_hop);
04940          mref = scheme_add_managed(to_c, (Scheme_Object *)p->mr_hop, NULL, NULL, 0);
04941          SCHEME_CAR(l) = (Scheme_Object *)mref;
04942 
04943          /* It's possible that one of the other custodians is also
04944             junior to to_c. Remove it if we find one. */
04945          {
04946            Scheme_Object *prev;
04947            prev = l;
04948            for (l = SCHEME_CDR(l); !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
04949              mref = (Scheme_Custodian_Reference *)SCHEME_CAR(l);
04950              c = CUSTODIAN_FAM(mref);
04951              for (cx = c; cx && NOT_SAME_OBJ(cx, to_c); ) {
04952               cx = CUSTODIAN_FAM(cx->parent);
04953              }
04954              if (cx)
04955               SCHEME_CDR(prev) = SCHEME_CDR(l);
04956            }
04957          }
04958 
04959          transitive_promote(p, to_c);
04960 
04961          return;
04962        }
04963       }
04964 
04965       /* Otherwise, this custodian is unrelated to the existing ones.
04966         Add it as an extra custodian. */
04967       mref = scheme_add_managed(to_c, (Scheme_Object *)p->mr_hop, NULL, NULL, 0);
04968       l = scheme_make_raw_pair((Scheme_Object *)mref, p->extra_mrefs);
04969       p->extra_mrefs = l;
04970 
04971       transitive_promote(p, to_c);
04972       return;
04973     }
04974   }
04975 
04976   /* Replace p's main custodian (if any) with to_c */
04977   scheme_remove_managed(p->mref, (Scheme_Object *)p->mr_hop);
04978   mref = scheme_add_managed(to_c, (Scheme_Object *)p->mr_hop, NULL, NULL, 0);
04979   p->mref = mref;
04980 #ifdef MZ_PRECISE_GC
04981   GC_register_thread(p, to_c);
04982 #endif
04983   
04984   transitive_promote(p, to_c);
04985 }
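/* [Editor's note -- illustrative sketch, not part of the original thread.c]
   promote_thread() repeats one idiom several times: starting from a
   custodian, follow parent links until either the candidate ancestor or
   the root (NULL) is reached.  A hedged sketch of that walk factored into
   a helper; custodian_is_super_of is a hypothetical name, not a real
   MzScheme entry point: */

static int custodian_is_super_of(Scheme_Custodian *maybe_super,
                                 Scheme_Custodian *start)
{
  Scheme_Custodian *cx;

  /* Walk from `start' toward the root; stop early if we meet `maybe_super'. */
  for (cx = start; cx && NOT_SAME_OBJ(cx, maybe_super); )
    cx = CUSTODIAN_FAM(cx->parent);

  return (cx != NULL);
}

/* For example, the first check in promote_thread() corresponds to
   custodian_is_super_of(c, to_c): if it holds, to_c is already covered
   by the thread's main custodian and nothing needs to change. */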
04986 
04987 static Scheme_Object *thread_resume(int argc, Scheme_Object *argv[])
04988 {
04989   Scheme_Thread *p, *promote_to = NULL;
04990   Scheme_Custodian *promote_c = NULL;
04991 
04992   if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_type))
04993     scheme_wrong_type("thread-resume", "thread", 0, argc, argv);
04994 
04995   p = (Scheme_Thread *)argv[0];
04996 
04997   if (argc > 1) {
04998     if (SAME_TYPE(SCHEME_TYPE(argv[1]), scheme_thread_type))
04999       promote_to = (Scheme_Thread *)argv[1];
05000     else if (SAME_TYPE(SCHEME_TYPE(argv[1]), scheme_custodian_type)) {
05001       promote_c = (Scheme_Custodian *)argv[1];
05002       if (promote_c->shut_down)
05003        promote_c = NULL;
05004     } else {
05005       scheme_wrong_type("thread-resume", "thread or custodian", 1, argc, argv);
05006       return NULL;
05007     }
05008   }
05009 
05010   if (!MZTHREAD_STILL_RUNNING(p->running))
05011     return scheme_void;
05012 
05013   /* Change/add custodians for p from promote_to / promote_c */
05014   if (promote_to) {
05015     Scheme_Object *l;
05016     Scheme_Custodian_Reference *mref;
05017 
05018     /* If promote_to doesn't have a working custodian, there's
05019        nothing to donate */
05020     if (promote_to->mref && CUSTODIAN_FAM(promote_to->mref)) {
05021       promote_thread(p, CUSTODIAN_FAM(promote_to->mref));
05022       
05023       for (l = p->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
05024        mref = (Scheme_Custodian_Reference *)SCHEME_CAR(l);
05025        promote_thread(p, CUSTODIAN_FAM(mref));
05026       }
05027     }
05028   }
05029   if (promote_c)
05030     promote_thread(p, promote_c);
05031 
05032   /* Set up transitive resume for future resumes of promote_to: */
05033   if (promote_to 
05034       && MZTHREAD_STILL_RUNNING(promote_to->running)
05035       && !SAME_OBJ(promote_to, p))
05036     add_transitive_resume(promote_to, p);
05037 
05038   /* Check whether the thread has a non-shut-down custodian */
05039   {
05040     Scheme_Custodian *c;
05041     
05042     if (p->mref)
05043       c = CUSTODIAN_FAM(p->mref);
05044     else
05045       c = NULL;
05046 
05047     if (!c || c->shut_down)
05048       return scheme_void;
05049   }
05050 
05051   if (p->running & MZTHREAD_USER_SUSPENDED) {
05052     p->suspended_box = NULL;
05053     if (p->resumed_box) {
05054       SCHEME_PTR2_VAL(p->resumed_box) = (Scheme_Object *)p;
05055       scheme_post_sema_all(SCHEME_PTR1_VAL(p->resumed_box));
05056     }
05057     
05058     p->running -= MZTHREAD_USER_SUSPENDED;
05059     
05060     scheme_weak_resume_thread(p);
05061 
05062     if (p->transitive_resumes)
05063       transitive_resume(p->transitive_resumes);
05064   }
05065 
05066   return scheme_void;
05067 }
05068 
05069 static Scheme_Object *make_thread_suspend(int argc, Scheme_Object *argv[])
05070 {
05071   Scheme_Thread *p;
05072 
05073   if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_type))
05074     scheme_wrong_type("thread-suspend-evt", "thread", 0, argc, argv);
05075 
05076   p = (Scheme_Thread *)argv[0];
05077 
05078   return scheme_get_thread_suspend(p);
05079 }
05080 
05081 Scheme_Object *scheme_get_thread_suspend(Scheme_Thread *p)
05082 {
05083   if (!p->suspended_box) {
05084     Scheme_Object *b;
05085     b = scheme_alloc_object();
05086     b->type = scheme_thread_suspend_type;
05087     if (MZTHREAD_STILL_RUNNING(p->running) && (p->running & MZTHREAD_USER_SUSPENDED))
05088       SCHEME_PTR2_VAL(b) = (Scheme_Object *)p;
05089     else {
05090       Scheme_Object *sema;
05091       sema = scheme_make_sema(0);
05092       SCHEME_PTR1_VAL(b) = sema;
05093     }
05094     p->suspended_box = b;
05095   }
05096 
05097   return p->suspended_box;
05098 }
05099 
05100 static Scheme_Object *make_thread_resume(int argc, Scheme_Object *argv[])
05101 {
05102   Scheme_Thread *p;
05103 
05104   if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_type))
05105     scheme_wrong_type("thread-resume-evt", "thread", 0, argc, argv);
05106 
05107   p = (Scheme_Thread *)argv[0];
05108 
05109   if (!p->resumed_box) {
05110     Scheme_Object *b;
05111     b = scheme_alloc_object();
05112     b->type = scheme_thread_resume_type;
05113     if (MZTHREAD_STILL_RUNNING(p->running) && !(p->running & MZTHREAD_USER_SUSPENDED))
05114       SCHEME_PTR2_VAL(b) = (Scheme_Object *)p;
05115     else {
05116       Scheme_Object *sema;
05117       sema = scheme_make_sema(0);
05118       SCHEME_PTR1_VAL(b) = sema;
05119     }
05120     p->resumed_box = b;
05121   }
05122 
05123   return p->resumed_box;
05124 }
05125 
05126 static int resume_suspend_ready(Scheme_Object *o, Scheme_Schedule_Info *sinfo)
05127 {
05128   Scheme_Object *t;
05129 
05130   t = SCHEME_PTR2_VAL(o);
05131   if (t) {
05132     scheme_set_sync_target(sinfo, o, t, NULL, 0, 0, NULL);
05133     return 1;
05134   }
05135 
05136   scheme_set_sync_target(sinfo, SCHEME_PTR1_VAL(o), o, NULL, 0, 1, NULL);
05137   return 0;
05138 }
05139 
05140 static Scheme_Object *make_thread_dead(int argc, Scheme_Object *argv[])
05141 {
05142   if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_type))
05143     scheme_wrong_type("thread-dead-evt", "thread", 0, argc, argv);
05144 
05145   return scheme_get_thread_dead((Scheme_Thread *)argv[0]);
05146 }
05147 
05148 Scheme_Object *scheme_get_thread_dead(Scheme_Thread *p)
05149 {
05150   if (!p->dead_box) {
05151     Scheme_Object *b;
05152     Scheme_Object *sema;
05153 
05154     b = scheme_alloc_small_object();
05155     b->type = scheme_thread_dead_type;
05156     sema = scheme_make_sema(0);
05157     SCHEME_PTR_VAL(b) = sema;
05158     if (!MZTHREAD_STILL_RUNNING(p->running))
05159       scheme_post_sema_all(sema);
05160 
05161     p->dead_box = b;
05162   }
05163 
05164   return p->dead_box;
05165 }
05166 
05167 static int dead_ready(Scheme_Object *o, Scheme_Schedule_Info *sinfo)
05168 {
05169   scheme_set_sync_target(sinfo, SCHEME_PTR_VAL(o), o, NULL, 0, 1, NULL);
05170   return 0;
05171 }
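/* [Editor's note -- illustrative sketch, not part of the original thread.c]
   The suspend/resume/dead events above share one encoding: once the event
   has already happened, the thread itself is stored (SCHEME_PTR2_VAL for
   suspend and resume events), otherwise a semaphore to be posted later is
   stored (SCHEME_PTR1_VAL, or SCHEME_PTR_VAL for dead events).  A minimal
   sketch of reading that encoding for a suspend or resume event;
   already_happened is a hypothetical helper name: */

static int already_happened(Scheme_Object *suspend_or_resume_evt)
{
  /* Non-NULL means the thread pointer was installed, so the event is
     ready without waiting; otherwise a waiter blocks on the semaphore
     in SCHEME_PTR1_VAL, as resume_suspend_ready() above arranges. */
  return (SCHEME_PTR2_VAL(suspend_or_resume_evt) != NULL);
}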
05172 
05173 /*========================================================================*/
05174 /*                              syncing                                   */
05175 /*========================================================================*/
05176 
05177 static void syncing_needs_wakeup(Scheme_Object *s, void *fds);
05178 
05179 typedef struct Evt {
05180   MZTAG_IF_REQUIRED
05181   Scheme_Type sync_type;
05182   Scheme_Ready_Fun_FPC ready;
05183   Scheme_Needs_Wakeup_Fun needs_wakeup;
05184   Scheme_Sync_Sema_Fun get_sema;
05185   Scheme_Sync_Filter_Fun filter;
05186   int can_redirect;
05187 } Evt;
05188 
05189 static THREAD_LOCAL int evts_array_size;
05190 static THREAD_LOCAL Evt **evts;
05191 
05192 void scheme_add_evt(Scheme_Type type,
05193                   Scheme_Ready_Fun ready, 
05194                   Scheme_Needs_Wakeup_Fun wakeup, 
05195                   Scheme_Sync_Filter_Fun filter,
05196                   int can_redirect)
05197 {
05198   Evt *naya;
05199 
05200   if (!evts) {
05201     REGISTER_SO(evts);
05202   }
05203 
05204   if (evts_array_size <= type) {
05205     Evt **nevts;
05206     int new_size;
05207     new_size = type + 1;
05208     if (new_size < _scheme_last_type_)
05209       new_size = _scheme_last_type_;
05210     nevts = MALLOC_N(Evt*, new_size);
05211     memcpy(nevts, evts, evts_array_size * sizeof(Evt*));
05212     evts = nevts;
05213     evts_array_size = new_size;
05214   }
05215 
05216   naya = MALLOC_ONE_RT(Evt);
05217 #ifdef MZTAG_REQUIRED
05218   naya->type = scheme_rt_evt;
05219 #endif
05220   naya->sync_type = type;
05221   naya->ready = (Scheme_Ready_Fun_FPC)ready;
05222   naya->needs_wakeup = wakeup;
05223   naya->filter = filter;
05224   naya->can_redirect = can_redirect;
05225 
05226   evts[type] = naya;
05227 }
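/* [Editor's note -- illustrative sketch, not part of the original thread.c]
   scheme_add_evt() is how a Scheme type is made syncable: it records a
   ready callback (and optionally wakeup/filter hooks) in the evts table
   consulted by find_evt().  A hedged sketch of registering a hypothetical
   type; my_evt_type and my_evt_ready are illustrative names, and the cast
   mirrors the Scheme_Ready_Fun_FPC cast performed inside scheme_add_evt(): */

static int my_evt_ready(Scheme_Object *o, Scheme_Schedule_Info *sinfo)
{
  return 0; /* never ready, in this sketch */
}

static void register_my_evt(Scheme_Type my_evt_type)
{
  scheme_add_evt(my_evt_type,
                 (Scheme_Ready_Fun)my_evt_ready,
                 NULL,  /* no fds to register for sleeping */
                 NULL,  /* no filter: every instance counts as an evt */
                 0);    /* cannot redirect to another target */
}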
05228 
05229 void scheme_add_evt_through_sema(Scheme_Type type,
05230                               Scheme_Sync_Sema_Fun get_sema, 
05231                               Scheme_Sync_Filter_Fun filter)
05232 {
05233   scheme_add_evt(type, NULL, NULL, filter, 0);
05234   evts[type]->get_sema = get_sema;
05235 }
05236 
05237 static Evt *find_evt(Scheme_Object *o)
05238 {
05239   Scheme_Type t;
05240   Evt *w;
05241 
05242   t = SCHEME_TYPE(o);
05243   w = evts[t];
05244   if (w) {
05245     if (w->filter) {
05246       Scheme_Sync_Filter_Fun filter;
05247       filter = w->filter;
05248       if (!filter(o))
05249        return NULL;
05250     }
05251     return w;
05252   }
05253 
05254   return NULL;
05255 }
05256 
05257 int scheme_is_evt(Scheme_Object *o)
05258 {
05259   if (SCHEME_EVTSETP(o))
05260     return 1;
05261 
05262   return !!find_evt(o);
05263 }
05264 
05265 static Syncing *make_syncing(Evt_Set *evt_set, float timeout, double start_time) 
05266 {
05267   Syncing *syncing;
05268   int pos;
05269 
05270   syncing = MALLOC_ONE_RT(Syncing);
05271 #ifdef MZTAG_REQUIRED
05272   syncing->type = scheme_rt_syncing;
05273 #endif
05274   syncing->set = evt_set;
05275   syncing->timeout = timeout;
05276   if (timeout >= 0)
05277     syncing->sleep_end = start_time + (timeout * 1000);
05278   else
05279     syncing->sleep_end = 0.0;
05280 
05281   if (evt_set->argc > 1) {
05282     Scheme_Config *config;
05283     Scheme_Object *rand_state;
05284     config = scheme_current_config();
05285     rand_state = scheme_get_param(config, MZCONFIG_SCHEDULER_RANDOM_STATE);
05286     pos = scheme_rand((Scheme_Random_State *)rand_state);
05287     syncing->start_pos = (pos % evt_set->argc);
05288   }
05289 
05290   return syncing;
05291 }
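/* [Editor's note] Worked example of the timeout arithmetic above: the
   timeout argument arrives in seconds, while sleep_end uses the same
   millisecond clock as scheme_get_inexact_milliseconds().  A 0.5 s timeout
   observed at start_time = 12000.0 ms gives
       sleep_end = 12000.0 + (0.5 * 1000) = 12500.0 ms,
   and a negative timeout leaves sleep_end at 0.0, meaning "no deadline". */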
05292 
05293 static void *splice_ptr_array(void **a, int al, void **b, int bl, int i)
05294 {
05295   void **r;
05296   int j;
05297   
05298   r = MALLOC_N(void*, al + bl - 1);
05299 
05300   if (a)
05301     memcpy(r, a, i * sizeof(void*));
05302   if (b)
05303     memcpy(r + i, b, bl * sizeof(void*));
05304   else {
05305     for (j = 0; j < bl; j++) {
05306       r[i+j] = a[i];
05307     }
05308   }
05309   if (a)
05310     memcpy(r + (i + bl), a + (i + 1), (al - i - 1) * sizeof(void*));
05311 
05312   return r;
05313 }
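/* [Editor's note] Worked example for splice_ptr_array(): the result has
   al + bl - 1 slots, with slot i of `a' replaced by the whole of `b'
   (or by bl copies of a[i] when b is NULL).  For a = {x0, x1, x2},
   b = {y0, y1}, i = 1, the spliced result is {x0, y0, y1, x2}. */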
05314 
05315 static void set_sync_target(Syncing *syncing, int i, Scheme_Object *target, 
05316                          Scheme_Object *wrap, Scheme_Object *nack, 
05317                          int repost, int retry, Scheme_Accept_Sync accept)
05318 /* Not ready, deferred to target. */
05319 {
05320   Evt_Set *evt_set = syncing->set;
05321 
05322   if (wrap) {
05323     if (!syncing->wrapss) {
05324       Scheme_Object **wrapss;
05325       wrapss = MALLOC_N(Scheme_Object*, evt_set->argc);
05326       syncing->wrapss = wrapss;
05327     }
05328     if (!syncing->wrapss[i])
05329       syncing->wrapss[i] = scheme_null;
05330     wrap = scheme_make_pair(wrap, syncing->wrapss[i]);
05331     syncing->wrapss[i] = wrap;
05332   }
05333 
05334   if (nack) {
05335     if (!syncing->nackss) {
05336       Scheme_Object **nackss;
05337       nackss = MALLOC_N(Scheme_Object*, evt_set->argc);
05338       syncing->nackss = nackss;
05339     }
05340     if (!syncing->nackss[i])
05341       syncing->nackss[i] = scheme_null;
05342     nack = scheme_make_pair(nack, syncing->nackss[i]);
05343     syncing->nackss[i] = nack;
05344   }
05345 
05346   if (repost) {
05347     if (!syncing->reposts) {
05348       char *s;
05349       s = (char *)scheme_malloc_atomic(evt_set->argc);
05350       memset(s, 0, evt_set->argc);
05351       syncing->reposts = s;
05352     }
05353     syncing->reposts[i] = 1;
05354   }
05355 
05356   if (accept) {
05357     if (!syncing->accepts) {
05358       Scheme_Accept_Sync *s;
05359       s = (Scheme_Accept_Sync *)scheme_malloc_atomic(sizeof(Scheme_Accept_Sync) * evt_set->argc);
05360       memset(s, 0, evt_set->argc * sizeof(Scheme_Accept_Sync));
05361       syncing->accepts = s;
05362     }
05363     syncing->accepts[i] = accept;
05364   }
05365 
05366   if (SCHEME_EVTSETP(target) && retry) {
05367     /* Flatten the set into this one */
05368     Evt_Set *wts = (Evt_Set *)target;
05369     if (wts->argc == 1) {
05370       /* 1 thing in set? Flattening is easy! */
05371       evt_set->argv[i] = wts->argv[0];
05372       evt_set->ws[i] = wts->ws[0];
05373     } else {
05374       /* Inline the set (in place) */
05375       Scheme_Object **argv;
05376       Evt **ws;
05377        
05378       argv = (Scheme_Object **)splice_ptr_array((void **)evt_set->argv, 
05379                                           evt_set->argc,
05380                                           (void **)wts->argv, 
05381                                           wts->argc,
05382                                           i);
05383       ws = (Evt **)splice_ptr_array((void **)evt_set->ws, 
05384                                     evt_set->argc,
05385                                     (void **)wts->ws, 
05386                                     wts->argc,
05387                                     i);
05388 
05389       evt_set->argv = argv;
05390       evt_set->ws = ws;
05391 
05392       if (syncing->wrapss) {
05393        argv = (Scheme_Object **)splice_ptr_array((void **)syncing->wrapss, 
05394                                             evt_set->argc,
05395                                             (void **)NULL,
05396                                             wts->argc,
05397                                             i);
05398        syncing->wrapss = argv;
05399       }
05400       if (syncing->nackss) {
05401        argv = (Scheme_Object **)splice_ptr_array((void **)syncing->nackss, 
05402                                             evt_set->argc,
05403                                             (void **)NULL,
05404                                             wts->argc,
05405                                             i);
05406        syncing->nackss = argv;
05407       }
05408       if (syncing->reposts) {
05409        char *s;
05410        int len;
05411        
05412        len = evt_set->argc + wts->argc - 1;
05413        
05414        s = (char *)scheme_malloc_atomic(len);
05415        memset(s, 0, len);
05416        
05417        memcpy(s, syncing->reposts, i);
05418        memcpy(s + i + wts->argc, syncing->reposts + i + 1, evt_set->argc - i - 1);
05419        syncing->reposts = s;
05420       }
05421       if (syncing->accepts) {
05422        Scheme_Accept_Sync *s;
05423        int len;
05424        
05425        len = evt_set->argc + wts->argc - 1;
05426        
05427        s = (Scheme_Accept_Sync *)scheme_malloc_atomic(len * sizeof(Scheme_Accept_Sync));
05428        memset(s, 0, len * sizeof(Scheme_Accept_Sync));
05429        
05430        memcpy(s, syncing->accepts, i * sizeof(Scheme_Accept_Sync));
05431        memcpy(s + i + wts->argc, syncing->accepts + i + 1, (evt_set->argc - i - 1) * sizeof(Scheme_Accept_Sync));
05432        syncing->accepts = s;
05433       }
05434 
05435       evt_set->argc += (wts->argc - 1);
05436 
05437       /* scheme_channel_syncer_type needs to know its location, which
05438         might have changed: */
05439       argv = evt_set->argv;
05440       for (i = evt_set->argc; i--; ) {
05441        if (SAME_TYPE(SCHEME_TYPE(argv[i]), scheme_channel_syncer_type)) {
05442          ((Scheme_Channel_Syncer *)argv[i])->syncing_i = i;
05443        }
05444       }
05445 
05446     }
05447   } else {
05448     Evt *ww;
05449     evt_set->argv[i] = target;
05450     ww = find_evt(target);
05451     evt_set->ws[i] = ww;
05452   }
05453 }
05454 
05455 void scheme_set_sync_target(Scheme_Schedule_Info *sinfo, Scheme_Object *target, 
05456                          Scheme_Object *wrap, Scheme_Object *nack, 
05457                          int repost, int retry, Scheme_Accept_Sync accept)
05458 {
05459   set_sync_target((Syncing *)sinfo->current_syncing, sinfo->w_i,
05460                 target, wrap, nack, repost, retry, accept);
05461   if (retry) {
05462     /* Rewind one step to try new ones (or continue
05463        if the set was empty). */
05464     sinfo->w_i--;
05465   }
05466 }
05467 
05468 static int syncing_ready(Scheme_Object *s, Scheme_Schedule_Info *sinfo)
05469 {
05470   int i, redirections = 0, all_semas = 1, j, result = 0;
05471   Evt *w;
05472   Scheme_Object *o;
05473   Scheme_Schedule_Info r_sinfo;
05474   Syncing *syncing = (Syncing *)s;
05475   Evt_Set *evt_set;
05476   int is_poll;
05477   double sleep_end;
05478   
05479   sleep_end = syncing->sleep_end;
05480 
05481   if (syncing->result) {
05482     result = 1;
05483     goto set_sleep_end_and_return;
05484   }
05485 
05486   /* We must handle target redirections in the objects on which we're
05487      syncing. We never have to redirect the evt_set itself, but
05488      an evt_set can show up as a target, and we inline it in
05489      that case. */
05490 
05491   evt_set = syncing->set;
05492 
05493   is_poll = (syncing->timeout == 0.0);
05494 
05495   /* Anything ready? */
05496   for (j = 0; j < evt_set->argc; j++) {
05497     Scheme_Ready_Fun_FPC ready;
05498 
05499     i = (j + syncing->start_pos) % evt_set->argc;
05500 
05501     o = evt_set->argv[i];
05502     w = evt_set->ws[i];
05503     ready = w->ready;
05504 
05505     if (!SCHEME_SEMAP(o)
05506        && !SCHEME_CHANNELP(o) && !SCHEME_CHANNEL_PUTP(o)
05507        && !SAME_TYPE(SCHEME_TYPE(o), scheme_channel_syncer_type)
05508        && !SAME_TYPE(SCHEME_TYPE(o), scheme_never_evt_type))
05509       all_semas = 0;
05510 
05511     if (ready) {
05512       int yep;
05513 
05514       init_schedule_info(&r_sinfo, sinfo->false_positive_ok, sleep_end);
05515 
05516       r_sinfo.current_syncing = (Scheme_Object *)syncing;
05517       r_sinfo.w_i = i;
05518       r_sinfo.is_poll = is_poll;
05519 
05520       yep = ready(o, &r_sinfo);
05521 
05522       sleep_end = r_sinfo.sleep_end;
05523 
05524       if ((i > r_sinfo.w_i) && sinfo->false_positive_ok) {
05525        /* There was a redirection. Assert: !yep. 
05526           Give up if we've chained too much. */
05527        redirections++;
05528        if (redirections > 10) {
05529          sinfo->potentially_false_positive = 1;
05530          result = 1;
05531          goto set_sleep_end_and_return;
05532        }
05533       }
05534 
05535       j += (r_sinfo.w_i - i);
05536 
05537       if (yep) {
05538       /* If it was a potentially false positive,
05539          don't set the result permanently; just
05540          propagate the false-positive indicator. */
05541        if (!r_sinfo.potentially_false_positive) {
05542          syncing->result = i + 1;
05543          if (syncing->disable_break)
05544            syncing->disable_break->suspend_break++;
05545          if (syncing->reposts && syncing->reposts[i])
05546            scheme_post_sema(o);
05547           if (syncing->accepts && syncing->accepts[i])
05548             scheme_accept_sync(syncing, i);
05549          scheme_post_syncing_nacks(syncing);
05550          result = 1;
05551          goto set_sleep_end_and_return;
05552        } else {
05553          sinfo->potentially_false_positive = 1;
05554          result = 1;
05555          goto set_sleep_end_and_return;
05556        }
05557       } else if (r_sinfo.spin) {
05558        sinfo->spin = 1;
05559       }
05560     } else if (w->get_sema) {
05561       int repost = 0;
05562       Scheme_Sync_Sema_Fun get_sema = w->get_sema;
05563       Scheme_Object *sema;
05564       
05565       sema = get_sema(o, &repost);
05566       set_sync_target(syncing, i, sema, o, NULL, repost, 1, NULL);
05567       j--; /* try again with this sema */
05568     }
05569   }
05570 
05571   if (syncing->timeout >= 0.0) {
05572     if (syncing->sleep_end <= scheme_get_inexact_milliseconds())
05573       result = 1;
05574   } else if (all_semas) {
05575     /* Try to block in a GCable way: */
05576     if (sinfo->false_positive_ok) {
05577       /* In scheduler. Swap us in so we can suspend. */
05578       sinfo->potentially_false_positive = 1;
05579       result = 1;
05580     } else {
05581       /* Not in scheduler --- we're allowed to block via suspend,
05582         which makes the thread GCable. */
05583       scheme_wait_semas_chs(syncing->set->argc, syncing->set->argv, 0, syncing);
05584 
05585       /* In case a break appeared after we chose something,
05586         check for a break, because scheme_wait_semas_chs() won't: */
05587       scheme_check_break_now();
05588 
05589       result = 1;
05590     }
05591   }
05592 
05593  set_sleep_end_and_return:
05594 
05595   syncing->sleep_end = sleep_end;
05596   if (syncing->sleep_end
05597       && (!sinfo->sleep_end
05598          || (sinfo->sleep_end > syncing->sleep_end)))
05599     sinfo->sleep_end = syncing->sleep_end;
05600 
05601   return result;
05602 }
05603 
05604 void scheme_accept_sync(Syncing *syncing, int i)
05605 {
05606   /* run atomic accept action to revise the wrap */
05607   Scheme_Accept_Sync accept;
05608   Scheme_Object *v, *pr;
05609   
05610   accept = syncing->accepts[i];
05611   syncing->accepts[i] = NULL;
05612   pr = syncing->wrapss[i];
05613   
05614   v = SCHEME_CAR(pr);
05615   pr = SCHEME_CDR(pr);
05616   
05617   v = accept(v);
05618   
05619   pr = scheme_make_pair(v, pr);
05620   syncing->wrapss[i] = pr;
05621 }
05622 
05623 static void syncing_needs_wakeup(Scheme_Object *s, void *fds)
05624 {
05625   int i;
05626   Scheme_Object *o;
05627   Evt *w;
05628   Evt_Set *evt_set = ((Syncing *)s)->set;
05629 
05630   for (i = 0; i < evt_set->argc; i++) {
05631     o = evt_set->argv[i];
05632     w = evt_set->ws[i];
05633 
05634     if (w->needs_wakeup) {
05635       Scheme_Needs_Wakeup_Fun nw = w->needs_wakeup;
05636       
05637       nw(o, fds);
05638     }
05639   }
05640 }
05641 
05642 static Scheme_Object *evt_p(int argc, Scheme_Object *argv[])
05643 {
05644   return (scheme_is_evt(argv[0])
05645          ? scheme_true
05646          : scheme_false);
05647 }
05648 
05649 Evt_Set *make_evt_set(const char *name, int argc, Scheme_Object **argv, int delta)
05650 {
05651   Evt *w, **iws, **ws;
05652   Evt_Set *evt_set, *subset;
05653   Scheme_Object **args;
05654   int i, j, count = 0, reuse = 1;
05655 
05656   iws = MALLOC_N(Evt*, argc-delta);
05657   
05658   /* Find Evt record for each non-set argument, and compute flattened size. */
05659   for (i = 0; i < (argc - delta); i++) {
05660     if (!SCHEME_EVTSETP(argv[i+delta])) {
05661       w = find_evt(argv[i+delta]);
05662       if (!w) {
05663        scheme_wrong_type(name, "evt", i+delta, argc, argv);
05664        return NULL;
05665       }
05666       iws[i] = w;
05667       count++;
05668     } else {
05669       int n;
05670       n = ((Evt_Set *)argv[i+delta])->argc;
05671       if (n != 1)
05672         reuse = 0;
05673       count += n;
05674     }
05675   }
05676 
05677   evt_set = MALLOC_ONE_TAGGED(Evt_Set);
05678   evt_set->so.type = scheme_evt_set_type;
05679   evt_set->argc = count;
05680 
05681   if (reuse && (count == (argc - delta)))
05682     ws = iws;
05683   else
05684     ws = MALLOC_N(Evt*, count);
05685 
05686   args = MALLOC_N(Scheme_Object*, count);
05687   for (i = delta, j = 0; i < argc; i++, j++) {
05688     if (SCHEME_EVTSETP(argv[i])) {
05689       int k, n;
05690       subset = (Evt_Set *)argv[i];
05691       n = subset->argc;
05692       for (k = 0; k < n; k++, j++) {
05693        args[j] = subset->argv[k];
05694        ws[j] = subset->ws[k];
05695       }
05696       --j;
05697     } else {
05698       ws[j] = iws[i-delta];
05699       args[j] = argv[i];
05700     }
05701   }
05702 
05703   evt_set->ws = ws;
05704   evt_set->argv = args;
05705 
05706   return evt_set;
05707 }
05708 
05709 Scheme_Object *scheme_make_evt_set(int argc, Scheme_Object **argv)
05710 {
05711   return (Scheme_Object *)make_evt_set("internal-make-evt-set", argc, argv, 0);
05712 }
05713 
05714 void scheme_post_syncing_nacks(Syncing *syncing)
05715      /* Also removes channel-syncers. Can be called multiple times. */
05716 {
05717   int i, c;
05718   Scheme_Object *l;
05719 
05720   if (syncing->set) {
05721     c = syncing->set->argc;
05722     
05723     for (i = 0; i < c; i++) {
05724       if (SAME_TYPE(SCHEME_TYPE(syncing->set->argv[i]), scheme_channel_syncer_type))
05725        scheme_get_outof_line((Scheme_Channel_Syncer *)syncing->set->argv[i]);
05726       if (syncing->nackss) {
05727        if ((i + 1) != syncing->result) {
05728          l = syncing->nackss[i];
05729          if (l) {
05730            for (; SCHEME_PAIRP(l); l = SCHEME_CDR(l)) {
05731              scheme_post_sema_all(SCHEME_CAR(l));
05732            }
05733          }
05734          syncing->nackss[i] = NULL;
05735        }
05736       }
05737     }
05738   }
05739 }
05740 
05741 static Scheme_Object *do_sync(const char *name, int argc, Scheme_Object *argv[], 
05742                            int with_break, int with_timeout, int _tailok)
05743 {
05744   volatile int tailok = _tailok;
05745   Evt_Set * volatile evt_set;
05746   Syncing * volatile syncing;
05747   volatile float timeout = -1.0;
05748   double start_time;
05749   Scheme_Cont_Frame_Data cframe;
05750 
05751   if (with_timeout) {
05752     if (!SCHEME_FALSEP(argv[0])) {
05753       if (SCHEME_REALP(argv[0]))
05754        timeout = (float)scheme_real_to_double(argv[0]);
05755       
05756       if (timeout < 0.0) {
05757        scheme_wrong_type(name, "non-negative real number", 0, argc, argv);
05758        return NULL;
05759       }
05760       
05761       start_time = scheme_get_inexact_milliseconds();
05762     } else
05763       start_time = 0;
05764   } else {
05765     start_time = 0;
05766   }
05767 
05768   /* Special case: no timeout, only object is a semaphore */
05769   if (argc == (with_timeout + 1) && !start_time && SCHEME_SEMAP(argv[with_timeout])) {
05770     scheme_wait_sema(argv[with_timeout], with_break ? -1 : 0);
05771     return argv[with_timeout];
05772   }
05773 
05774   evt_set = NULL;
05775 
05776   /* Special case: only argument is an immutable evt set: */
05777   if ((argc == (with_timeout + 1)) && SCHEME_EVTSETP(argv[with_timeout])) {
05778     int i;
05779     evt_set = (Evt_Set *)argv[with_timeout];
05780     for (i = evt_set->argc; i--; ) {
05781       if (evt_set->ws[i]->can_redirect) {
05782        /* Need to copy this set to handle redirections. */
05783        evt_set = NULL;
05784        break;
05785       }
05786     }
05787   }
05788 
05789   if (!evt_set)
05790     evt_set = make_evt_set(name, argc, argv, with_timeout);
05791 
05792   if (with_break) {
05793     scheme_push_break_enable(&cframe, 1, 1);
05794   }
05795 
05796   /* Check for another special case: syncing on a set of semaphores
05797      without a timeout. Use general code for channels.
05798      (Note that we check for this case after evt-set flattening.) */
05799   if (timeout < 0.0) {
05800     int i;
05801     for (i = evt_set->argc; i--; ) {
05802       if (!SCHEME_SEMAP(evt_set->argv[i]))
05803        break;
05804     }
05805     if (i < 0) {
05806       /* Hit the special case. */
05807       i = scheme_wait_semas_chs(evt_set->argc, evt_set->argv, 0, NULL);
05808 
05809       if (with_break) {
05810        scheme_pop_break_enable(&cframe, 1);
05811       } else {
05812        /* In case a break appeared after we received a post,
05813           check for a break, because scheme_wait_semas_chs() won't: */
05814        scheme_check_break_now();
05815       }
05816 
05817       if (i)
05818        return evt_set->argv[i - 1];
05819       else
05820        return (tailok ? scheme_false : NULL);
05821     }
05822   }
05823 
05824   syncing = make_syncing(evt_set, timeout, start_time);
05825 
05826   if (timeout < 0.0)
05827     timeout = 0.0; /* means "no timeout" to block_until */
05828 
05829   if (with_break) {
05830     /* Suspend breaks when something is selected. */
05831     syncing->disable_break = scheme_current_thread;
05832   }
05833 
05834   BEGIN_ESCAPEABLE(scheme_post_syncing_nacks, syncing);
05835   scheme_block_until((Scheme_Ready_Fun)syncing_ready, syncing_needs_wakeup, 
05836                    (Scheme_Object *)syncing, timeout);
05837   END_ESCAPEABLE();
05838 
05839   if (!syncing->result)
05840     scheme_post_syncing_nacks(syncing);
05841 
05842   if (with_break) {
05843     scheme_pop_break_enable(&cframe, 0);
05844   }
05845 
05846   if (with_break) {
05847     /* Reverse low-level break disable: */
05848     --syncing->disable_break->suspend_break;
05849   }
05850 
05851   if (syncing->result) {
05852     /* Apply wrap functions to the selected evt: */
05853     Scheme_Object *o, *l, *a, *to_call = NULL, *args[1];
05854     int to_call_is_cont = 0;
05855 
05856     o = evt_set->argv[syncing->result - 1];
05857     if (SAME_TYPE(SCHEME_TYPE(o), scheme_channel_syncer_type)) {
05858       /* This is a put that got changed to a syncer, but not changed back */
05859       o = ((Scheme_Channel_Syncer *)o)->obj;
05860     }
05861     if (syncing->wrapss) {
05862       l = syncing->wrapss[syncing->result - 1];
05863       if (l) {
05864        for (; SCHEME_PAIRP(l); l = SCHEME_CDR(l)) {
05865          a = SCHEME_CAR(l);
05866          if (to_call) {
05867            args[0] = o;
05868            
05869            /* Call wrap proc with breaks disabled */
05870            scheme_push_break_enable(&cframe, 0, 0);
05871            
05872            o = scheme_apply(to_call, 1, args);
05873            
05874            scheme_pop_break_enable(&cframe, 0);
05875 
05876            to_call = NULL;
05877          }
05878          if (SCHEME_BOXP(a) || SCHEME_PROCP(a)) {
05879            if (SCHEME_BOXP(a)) {
05880              a = SCHEME_BOX_VAL(a);
05881              to_call_is_cont = 1;
05882            }
05883            to_call = a;
05884          } else if (SAME_TYPE(scheme_thread_suspend_type, SCHEME_TYPE(a))
05885                    || SAME_TYPE(scheme_thread_resume_type, SCHEME_TYPE(a)))
05886            o = SCHEME_PTR2_VAL(a);
05887          else
05888            o = a;
05889        }
05890 
05891        if (to_call) {
05892          args[0] = o;
05893          
05894          /* If to_call is still a wrap-evt (not a cont-evt),
05895             then set the config one more time: */
05896          if (!to_call_is_cont) {
05897            scheme_push_break_enable(&cframe, 0, 0);
05898            tailok = 0;
05899          }
05900 
05901          if (tailok) {
05902            return _scheme_tail_apply(to_call, 1, args);
05903          } else {
05904            o = scheme_apply(to_call, 1, args);
05905            if (!to_call_is_cont)
05906              scheme_pop_break_enable(&cframe, 1);
05907            return o;
05908          }
05909        }
05910       }
05911     }
05912     return o;
05913   } else if (tailok)
05914     return scheme_false;
05915   else
05916     return NULL;
05917 }
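/* [Editor's note -- illustrative sketch, not part of the original thread.c]
   A hedged sketch of driving the sync machinery from C through the entry
   points above.  wait_on_two_semas is a hypothetical helper; the semaphores
   are assumed to come from scheme_make_sema().  With scheme_sync_timeout(),
   argv[0] is the timeout in seconds and a NULL result means the timeout
   expired before either event became ready. */

static Scheme_Object *wait_on_two_semas(Scheme_Object *s1, Scheme_Object *s2)
{
  Scheme_Object *argv[3];

  argv[0] = scheme_make_integer(5); /* timeout: 5 seconds */
  argv[1] = s1;
  argv[2] = s2;

  /* Returns the event that was chosen (one of the semaphores here),
     or NULL if nothing became ready within the timeout. */
  return scheme_sync_timeout(3, argv);
}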
05918 
05919 static Scheme_Object *sch_sync(int argc, Scheme_Object *argv[])
05920 {
05921   return do_sync("sync", argc, argv, 0, 0, 1);
05922 }
05923 
05924 static Scheme_Object *sch_sync_timeout(int argc, Scheme_Object *argv[])
05925 {
05926   return do_sync("sync/timeout", argc, argv, 0, 1, 1);
05927 }
05928 
05929 Scheme_Object *scheme_sync(int argc, Scheme_Object *argv[])
05930 {
05931   return do_sync("sync", argc, argv, 0, 0, 0);
05932 }
05933 
05934 Scheme_Object *scheme_sync_timeout(int argc, Scheme_Object *argv[])
05935 {
05936   return do_sync("sync/timeout", argc, argv, 0, 1, 0);
05937 }
05938 
05939 static Scheme_Object *do_scheme_sync_enable_break(const char *who, int with_timeout, int tailok, int argc, Scheme_Object *argv[])
05940 {
05941   if (argc == 2 && SCHEME_FALSEP(argv[0]) && SCHEME_SEMAP(argv[1])) {
05942     scheme_wait_sema(argv[1], -1);
05943     return scheme_void;
05944   }
05945 
05946   return do_sync(who, argc, argv, 1, with_timeout, tailok);
05947 }
05948 
05949 Scheme_Object *scheme_sync_enable_break(int argc, Scheme_Object *argv[])
05950 {
05951   return do_scheme_sync_enable_break("sync/enable-break", 0, 0, argc, argv);
05952 }
05953 
05954 static Scheme_Object *sch_sync_enable_break(int argc, Scheme_Object *argv[])
05955 {
05956   return do_scheme_sync_enable_break("sync/enable-break", 0, 1, argc, argv);
05957 }
05958 
05959 static Scheme_Object *sch_sync_timeout_enable_break(int argc, Scheme_Object *argv[])
05960 {
05961   return do_scheme_sync_enable_break("sync/timeout/enable-break", 1, 1, argc, argv);
05962 }
05963 
05964 static Scheme_Object *evts_to_evt(int argc, Scheme_Object *argv[])
05965 {
05966   return (Scheme_Object *)make_evt_set("choice-evt", argc, argv, 0);
05967 }
05968 
05969 /*========================================================================*/
05970 /*                             thread cells                               */
05971 /*========================================================================*/
05972 
05973 #define SCHEME_THREAD_CELLP(x) (SAME_TYPE(SCHEME_TYPE(x), scheme_thread_cell_type))
05974 
05975 Scheme_Object *scheme_make_thread_cell(Scheme_Object *def_val, int inherited)
05976 {
05977   Thread_Cell *c;
05978 
05979   c = MALLOC_ONE_TAGGED(Thread_Cell);
05980   c->so.type = scheme_thread_cell_type;
05981   c->def_val = def_val;
05982   c->inherited = !!inherited;
05983 
05984   return (Scheme_Object *)c;
05985 }
05986 
05987 Scheme_Object *scheme_thread_cell_get(Scheme_Object *cell, Scheme_Thread_Cell_Table *cells)
05988 {
05989   Scheme_Object *v;
05990 
05991   if (((Thread_Cell *)cell)->assigned) {
05992     v = scheme_lookup_in_table(cells, (const char *)cell);
05993     if (v)
05994       return scheme_ephemeron_value(v);
05995   }
05996 
05997   return ((Thread_Cell *)cell)->def_val;
05998 }
05999 
06000 void scheme_thread_cell_set(Scheme_Object *cell, Scheme_Thread_Cell_Table *cells, Scheme_Object *v)
06001 {
06002   if (!((Thread_Cell *)cell)->assigned)
06003     ((Thread_Cell *)cell)->assigned = 1;
06004   v = scheme_make_ephemeron(cell, v);
06005   scheme_add_to_table(cells, (const char *)cell, (void *)v, 0);
06006 }
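/* [Editor's note -- illustrative sketch, not part of the original thread.c]
   A minimal sketch of the C-level thread-cell API defined above: the cell's
   default value is returned until a per-thread value has been installed in
   the reading thread's cell_values table. */

static void thread_cell_demo(void)
{
  Scheme_Object *cell, *v;
  Scheme_Thread_Cell_Table *cells = scheme_current_thread->cell_values;

  cell = scheme_make_thread_cell(scheme_false, 1); /* default #f, inherited */

  v = scheme_thread_cell_get(cell, cells);       /* => scheme_false (default)  */
  scheme_thread_cell_set(cell, cells, scheme_true);
  v = scheme_thread_cell_get(cell, cells);       /* => scheme_true (per-thread) */
  (void)v;
}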
06007 
06008 static Scheme_Thread_Cell_Table *inherit_cells(Scheme_Thread_Cell_Table *cells,
06009                                           Scheme_Thread_Cell_Table *t,
06010                                           int inherited)
06011 {
06012   Scheme_Bucket *bucket;
06013   Scheme_Object *cell, *v;
06014   int i;
06015 
06016   if (!cells)
06017     cells = scheme_current_thread->cell_values;
06018   
06019   if (!t)
06020     t = scheme_make_bucket_table(20, SCHEME_hash_weak_ptr);
06021   
06022   for (i = cells->size; i--; ) {
06023     bucket = cells->buckets[i];
06024     if (bucket && bucket->val && bucket->key) {
06025       cell = (Scheme_Object *)HT_EXTRACT_WEAK(bucket->key);
06026       if (cell && (((Thread_Cell *)cell)->inherited == inherited)) {
06027        v = (Scheme_Object *)bucket->val;
06028        scheme_add_to_table(t, (char *)cell, v, 0);
06029       }
06030     }
06031   }
06032 
06033   return t;
06034 }
06035 
06036 Scheme_Thread_Cell_Table *scheme_inherit_cells(Scheme_Thread_Cell_Table *cells)
06037 {
06038   return inherit_cells(cells, NULL, 1);
06039 }
06040 
06041 static Scheme_Object *thread_cell_values(int argc, Scheme_Object *argv[])
06042 {
06043   if (argc == 1) {
06044     Scheme_Thread_Cell_Table *naya;
06045 
06046     if (!SAME_TYPE(scheme_thread_cell_values_type, SCHEME_TYPE(argv[0]))) {
06047       scheme_wrong_type("current-preserved-thread-cell-values", "thread cell values", 0, argc, argv);
06048       return NULL;
06049     }
06050 
06051     naya = inherit_cells(NULL, NULL, 0);
06052     inherit_cells((Scheme_Thread_Cell_Table *)SCHEME_PTR_VAL(argv[0]), naya, 1);
06053 
06054     scheme_current_thread->cell_values = naya;
06055 
06056     return scheme_void;
06057   } else {
06058     Scheme_Object *o, *ht;
06059 
06060     ht = (Scheme_Object *)inherit_cells(NULL, NULL, 1);
06061 
06062     o = scheme_alloc_small_object();
06063     o->type = scheme_thread_cell_values_type;
06064     SCHEME_PTR_VAL(o) = ht;
06065 
06066     return o;
06067   }
06068 }
06069 
06070 static Scheme_Object *make_thread_cell(int argc, Scheme_Object *argv[])
06071 {
06072   return scheme_make_thread_cell(argv[0], (argc > 1) && SCHEME_TRUEP(argv[1]));
06073 }
06074 
06075 static Scheme_Object *thread_cell_p(int argc, Scheme_Object *argv[])
06076 {
06077   return (SCHEME_THREAD_CELLP(argv[0])
06078          ? scheme_true
06079          : scheme_false);
06080 }
06081 
06082 static Scheme_Object *thread_cell_get(int argc, Scheme_Object *argv[])
06083 {
06084   if (!SCHEME_THREAD_CELLP(argv[0]))
06085     scheme_wrong_type("thread-cell-ref", "thread cell", 0, argc, argv);
06086   return scheme_thread_cell_get(argv[0], scheme_current_thread->cell_values);
06087 }
06088 
06089 static Scheme_Object *thread_cell_set(int argc, Scheme_Object *argv[])
06090 {
06091   if (!SCHEME_THREAD_CELLP(argv[0]))
06092     scheme_wrong_type("thread-cell-set!", "thread cell", 0, argc, argv);
06093   scheme_thread_cell_set(argv[0], scheme_current_thread->cell_values, argv[1]);
06094   return scheme_void;
06095 }
06096 
06097 
06098 /*========================================================================*/
06099 /*                              parameters                                */
06100 /*========================================================================*/
06101 
06102 static int max_configs = __MZCONFIG_BUILTIN_COUNT__;
06103 static Scheme_Object *do_param(void *data, int argc, Scheme_Object *argv[]);
06104 
06105 Scheme_Config *scheme_current_config()
06106 {
06107   Scheme_Object *v;
06108 
06109   v = scheme_extract_one_cc_mark(NULL, scheme_parameterization_key);
06110 
06111   if (!SAME_TYPE(scheme_config_type, SCHEME_TYPE(v))) {
06112     /* Someone has grabbed parameterization-key out of #%paramz
06113        and misused it.
06114        Printing an error message requires consulting parameters,
06115        so just escape. */
06116     scheme_longjmp(scheme_error_buf, 1);
06117   }
06118 
06119   return (Scheme_Config *)v;
06120 }
06121 
06122 static Scheme_Config *do_extend_config(Scheme_Config *c, Scheme_Object *key, Scheme_Object *cell)
06123 {
06124   Scheme_Config *naya;
06125 
06126   /* In principle, the key+cell link should be weak, but it's
06127      difficult to imagine a parameter being GC'ed while an active
06128      `parameterize' is still on the stack (or, at least, difficult to
06129      imagine that it matters). */
06130 
06131   if (c->depth > 50)
06132     scheme_flatten_config(c);
06133 
06134   naya = MALLOC_ONE_TAGGED(Scheme_Config);
06135   naya->so.type = scheme_config_type;
06136   naya->depth = c->depth + 1;
06137   naya->key = key;
06138   naya->cell = cell; /* could be just a value */
06139   naya->next = c;
06140 
06141   return naya;
06142 }
06143 
06144 Scheme_Config *scheme_extend_config(Scheme_Config *c, int pos, Scheme_Object *init_val)
06145 {
06146   return do_extend_config(c, scheme_make_integer(pos), init_val);
06147 }
06148 
06149 void scheme_install_config(Scheme_Config *config)
06150 {
06151   scheme_set_cont_mark(scheme_parameterization_key, (Scheme_Object *)config);
06152 }
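/* [Editor's note -- illustrative sketch, not part of the original thread.c]
   A hedged sketch of what a `parameterize'-style extension amounts to at
   this level: extend the current configuration with one key/value pair and
   install the extension as the parameterization continuation mark.  `pos'
   stands for any valid built-in parameter index; install_one_param_value is
   a hypothetical helper name. */

static void install_one_param_value(int pos, Scheme_Object *val)
{
  Scheme_Config *c;

  c = scheme_extend_config(scheme_current_config(), pos, val);
  scheme_install_config(c);
}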
06153 
06154 #ifdef MZTAG_REQUIRED
06155 # define IS_VECTOR(c) SCHEME_VECTORP((c)->content)
06156 #else
06157 # define IS_VECTOR(c) (!(c)->is_param)
06158 #endif
06159 
06160 Scheme_Object *find_param_cell(Scheme_Config *c, Scheme_Object *k, int force_cell)
06161      /* Unless force_cell, the result may actually be a value, if there has been
06162        no reason to set it before */
06163 {
06164   while (1) {
06165     if (SAME_OBJ(c->key, k)) {
06166       if (force_cell && !SCHEME_THREAD_CELLP(c->cell)) {
06167        Scheme_Object *cell;
06168        cell = scheme_make_thread_cell(c->cell, 1);
06169        c->cell = cell;
06170       }
06171       return c->cell;
06172     } else if (!c->next) {
06173       /* Eventually bottoms out here */
06174       Scheme_Parameterization *p = (Scheme_Parameterization *)c->cell;
06175       if (SCHEME_INTP(k))
06176        return p->prims[SCHEME_INT_VAL(k)];
06177       else {
06178        if (p->extensions)
06179          return scheme_lookup_in_table(p->extensions, (const char *)k);
06180        else
06181          return NULL;
06182       }
06183     } else
06184       c = c->next;
06185   }
06186 }
06187 
06188 Scheme_Object *scheme_get_thread_param(Scheme_Config *c, Scheme_Thread_Cell_Table *cells, int pos)
06189 {
06190   Scheme_Object *cell;
06191 
06192   cell = find_param_cell(c, scheme_make_integer(pos), 0);
06193   if (SCHEME_THREAD_CELLP(cell))
06194     return scheme_thread_cell_get(cell, cells);
06195   return cell;
06196 }
06197 
06198 Scheme_Object *scheme_get_param(Scheme_Config *c, int pos)
06199 {
06200   return scheme_get_thread_param(c, scheme_current_thread->cell_values, pos);
06201 }
06202 
06203 void scheme_set_thread_param(Scheme_Config *c, Scheme_Thread_Cell_Table *cells, int pos, Scheme_Object *o)
06204 {
06205   scheme_thread_cell_set(find_param_cell(c, scheme_make_integer(pos), 1), cells, o);
06206 }
06207 
06208 void scheme_set_param(Scheme_Config *c, int pos, Scheme_Object *o)
06209 {
06210   scheme_thread_cell_set(find_param_cell(c, scheme_make_integer(pos), 1), 
06211                       scheme_current_thread->cell_values, o);
06212 }
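/* [Editor's note -- illustrative sketch, not part of the original thread.c]
   A minimal sketch of reading and writing a built-in parameter through the
   accessors above.  MZCONFIG_ERROR_PRINT_WIDTH is used only because it
   appears in make_initial_config() below; an embedding would obtain new
   indices from scheme_new_param() before the initial configuration is
   built. */

static void param_demo(void)
{
  Scheme_Config *config = scheme_current_config();
  Scheme_Object *width;

  width = scheme_get_param(config, MZCONFIG_ERROR_PRINT_WIDTH);
  scheme_set_param(config, MZCONFIG_ERROR_PRINT_WIDTH, scheme_make_integer(512));
  (void)width;
}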
06213 
06214 void scheme_flatten_config(Scheme_Config *orig_c)
06215 {
06216   int pos, i;
06217   Scheme_Parameterization *paramz, *paramz2;
06218   Scheme_Object *key;
06219   Scheme_Bucket *b, *b2;
06220   Scheme_Config *c;
06221 
06222   if (orig_c->next) {
06223     paramz = (Scheme_Parameterization *)scheme_malloc_tagged(sizeof(Scheme_Parameterization) + 
06224                                                       (max_configs - 1) * sizeof(Scheme_Object*));
06225 #ifdef MZTAG_REQUIRED
06226     paramz->type = scheme_rt_parameterization;
06227 #endif
06228     
06229     c = orig_c;
06230     while (1) {
06231       if (c->key) {
06232        if (SCHEME_INTP(c->key)) {
06233          pos = SCHEME_INT_VAL(c->key);
06234          if (!paramz->prims[pos]) {
06235            if (!SCHEME_THREAD_CELLP(c->cell)) {
06236              Scheme_Object *cell;
06237              cell = scheme_make_thread_cell(c->cell, 1);
06238              c->cell = cell;
06239            }
06240            paramz->prims[pos] = c->cell;
06241          }
06242        } else {
06243          if (!paramz->extensions) {
06244            Scheme_Bucket_Table *t;
06245            t = scheme_make_bucket_table(20, SCHEME_hash_weak_ptr);
06246            paramz->extensions = t;
06247          }
06248          b = scheme_bucket_from_table(paramz->extensions, (const char *)c->key);
06249          if (!b->val) {
06250            if (!SCHEME_THREAD_CELLP(c->cell)) {
06251              Scheme_Object *cell;
06252              cell = scheme_make_thread_cell(c->cell, 1);
06253              c->cell = cell;
06254            }
06255            b->val = c->cell;
06256          }
06257        }
06258        c = c->next;
06259       } else {
06260        paramz2 = (Scheme_Parameterization *)c->cell;
06261 
06262        for (i = 0; i < max_configs; i++) {
06263          if (!paramz->prims[i])
06264            paramz->prims[i] = paramz2->prims[i];
06265        }
06266 
06267        if (paramz2->extensions) {
06268          if (!paramz->extensions) {
06269            /* Re-use the old hash table */
06270            paramz->extensions = paramz2->extensions;
06271          } else {
06272            for (i = paramz2->extensions->size; i--; ) {
06273              b = paramz2->extensions->buckets[i];
06274              if (b && b->val && b->key) {
06275               key = (Scheme_Object *)HT_EXTRACT_WEAK(b->key);
06276               if (key) {
06277                 b2 = scheme_bucket_from_table(paramz->extensions, (const char *)key);
06278                 if (!b2->val)
06279                   b2->val = b->val;
06280               }
06281              }
06282            }
06283          }
06284        }
06285 
06286        break;
06287       }
06288     }
06289 
06290     orig_c->cell = (Scheme_Object *)paramz;
06291     orig_c->key = NULL;
06292     orig_c->next = NULL;
06293   }
06294 }
06295 
06296 static Scheme_Object *parameterization_p(int argc, Scheme_Object **argv)
06297 {
06298   Scheme_Object *v = argv[0];
06299 
06300   return (SCHEME_CONFIGP(v)
06301          ? scheme_true
06302          : scheme_false);
06303 }
06304 
06305 
06306 #define SCHEME_PARAMETERP(v) ((SCHEME_PRIMP(v) || SCHEME_CLSD_PRIMP(v)) \
06307                               && (((Scheme_Primitive_Proc *)v)->pp.flags & SCHEME_PRIM_IS_PARAMETER))
06308 
06309 static Scheme_Object *extend_parameterization(int argc, Scheme_Object *argv[])
06310 {
06311   Scheme_Object *key, *a[2], *param;
06312   Scheme_Config *c;
06313   int i;
06314 
06315   c = (Scheme_Config *)argv[0];
06316 
06317   if (argc < 2) {
06318     scheme_flatten_config(c);
06319   } else if (SCHEME_CONFIGP(c) && (argc & 1)) {
06320     for (i = 1; i < argc; i += 2) {
06321       if (!SCHEME_PARAMETERP(argv[i])) {
06322        scheme_wrong_type("parameterize", "parameter", i, argc, argv);
06323        return NULL;
06324       }
06325       a[0] = argv[i + 1];
06326       a[1] = scheme_false;
06327       param = argv[i];
06328       while (1) {
06329         if (SCHEME_PRIMP(param)) {
06330           Scheme_Prim *proc;
06331           proc = (Scheme_Prim *)((Scheme_Primitive_Proc *)param)->prim_val;
06332           key = proc(2, a); /* leads to scheme_param_config to set a[1] */
06333           break;
06334         } else {
06335           /* sets a[1] */
06336           key = do_param(((Scheme_Closed_Primitive_Proc *)param)->data, 2, a);
06337           if (SCHEME_PARAMETERP(key)) {
06338             param = key;
06339             a[0] = a[1];
06340           } else
06341             break;
06342         }
06343       }
06344       c = do_extend_config(c, key, a[1]);
06345     }
06346   }
06347 
06348   return (Scheme_Object *)c;
06349 }
06350 
06351 static Scheme_Object *parameter_p(int argc, Scheme_Object **argv)
06352 {
06353   Scheme_Object *v = argv[0];
06354 
06355   return (SCHEME_PARAMETERP(v)
06356          ? scheme_true
06357          : scheme_false);
06358 }
06359 
06360 static Scheme_Object *do_param(void *_data, int argc, Scheme_Object *argv[])
06361 {
06362   Scheme_Object *guard, **argv2, *pos[2];
06363   ParamData *data = (ParamData *)_data;
06364 
06365   if (argc && argv[0]) {
06366     guard = data->guard;
06367     if (guard) {
06368       Scheme_Object *v;
06369       
06370       v = scheme_apply(guard, 1, argv);
06371 
06372       if (argc == 2) {
06373        /* Special hook for parameterize: */
06374        argv[1] = v;
06375        return data->key;
06376       }
06377 
06378       argv2 = MALLOC_N(Scheme_Object *, argc);
06379       memcpy(argv2, argv, argc * sizeof(Scheme_Object *));
06380       argv2[0] = v;
06381     } else if (argc == 2) {
06382       /* Special hook for parameterize: */
06383       argv[1] = argv[0];
06384       return data->key;
06385     } else
06386       argv2 = argv;
06387   } else
06388     argv2 = argv;
06389 
06390   if (data->is_derived) {
06391     if (!argc) {
06392       Scheme_Object *v;
06393       v = _scheme_apply(data->key, argc, argv2);
06394       pos[0] = v;
06395       return _scheme_tail_apply(data->extract_guard, 1, pos);
06396     } else {
06397       return _scheme_tail_apply(data->key, argc, argv2);
06398     }
06399   }
06400 
06401   pos[0] = data->key;
06402   pos[1] = data->defcell;
06403   
06404   return scheme_param_config("parameter-procedure", 
06405                           (Scheme_Object *)(void *)pos,
06406                           argc, argv2,
06407                           -2, NULL, NULL, 0);
06408 }
06409 
06410 static Scheme_Object *make_parameter(int argc, Scheme_Object **argv)
06411 {
06412   Scheme_Object *p, *cell;
06413   ParamData *data;
06414   void *k;
06415 
06416   k = scheme_make_pair(scheme_true, scheme_false); /* generates a key */
06417 
06418   if (argc > 1)
06419     scheme_check_proc_arity("make-parameter", 1, 1, argc, argv);
06420 
06421   data = MALLOC_ONE_RT(ParamData);
06422 #ifdef MZTAG_REQUIRED
06423   data->type = scheme_rt_param_data;
06424 #endif
06425   data->key = (Scheme_Object *)k;
06426   cell = scheme_make_thread_cell(argv[0], 1);
06427   data->defcell = cell;
06428   data->guard = ((argc > 1) ? argv[1] : NULL);
06429 
06430   p = scheme_make_closed_prim_w_arity(do_param, (void *)data, 
06431                                   "parameter-procedure", 0, 1);
06432   ((Scheme_Primitive_Proc *)p)->pp.flags |= SCHEME_PRIM_IS_PARAMETER;
06433 
06434   return p;
06435 }
06436 
06437 static Scheme_Object *make_derived_parameter(int argc, Scheme_Object **argv)
06438 {
06439   Scheme_Object *p;
06440   ParamData *data;
06441 
06442   if (!SCHEME_PARAMETERP(argv[0]))
06443     scheme_wrong_type("make-derived-parameter", "parameter", 0, argc, argv);
06444 
06445   scheme_check_proc_arity("make-derived-parameter", 1, 1, argc, argv);
06446   scheme_check_proc_arity("make-derived-parameter", 1, 2, argc, argv);
06447 
06448   data = MALLOC_ONE_RT(ParamData);
06449 #ifdef MZTAG_REQUIRED
06450   data->type = scheme_rt_param_data;
06451 #endif
06452   data->is_derived = 1;
06453   data->key = argv[0];
06454   data->guard = argv[1];
06455   data->extract_guard = argv[2];
06456 
06457   p = scheme_make_closed_prim_w_arity(do_param, (void *)data, 
06458                                   "parameter-procedure", 0, 1);
06459   ((Scheme_Primitive_Proc *)p)->pp.flags |= SCHEME_PRIM_IS_PARAMETER;
06460 
06461   return p;
06462 }
06463 
06464 static Scheme_Object *parameter_procedure_eq(int argc, Scheme_Object **argv)
06465 {
06466   Scheme_Object *a, *b;
06467 
06468   a = argv[0];
06469   b = argv[1];
06470 
06471   if (!((SCHEME_PRIMP(a) || SCHEME_CLSD_PRIMP(a))
06472        && (((Scheme_Primitive_Proc *)a)->pp.flags & SCHEME_PRIM_IS_PARAMETER)))
06473     scheme_wrong_type("parameter-procedure=?", "parameter-procedure", 0, argc, argv);
06474   if (!((SCHEME_PRIMP(b) || SCHEME_CLSD_PRIMP(b))
06475        && (((Scheme_Primitive_Proc *)b)->pp.flags & SCHEME_PRIM_IS_PARAMETER)))
06476     scheme_wrong_type("parameter-procedure=?", "parameter-procedure", 1, argc, argv);
06477 
06478   return (SAME_OBJ(a, b)
06479          ? scheme_true
06480          : scheme_false);
06481 }
06482 
06483 int scheme_new_param(void)
06484 {
06485   return max_configs++;
06486 }
06487 
06488 static void init_param(Scheme_Thread_Cell_Table *cells,
06489                      Scheme_Parameterization *params,
06490                      int pos,
06491                      Scheme_Object *v)
06492 {
06493   Scheme_Object *cell;
06494   cell = scheme_make_thread_cell(v, 1);
06495   params->prims[pos] = cell;
06496 }
06497 
06498 void scheme_set_root_param(int p, Scheme_Object *v)
06499 {
06500   Scheme_Parameterization *paramz;
06501   paramz = (Scheme_Parameterization *)scheme_current_thread->init_config->cell;
06502   ((Thread_Cell *)(paramz->prims[p]))->def_val = v;
06503 }
06504 
06505 static void make_initial_config(Scheme_Thread *p)
06506 {
06507   Scheme_Thread_Cell_Table *cells;
06508   Scheme_Parameterization *paramz;
06509   Scheme_Config *config;
06510 
06511   cells = scheme_make_bucket_table(5, SCHEME_hash_weak_ptr);
06512   p->cell_values = cells;
06513 
06514   paramz = (Scheme_Parameterization *)scheme_malloc_tagged(sizeof(Scheme_Parameterization) + 
06515                                                     (max_configs - 1) * sizeof(Scheme_Object*));
06516 #ifdef MZTAG_REQUIRED
06517   paramz->type = scheme_rt_parameterization;
06518 #endif
06519 
06520   config = MALLOC_ONE_TAGGED(Scheme_Config);
06521   config->so.type = scheme_config_type;
06522   config->cell = (Scheme_Object *)paramz;
06523 
06524   p->init_config = config;
06525 
06526   init_param(cells, paramz, MZCONFIG_READTABLE, scheme_make_default_readtable());
06527   
06528   init_param(cells, paramz, MZCONFIG_CAN_READ_GRAPH, scheme_true);
06529   init_param(cells, paramz, MZCONFIG_CAN_READ_COMPILED, scheme_false);
06530   init_param(cells, paramz, MZCONFIG_CAN_READ_BOX, scheme_true);
06531   init_param(cells, paramz, MZCONFIG_CAN_READ_PIPE_QUOTE, scheme_true);
06532   init_param(cells, paramz, MZCONFIG_CAN_READ_DOT, scheme_true);
06533   init_param(cells, paramz, MZCONFIG_CAN_READ_INFIX_DOT, scheme_true);
06534   init_param(cells, paramz, MZCONFIG_CAN_READ_QUASI, scheme_true);
06535   init_param(cells, paramz, MZCONFIG_READ_DECIMAL_INEXACT, scheme_true);
06536   init_param(cells, paramz, MZCONFIG_CAN_READ_READER, scheme_false);
06537   init_param(cells, paramz, MZCONFIG_LOAD_DELAY_ENABLED, init_load_on_demand ? scheme_true : scheme_false);
06538   init_param(cells, paramz, MZCONFIG_DELAY_LOAD_INFO, scheme_false);
06539 
06540   init_param(cells, paramz, MZCONFIG_PRINT_GRAPH, scheme_false);
06541   init_param(cells, paramz, MZCONFIG_PRINT_STRUCT, scheme_true);
06542   init_param(cells, paramz, MZCONFIG_PRINT_BOX, scheme_true);
06543   init_param(cells, paramz, MZCONFIG_PRINT_VEC_SHORTHAND, scheme_false);
06544   init_param(cells, paramz, MZCONFIG_PRINT_HASH_TABLE, scheme_true);
06545   init_param(cells, paramz, MZCONFIG_PRINT_UNREADABLE, scheme_true);
06546   init_param(cells, paramz, MZCONFIG_PRINT_PAIR_CURLY, scheme_false);
06547   init_param(cells, paramz, MZCONFIG_PRINT_MPAIR_CURLY, scheme_true);
06548 
06549   init_param(cells, paramz, MZCONFIG_HONU_MODE, scheme_false);
06550 
06551   init_param(cells, paramz, MZCONFIG_COMPILE_MODULE_CONSTS, scheme_true);
06552   init_param(cells, paramz, MZCONFIG_USE_JIT, scheme_startup_use_jit ? scheme_true : scheme_false);
06553 
06554   {
06555     Scheme_Object *s;
06556     s = scheme_make_immutable_sized_utf8_string("", 0);
06557     init_param(cells, paramz, MZCONFIG_LOCALE, s);
06558   }
06559 
06560   init_param(cells, paramz, MZCONFIG_CASE_SENS, (scheme_case_sensitive ? scheme_true : scheme_false));
06561   init_param(cells, paramz, MZCONFIG_SQUARE_BRACKETS_ARE_PARENS, (scheme_square_brackets_are_parens
06562                                                          ? scheme_true : scheme_false));
06563   init_param(cells, paramz, MZCONFIG_CURLY_BRACES_ARE_PARENS, (scheme_curly_braces_are_parens
06564                                                        ? scheme_true : scheme_false));
06565 
06566   init_param(cells, paramz, MZCONFIG_ERROR_PRINT_WIDTH, scheme_make_integer(256));
06567   init_param(cells, paramz, MZCONFIG_ERROR_PRINT_CONTEXT_LENGTH, scheme_make_integer(16));
06568   init_param(cells, paramz, MZCONFIG_ERROR_PRINT_SRCLOC, scheme_true);
06569 
06570   REGISTER_SO(main_custodian);
06571   REGISTER_SO(last_custodian);
06572   REGISTER_SO(limited_custodians);
06573   main_custodian = scheme_make_custodian(NULL);
06574 #ifdef MZ_PRECISE_GC
06575   GC_register_root_custodian(main_custodian);
06576 #endif
06577   last_custodian = main_custodian;
06578   init_param(cells, paramz, MZCONFIG_CUSTODIAN, (Scheme_Object *)main_custodian);
06579 
06580   init_param(cells, paramz, MZCONFIG_ALLOW_SET_UNDEFINED, (scheme_allow_set_undefined
06581                                                    ? scheme_true
06582                                                    : scheme_false));
06583 
06584   init_param(cells, paramz, MZCONFIG_COLLECTION_PATHS,  scheme_null);
06585 
06586   {
06587     Scheme_Object *s;
06588     s = scheme_make_path(scheme_os_getcwd(NULL, 0, NULL, 1));
06589     s = scheme_path_to_directory_path(s);
06590     init_param(cells, paramz, MZCONFIG_CURRENT_DIRECTORY, s);
06591     scheme_set_original_dir(s);
06592   }
06593 
06594   {
06595     Scheme_Object *rs;
06596     rs = scheme_make_random_state(scheme_get_milliseconds());
06597     init_param(cells, paramz, MZCONFIG_RANDOM_STATE, rs);
06598     rs = scheme_make_random_state(scheme_get_milliseconds());
06599     init_param(cells, paramz, MZCONFIG_SCHEDULER_RANDOM_STATE, rs);
06600   }
06601 
06602   {
06603     Scheme_Object *eh;
06604     eh = scheme_make_prim_w_arity2(scheme_default_eval_handler,
06605                                "default-eval-handler",
06606                                1, 1,
06607                                0, -1);
06608     init_param(cells, paramz, MZCONFIG_EVAL_HANDLER, eh);
06609   }
06610   
06611   {
06612     Scheme_Object *eh;
06613     eh = scheme_make_prim_w_arity(scheme_default_compile_handler,
06614                               "default-compile-handler",
06615                               2, 2);
06616     init_param(cells, paramz, MZCONFIG_COMPILE_HANDLER, eh);
06617   }
06618   
06619   {
06620     Scheme_Object *ph, *prh;
06621 
06622     ph = scheme_make_prim_w_arity(scheme_default_print_handler,
06623                               "default-print-handler",
06624                               1, 1);
06625     init_param(cells, paramz, MZCONFIG_PRINT_HANDLER, ph);
06626 
06627     prh = scheme_make_prim_w_arity(scheme_default_prompt_read_handler,
06628                                "default-prompt-read-handler",
06629                                0, 0);
06630     init_param(cells, paramz, MZCONFIG_PROMPT_READ_HANDLER, prh);
06631   }
06632   init_param(cells, paramz, MZCONFIG_PORT_COUNT_LINES, scheme_false);
06633 
06634   {
06635     Scheme_Object *lh;
06636     lh = scheme_make_prim_w_arity2(scheme_default_load_extension,
06637                                "default-load-extension-handler",
06638                                2, 2,
06639                                0, -1);
06640     init_param(cells, paramz, MZCONFIG_LOAD_EXTENSION_HANDLER, lh);
06641   }
06642 
06643   {
06644     Scheme_Object *ins;
06645     if (initial_inspector) {
06646       ins = initial_inspector;
06647     } else {
06648       ins = scheme_make_initial_inspectors();
06649       /* Keep the initial inspector in case someone resets Scheme (by
06650          calling scheme_basic_env() a second time). Using the same
06651          inspector after a reset lets us use the same initial module
06652          instances. */
06653       REGISTER_SO(initial_inspector);
06654       initial_inspector = ins;
06655     }
06656     init_param(cells, paramz, MZCONFIG_INSPECTOR, ins);
06657     init_param(cells, paramz, MZCONFIG_CODE_INSPECTOR, ins);
06658   }
06659   
06660   {
06661     Scheme_Object *zlv;
06662     zlv = scheme_make_vector(0, NULL);
06663     init_param(cells, paramz, MZCONFIG_CMDLINE_ARGS, zlv);
06664   }
06665 
06666   {
06667     Scheme_Security_Guard *sg;
06668 
06669     sg = MALLOC_ONE_TAGGED(Scheme_Security_Guard);
06670     sg->so.type = scheme_security_guard_type;
06671     init_param(cells, paramz, MZCONFIG_SECURITY_GUARD, (Scheme_Object *)sg);
06672   }
06673 
06674   {
06675     Scheme_Thread_Set *t_set;
06676     t_set = create_thread_set(NULL);
06677     init_param(cells, paramz, MZCONFIG_THREAD_SET, (Scheme_Object *)t_set);
06678   }
06679   
06680   init_param(cells, paramz, MZCONFIG_THREAD_INIT_STACK_SIZE, scheme_make_integer(DEFAULT_INIT_STACK_SIZE));
06681 
06682   {
06683     int i;
06684     for (i = 0; i < max_configs; i++) {
06685       if (!paramz->prims[i])
06686        init_param(cells, paramz, i, scheme_false);      
06687     }
06688   }
06689 }
06690 
06691 void scheme_set_startup_load_on_demand(int on)
06692 {
06693   init_load_on_demand = on;
06694 }
06695 
06696 Scheme_Object *scheme_register_parameter(Scheme_Prim *function, char *name, int which)
06697 {
06698   Scheme_Object *o;
06699 
06700   if (!config_map) {
06701     REGISTER_SO(config_map);
06702     config_map = MALLOC_N(Scheme_Object*, max_configs);
06703   }
06704 
06705   if (config_map[which])
06706     return config_map[which];
06707 
06708   o = scheme_make_prim_w_arity(function, name, 0, 1);
06709   ((Scheme_Primitive_Proc *)o)->pp.flags |= SCHEME_PRIM_IS_PARAMETER;
06710 
06711   config_map[which] = o;
06712 
06713   return o;
06714 }
06715 
06716 typedef Scheme_Object *(*PCheck_Proc)(int, Scheme_Object **, Scheme_Config *);
06717 
06718 Scheme_Object *scheme_param_config(char *name, Scheme_Object *pos,
06719                                int argc, Scheme_Object **argv,
06720                                int arity,
06721                                /* -3 => like -1, plus use check to unmarshall the value
06722                                   -2 => user parameter; pos is array [key, defcell]
06723                                   -1 => use check; if isboolorfilter, check is a filter
06724                                         (and expected is ignored), and if check is NULL,
06725                                         parameter is boolean-valued
06726                                   0+ => check argument for this arity */
06727                                Scheme_Object *(*check)(int, Scheme_Object **), 
06728                                /* Actually called with (int, S_O **, Scheme_Config *) */
06729                                char *expected,
06730                                int isboolorfilter)
06731 {
06732   Scheme_Config *config;
06733 
06734   config = scheme_current_config();
06735 
06736   if (argc == 0) {
06737     if (arity == -2) {
06738       Scheme_Object *cell;
06739 
06740       cell = find_param_cell(config, ((Scheme_Object **)pos)[0], 0);
06741       if (!cell)
06742        cell = ((Scheme_Object **)pos)[1];
06743 
06744       if (SCHEME_THREAD_CELLP(cell))
06745        return scheme_thread_cell_get(cell, scheme_current_thread->cell_values);
06746       else
06747        return cell; /* it's really the value, instead of a cell */
06748     } else {
06749       Scheme_Object *s;
06750       s = scheme_get_param(config, SCHEME_INT_VAL(pos));
06751       if (arity == -3) {
06752        Scheme_Object *a[1];
06753        PCheck_Proc checkp = (PCheck_Proc)check;
06754        a[0] = s;
06755        s = checkp(1, a, config);
06756       }
06757       return s;
06758     }
06759   } else {
06760     Scheme_Object *naya = argv[0];
06761 
06762     if (arity != -2) {
06763       if (arity < 0) {
06764        if (check) {
06765          PCheck_Proc checkp = (PCheck_Proc)check;
06766          Scheme_Object *r;
06767 
06768          r = checkp(1, argv, config);
06769          
06770          if (!isboolorfilter && SCHEME_FALSEP(r))
06771            r = NULL;
06772          
06773          if (!r) {
06774            scheme_wrong_type(name, expected, 0, 1, argv);
06775            return NULL;
06776          }
06777          
06778          if (isboolorfilter)
06779            naya = r;
06780        }
06781       } else 
06782        scheme_check_proc_arity(name, arity, 0, argc, argv);
06783 
06784       if (isboolorfilter && !check)
06785        naya = ((SCHEME_TRUEP(naya)) ? scheme_true : scheme_false);
06786 
06787       if (argc == 2) {
06788        /* Special hook for parameterize: */
06789        argv[1] = naya;
06790        return pos;
06791       } else
06792        scheme_set_param(config, SCHEME_INT_VAL(pos), naya);
06793     } else {
06794       Scheme_Object *cell;
06795       
06796       cell = find_param_cell(config, ((Scheme_Object **)pos)[0], 1);
06797       if (!cell)
06798        cell = ((Scheme_Object **)pos)[1];
06799 
06800       scheme_thread_cell_set(cell, scheme_current_thread->cell_values, naya);
06801     }
06802   
06803     return scheme_void;
06804   }
06805 }
06806 
06807 static Scheme_Object *
06808 exact_positive_integer_p (int argc, Scheme_Object *argv[])
06809 {
06810   Scheme_Object *n = argv[0];
06811   if (SCHEME_INTP(n) && (SCHEME_INT_VAL(n) > 0))
06812     return scheme_true;
06813   if (SCHEME_BIGNUMP(n) && SCHEME_BIGPOS(n))
06814     return scheme_true;
06815 
06816   return scheme_false;
06817 }
06818 
06819 static Scheme_Object *current_thread_initial_stack_size(int argc, Scheme_Object *argv[])
06820 {
06821   return scheme_param_config("current-thread-initial-stack-size", 
06822                           scheme_make_integer(MZCONFIG_THREAD_INIT_STACK_SIZE),
06823                           argc, argv,
06824                           -1, exact_positive_integer_p, "exact positive integer", 0);
06825 }
06826 
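/* Editorial sketch, not part of the original file: the same
   scheme_param_config() pattern as above, but for a hypothetical
   boolean-valued parameter, together with the registration step.  The
   names my_flag_param, current_my_flag, and install_my_flag are invented
   for illustration; scheme_new_param() is assumed to run before the
   initial configuration is built (so the parameterization has a slot for
   it), and scheme_add_global_constant() is assumed to be the usual
   embedding call for exposing the resulting primitive. */

static int my_flag_param;

static Scheme_Object *current_my_flag(int argc, Scheme_Object *argv[])
{
  /* arity -1 with a NULL check and isboolorfilter = 1 makes this a
     boolean-valued parameter, per the comment on scheme_param_config(). */
  return scheme_param_config("current-my-flag",
                             scheme_make_integer(my_flag_param),
                             argc, argv,
                             -1, NULL, NULL, 1);
}

static void install_my_flag(Scheme_Env *env)
{
  Scheme_Object *p;
  my_flag_param = scheme_new_param();
  p = scheme_register_parameter(current_my_flag, "current-my-flag",
                                my_flag_param);
  scheme_add_global_constant("current-my-flag", p, env);
}
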
06827 /*========================================================================*/
06828 /*                              namespaces                                */
06829 /*========================================================================*/
06830 
06831 #ifdef MZ_XFORM
06832 START_XFORM_SKIP;
06833 #endif
06834 
06835 Scheme_Env *scheme_get_env(Scheme_Config *c)
06836 {
06837   Scheme_Object *o;
06838 
06839   if (!c)
06840     c = scheme_current_config();
06841 
06842   o = scheme_get_param(c, MZCONFIG_ENV);
06843   return (Scheme_Env *)o;
06844 }
06845 
06846 #ifdef MZ_XFORM
06847 END_XFORM_SKIP;
06848 #endif
06849 
06850 void scheme_add_namespace_option(Scheme_Object *key, void (*f)(Scheme_Env *))
06851 {
06852   Scheme_NSO *old = namespace_options;
06853   
06854   namespace_options = MALLOC_N_RT(Scheme_NSO, (num_nsos + 1));
06855 
06856   memcpy(namespace_options, old, num_nsos * sizeof(Scheme_NSO));
06857 
06858 #ifdef MZTAG_REQUIRED
06859   namespace_options[num_nsos].type = scheme_rt_namespace_option;
06860 #endif
06861   namespace_options[num_nsos].key = key;
06862   namespace_options[num_nsos].f = f;
06863   
06864   num_nsos++;
06865 }
06866 
06867 Scheme_Object *scheme_make_namespace(int argc, Scheme_Object *argv[])
06868 {
06869   Scheme_Env *genv, *env;
06870   long phase;
06871 
06872   genv = scheme_get_env(NULL);
06873   env = scheme_make_empty_env();
06874   
06875   for (phase = genv->phase; phase--; ) {
06876     scheme_prepare_exp_env(env);
06877     env = env->exp_env;
06878   }
06879 
06880   return (Scheme_Object *)env;
06881 }
06882 
06883 static Scheme_Object *namespace_p(int argc, Scheme_Object **argv)
06884 {
06885   return ((SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_namespace_type)) 
06886          ? scheme_true 
06887          : scheme_false);
06888 }
06889 
06890 static Scheme_Object *current_namespace(int argc, Scheme_Object *argv[])
06891 {
06892   return scheme_param_config("current-namespace", 
06893                           scheme_make_integer(MZCONFIG_ENV),
06894                           argc, argv,
06895                           -1, namespace_p, "namespace", 0);
06896 }
06897 
06898 /*========================================================================*/
06899 /*                           security guards                              */
06900 /*========================================================================*/
06901 
06902 static Scheme_Object *make_security_guard(int argc, Scheme_Object *argv[])
06903 {
06904   Scheme_Security_Guard *sg;
06905 
06906   if (!(SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_security_guard_type)))
06907     scheme_wrong_type("make-security-guard", "security-guard", 0, argc, argv);
06908   scheme_check_proc_arity("make-security-guard", 3, 1, argc, argv);
06909   scheme_check_proc_arity("make-security-guard", 4, 2, argc, argv);
06910   if (argc > 3)
06911     scheme_check_proc_arity2("make-security-guard", 3, 3, argc, argv, 1);
06912 
06913   sg = MALLOC_ONE_TAGGED(Scheme_Security_Guard);
06914   sg->so.type = scheme_security_guard_type;
06915   sg->parent = (Scheme_Security_Guard *)argv[0];
06916   sg->file_proc = argv[1];
06917   sg->network_proc = argv[2];
06918   if ((argc > 3) && SCHEME_TRUEP(argv[3]))
06919     sg->link_proc = argv[3];
06920 
06921   return (Scheme_Object *)sg;
06922 }
06923 
06924 static Scheme_Object *security_guard_p(int argc, Scheme_Object *argv[])
06925 {
06926   return ((SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_security_guard_type)) 
06927          ? scheme_true 
06928          : scheme_false);
06929 }
06930 
06931 static Scheme_Object *current_security_guard(int argc, Scheme_Object *argv[])
06932 {
06933   return scheme_param_config("current-security-guard", 
06934                           scheme_make_integer(MZCONFIG_SECURITY_GUARD),
06935                           argc, argv,
06936                           -1, security_guard_p, "security-guard", 0);
06937 }
06938 
06939 
06940 void scheme_security_check_file(const char *who, const char *filename, int guards)
06941 {
06942   Scheme_Security_Guard *sg;
06943 
06944   sg = (Scheme_Security_Guard *)scheme_get_param(scheme_current_config(), MZCONFIG_SECURITY_GUARD);
06945 
06946   if (sg->file_proc) {
06947     Scheme_Object *l = scheme_null, *a[3];
06948 
06949     if (!read_symbol) {
06950       REGISTER_SO(read_symbol);
06951       REGISTER_SO(write_symbol);
06952       REGISTER_SO(execute_symbol);
06953       REGISTER_SO(delete_symbol);
06954       REGISTER_SO(exists_symbol);
06955 
06956       read_symbol = scheme_intern_symbol("read");
06957       write_symbol = scheme_intern_symbol("write");
06958       execute_symbol = scheme_intern_symbol("execute");
06959       delete_symbol = scheme_intern_symbol("delete");
06960       exists_symbol = scheme_intern_symbol("exists");
06961     }
06962 
06963     if (guards & SCHEME_GUARD_FILE_EXISTS)
06964       l = scheme_make_pair(exists_symbol, l);
06965     if (guards & SCHEME_GUARD_FILE_DELETE)
06966       l = scheme_make_pair(delete_symbol, l);
06967     if (guards & SCHEME_GUARD_FILE_EXECUTE)
06968       l = scheme_make_pair(execute_symbol, l);
06969     if (guards & SCHEME_GUARD_FILE_WRITE)
06970       l = scheme_make_pair(write_symbol, l);
06971     if (guards & SCHEME_GUARD_FILE_READ)
06972       l = scheme_make_pair(read_symbol, l);
06973 
06974     a[0] = scheme_intern_symbol(who);
06975     a[1] = (filename ? scheme_make_sized_path((char *)filename, -1, 1) : scheme_false);
06976     a[2] = l;
06977 
06978     while (sg->parent) {
06979       scheme_apply(sg->file_proc, 3, a);
06980       sg = sg->parent;
06981     }
06982   }
06983 }
06984 
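/* Editorial sketch, not part of the original file: a primitive that
   opens a file for reading would typically consult the current guard
   chain before touching the filesystem.  The wrapper name, the "who"
   string, and the path argument are chosen only for illustration; the
   flag names are the ones tested above.  A guard that denies the access
   is expected to raise an exception, so control does not return from
   the check in that case. */
static void check_read_access(const char *path)
{
  scheme_security_check_file("open-input-file", path,
                             SCHEME_GUARD_FILE_EXISTS | SCHEME_GUARD_FILE_READ);
}
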
06985 void scheme_security_check_file_link(const char *who, const char *filename, const char *content)
06986 {
06987   Scheme_Security_Guard *sg;
06988 
06989   sg = (Scheme_Security_Guard *)scheme_get_param(scheme_current_config(), MZCONFIG_SECURITY_GUARD);
06990 
06991   if (sg->file_proc) {
06992     Scheme_Object *a[3];
06993 
06994     a[0] = scheme_intern_symbol(who);
06995     a[1] = scheme_make_sized_path((char *)filename, -1, 1);
06996     a[2] = scheme_make_sized_path((char *)content, -1, 1);
06997 
06998     while (sg->parent) {
06999       if (sg->link_proc)
07000        scheme_apply(sg->link_proc, 3, a);
07001       else {
07002        scheme_signal_error("%s: security guard does not allow any link operation; attempted from: %s to: %s",
07003                          who,
07004                          filename,
07005                          content);
07006       }
07007       sg = sg->parent;
07008     }
07009   }
07010 }
07011 
07012 void scheme_security_check_network(const char *who, const char *host, int port, int client)
07013 {
07014   Scheme_Security_Guard *sg;
07015 
07016   sg = (Scheme_Security_Guard *)scheme_get_param(scheme_current_config(), MZCONFIG_SECURITY_GUARD);
07017 
07018   if (sg->network_proc) {
07019     Scheme_Object *a[4];
07020 
07021     if (!client_symbol) {
07022       REGISTER_SO(client_symbol);
07023       REGISTER_SO(server_symbol);
07024 
07025       client_symbol = scheme_intern_symbol("client");
07026       server_symbol = scheme_intern_symbol("server");
07027     }
07028 
07029     a[0] = scheme_intern_symbol(who);
07030     a[1] = (host ? scheme_make_sized_utf8_string((char *)host, -1) : scheme_false);
07031     a[2] = ((port < 1) ? scheme_false : scheme_make_integer(port));
07032     a[3] = (client ? client_symbol : server_symbol);
07033 
07034     while (sg->parent) {
07035       scheme_apply(sg->network_proc, 4, a);
07036       sg = sg->parent;
07037     }
07038   }
07039 }
07040 
07041 /*========================================================================*/
07042 /*                         wills and will executors                       */
07043 /*========================================================================*/
07044 
07045 typedef struct ActiveWill {
07046   MZTAG_IF_REQUIRED
07047   Scheme_Object *o;
07048   Scheme_Object *proc;
07049   struct WillExecutor *w;  /* Set to will executor when executed */
07050   struct ActiveWill *next;
07051 } ActiveWill;
07052 
07053 typedef struct WillExecutor {
07054   Scheme_Object so;
07055   Scheme_Object *sema;
07056   ActiveWill *first, *last;
07057 } WillExecutor;
07058 
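/* (The sema field counts queued wills: activate_will() below posts it
   once per will added to the queue, will-try-execute polls it, and
   will-execute blocks on it before do_next_will() dequeues.) */
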
07059 static void activate_will(void *o, void *data) 
07060 {
07061   ActiveWill *a;
07062   WillExecutor *w;
07063   Scheme_Object *proc;
07064 
07065   w = (WillExecutor *)scheme_ephemeron_key(data);
07066   proc = scheme_ephemeron_value(data);
07067 
07068   if (w) {
07069     a = MALLOC_ONE_RT(ActiveWill);
07070 #ifdef MZTAG_REQUIRED
07071     a->type = scheme_rt_will;
07072 #endif
07073     a->o = (Scheme_Object *)o;
07074     a->proc = proc;
07075   
07076     if (w->last)
07077       w->last->next = a;
07078     else
07079       w->first = a;
07080     w->last = a;
07081     scheme_post_sema(w->sema);
07082   }
07083 }
07084 
07085 static Scheme_Object *do_next_will(WillExecutor *w)
07086 {
07087   ActiveWill *a;
07088   Scheme_Object *o[1];
07089 
07090   a = w->first;
07091   w->first = a->next;
07092   if (!w->first)
07093     w->last = NULL;
07094   
07095   o[0] = a->o;
07096   a->o = NULL;
07097 
07098   return scheme_apply_multi(a->proc, 1, o);
07099 }
07100 
07101 static Scheme_Object *make_will_executor(int argc, Scheme_Object **argv)
07102 {
07103   WillExecutor *w;
07104   Scheme_Object *sema;
07105 
07106   w = MALLOC_ONE_TAGGED(WillExecutor);
07107   sema = scheme_make_sema(0);
07108 
07109   w->so.type = scheme_will_executor_type;
07110   w->first = NULL;
07111   w->last = NULL;
07112   w->sema = sema;
07113 
07114   return (Scheme_Object *)w;
07115 }
07116 
07117 static Scheme_Object *will_executor_p(int argc, Scheme_Object **argv)
07118 {
07119   return ((SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_will_executor_type)) 
07120          ? scheme_true 
07121          : scheme_false);
07122 }
07123 
07124 static Scheme_Object *register_will(int argc, Scheme_Object **argv)
07125 {
07126   Scheme_Object *e;
07127 
07128   if (NOT_SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_will_executor_type))
07129     scheme_wrong_type("will-register", "will-executor", 0, argc, argv);
07130   scheme_check_proc_arity("will-register", 1, 2, argc, argv);
07131 
07132   /* If we lose track of the will executor, then drop the finalizer. */
07133   e = scheme_make_ephemeron(argv[0], argv[2]);
07134 
07135   scheme_add_scheme_finalizer(argv[1], activate_will, e);
07136 
07137   return scheme_void;
07138 }
07139 
07140 static Scheme_Object *will_executor_try(int argc, Scheme_Object **argv)
07141 {
07142   WillExecutor *w;
07143 
07144   if (NOT_SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_will_executor_type))
07145     scheme_wrong_type("will-try-execute", "will-executor", 0, argc, argv);
07146   
07147   w = (WillExecutor *)argv[0];
07148 
07149   if (scheme_wait_sema(w->sema, 1))
07150     return do_next_will(w);
07151   else
07152     return scheme_false;
07153 }
07154 
07155 static Scheme_Object *will_executor_go(int argc, Scheme_Object **argv)
07156 {
07157   WillExecutor *w;
07158 
07159   if (NOT_SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_will_executor_type))
07160     scheme_wrong_type("will-execute", "will-executor", 0, argc, argv);
07161   
07162   w = (WillExecutor *)argv[0];
07163 
07164   scheme_wait_sema(w->sema, 0);
07165 
07166   return do_next_will(w);
07167 }
07168 
07169 static Scheme_Object *will_executor_sema(Scheme_Object *w, int *repost)
07170 {
07171   *repost = 1;
07172   return ((WillExecutor *)w)->sema;
07173 }
07174 
07175 /*========================================================================*/
07176 /*                         GC preparation and timing                      */
07177 /*========================================================================*/
07178 
07179 #ifdef MZ_XFORM
07180 START_XFORM_SKIP;
07181 #endif
07182 
07183 void scheme_zero_unneeded_rands(Scheme_Thread *p)
07184 {
07185   /* Call this procedure before GC or before copying out
07186      a thread's stack. */
07187 }
07188 
07189 static void prepare_thread_for_GC(Scheme_Object *t)
07190 {
07191   Scheme_Thread *p = (Scheme_Thread *)t;
07192 
07193   /* zero unused part of env stack in each thread */
07194 
07195   if (!p->nestee) {
07196     Scheme_Saved_Stack *saved;
07197 # define RUNSTACK_TUNE(x) /* x   - Used for performance tuning */
07198     RUNSTACK_TUNE( long size; );
07199 
07200     if ((!p->runstack_owner
07201          || (p == *p->runstack_owner))
07202         && p->runstack_start) {
07203       long rs_end;
07204       Scheme_Object **rs_start;
07205 
07206       /* If there's a meta-prompt, we can also zero out past the unused part */
07207       if (p->meta_prompt && (p->meta_prompt->runstack_boundary_start == p->runstack_start)) {
07208         rs_end = p->meta_prompt->runstack_boundary_offset;
07209       } else {
07210         rs_end = p->runstack_size;
07211       }
07212 
07213       if ((p->runstack_tmp_keep >= p->runstack_start)
07214           && (p->runstack_tmp_keep < p->runstack))
07215         rs_start = p->runstack_tmp_keep;
07216       else
07217         rs_start = p->runstack;
07218 
07219       scheme_set_runstack_limits(p->runstack_start, 
07220                                  p->runstack_size,
07221                                  rs_start - p->runstack_start,
07222                                  rs_end);
07223       
07224       RUNSTACK_TUNE( size = p->runstack_size - (p->runstack - p->runstack_start); );
07225       
07226       for (saved = p->runstack_saved; saved; saved = saved->prev) {
07227        RUNSTACK_TUNE( size += saved->runstack_size; );
07228 
07229         if (p->meta_prompt && (p->meta_prompt->runstack_boundary_start == saved->runstack_start)) {
07230           rs_end = p->meta_prompt->runstack_boundary_offset;
07231         } else {
07232           rs_end = saved->runstack_size;
07233         }
07234 
07235         scheme_set_runstack_limits(saved->runstack_start,
07236                                    saved->runstack_size,
07237                                    saved->runstack_offset,
07238                                    rs_end);
07239       }
07240     }
07241 
07242     RUNSTACK_TUNE( printf("%ld\n", size); );
07243 
07244     if (p->tail_buffer && (p->tail_buffer != p->runstack_tmp_keep)) {
07245       int i;
07246       for (i = 0; i < p->tail_buffer_size; i++) {
07247        p->tail_buffer[i] = NULL;
07248       }
07249     }
07250   }
07251       
07252   if ((!p->cont_mark_stack_owner
07253        || (p == *p->cont_mark_stack_owner))
07254       && p->cont_mark_stack) {
07255     int segcount, i, segpos;
07256 
07257     /* release unused cont mark stack segments */
07258     if (p->cont_mark_stack)
07259       segcount = ((long)(p->cont_mark_stack - 1) >> SCHEME_LOG_MARK_SEGMENT_SIZE) + 1;
07260     else
07261       segcount = 0;
07262     for (i = segcount; i < p->cont_mark_seg_count; i++) {
07263       p->cont_mark_stack_segments[i] = NULL;
07264     }
07265     if (segcount < p->cont_mark_seg_count)
07266       p->cont_mark_seg_count = segcount;
07267       
07268     /* zero unused part of last mark stack segment */
07269     segpos = ((long)p->cont_mark_stack >> SCHEME_LOG_MARK_SEGMENT_SIZE);
07270     
07271     if (segpos < p->cont_mark_seg_count) {
07272       Scheme_Cont_Mark *seg = p->cont_mark_stack_segments[segpos];
07273       int stackpos = ((long)p->cont_mark_stack & SCHEME_MARK_SEGMENT_MASK);
07274       if (seg) {
07275         for (i = stackpos; i < SCHEME_MARK_SEGMENT_SIZE; i++) {
07276           if (seg[i].key) {
07277             seg[i].key = NULL;
07278             seg[i].val = NULL;
07279             seg[i].cache = NULL;
07280           } else {
07281             /* NULL means we already cleared from here on. */
07282             break;
07283           }
07284         }
07285       }
07286     }
07287 
07288     {
07289       MZ_MARK_STACK_TYPE pos;
07290       /* also zero out slots before the current bottom */
07291       for (pos = 0; pos < p->cont_mark_stack_bottom; pos++) {
07292         Scheme_Cont_Mark *seg;
07293         int stackpos;
07294         segpos = ((long)pos >> SCHEME_LOG_MARK_SEGMENT_SIZE);
07295         seg = p->cont_mark_stack_segments[segpos];
07296         if (seg) {
07297           stackpos = ((long)pos & SCHEME_MARK_SEGMENT_MASK);
07298           seg[stackpos].key = NULL;
07299           seg[stackpos].val = NULL;
07300           seg[stackpos].cache = NULL;
07301         }
07302       }
07303     }
07304   }
07305 
07306   if (p->values_buffer) {
07307     if (p->values_buffer_size > 128)
07308       p->values_buffer = NULL;
07309     else {
07310       memset(p->values_buffer, 0, sizeof(Scheme_Object*) * p->values_buffer_size);
07311     }
07312   }
07313 
07314   p->spare_runstack = NULL;
07315 
07316   /* zero unused part of list stack */
07317   scheme_clean_list_stack(p);
07318 }
07319 
07320 static void prepare_this_thread_for_GC(Scheme_Thread *p)
07321 {
07322   if (p == scheme_current_thread) {
07323 #ifdef RUNSTACK_IS_GLOBAL
07324     scheme_current_thread->runstack = MZ_RUNSTACK;
07325     scheme_current_thread->runstack_start = MZ_RUNSTACK_START;
07326     scheme_current_thread->cont_mark_stack = MZ_CONT_MARK_STACK;
07327     scheme_current_thread->cont_mark_pos = MZ_CONT_MARK_POS;
07328 #endif
07329   }
07330   prepare_thread_for_GC((Scheme_Object *)p);
07331 }
07332 
07333 static void get_ready_for_GC()
07334 {
07335   start_this_gc_time = scheme_get_process_milliseconds();
07336 
07337   scheme_zero_unneeded_rands(scheme_current_thread);
07338 
07339   scheme_clear_modidx_cache();
07340   scheme_clear_shift_cache();
07341   scheme_clear_prompt_cache();
07342   scheme_clear_rx_buffers();
07343   scheme_clear_bignum_cache();
07344   scheme_clear_delayed_load_cache();
07345 
07346 #ifdef RUNSTACK_IS_GLOBAL
07347   if (scheme_current_thread->running) {
07348     scheme_current_thread->runstack = MZ_RUNSTACK;
07349     scheme_current_thread->runstack_start = MZ_RUNSTACK_START;
07350     scheme_current_thread->cont_mark_stack = MZ_CONT_MARK_STACK;
07351     scheme_current_thread->cont_mark_pos = MZ_CONT_MARK_POS;
07352   }
07353 #endif
07354 
07355   for_each_managed(scheme_thread_type, prepare_thread_for_GC);
07356 
07357 #ifdef MZ_PRECISE_GC
07358   scheme_flush_stack_copy_cache();
07359 #endif
07360 
07361   scheme_fuel_counter = 0;
07362   scheme_jit_stack_boundary = (unsigned long)-1;
07363 
07364 #ifdef WINDOWS_PROCESSES
07365   scheme_suspend_remembered_threads();
07366 #endif
07367 #ifdef UNIX_PROCESSES
07368   scheme_block_child_signals(1);
07369 #endif
07370 
07371   {
07372     GC_CAN_IGNORE void *data;
07373     data = scheme_gmp_tls_load(scheme_current_thread->gmp_tls);
07374     scheme_current_thread->gmp_tls_data = data;
07375   }
07376 
07377   did_gc_count++;
07378 }
07379 
07380 extern int GC_words_allocd;
07381 
07382 static void done_with_GC()
07383 {
07384   scheme_gmp_tls_unload(scheme_current_thread->gmp_tls, scheme_current_thread->gmp_tls_data);
07385   scheme_current_thread->gmp_tls_data = NULL;
07386 
07387 #ifdef RUNSTACK_IS_GLOBAL
07388 # ifdef MZ_PRECISE_GC
07389   if (scheme_current_thread->running) {
07390     MZ_RUNSTACK = scheme_current_thread->runstack;
07391     MZ_RUNSTACK_START = scheme_current_thread->runstack_start;
07392   }
07393 # endif
07394 #endif
07395 #ifdef WINDOWS_PROCESSES
07396   scheme_resume_remembered_threads();
07397 #endif
07398 #ifdef UNIX_PROCESSES
07399   scheme_block_child_signals(0);
07400 #endif
07401 
07402   end_this_gc_time = scheme_get_process_milliseconds();
07403   scheme_total_gc_time += (end_this_gc_time - start_this_gc_time);
07404 }
07405 
07406 #ifdef MZ_PRECISE_GC
07407 static void inform_GC(int major_gc, long pre_used, long post_used)
07408 {
07409   if (scheme_main_logger) {
07410     /* Don't use scheme_log(), because it wants to allocate a buffer
07411        based on the max value-print width, and we may not be at a
07412        point where parameters are available. */
07413     char buf[128];
07414     long buflen;
07415 
07416     sprintf(buf,
07417             "GC [%s] at %ld bytes; %ld collected in %ld msec",
07418             (major_gc ? "major" : "minor"),
07419             pre_used, pre_used - post_used,
07420             end_this_gc_time - start_this_gc_time);
07421     buflen = strlen(buf);
07422 
07423     scheme_log_message(scheme_main_logger,
07424                        SCHEME_LOG_DEBUG,
07425                        buf, buflen,
07426                        NULL);
07427   }
07428 
07429 }
07430 #endif
07431 
07432 
07433 #ifdef MZ_XFORM
07434 END_XFORM_SKIP;
07435 #endif
07436 
07437 /*========================================================================*/
07438 /*                                 stats                                  */
07439 /*========================================================================*/
07440 
07441 static Scheme_Object *current_stats(int argc, Scheme_Object *argv[])
07442 {
07443   Scheme_Object *v;
07444   Scheme_Thread *t = NULL;
07445   
07446   v = argv[0];
07447 
07448   if (!SCHEME_MUTABLE_VECTORP(v))
07449     scheme_wrong_type("vector-set-performance-stats!", "mutable vector", 0, argc, argv);
07450   if (argc > 1) {
07451     if (!SCHEME_FALSEP(argv[1])) {
07452       if (!SCHEME_THREADP(argv[1]))
06453        scheme_wrong_type("vector-set-performance-stats!", "thread or #f", 1, argc, argv);
07454       t = (Scheme_Thread *)argv[1];
07455     }
07456   }
07457   
07458   if (t) {
07459     switch (SCHEME_VEC_SIZE(v)) {
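    /* Note: the cases below (and in the process-wide switch further
       down) fall through on purpose, so a vector of size N gets
       fields 0 through N-1 filled in. */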
07460     default:
07461     case 4:
07462       {
07463        /* Stack size: */
07464        long sz = 0;
07465 
07466        if (MZTHREAD_STILL_RUNNING(t->running)) {
07467          Scheme_Overflow *overflow;
07468          Scheme_Saved_Stack *runstack_saved;
07469          
07470          /* C stack */
07471          if (t == scheme_current_thread) {
07472            void *stk_start, *stk_end;
07473            stk_start = t->stack_start;
07474            stk_end = (void *)&stk_end;
07475 #         ifdef STACK_GROWS_UP
07476            sz = (long)stk_end XFORM_OK_MINUS (long)stk_start;
07477 #         endif
07478 #         ifdef STACK_GROWS_DOWN
07479            sz = (long)stk_start XFORM_OK_MINUS (long)stk_end;
07480 #         endif
07481          } else {
07482            if (t->jmpup_buf.stack_copy)
07483              sz = t->jmpup_buf.stack_size;
07484          }
07485          for (overflow = t->overflow; overflow; overflow = overflow->prev) {
07486            sz += overflow->jmp->cont.stack_size;
07487          }
07488          
07489          /* Scheme stack */
07490          {
07491            int ssz;
07492            if (t == scheme_current_thread) {
07493              ssz = (MZ_RUNSTACK_START + t->runstack_size) - MZ_RUNSTACK;
07494            } else {
07495              ssz = (t->runstack_start + t->runstack_size) - t->runstack;
07496            }
07497            for (runstack_saved = t->runstack_saved; runstack_saved; runstack_saved = runstack_saved->prev) {
07498              ssz += runstack_saved->runstack_size;
07499            }
07500            sz += sizeof(Scheme_Object *) * ssz;
07501          }
07502          
07503          /* Mark stack */
07504          if (t == scheme_current_thread) {
07505            sz += ((long)scheme_current_cont_mark_pos >> 1) * sizeof(Scheme_Cont_Mark);
07506          } else {
07507            sz += ((long)t->cont_mark_pos >> 1) * sizeof(Scheme_Cont_Mark);
07508          }
07509        }
07510 
07511        SCHEME_VEC_ELS(v)[3] = scheme_make_integer(sz);
07512       }
07513     case 3:
07514       SCHEME_VEC_ELS(v)[2] = (t->block_descriptor 
07515                            ? scheme_true 
07516                            : ((t->running & MZTHREAD_SUSPENDED)
07517                              ? scheme_true
07518                              : scheme_false));
07519     case 2:
07520       {
07521        Scheme_Object *dp;
07522        dp = thread_dead_p(1, (Scheme_Object **) mzALIAS &t);
07523        SCHEME_VEC_ELS(v)[1] = dp;
07524       }
07525     case 1:
07526       {
07527        Scheme_Object *rp;
07528        rp = thread_running_p(1, (Scheme_Object **) mzALIAS &t);
07529        SCHEME_VEC_ELS(v)[0] = rp;
07530       }
07531     case 0:
07532       break;
07533     }
07534   } else {
07535     long cpuend, end, gcend;
07536 
07537     cpuend = scheme_get_process_milliseconds();
07538     end = scheme_get_milliseconds();
07539     gcend = scheme_total_gc_time;
07540     
07541     switch (SCHEME_VEC_SIZE(v)) {
07542     default:
07543     case 11:
07544       SCHEME_VEC_ELS(v)[10] = scheme_make_integer(scheme_jit_malloced);
07545     case 10:
07546       SCHEME_VEC_ELS(v)[9] = scheme_make_integer(scheme_hash_iteration_count);
07547     case 9:
07548       SCHEME_VEC_ELS(v)[8] = scheme_make_integer(scheme_hash_request_count);
07549     case 8:
07550       SCHEME_VEC_ELS(v)[7] = scheme_make_integer(scheme_num_read_syntax_objects);
07551     case 7:
07552       SCHEME_VEC_ELS(v)[6] = scheme_make_integer(num_running_threads);
07553     case 6:
07554       SCHEME_VEC_ELS(v)[5] = scheme_make_integer(scheme_overflow_count);
07555     case 5:
07556       SCHEME_VEC_ELS(v)[4] = scheme_make_integer(thread_swap_count);
07557     case 4:
07558       SCHEME_VEC_ELS(v)[3] = scheme_make_integer(did_gc_count);
07559     case 3:
07560       SCHEME_VEC_ELS(v)[2] = scheme_make_integer(gcend);
07561     case 2:
07562       SCHEME_VEC_ELS(v)[1] = scheme_make_integer(end);
07563     case 1:
07564       SCHEME_VEC_ELS(v)[0] = scheme_make_integer(cpuend);
07565     case 0:
07566       break;
07567     }
07568   }
07569 
07570   return scheme_void;
07571 }
07572 
07573 /*========================================================================*/
07574 /*                             gmp allocation                             */
07575 /*========================================================================*/
07576 
07577 /* Allocate atomic, immobile memory for GMP. Although we have set up
07578    GMP to reliably free anything that it allocates, we allocate via
07579    the GC to get accounting with 3m. The set of allocated blocks is
07580    stored in a "mem_pool" variable, which is a linked list; GMP
07581    allocates with a stack discipline, so maintaining the list is easy.
07582    Meanwhile, scheme_gmp_tls_unload, etc., attach the pool to the
07583    owning thread as needed for GC. */
07584 
07585 void *scheme_malloc_gmp(unsigned long amt, void **mem_pool)
07586 {
07587   void *p, *mp;
07588 
07589 #ifdef MZ_PRECISE_GC      
07590   if (amt < GC_malloc_stays_put_threshold())
07591     amt = GC_malloc_stays_put_threshold();
07592 #endif
07593 
07594   p = scheme_malloc_atomic(amt);
07595 
07596   mp = scheme_make_raw_pair(p, *mem_pool);
07597   *mem_pool = mp;
07598 
07599   return p;
07600 }
07601 
07602 void scheme_free_gmp(void *p, void **mem_pool)
07603 {
07604   if (p != SCHEME_CAR(*mem_pool))
07605     scheme_log(NULL,
07606                SCHEME_LOG_FATAL,
07607                0,
07608                "bad GMP memory free");
07609   *mem_pool = SCHEME_CDR(*mem_pool);
07610 }
07611 
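/* Editorial sketch, not part of the original file: allocation through
   these hooks must follow GMP's stack (LIFO) discipline, since
   scheme_free_gmp() only accepts the block at the head of the pool
   list.  The function name and sizes are invented for illustration. */
static void gmp_pool_demo(void)
{
  void *mem_pool = NULL;  /* head of the raw-pair list of live blocks */
  void *a, *b;

  a = scheme_malloc_gmp(64, &mem_pool);   /* pool: (a) */
  b = scheme_malloc_gmp(128, &mem_pool);  /* pool: (b a) */

  scheme_free_gmp(b, &mem_pool);  /* most recent block first ... */
  scheme_free_gmp(a, &mem_pool);  /* ... otherwise a fatal error is logged */
}
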
07612 /*========================================================================*/
07613 /*                               precise GC                               */
07614 /*========================================================================*/
07615 
07616 Scheme_Jumpup_Buf_Holder *scheme_new_jmpupbuf_holder(void)
07617 /* Scheme_Jumpup_Buf_Holder exists for precise GC, and for external
07618    programs that want to store Jumpup_Bufs, because the GC interaction
07619    is tricky. For example, MrEd uses it for a special trampoline
07620    implementation. */
07621 {
07622   Scheme_Jumpup_Buf_Holder *h;
07623 
07624   h = MALLOC_ONE_RT(Scheme_Jumpup_Buf_Holder);
07625 #ifdef MZ_PRECISE_GC
07626   h->type = scheme_rt_buf_holder;
07627 #endif
07628 
07629   return h;
07630 }
07631 
07632 #ifdef MZ_PRECISE_GC
07633 unsigned long scheme_get_current_thread_stack_start(void)
07634 {
07635   Scheme_Thread *p;
07636   p = scheme_current_thread;
07637   return (unsigned long)p->stack_start;
07638 }
07639 #endif
07640 
07641 #ifdef MZ_PRECISE_GC
07642 
07643 START_XFORM_SKIP;
07644 
07645 #define MARKS_FOR_THREAD_C
07646 #include "mzmark.c"
07647 
07648 static void register_traversers(void)
07649 {
07650   GC_REG_TRAV(scheme_will_executor_type, mark_will_executor_val);
07651   GC_REG_TRAV(scheme_custodian_type, mark_custodian_val);
07652   GC_REG_TRAV(scheme_cust_box_type, mark_custodian_box_val);
07653   GC_REG_TRAV(scheme_thread_hop_type, mark_thread_hop);
07654   GC_REG_TRAV(scheme_evt_set_type, mark_evt_set);
07655   GC_REG_TRAV(scheme_thread_set_type, mark_thread_set);
07656   GC_REG_TRAV(scheme_config_type, mark_config);
07657   GC_REG_TRAV(scheme_thread_cell_type, mark_thread_cell);
07658 
07659   GC_REG_TRAV(scheme_rt_namespace_option, mark_namespace_option);
07660   GC_REG_TRAV(scheme_rt_param_data, mark_param_data);
07661   GC_REG_TRAV(scheme_rt_will, mark_will);
07662   GC_REG_TRAV(scheme_rt_evt, mark_evt);
07663   GC_REG_TRAV(scheme_rt_syncing, mark_syncing);
07664   GC_REG_TRAV(scheme_rt_parameterization, mark_parameterization);
07665 }
07666 
07667 END_XFORM_SKIP;
07668 
07669 #endif