plt-scheme 4.2.1
setjmpup.c

/*
  MzScheme
  Copyright (c) 2004-2009 PLT Scheme Inc.
  Copyright (c) 1995-2001 Matthew Flatt

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Library General Public
    License as published by the Free Software Foundation; either
    version 2 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Library General Public License for more details.

    You should have received a copy of the GNU Library General Public
    License along with this library; if not, write to the Free
    Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
    Boston, MA 02110-1301 USA.
*/

/* Some compilers don't like re-definition of GC_malloc in schemef.h: */
#ifndef MZ_PRECISE_GC
# define SCHEME_NO_GC_PROTO
#endif

#include "schpriv.h"
#include "schmach.h"
#include "schgc.h"

#ifdef STACK_GROWS_UP
# define DEEPPOS(b) ((unsigned long)(b)->stack_from+(unsigned long)(b)->stack_size)
#else
# define DEEPPOS(b) ((unsigned long)(b)->stack_from)
#endif
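
/* A note on DEEPPOS (an explanatory gloss, not part of the original
   comments): it names the deepest, farthest-from-the-current-frame
   address covered by a saved stack -- stack_from + stack_size when
   the stack grows up, stack_from itself otherwise. Callers below
   compare it (via STK_COMP) against a live stack address to decide
   whether the current C stack already extends past the saved region. */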

#ifdef MZ_PRECISE_GC
void *(*scheme_get_external_stack_val)(void);
void (*scheme_set_external_stack_val)(void *);
#endif

#ifndef MZ_PRECISE_GC

/**********************************************************************/

/* When we copy the stack, we must set up GC to specially traverse the
   stack copy to account for pointers to the interior of collectable
   objects. */

extern MZ_DLLIMPORT void GC_push_all_stack(void *, void *);
extern MZ_DLLIMPORT void GC_flush_mark_stack(void);
extern MZ_DLLIMPORT void (*GC_push_last_roots)(void);
extern MZ_DLLIMPORT void (*GC_push_last_roots_again)(void);
/* GC_push_last_roots_again is called after marking eager
   finalizations (once at each stage). We rely on the fact that no
   copied stack will be referenced by (or affect the ordering of)
   anything non-eagerly finalized. */

#ifdef USE_SENORA_GC
# define GC_is_marked(p) GC_base(p)
# define GC_did_mark_stack_overflow() 0
#else
extern MZ_DLLIMPORT int GC_is_marked(void *);
extern MZ_DLLIMPORT int GC_did_mark_stack_overflow(void);
#endif

#define get_copy(s_c) (((CopiedStack *)s_c)->_stack_copy)

#define MALLOC_LINK() MALLOC_ONE_WEAK(CopiedStack*)
#ifdef USE_TAGGED_ALLOCATION
extern void *scheme_malloc_stack(size_t);
# define MALLOC_STACK(size) scheme_malloc_stack(size)
#else
# define MALLOC_STACK(size) scheme_malloc_atomic(size)
#endif

typedef struct CopiedStack {
  void *_stack_copy; /* The actual data */
  long size;
  int pushed;
  struct CopiedStack **next, **prev;
} CopiedStack;
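
/* Sketch of the bookkeeping (an explanatory note, not part of the
   original comments): copied stacks form a doubly-linked list whose
   link cells come from MALLOC_LINK, i.e. MALLOC_ONE_WEAK, so the list
   itself does not keep a copy alive:

     *first_copied_stack -> cs1 <-> cs2 <-> ...   (weak link cells)

   A copy survives a GC only if something else still references its
   CopiedStack; otherwise remove_cs (registered as a finalizer below)
   unlinks the record and frees the copied bytes. */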

static CopiedStack **first_copied_stack;
int scheme_num_copied_stacks = 0;

static void push_copied_stacks(int init)
{
  /* This is called after everything else is marked.
     Mark from those stacks that are still reachable. If
     we mark from a stack, we need to go back through the
     whole list again to check the previously unmarked stacks. */
  CopiedStack *cs;
  int pushed_one;

  if (init) {
    for (cs = *first_copied_stack; cs; cs = *cs->next) {
      if (get_copy(cs))
        cs->pushed = 0;
      else
        cs->pushed = 1;
    }
  }

  GC_flush_mark_stack();

  do {
    pushed_one = 0;
    for (cs = *first_copied_stack; cs; cs = *cs->next) {
      if (!cs->pushed && GC_is_marked(get_copy(cs))) {
        pushed_one = 1;
        cs->pushed = 1;
        GC_push_all_stack(get_copy(cs), (char *)get_copy(cs) + cs->size);
        if (GC_did_mark_stack_overflow()) {
          /* printf("mark stack overflow\n"); */
          return;
        } else {
          GC_flush_mark_stack();
          if (GC_did_mark_stack_overflow()) {
            /* printf("mark stack overflow (late)\n"); */
            return;
          }
        }
      }
    }
  } while (pushed_one);
}

static void init_push_copied_stacks(void)
{
  push_copied_stacks(1);
}

static void update_push_copied_stacks(void)
{
  do {
    push_copied_stacks(0);
  } while (scheme_propagate_ephemeron_marks());
}

void scheme_init_setjumpup(void)
{
  if (scheme_starting_up) {
    REGISTER_SO(first_copied_stack);
  }
  first_copied_stack = MALLOC_LINK();
  *first_copied_stack = NULL;

  GC_push_last_roots = init_push_copied_stacks;
  GC_push_last_roots_again = update_push_copied_stacks;
}

static void remove_cs(void *_cs, void *unused)
{
  CopiedStack *cs = (CopiedStack *)_cs;

  if (*cs->prev)
    *(*cs->prev)->next = *cs->next;
  else
    *first_copied_stack = *cs->next;

  if (*cs->next)
    *(*cs->next)->prev = *cs->prev;

  if (cs->_stack_copy) {
#ifndef SGC_STD_DEBUGGING
    GC_free(cs->_stack_copy);
#else
    memset(cs->_stack_copy, 0, cs->size);
#endif
    cs->_stack_copy = NULL;
  }

  --scheme_num_copied_stacks;
}

static void *make_stack_copy_rec(long size)
{
  CopiedStack *cs, **lk;

  cs = MALLOC_ONE(CopiedStack);
  cs->size = size;
  lk = MALLOC_LINK();
  cs->next = lk;
  lk = MALLOC_LINK();
  cs->prev = lk;

  /* doubly-linked list push: */
  *cs->next = *first_copied_stack;
  if (*first_copied_stack)
    *(*first_copied_stack)->prev = cs;
  *cs->prev = NULL;
  *first_copied_stack = cs;

  GC_register_finalizer(cs, remove_cs, NULL, NULL, NULL);

  scheme_num_copied_stacks++;

  return (void *)cs;
}

static void set_copy(void *s_c, void *c)
{
  CopiedStack *cs = (CopiedStack *)s_c;

  cs->_stack_copy = c;
}

/**********************************************************************/

#else

/* Precise GC: */
# define MALLOC_STACK(size) scheme_malloc_atomic(size)
# define get_copy(s_c) (s_c)
# define set_copy(s_c, c) s_c = c

#define STACK_COPY_CACHE_SIZE 10
static void *stack_copy_cache[STACK_COPY_CACHE_SIZE];
static long stack_copy_size_cache[STACK_COPY_CACHE_SIZE];
static int scc_pos;
#define SCC_OK_EXTRA_AMT 100
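
/* Reuse policy (see the cache scan in scheme_copy_stack below): a
   cached block is taken only if it is at least `size' bytes but less
   than size + SCC_OK_EXTRA_AMT, so a small stack copy never pins a
   much larger freed block. */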

START_XFORM_SKIP;

void scheme_flush_stack_copy_cache(void)
{
  int i;
  for (i = 0; i < STACK_COPY_CACHE_SIZE; i++) {
    stack_copy_cache[i] = NULL;
    stack_copy_size_cache[i] = 0;
  }
}

END_XFORM_SKIP;

#endif

/**********************************************************************/

#ifdef MZ_PRECISE_GC
# define GC_VAR_STACK_ARG_DECL , void *gc_var_stack_in
# define GC_VAR_STACK_ARG      , __gc_var_stack__
#else
# define GC_VAR_STACK_ARG_DECL /* empty */
# define GC_VAR_STACK_ARG      /* empty */
#endif

/* This function must not be inlined! */
void MZ_NO_INLINE scheme_copy_stack(Scheme_Jumpup_Buf *b, void *base, void *start GC_VAR_STACK_ARG_DECL)
{
  long size, msize;
  void *here;

  here = &size;

  size = (long)here XFORM_OK_MINUS (long)start;
#ifdef STACK_GROWS_UP
  b->stack_from = start;
#else
  size = -size;
  b->stack_from = here;
#endif

  if (size < 0)
    size = 0;

  msize = size;

  if (b->stack_max_size < size) {
    /* printf("Stack size: %d\n", size); */
    void *copy;
#ifndef MZ_PRECISE_GC
    copy = make_stack_copy_rec(size);
    b->stack_copy = copy;
    set_copy(b->stack_copy, MALLOC_STACK(size));
#else
    /* b is a pointer into the middle of `base'; bad for precise gc: */
    unsigned long diff;
    diff = (unsigned long)b XFORM_OK_MINUS (unsigned long)base;
    b = NULL;

    copy = NULL;
    /* Look for a reusable freed block: */
    {
      int i;
      for (i = 0; i < STACK_COPY_CACHE_SIZE; i++) {
        if ((stack_copy_size_cache[i] >= size)
            && (stack_copy_size_cache[i] < (size + SCC_OK_EXTRA_AMT))) {
          /* Found one */
          copy = stack_copy_cache[i];
          msize = stack_copy_size_cache[i];
          stack_copy_cache[i] = NULL;
          stack_copy_size_cache[i] = 0;
          break;
        }
      }
    }
    if (!copy) {
      /* No reusable block found */
      copy = MALLOC_STACK(size);
    }

    /* Restore b: */
    b = (Scheme_Jumpup_Buf *)(((char *)base) XFORM_OK_PLUS diff);

    set_copy(b->stack_copy, copy);
#endif
    b->stack_max_size = msize;
  }
  b->stack_size = size;

#ifdef MZ_PRECISE_GC
  b->gc_var_stack = gc_var_stack_in;
  if (scheme_get_external_stack_val) {
    void *es;
    es = scheme_get_external_stack_val();
    b->external_stack = es;
  }
#endif

  memcpy(get_copy(b->stack_copy),
         b->stack_from,
         size);
}
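
/* Overall protocol, for orientation (an explanatory note):
   scheme_setjmpup_relative (below) calls scheme_copy_stack to
   snapshot the C stack between `start' -- the deep boundary chosen
   by the caller -- and the current frame; scheme_longjmpup later
   re-installs the snapshot via scheme_uncopy_stack and then
   scheme_longjmp's back into the saved context. */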

MZ_DO_NOT_INLINE(void scheme_uncopy_stack(int ok, Scheme_Jumpup_Buf *b, long *prev));

void scheme_uncopy_stack(int ok, Scheme_Jumpup_Buf *b, long *prev)
{
  GC_CAN_IGNORE Scheme_Jumpup_Buf *c;
  long top_delta = 0, bottom_delta = 0, size;
  void *cfrom, *cto;

  if (!ok) {
    unsigned long z;
    long junk[200];

    z = (unsigned long)&junk[0];

    scheme_uncopy_stack(STK_COMP(z, DEEPPOS(b)), b, junk);
  }

  /* Vague attempt to prevent the compiler from optimizing away `prev': */
  prev[199] = 0;

  FLUSH_REGISTER_WINDOWS;

  START_XFORM_SKIP;
  c = b;
  while (c) {
    size = c->stack_size - top_delta;
    cto = (char *)c->stack_from + bottom_delta;
    cfrom = (char *)get_copy(c->stack_copy) + bottom_delta;

    memcpy(cto, cfrom, size);

    if (c->cont) {
#ifdef STACK_GROWS_UP
      top_delta = (((unsigned long)c->cont->buf.stack_from
                    + c->cont->buf.stack_size)
                   - (unsigned long)c->stack_from);
#else
      bottom_delta = ((unsigned long)c->stack_from
                      + c->stack_size
                      - (unsigned long)c->cont->buf.stack_from);
      top_delta = bottom_delta;
#endif
      c = &c->cont->buf;
    } else
      c = NULL;
  }
  END_XFORM_SKIP;

#ifdef MZ_PRECISE_GC
  GC_variable_stack = b->gc_var_stack;
  if (scheme_set_external_stack_val)
    scheme_set_external_stack_val(b->external_stack);
#endif

  scheme_longjmp(b->buf, 1);
}
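
/* About the `ok'/`junk' recursion above (an explanatory note): before
   overwriting the live stack, scheme_uncopy_stack re-calls itself with
   a fresh junk[200] frame until its own frame lies beyond DEEPPOS(b);
   only then can the memcpy paste the saved bytes over the active C
   stack without clobbering the frame doing the pasting. The while loop
   then walks the chain of enclosing continuation buffers, using
   top_delta and bottom_delta to avoid re-copying bytes that overlap
   the more deeply nested buffer just restored. */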

#ifdef MZ_PRECISE_GC
START_XFORM_SKIP;
#endif

static long find_same(char *p, char *low, long max_size)
{
  long cnt = 0;

  /* We assume a max possible amount of the current stack that should
     not be shared with the saved stack. This is ok (or not) in the same
     sense as assuming that STACK_SAFETY_MARGIN is enough wiggle room to
     prevent stack overflow. */
# define MAX_STACK_DIFF 4096

#ifdef SIXTY_FOUR_BIT_INTEGERS
# define SHARED_STACK_ALIGNMENT 8
#else
# define SHARED_STACK_ALIGNMENT 4
#endif

  if (max_size > MAX_STACK_DIFF) {
    cnt = max_size - MAX_STACK_DIFF;
    max_size = MAX_STACK_DIFF;
  }

#ifdef STACK_GROWS_UP
  while (max_size--) {
    if (p[cnt] != low[cnt])
      break;
    cnt++;
  }
#else
  while (max_size--) {
    if (p[max_size] != low[max_size])
      break;
    cnt++;
  }
#endif

  if (cnt & (SHARED_STACK_ALIGNMENT - 1)) {
    cnt -= (cnt & (SHARED_STACK_ALIGNMENT - 1));
  }

  return cnt;
}
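
/* Worked reading of find_same (an explanatory note): the result counts
   matching bytes from the base (oldest) end of the saved region. Only
   the MAX_STACK_DIFF bytes nearest the current end are actually
   compared; anything deeper is assumed shared, in the same spirit as
   the STACK_SAFETY_MARGIN assumption noted above. The count is then
   rounded down to SHARED_STACK_ALIGNMENT so that the shared/fresh
   boundary never splits an aligned word. */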

#ifdef MZ_PRECISE_GC
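
/* Frame layout assumed by the traversal below (a sketch inferred from
   the loop, so treat the details as descriptive): a 3m var-stack frame
   looks like
     frame[0]   = pointer to the previous (deeper) frame
     frame[1]   = number of entry slots that follow
     frame[2..] = addresses of local pointer variables, where a NULL
                  entry means the next two slots hold an array's
                  (base address, element count) pair.
   align_var_stack uses this to land `s' on a frame boundary. */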
static void *align_var_stack(void **vs, void *s)
{
  void **nvs, **next;
  long i, cnt;
  void *a;

  while (STK_COMP((unsigned long)vs, (unsigned long)s)) {
    vs = (void **)(*vs);
  }

  s = (void *)vs;

  /* Check next few frames to see whether they refer to variables
     before s. This can happen due to inlining, so that an older
     frame is shallower in the stack. It shouldn't happen much,
     though. */
  nvs = *vs;
  while (nvs) {
    next = NULL;
    cnt = ((long *)nvs)[1];
    for (i = 0; i < cnt; i++) {
      a = nvs[i+2];
      if (!a) {
        a = nvs[i+3];
        i += 2;
      }
      if (STK_COMP((unsigned long)a, (unsigned long)s)) {
        /* We need nvs to update part of copied stack! */
        vs = nvs;
        s = (void *)vs;
        next = *nvs;
        break;
      }
    }
    nvs = next;
  }

  return s;
}
#define ALIGN_VAR_STACK(vs, s) s = align_var_stack(vs, s)

static void *shift_var_stack(void *s, long delta)
{
#ifdef STACK_GROWS_UP
  return s;
#else
  void **vs = (void **)((char *)s + delta);
  long cnt;

  /* Set s past end of vs: */
  cnt = ((long *)vs)[1];
  return (void *)((void **)s + cnt + 2);
#endif
}
#define PAST_VAR_STACK(s) s = shift_var_stack(s, 0);
#define PAST_VAR_STACK_DELTA(s, d) s = shift_var_stack(s, d);
END_XFORM_SKIP;
#else
# define ALIGN_VAR_STACK(vs, s) /* empty */
# define PAST_VAR_STACK(s) /* empty */
# define PAST_VAR_STACK_DELTA(s, d) /* empty */
#endif

int scheme_setjmpup_relative(Scheme_Jumpup_Buf *b, void *base,
                             void * volatile start, struct Scheme_Cont *c)
{
  int local;
  long disguised_b;

#ifdef MZ_USE_JIT
  scheme_flush_stack_cache();
#endif

  FLUSH_REGISTER_WINDOWS;

  if (!(local = scheme_setjmp(b->buf))) {
    if (c) {
      /* We'd like to re-use the stack copied for a continuation
         that encloses the current one --- but we don't know exactly
         how much of the stack is supposed to be shared, since call/cc
         is implemented with a trampoline; certainly, the shallowest
         bit of the old continuation is not right for this one. So,
         we just start from the deepest part of the stack and find
         how many bytes match (using find_same).
         For chains of continuations C1 < C2 < C3, we assume that the
         discovered-safe part of C1 used for C2 is also valid for C3,
         so checking for C3 starts with the fresh part in C2, and
         that's where asymptotic benefits start to kick in.
         Unfortunately, I can't quite convince myself that this
         assumption is definitely correct. I think it's likely correct,
         but watch out. */
      long same_size;
      START_XFORM_SKIP;
      same_size = find_same(get_copy(c->buf.stack_copy), c->buf.stack_from, c->buf.stack_size);
      b->cont = c;
#ifdef STACK_GROWS_UP
      start = (void *)((char *)c->buf.stack_from + same_size);
#else
      start = (void *)((char *)c->buf.stack_from + (c->buf.stack_size - same_size));
#endif
      /* In 3m-mode, we need `start' on a var-stack boundary: */
      ALIGN_VAR_STACK(__gc_var_stack__, start);
      END_XFORM_SKIP;
    } else
      b->cont = NULL;

    /* In 3m-mode, we need `start' at the end of the frame */
    PAST_VAR_STACK(start);

    /* b is a pointer into the middle of `base', which is bad for
       precise gc, so we hide it: */
    disguised_b = (long)b;
    b = NULL;

    scheme_copy_stack((Scheme_Jumpup_Buf *)disguised_b, base, start GC_VAR_STACK_ARG);

    /* Precise GC: ensure that this frame is pushed. */
    if (0) {
      base = scheme_malloc(0);
    }

    return 0;
  }

  return local;
}
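
/* A minimal usage sketch (hypothetical caller; `buf', `base', and
   `deep' are placeholders, not names from this file):

     if (!scheme_setjmpup_relative(&buf, base, deep, NULL)) {
       ... first pass: the C stack beyond `deep' is now saved ...
     } else {
       ... arrived here again via scheme_longjmpup(&buf) ...
     }

   The return value is 0 after saving, and non-zero (the setjmp result)
   when the saved context is re-entered. */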

struct Scheme_Overflow_Jmp *scheme_prune_jmpup(struct Scheme_Overflow_Jmp *jmp, void *stack_boundary)
{
  void *cur_end;

  PAST_VAR_STACK_DELTA(stack_boundary, (char *)get_copy(jmp->cont.stack_copy) - (char *)jmp->cont.stack_from);

#ifdef STACK_GROWS_UP
  cur_end = (void *)jmp->cont.stack_from;
#else
  cur_end = (void *)((char *)jmp->cont.stack_from + jmp->cont.stack_size);
#endif

  if (stack_boundary != cur_end) {
    long new_size, delta;
    Scheme_Overflow_Jmp *naya;
    void *copy, *base;

# ifdef STACK_GROWS_UP
    delta = (char *)stack_boundary - (char *)jmp->cont.stack_from;
    new_size = jmp->cont.stack_size - delta;
    base = (char *)stack_boundary;
# else
    delta = 0;
    new_size = (long)stack_boundary - (long)jmp->cont.stack_from;
    base = jmp->cont.stack_from;
# endif

    if ((new_size < 0) || (new_size > jmp->cont.stack_size))
      scheme_signal_error("bad C-stack pruning size: %ld vs. %ld", new_size, jmp->cont.stack_size);

    naya = MALLOC_ONE_RT(Scheme_Overflow_Jmp);
    memcpy(naya, jmp, sizeof(Scheme_Overflow_Jmp));
    scheme_init_jmpup_buf(&naya->cont);

#ifndef MZ_PRECISE_GC
    copy = make_stack_copy_rec(new_size);
    naya->cont.stack_copy = copy;
    set_copy(naya->cont.stack_copy, MALLOC_STACK(new_size));
#else
    copy = MALLOC_STACK(new_size);
    set_copy(naya->cont.stack_copy, copy);
#endif

    memcpy(get_copy(copy),
           (char *)get_copy(jmp->cont.stack_copy) XFORM_OK_PLUS delta,
           new_size);

    naya->cont.stack_size = naya->cont.stack_max_size = new_size;
    naya->cont.stack_from = base;

    return naya;
  }

  return NULL;
}

void scheme_longjmpup(Scheme_Jumpup_Buf *b)
{
  long z;
  long junk[200];

#ifdef MZ_USE_JIT
  scheme_flush_stack_cache();
#endif

  scheme_uncopy_stack(STK_COMP((unsigned long)&z, DEEPPOS(b)), b, junk);
}

void scheme_init_jmpup_buf(Scheme_Jumpup_Buf *b)
{
  b->stack_size = b->stack_max_size = 0;
  b->stack_from = b->stack_copy = NULL;
}

void scheme_reset_jmpup_buf(Scheme_Jumpup_Buf *b)
{
  if (b->stack_copy) {
#ifdef MZ_PRECISE_GC
    /* "Free" the stack copy by putting it into a cache.
       (We clear the cache before a GC.) */
    stack_copy_cache[scc_pos] = b->stack_copy;
    stack_copy_size_cache[scc_pos] = b->stack_max_size;
    scc_pos++;
    if (scc_pos == STACK_COPY_CACHE_SIZE)
      scc_pos = 0;
#else
    /* Drop the copy of the stack, */
    /* remove the finalizer, */
    /* and explicitly call the finalization proc */
    GC_register_finalizer(b->stack_copy, NULL, NULL, NULL, NULL);
    remove_cs(b->stack_copy, NULL);
#endif

    scheme_init_jmpup_buf(b);
  }

  memset(&b->buf, 0, sizeof(mz_jmp_buf));
}

#ifdef USE_MZ_CYGWIN_SETJMP
/* We have to define setjmp & longjmp to remain compatible
   with MSVC-compiled extensions. It's mostly the same code
   as mzsj86.c, just in a slightly different syntax, and it
   probably only works with -O2. */

int scheme_mz_setjmp(mz_pre_jmp_buf b)
{
  asm("mov 4(%EBP), %ECX"); /* return address */
  asm("mov 8(%EBP), %EAX"); /* jmp_buf ptr */
  asm("mov (%EBP), %EDX");  /* old EBP */
  asm("mov %EDX, (%EAX)");
  asm("mov %EBX, 4(%EAX)");
  asm("mov %EDI, 8(%EAX)");
  asm("mov %ESI, 12(%EAX)");
  asm("mov %ESP, 16(%EAX)");
  asm("mov %ECX, 20(%EAX)");

  return 0;
}
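
/* As laid out by the stores above, the pre-jmp buffer holds, in order:
   saved EBP, EBX, EDI, ESI, ESP, and the return address (EIP);
   scheme_mz_longjmp below restores them in the matching order. */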

void scheme_mz_longjmp(mz_pre_jmp_buf b, int v)
{
  asm("mov 12(%EBP), %EAX"); /* return value */
  asm("mov 8(%EBP), %ECX");  /* jmp_buf */
  asm("mov 16(%ECX), %ESP"); /* restore stack pointer */
  asm("mov (%ECX), %EBP");   /* old EBP */
  asm("mov %EBP, (%ESP)");
  asm("mov %ESP, %EBP");
  asm("mov 4(%ECX), %EBX");
  asm("mov 8(%ECX), %EDI");
  asm("mov 12(%ECX), %ESI");
  asm("mov 20(%ECX), %ECX"); /* return address */
  asm("mov %ECX, 4(%EBP)");
}

#endif