
plt-scheme  4.2.1
gmplonglong.h
00001 /* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
00002 
00003 Copyright (C) 1991, 1992, 1993, 1994, 1996, 1997, 1999, 2000 Free Software
00004 Foundation, Inc.
00005 
00006 This file is free software; you can redistribute it and/or modify
00007 it under the terms of the GNU Lesser General Public License as published by
00008 the Free Software Foundation; either version 2.1 of the License, or (at your
00009 option) any later version.
00010 
00011 This file is distributed in the hope that it will be useful, but
00012 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
00013 or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
00014 License for more details.
00015 
00016 You should have received a copy of the GNU Lesser General Public License
00017 along with this file; see the file COPYING.LIB.  If not, write to
00018 the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
00019 MA 02111-1307, USA. */
00020 
00021 /* You have to define the following before including this file:
00022 
00023    UWtype -- An unsigned type, default type for operations (typically a "word")
00024    UHWtype -- An unsigned type, at least half the size of UWtype.
00025    UDWtype -- An unsigned type, at least twice as large as UWtype
00026    W_TYPE_SIZE -- size in bits of UWtype
00027 
00028    SItype, USItype -- Signed and unsigned 32 bit types.
00029    DItype, UDItype -- Signed and unsigned 64 bit types.
00030 
00031    On a 32 bit machine UWtype should typically be USItype;
00032    on a 64 bit machine, UWtype should typically be UDItype.
00033 */
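/* For illustration only: on a typical ILP32 target the required definitions
   might be supplied along these lines (assuming "unsigned int" is 32 bits and
   "unsigned long long" is 64 bits):

       typedef          int        SItype;
       typedef unsigned int        USItype;
       typedef          long long  DItype;
       typedef unsigned long long  UDItype;

       #define UWtype      USItype
       #define UHWtype     unsigned short
       #define UDWtype     UDItype
       #define W_TYPE_SIZE 32
*/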
00034 
00035 #define __BITS4 (W_TYPE_SIZE / 4)
00036 #define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
00037 #define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
00038 #define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
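/* For example, with W_TYPE_SIZE == 32, __ll_B is 2^16, so for t == 0x12345678
   __ll_lowpart (t) yields 0x5678 and __ll_highpart (t) yields 0x1234; a word
   is thus split into two half-words for schoolbook multiplication and
   division.  */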
00039 
00040 /* This is used to make sure that no undesirable symbol sharing takes place
00041    between different libraries that use this file.  */
00042 #ifndef __MPN
00043 #define __MPN(x) __##x
00044 #endif
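/* For example, with this default __MPN(udiv_qrnnd) expands to __udiv_qrnnd;
   a library embedding this file can pre-define __MPN to add its own prefix so
   that these internal symbols do not clash with another copy of the code.  */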
00045 
00046 #ifndef _PROTO
00047 #if (__STDC__-0) || defined (__cplusplus)
00048 #define _PROTO(x) x
00049 #else
00050 #define _PROTO(x) ()
00051 #endif
00052 #endif
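/* For example, under an ANSI C or C++ compiler the declaration
       extern UDItype __MPN(umul_ppmm) _PROTO ((UDItype, UDItype, UDItype *));
   keeps its full prototype, while a pre-ANSI compiler sees only
       extern UDItype __umul_ppmm ();
   The double parentheses make the whole parameter list a single macro
   argument.  */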
00053 
00054 /* Define auxiliary asm macros.
00055 
00056    1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
00057    UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype
00058    word product in HIGH_PROD and LOW_PROD.
00059 
00060    2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
00061    UDWtype product.  This is just a variant of umul_ppmm.
00062 
00063    3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
00064    denominator) divides a UDWtype, composed of the UWtype integers
00065    HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
00066    in QUOTIENT and the remainder in REMAINDER.  HIGH_NUMERATOR must be less
00067    than DENOMINATOR for correct operation.  If, in addition, the macro
00068    requires DENOMINATOR to have its most significant bit set (a normalized
00069    denominator), the pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1.
00070 
00071    4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
00072    denominator).  Like udiv_qrnnd but the numbers are signed.  The quotient
00073    is rounded towards 0.
00074 
00075    5) count_leading_zeros(count, x) counts the number of zero-bits from the
00076    msb to the first non-zero bit in the UWtype X.  This is the number of
00077    steps X needs to be shifted left to set the msb.  Undefined for X == 0,
00078    unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value.
00079 
00080    6) count_trailing_zeros(count, x) like count_leading_zeros, but counts
00081    from the least significant end.
00082 
00083    7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
00084    high_addend_2, low_addend_2) adds two two-word integers, composed of the
00085    UWtype words HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2,
00086    respectively.  The result is placed in HIGH_SUM and LOW_SUM.  Overflow
00087    (i.e. carry out) is not stored anywhere, and is lost.
00088 
00089    8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
00090    high_subtrahend, low_subtrahend) subtracts two two-word integers, composed
00091    of the UWtype words HIGH_MINUEND and LOW_MINUEND, and HIGH_SUBTRAHEND and
00092    LOW_SUBTRAHEND, respectively.  The result is placed in HIGH_DIFFERENCE
00093    and LOW_DIFFERENCE.  Overflow (i.e. carry out) is not stored anywhere,
00094    and is lost.
00095 
00096    If any of these macros are left undefined for a particular CPU,
00097    C macros are used.  */
00098 
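/* As an illustration of such a C fallback, umul_ppmm can be written portably
   with the half-word macros defined above, using schoolbook multiplication of
   the half-word "digits" of U and V.  The sketch below is guarded out; it only
   shows the technique that the generic C definitions rely on when none of the
   CPU-specific asm versions that follow applies.  */
#if 0
#define umul_ppmm(w1, w0, u, v)                                       \
  do {                                                                \
    UWtype __x0, __x1, __x2, __x3;                                    \
    UHWtype __ul, __vl, __uh, __vh;                                   \
    __ul = __ll_lowpart (u);                                          \
    __uh = __ll_highpart (u);                                         \
    __vl = __ll_lowpart (v);                                          \
    __vh = __ll_highpart (v);                                         \
    __x0 = (UWtype) __ul * __vl;       /* low  * low  */              \
    __x1 = (UWtype) __ul * __vh;       /* low  * high */              \
    __x2 = (UWtype) __uh * __vl;       /* high * low  */              \
    __x3 = (UWtype) __uh * __vh;       /* high * high */              \
    __x1 += __ll_highpart (__x0);      /* this cannot carry */        \
    __x1 += __x2;                      /* but this can */             \
    if (__x1 < __x2)                   /* carry into the high word */ \
      __x3 += __ll_B;                                                 \
    (w1) = __x3 + __ll_highpart (__x1);                               \
    (w0) = (__x1 << (W_TYPE_SIZE / 2)) + __ll_lowpart (__x0);         \
  } while (0)
#endif
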
00099 #ifndef MZ_GMP_NO_ASM
00100 
00101 /* The CPUs come in alphabetical order below.
00102 
00103    Please add support for more CPUs here, or improve the current support
00104    for the CPUs below!  */
00105 
00106 #if defined (__alpha) && W_TYPE_SIZE == 64
00107 #if defined (__GNUC__)
00108 #define umul_ppmm(ph, pl, m0, m1) \
00109   do {                                                         \
00110     UDItype __m0 = (m0), __m1 = (m1);                                 \
00111     __asm__ ("umulh %r1,%2,%0"                                        \
00112             : "=r" (ph)                                        \
00113             : "%rJ" (m0), "rI" (m1));                                 \
00114     (pl) = __m0 * __m1;                                               \
00115   } while (0)
00116 #define UMUL_TIME 18
00117 #ifndef LONGLONG_STANDALONE
00118 #define udiv_qrnnd(q, r, n1, n0, d) \
00119   do { UDItype __di;                                           \
00120     __di = __MPN(invert_limb) (d);                             \
00121     udiv_qrnnd_preinv (q, r, n1, n0, d, __di);                        \
00122   } while (0)
00123 #define UDIV_NEEDS_NORMALIZATION 1
00124 #define UDIV_TIME 220
00125 long __MPN(count_leading_zeros) ();
00126 #define count_leading_zeros(count, x) \
00127   ((count) = __MPN(count_leading_zeros) (x))
00128 #endif /* LONGLONG_STANDALONE */
00129 #else /* ! __GNUC__ */
00130 #include <machine/builtins.h>
00131 #define umul_ppmm(ph, pl, m0, m1) \
00132   do {                                                         \
00133     UDItype __m0 = (m0), __m1 = (m1);                                 \
00134     (ph) = __UMULH (m0, m1);                                          \
00135     (pl) = __m0 * __m1;                                               \
00136   } while (0)
00137 #endif
00138 #endif /* __alpha */
00139 
00140 #if defined (__hppa) && W_TYPE_SIZE == 64
00141 /* We put the result pointer parameter last here, since it makes passing
00142    of the other parameters more efficient.  */
00143 #define LONGLONG_STANDALONE /* <----------- PLTSCHEME: Avoid extern */
00144 #ifndef LONGLONG_STANDALONE
00145 #define umul_ppmm(wh, wl, u, v) \
00146   do {                                                         \
00147     UDItype __p0;                                              \
00148     (wh) = __MPN(umul_ppmm) (u, v, &__p0);                            \
00149     (wl) = __p0;                                               \
00150   } while (0)
00151 extern UDItype __MPN(umul_ppmm) _PROTO ((UDItype, UDItype, UDItype *));
00152 #define udiv_qrnnd(q, r, n1, n0, d) \
00153   do { UDItype __r;                                            \
00154     (q) = __MPN(udiv_qrnnd) (n1, n0, d, &__r);                        \
00155     (r) = __r;                                                        \
00156   } while (0)
00157 extern UDItype __MPN(udiv_qrnnd) _PROTO ((UDItype, UDItype, UDItype, UDItype *));
00158 #define UMUL_TIME 8
00159 #define UDIV_TIME 60
00160 #endif /* LONGLONG_STANDALONE */
00161 #endif /* hppa */
00162 
00163 /* PLTSCHEME: doesn't seem to work, so disabled: */
00164 #if defined (__ia64) && W_TYPE_SIZE == 64 && 0
00165 #if defined (__GNUC__)
00166 #define umul_ppmm(ph, pl, m0, m1) \
00167   do {                                                         \
00168     UDItype __m0 = (m0), __m1 = (m1);                                 \
00169     __asm__ ("xma.hu %0 = %1, %2, f0"                                 \
00170             : "=e" (ph)                                        \
00171             : "e" (m0), "e" (m1));                             \
00172     (pl) = __m0 * __m1;                                               \
00173   } while (0)
00174 #endif
00175 #endif
00176 
00177 
00178 #if defined (__GNUC__) && !defined (NO_ASM)
00179 
00180 /* We sometimes need to clobber "cc" with gcc2, but that would not be
00181    understood by gcc1.  Use cpp to avoid major code duplication.  */
00182 #if __GNUC__ < 2
00183 #define __CLOBBER_CC
00184 #define __AND_CLOBBER_CC
00185 #else /* __GNUC__ >= 2 */
00186 #define __CLOBBER_CC : "cc"
00187 #define __AND_CLOBBER_CC , "cc"
00188 #endif /* __GNUC__ < 2 */
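/* For example, the sparc add_ssaaaa below ends its operand list with
   __CLOBBER_CC, which becomes ': "cc"' under gcc 2 and later and expands to
   nothing under gcc 1, so a single asm definition serves both compilers.  */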
00189 
00190 #if (defined (__a29k__) || defined (_AM29K)) && W_TYPE_SIZE == 32
00191 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
00192   __asm__ ("add %1,%4,%5\n\taddc %0,%2,%3"                            \
00193           : "=r" (sh), "=&r" (sl)                              \
00194           : "%r" (ah), "rI" (bh), "%r" (al), "rI" (bl))
00195 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
00196   __asm__ ("sub %1,%4,%5\n\tsubc %0,%2,%3"                            \
00197           : "=r" (sh),      "=&r" (sl)                                \
00198           : "r" (ah), "rI" (bh), "r" (al), "rI" (bl))
00199 #define umul_ppmm(xh, xl, m0, m1) \
00200   do {                                                         \
00201     USItype __m0 = (m0), __m1 = (m1);                                 \
00202     __asm__ ("multiplu %0,%1,%2"                               \
00203             : "=r" (xl)                                        \
00204             : "r" (__m0), "r" (__m1));                                \
00205     __asm__ ("multmu %0,%1,%2"                                        \
00206             : "=r" (xh)                                        \
00207             : "r" (__m0), "r" (__m1));                                \
00208   } while (0)
00209 #define udiv_qrnnd(q, r, n1, n0, d) \
00210   __asm__ ("dividu %0,%3,%4"                                          \
00211           : "=r" (q), "=q" (r)                                        \
00212           : "1" (n1), "r" (n0), "r" (d))
00213 #define count_leading_zeros(count, x) \
00214     __asm__ ("clz %0,%1"                                       \
00215             : "=r" (count)                                     \
00216             : "r" (x))
00217 #define COUNT_LEADING_ZEROS_0 32
00218 #endif /* __a29k__ */
00219 
00220 #if defined (__arm__) && W_TYPE_SIZE == 32
00221 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
00222   __asm__ ("adds\t%1, %4, %5\n\tadc\t%0, %2, %3"               \
00223           : "=r" (sh), "=&r" (sl)                              \
00224           : "%r" (ah), "rI" (bh), "%r" (al), "rI" (bl))
00225 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
00226   __asm__ ("subs\t%1, %4, %5\n\tsbc\t%0, %2, %3"               \
00227           : "=r" (sh),      "=&r" (sl)                                \
00228           : "r" (ah), "rI" (bh), "r" (al), "rI" (bl))
00229 #if 1 || defined (__arm_m__)              /* `M' series has widening multiply support */
00230 #define umul_ppmm(xh, xl, a, b) \
00231   __asm__ ("umull %0,%1,%2,%3" : "=&r" (xl), "=&r" (xh) : "r" (a), "r" (b))
00232 #define smul_ppmm(xh, xl, a, b) \
00233   __asm__ ("smull %0,%1,%2,%3" : "=&r" (xl), "=&r" (xh) : "r" (a), "r" (b))
00234 #define UMUL_TIME 5
00235 #else
00236 #define umul_ppmm(xh, xl, a, b) \
00237   __asm__ ("%@ Inlined umul_ppmm\n"                                   \
00238 "      mov    %|r0, %2, lsr #16\n"                             \
00239 "      mov    %|r2, %3, lsr #16\n"                             \
00240 "      bic    %|r1, %2, %|r0, lsl #16\n"                       \
00241 "      bic    %|r2, %3, %|r2, lsl #16\n"                       \
00242 "      mul    %1, %|r1, %|r2\n"                                \
00243 "      mul    %|r2, %|r0, %|r2\n"                              \
00244 "      mul    %|r1, %0, %|r1\n"                                \
00245 "      mul    %0, %|r0, %0\n"                                         \
00246 "      adds   %|r1, %|r2, %|r1\n"                              \
00247 "      addcs  %0, %0, #65536\n"                                \
00248 "      adds   %1, %1, %|r1, lsl #16\n"                         \
00249 "      adc    %0, %0, %|r1, lsr #16"                                  \
00250           : "=&r" (xh), "=r" (xl)                              \
00251           : "r" (a), "r" (b)                                          \
00252           : "r0", "r1", "r2")
00253 #define UMUL_TIME 20
00254 #endif
00255 #define UDIV_TIME 100
00256 #endif /* __arm__ */
00257 
00258 #if defined (__clipper__) && W_TYPE_SIZE == 32
00259 #define umul_ppmm(w1, w0, u, v) \
00260   ({union {UDItype __ll;                                       \
00261           struct {USItype __l, __h;} __i;                      \
00262          } __x;                                                \
00263   __asm__ ("mulwux %2,%0"                                      \
00264           : "=r" (__x.__ll)                                    \
00265           : "%0" ((USItype)(u)), "r" ((USItype)(v)));                 \
00266   (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
00267 #define smul_ppmm(w1, w0, u, v) \
00268   ({union {DItype __ll;                                               \
00269           struct {SItype __l, __h;} __i;                       \
00270          } __x;                                                \
00271   __asm__ ("mulwx %2,%0"                                       \
00272           : "=r" (__x.__ll)                                    \
00273           : "%0" ((SItype)(u)), "r" ((SItype)(v)));                   \
00274   (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
00275 #define __umulsidi3(u, v) \
00276   ({UDItype __w;                                               \
00277     __asm__ ("mulwux %2,%0"                                    \
00278             : "=r" (__w) : "%0" ((USItype)(u)), "r" ((USItype)(v)));  \
00279     __w; })
00280 #endif /* __clipper__ */
00281 
00282 /* Fujitsu vector computers.  */
00283 #if defined (__uxp__) && W_TYPE_SIZE == 32
00284 #define umul_ppmm(ph, pl, u, v) \
00285   do {                                                         \
00286     union {UDItype __ll;                                       \
00287           struct {USItype __h, __l;} __i;                      \
00288          } __x;                                                \
00289     __asm__ ("mult.lu %1,%2,%0"    : "=r" (__x.__ll) : "%r" (u), "rK" (v));\
00290     (ph) = __x.__i.__h;                                               \
00291     (pl) = __x.__i.__l;                                               \
00292   } while (0)
00293 #define smul_ppmm(ph, pl, u, v) \
00294   do {                                                         \
00295     union {UDItype __ll;                                       \
00296           struct {USItype __h, __l;} __i;                      \
00297          } __x;                                                \
00298     __asm__ ("mult.l %1,%2,%0" : "=r" (__x.__ll) : "%r" (u), "rK" (v));      \
00299     (ph) = __x.__i.__h;                                               \
00300     (pl) = __x.__i.__l;                                               \
00301   } while (0)
00302 #endif
00303 
00304 #if defined (__gmicro__) && W_TYPE_SIZE == 32
00305 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
00306   __asm__ ("add.w %5,%1\n\taddx %3,%0"                                \
00307           : "=g" ((USItype)(sh)), "=&g" ((USItype)(sl))        \
00308           : "%0" ((USItype)(ah)), "g" ((USItype)(bh)),                \
00309             "%1" ((USItype)(al)), "g" ((USItype)(bl)))
00310 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
00311   __asm__ ("sub.w %5,%1\n\tsubx %3,%0"                                \
00312           : "=g" ((USItype)(sh)), "=&g" ((USItype)(sl))        \
00313           : "0" ((USItype)(ah)), "g" ((USItype)(bh)),                 \
00314             "1" ((USItype)(al)), "g" ((USItype)(bl)))
00315 #define umul_ppmm(ph, pl, m0, m1) \
00316   __asm__ ("mulx %3,%0,%1"                                     \
00317           : "=g" ((USItype)(ph)), "=r" ((USItype)(pl))                \
00318           : "%0" ((USItype)(m0)), "g" ((USItype)(m1)))
00319 #define udiv_qrnnd(q, r, nh, nl, d) \
00320   __asm__ ("divx %4,%0,%1"                                     \
00321           : "=g" ((USItype)(q)), "=r" ((USItype)(r))                  \
00322           : "1" ((USItype)(nh)), "0" ((USItype)(nl)), "g" ((USItype)(d)))
00323 #define count_leading_zeros(count, x) \
00324   __asm__ ("bsch/1 %1,%0"                                      \
00325           : "=g" (count) : "g" ((USItype)(x)), "0" ((USItype)0))
00326 #endif
00327 
00328 #if defined (__hppa) && W_TYPE_SIZE == 32
00329 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
00330   __asm__ ("add %4,%5,%1\n\taddc %2,%3,%0"                            \
00331           : "=r" (sh), "=&r" (sl)                              \
00332           : "%rM" (ah), "rM" (bh), "%rM" (al), "rM" (bl))
00333 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
00334   __asm__ ("sub %4,%5,%1\n\tsubb %2,%3,%0"                            \
00335           : "=r" (sh), "=&r" (sl)                              \
00336           : "rM" (ah), "rM" (bh), "rM" (al), "rM" (bl))
00337 #if defined (_PA_RISC1_1)
00338 #define umul_ppmm(wh, wl, u, v) \
00339   do {                                                         \
00340     union {UDItype __ll;                                       \
00341           struct {USItype __h, __l;} __i;                      \
00342          } __x;                                                \
00343     __asm__ ("xmpyu %1,%2,%0" : "=*f" (__x.__ll) : "*f" (u), "*f" (v));      \
00344     (wh) = __x.__i.__h;                                               \
00345     (wl) = __x.__i.__l;                                               \
00346   } while (0)
00347 #define UMUL_TIME 8
00348 #define UDIV_TIME 60
00349 #else
00350 #define UMUL_TIME 40
00351 #define UDIV_TIME 80
00352 #endif
00353 #define LONGLONG_STANDALONE /* <----------- PLTSCHEME: Avoid extern */
00354 #ifndef LONGLONG_STANDALONE
00355 #define udiv_qrnnd(q, r, n1, n0, d) \
00356   do { USItype __r;                                            \
00357     (q) = __MPN(udiv_qrnnd) (&__r, (n1), (n0), (d));                  \
00358     (r) = __r;                                                        \
00359   } while (0)
00360 extern USItype __MPN(udiv_qrnnd) _PROTO ((USItype *, USItype, USItype, USItype));
00361 #endif /* LONGLONG_STANDALONE */
00362 #define count_leading_zeros(count, x) \
00363   do {                                                         \
00364     USItype __tmp;                                             \
00365     __asm__ (                                                  \
00366        "ldi          1,%0\n"                                          \
00367 "      extru,=              %1,15,16,%%r0 ; Bits 31..16 zero?\n"             \
00368 "      extru,tr      %1,15,16,%1   ; No.  Shift down, skip add.\n"    \
00369 "      ldo           16(%0),%0     ; Yes.  Perform add.\n"            \
00370 "      extru,=              %1,23,8,%%r0  ; Bits 15..8 zero?\n"              \
00371 "      extru,tr      %1,23,8,%1    ; No.  Shift down, skip add.\n"    \
00372 "      ldo           8(%0),%0      ; Yes.  Perform add.\n"            \
00373 "      extru,=              %1,27,4,%%r0  ; Bits 7..4 zero?\n"        \
00374 "      extru,tr      %1,27,4,%1    ; No.  Shift down, skip add.\n"    \
00375 "      ldo           4(%0),%0      ; Yes.  Perform add.\n"            \
00376 "      extru,=              %1,29,2,%%r0  ; Bits 3..2 zero?\n"        \
00377 "      extru,tr      %1,29,2,%1    ; No.  Shift down, skip add.\n"    \
00378 "      ldo           2(%0),%0      ; Yes.  Perform add.\n"            \
00379 "      extru         %1,30,1,%1    ; Extract bit 1.\n"         \
00380 "      sub           %0,%1,%0      ; Subtract it.\n"           \
00381        : "=r" (count), "=r" (__tmp) : "1" (x));                \
00382   } while (0)
00383 #endif /* hppa */
00384 
00385 #if (defined (__i370__) || defined (__mvs__)) && W_TYPE_SIZE == 32
00386 #define smul_ppmm(xh, xl, m0, m1) \
00387   do {                                                         \
00388     union {DItype __ll;                                               \
00389           struct {USItype __h, __l;} __i;                      \
00390          } __x;                                                \
00391     __asm__ ("mr %0,%3"                                               \
00392             : "=r" (__x.__i.__h), "=r" (__x.__i.__l)                  \
00393             : "%1" (m0), "r" (m1));                                   \
00394     (xh) = __x.__i.__h; (xl) = __x.__i.__l;                           \
00395   } while (0)
00396 #define sdiv_qrnnd(q, r, n1, n0, d) \
00397   do {                                                         \
00398     union {DItype __ll;                                               \
00399           struct {USItype __h, __l;} __i;                      \
00400          } __x;                                                \
00401     __x.__i.__h = n1; __x.__i.__l = n0;                               \
00402     __asm__ ("dr %0,%2"                                               \
00403             : "=r" (__x.__ll)                                         \
00404             : "0" (__x.__ll), "r" (d));                        \
00405     (q) = __x.__i.__l; (r) = __x.__i.__h;                      \
00406   } while (0)
00407 #endif
00408 
00409 #if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
00410 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
00411   __asm__ ("addl %5,%1\n\tadcl %3,%0"                                 \
00412           : "=r" ((USItype)(sh)), "=&r" ((USItype)(sl))        \
00413           : "%0" ((USItype)(ah)), "g" ((USItype)(bh)),                \
00414             "%1" ((USItype)(al)), "g" ((USItype)(bl)))
00415 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
00416   __asm__ ("subl %5,%1\n\tsbbl %3,%0"                                 \
00417           : "=r" ((USItype)(sh)), "=&r" ((USItype)(sl))        \
00418           : "0" ((USItype)(ah)), "g" ((USItype)(bh)),                 \
00419             "1" ((USItype)(al)), "g" ((USItype)(bl)))
00420 #define umul_ppmm(w1, w0, u, v) \
00421   __asm__ ("mull %3"                                           \
00422           : "=a" (w0), "=d" (w1)                               \
00423           : "%0" ((USItype)(u)), "rm" ((USItype)(v)))
00424 #define udiv_qrnnd(q, r, n1, n0, d) \
00425   __asm__ ("divl %4"                                           \
00426           : "=a" (q), "=d" (r)                                        \
00427           : "0" ((USItype)(n0)), "1" ((USItype)(n1)), "rm" ((USItype)(d)))
00428 #define count_leading_zeros(count, x) \
00429   do {                                                         \
00430     USItype __cbtmp;                                           \
00431     __asm__ ("bsrl %1,%0" : "=r" (__cbtmp) : "rm" ((USItype)(x)));    \
00432     (count) = __cbtmp ^ 31;                                    \
00433   } while (0)
00434 #define count_trailing_zeros(count, x) \
00435   __asm__ ("bsfl %1,%0" : "=r" (count) : "rm" ((USItype)(x)))
00436 #ifndef UMUL_TIME
00437 #define UMUL_TIME 10
00438 #endif
00439 #ifndef UDIV_TIME
00440 #define UDIV_TIME 40
00441 #endif
00442 #endif /* 80x86 */
00443 
00444 #if defined (__i860__) && W_TYPE_SIZE == 32
00445 #define rshift_rhlc(r,h,l,c) \
00446   __asm__ ("shr %3,r0,r0\;shrd %1,%2,%0"                       \
00447           : "=r" (r) : "r" (h), "r" (l), "rn" (c))
00448 #endif /* i860 */
00449 
00450 #if defined (__i960__) && W_TYPE_SIZE == 32
00451 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
00452   __asm__ ("cmpo 1,0\;addc %5,%4,%1\;addc %3,%2,%0"                   \
00453           : "=r" (sh), "=&r" (sl)                              \
00454           : "%dI" (ah), "dI" (bh), "%dI" (al), "dI" (bl))
00455 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
00456   __asm__ ("cmpo 0,0\;subc %5,%4,%1\;subc %3,%2,%0"                   \
00457           : "=r" (sh), "=&r" (sl)                              \
00458           : "dI" (ah), "dI" (bh), "dI" (al), "dI" (bl))
00459 #define umul_ppmm(w1, w0, u, v) \
00460   ({union {UDItype __ll;                                       \
00461           struct {USItype __l, __h;} __i;                      \
00462          } __x;                                                \
00463   __asm__ ("emul %2,%1,%0"                                     \
00464           : "=d" (__x.__ll) : "%dI" (u), "dI" (v));                   \
00465   (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
00466 #define __umulsidi3(u, v) \
00467   ({UDItype __w;                                               \
00468     __asm__ ("emul %2,%1,%0" : "=d" (__w) : "%dI" (u), "dI" (v));     \
00469     __w; })
00470 #define udiv_qrnnd(q, r, nh, nl, d) \
00471   do {                                                         \
00472     union {UDItype __ll;                                       \
00473           struct {USItype __l, __h;} __i;                      \
00474          } __nn, __rq;                                          \
00475     __nn.__i.__h = (nh); __nn.__i.__l = (nl);                         \
00476     __asm__ ("ediv %2,%1,%0"                                          \
00477           : "=d" (__rq.__ll) : "dI" (__nn.__ll), "dI" (d));           \
00478     (r) = __rq.__i.__l; (q) = __rq.__i.__h;                           \
00479   } while (0)
00480 #define count_leading_zeros(count, x) \
00481   do {                                                         \
00482     USItype __cbtmp;                                           \
00483     __asm__ ("scanbit %1,%0" : "=r" (__cbtmp) : "r" (x));             \
00484     (count) = __cbtmp ^ 31;                                    \
00485   } while (0)
00486 #define COUNT_LEADING_ZEROS_0 (-32) /* sic */
00487 #if defined (__i960mx)             /* what is the proper symbol to test??? */
00488 #define rshift_rhlc(r,h,l,c) \
00489   do {                                                         \
00490     union {UDItype __ll;                                       \
00491           struct {USItype __l, __h;} __i;                      \
00492          } __nn;                                               \
00493     __nn.__i.__h = (h); __nn.__i.__l = (l);                           \
00494     __asm__ ("shre %2,%1,%0" : "=d" (r) : "dI" (__nn.__ll), "dI" (c));       \
00495   } while (0)
00496 #endif /* i960mx */
00497 #endif /* i960 */
00498 
00499 #if (defined (__mc68000__) || defined (__mc68020__) || defined(mc68020) \
00500      || defined (__m68k__) || defined (__mc5200__) || defined (__mc5206e__) \
00501      || defined (__mc5307__)) && W_TYPE_SIZE == 32
00502 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
00503   __asm__ ("add%.l %5,%1\n\taddx%.l %3,%0"                            \
00504           : "=d" ((USItype)(sh)), "=&d" ((USItype)(sl))        \
00505           : "%0" ((USItype)(ah)), "d" ((USItype)(bh)),                \
00506             "%1" ((USItype)(al)), "g" ((USItype)(bl)))
00507 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
00508   __asm__ ("sub%.l %5,%1\n\tsubx%.l %3,%0"                            \
00509           : "=d" ((USItype)(sh)), "=&d" ((USItype)(sl))        \
00510           : "0" ((USItype)(ah)), "d" ((USItype)(bh)),                 \
00511             "1" ((USItype)(al)), "g" ((USItype)(bl)))
00512 /* The '020, '030, '040 and CPU32 have 32x32->64 and 64/32->32q-32r.  */
00513 #if defined (__mc68020__) || defined(mc68020) \
00514      || defined (__mc68030__) || defined (mc68030) \
00515      || defined (__mc68040__) || defined (mc68040) \
00516      || defined (__mc68332__) || defined (mc68332) \
00517      || defined (__NeXT__)
00518 #define umul_ppmm(w1, w0, u, v) \
00519   __asm__ ("mulu%.l %3,%1:%0"                                         \
00520           : "=d" ((USItype)(w0)), "=d" ((USItype)(w1))                \
00521           : "%0" ((USItype)(u)), "dmi" ((USItype)(v)))
00522 #define UMUL_TIME 45
00523 #define udiv_qrnnd(q, r, n1, n0, d) \
00524   __asm__ ("divu%.l %4,%1:%0"                                         \
00525           : "=d" ((USItype)(q)), "=d" ((USItype)(r))                  \
00526           : "0" ((USItype)(n0)), "1" ((USItype)(n1)), "dmi" ((USItype)(d)))
00527 #define UDIV_TIME 90
00528 #define sdiv_qrnnd(q, r, n1, n0, d) \
00529   __asm__ ("divs%.l %4,%1:%0"                                         \
00530           : "=d" ((USItype)(q)), "=d" ((USItype)(r))                  \
00531           : "0" ((USItype)(n0)), "1" ((USItype)(n1)), "dmi" ((USItype)(d)))
00532 #else /* for other 68k family members use 16x16->32 multiplication */
00533 #define umul_ppmm(xh, xl, a, b) \
00534   do { USItype __umul_tmp1, __umul_tmp2;                       \
00535        __asm__ ("| Inlined umul_ppmm\n"                        \
00536 "      move%.l       %5,%3\n"                                         \
00537 "      move%.l       %2,%0\n"                                         \
00538 "      move%.w       %3,%1\n"                                         \
00539 "      swap   %3\n"                                            \
00540 "      swap   %0\n"                                            \
00541 "      mulu%.w       %2,%1\n"                                         \
00542 "      mulu%.w       %3,%0\n"                                         \
00543 "      mulu%.w       %2,%3\n"                                         \
00544 "      swap   %2\n"                                            \
00545 "      mulu%.w       %5,%2\n"                                         \
00546 "      add%.l %3,%2\n"                                         \
00547 "      jcc    1f\n"                                            \
00548 "      add%.l %#0x10000,%0\n"                                         \
00549 "1:    move%.l       %2,%3\n"                                         \
00550 "      clr%.w %2\n"                                            \
00551 "      swap   %2\n"                                            \
00552 "      swap   %3\n"                                            \
00553 "      clr%.w %3\n"                                            \
00554 "      add%.l %3,%1\n"                                         \
00555 "      addx%.l       %2,%0\n"                                         \
00556 "      | End inlined umul_ppmm"                                \
00557              : "=&d" ((USItype)(xh)), "=&d" ((USItype)(xl)),          \
00558               "=d" (__umul_tmp1), "=&d" (__umul_tmp2)                 \
00559              : "%2" ((USItype)(a)), "d" ((USItype)(b)));              \
00560   } while (0)
00561 #define UMUL_TIME 100
00562 #define UDIV_TIME 400
00563 #endif /* not mc68020 */
00564 /* The '020, '030, '040 and '060 have bitfield insns.  */
00565 #if defined (__mc68020__) || defined (mc68020) \
00566      || defined (__mc68030__) || defined (mc68030) \
00567      || defined (__mc68040__) || defined (mc68040) \
00568      || defined (__mc68060__) || defined (mc68060) \
00569      || defined (__NeXT__)
00570 #define count_leading_zeros(count, x) \
00571   __asm__ ("bfffo %1{%b2:%b2},%0"                              \
00572           : "=d" ((USItype) (count))                                  \
00573           : "od" ((USItype) (x)), "n" (0))
00574 #define COUNT_LEADING_ZEROS_0 32
00575 #endif
00576 #endif /* mc68000 */
00577 
00578 #if defined (__m88000__) && W_TYPE_SIZE == 32
00579 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
00580   __asm__ ("addu.co %1,%r4,%r5\n\taddu.ci %0,%r2,%r3"                 \
00581           : "=r" (sh), "=&r" (sl)                              \
00582           : "%rJ" (ah), "rJ" (bh), "%rJ" (al), "rJ" (bl))
00583 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
00584   __asm__ ("subu.co %1,%r4,%r5\n\tsubu.ci %0,%r2,%r3"                 \
00585           : "=r" (sh), "=&r" (sl)                              \
00586           : "rJ" (ah), "rJ" (bh), "rJ" (al), "rJ" (bl))
00587 #define count_leading_zeros(count, x) \
00588   do {                                                         \
00589     USItype __cbtmp;                                           \
00590     __asm__ ("ff1 %0,%1" : "=r" (__cbtmp) : "r" (x));                 \
00591     (count) = __cbtmp ^ 31;                                    \
00592   } while (0)
00593 #define COUNT_LEADING_ZEROS_0 63 /* sic */
00594 #if defined (__m88110__)
00595 #define umul_ppmm(wh, wl, u, v) \
00596   do {                                                         \
00597     union {UDItype __ll;                                       \
00598           struct {USItype __h, __l;} __i;                      \
00599          } __x;                                                \
00600     __asm__ ("mulu.d %0,%1,%2" : "=r" (__x.__ll) : "r" (u), "r" (v)); \
00601     (wh) = __x.__i.__h;                                               \
00602     (wl) = __x.__i.__l;                                               \
00603   } while (0)
00604 #define udiv_qrnnd(q, r, n1, n0, d) \
00605   ({union {UDItype __ll;                                       \
00606           struct {USItype __h, __l;} __i;                      \
00607          } __x, __q;                                           \
00608   __x.__i.__h = (n1); __x.__i.__l = (n0);                      \
00609   __asm__ ("divu.d %0,%1,%2"                                          \
00610           : "=r" (__q.__ll) : "r" (__x.__ll), "r" (d));        \
00611   (r) = (n0) - __q.__i.__l * (d); (q) = __q.__i.__l; })
00612 #define UMUL_TIME 5
00613 #define UDIV_TIME 25
00614 #else
00615 #define UMUL_TIME 17
00616 #define UDIV_TIME 150
00617 #endif /* __m88110__ */
00618 #endif /* __m88000__ */
00619 
00620 #if defined (__mips) && W_TYPE_SIZE == 32
00621 #if __GNUC__ > 2 || __GNUC_MINOR__ >= 7
00622 #define umul_ppmm(w1, w0, u, v) \
00623   __asm__ ("multu %2,%3" : "=l" (w0), "=h" (w1) : "d" (u), "d" (v))
00624 #else
00625 #define umul_ppmm(w1, w0, u, v) \
00626   __asm__ ("multu %2,%3\n\tmflo %0\n\tmfhi %1"                        \
00627           : "=d" (w0), "=d" (w1) : "d" (u), "d" (v))
00628 #endif
00629 #define UMUL_TIME 10
00630 #define UDIV_TIME 100
00631 #endif /* __mips */
00632 
00633 #if (defined (__mips) && __mips >= 3) && W_TYPE_SIZE == 64
00634 #if __GNUC__ > 2 || __GNUC_MINOR__ >= 7
00635 #define umul_ppmm(w1, w0, u, v) \
00636   __asm__ ("dmultu %2,%3" : "=l" (w0), "=h" (w1) : "d" (u), "d" (v))
00637 #else
00638 #define umul_ppmm(w1, w0, u, v) \
00639   __asm__ ("dmultu %2,%3\n\tmflo %0\n\tmfhi %1"                       \
00640           : "=d" (w0), "=d" (w1) : "d" (u), "d" (v))
00641 #endif
00642 #define UMUL_TIME 20
00643 #define UDIV_TIME 140
00644 #endif /* __mips */
00645 
00646 #if defined (__ns32000__) && W_TYPE_SIZE == 32
00647 #define umul_ppmm(w1, w0, u, v) \
00648   ({union {UDItype __ll;                                       \
00649           struct {USItype __l, __h;} __i;                      \
00650          } __x;                                                \
00651   __asm__ ("meid %2,%0"                                               \
00652           : "=g" (__x.__ll)                                    \
00653           : "%0" ((USItype)(u)), "g" ((USItype)(v)));                 \
00654   (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
00655 #define __umulsidi3(u, v) \
00656   ({UDItype __w;                                               \
00657     __asm__ ("meid %2,%0"                                      \
00658             : "=g" (__w)                                       \
00659             : "%0" ((USItype)(u)), "g" ((USItype)(v)));        \
00660     __w; })
00661 #define udiv_qrnnd(q, r, n1, n0, d) \
00662   ({union {UDItype __ll;                                       \
00663           struct {USItype __l, __h;} __i;                      \
00664          } __x;                                                \
00665   __x.__i.__h = (n1); __x.__i.__l = (n0);                      \
00666   __asm__ ("deid %2,%0"                                               \
00667           : "=g" (__x.__ll)                                    \
00668           : "0" (__x.__ll), "g" ((USItype)(d)));               \
00669   (r) = __x.__i.__l; (q) = __x.__i.__h; })
00670 #define count_trailing_zeros(count,x) \
00671   do {                                                         \
00672     __asm__ ("ffsd   %2,%0"                                    \
00673             : "=r" ((USItype) (count))                                \
00674             : "0" ((USItype) 0), "r" ((USItype) (x)));                \
00675   } while (0)
00676 #endif /* __ns32000__ */
00677 
00678 /* We should test _IBMR2 here when we add assembly support for the system
00679    vendor compilers.  */
00680 #if (defined (_ARCH_PPC) || defined (_ARCH_PWR) || defined (__powerpc__)) && W_TYPE_SIZE == 32
00681 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
00682   do {                                                         \
00683     if (__builtin_constant_p (bh) && (bh) == 0)                       \
00684       __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2"         \
00685             : "=r" (sh), "=&r" (sl) : "%r" (ah), "%r" (al), "rI" (bl));\
00686     else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0)              \
00687       __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2"         \
00688             : "=r" (sh), "=&r" (sl) : "%r" (ah), "%r" (al), "rI" (bl));\
00689     else                                                       \
00690       __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3"        \
00691             : "=r" (sh), "=&r" (sl)                                   \
00692             : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl));             \
00693   } while (0)
00694 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
00695   do {                                                         \
00696     if (__builtin_constant_p (ah) && (ah) == 0)                       \
00697       __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2"     \
00698               : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
00699     else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0)              \
00700       __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2"     \
00701               : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
00702     else if (__builtin_constant_p (bh) && (bh) == 0)                  \
00703       __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2"              \
00704               : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
00705     else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0)              \
00706       __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2"              \
00707               : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
00708     else                                                       \
00709       __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2"    \
00710               : "=r" (sh), "=&r" (sl)                                 \
00711               : "r" (ah), "r" (bh), "rI" (al), "r" (bl));             \
00712   } while (0)
00713 #define count_leading_zeros(count, x) \
00714   __asm__ ("{cntlz|cntlzw} %0,%1" : "=r" (count) : "r" (x))
00715 #define COUNT_LEADING_ZEROS_0 32
00716 #if defined (_ARCH_PPC) || defined (__powerpc__)
00717 #define umul_ppmm(ph, pl, m0, m1) \
00718   do {                                                         \
00719     USItype __m0 = (m0), __m1 = (m1);                                 \
00720     __asm__ ("mulhwu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1));    \
00721     (pl) = __m0 * __m1;                                               \
00722   } while (0)
00723 #define UMUL_TIME 15
00724 #define smul_ppmm(ph, pl, m0, m1) \
00725   do {                                                         \
00726     SItype __m0 = (m0), __m1 = (m1);                                  \
00727     __asm__ ("mulhw %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1));     \
00728     (pl) = __m0 * __m1;                                               \
00729   } while (0)
00730 #define SMUL_TIME 14
00731 #define UDIV_TIME 120
00732 #else
00733 #define UMUL_TIME 8
00734 #define smul_ppmm(xh, xl, m0, m1) \
00735   __asm__ ("mul %0,%2,%3" : "=r" (xh), "=q" (xl) : "r" (m0), "r" (m1))
00736 #define SMUL_TIME 4
00737 #define sdiv_qrnnd(q, r, nh, nl, d) \
00738   __asm__ ("div %0,%2,%4" : "=r" (q), "=q" (r) : "r" (nh), "1" (nl), "r" (d))
00739 #define UDIV_TIME 100
00740 #endif
00741 #endif /* 32-bit POWER architecture variants.  */
00742 
00743 /* We should test _IBMR2 here when we add assembly support for the system
00744    vendor compilers.  */
00745 #if (defined (_ARCH_PPC) || defined (__powerpc__)) && W_TYPE_SIZE == 64
00746 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
00747   do {                                                         \
00748     if (__builtin_constant_p (bh) && (bh) == 0)                       \
00749       __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2"         \
00750             : "=r" (sh), "=&r" (sl) : "%r" (ah), "%r" (al), "rI" (bl));\
00751     else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0)              \
00752       __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2"         \
00753             : "=r" (sh), "=&r" (sl) : "%r" (ah), "%r" (al), "rI" (bl));\
00754     else                                                       \
00755       __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3"        \
00756             : "=r" (sh), "=&r" (sl)                                   \
00757             : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl));             \
00758   } while (0)
00759 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
00760   do {                                                         \
00761     if (__builtin_constant_p (ah) && (ah) == 0)                       \
00762       __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2"     \
00763               : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
00764     else if (__builtin_constant_p (ah) && (ah) == ~(UDItype) 0)              \
00765       __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2"     \
00766               : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
00767     else if (__builtin_constant_p (bh) && (bh) == 0)                  \
00768       __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2"              \
00769               : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
00770     else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0)              \
00771       __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2"              \
00772               : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
00773     else                                                       \
00774       __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2"    \
00775               : "=r" (sh), "=&r" (sl)                                 \
00776               : "r" (ah), "r" (bh), "rI" (al), "r" (bl));             \
00777   } while (0)
00778 #define count_leading_zeros(count, x) \
00779   __asm__ ("cntlzd %0,%1" : "=r" (count) : "r" (x))
00780 #define COUNT_LEADING_ZEROS_0 64
00781 #define umul_ppmm(ph, pl, m0, m1) \
00782   do {                                                         \
00783     UDItype __m0 = (m0), __m1 = (m1);                                 \
00784     __asm__ ("mulhdu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1));    \
00785     (pl) = __m0 * __m1;                                               \
00786   } while (0)
00787 #define UMUL_TIME 15
00788 #define smul_ppmm(ph, pl, m0, m1) \
00789   do {                                                         \
00790     DItype __m0 = (m0), __m1 = (m1);                                  \
00791     __asm__ ("mulhd %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1));     \
00792     (pl) = __m0 * __m1;                                               \
00793   } while (0)
00794 #define SMUL_TIME 14  /* ??? */
00795 #define UDIV_TIME 120 /* ??? */
00796 #endif /* 64-bit PowerPC.  */
00797 
00798 #if defined (__pyr__) && W_TYPE_SIZE == 32
00799 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
00800   __asm__ ("addw %5,%1\n\taddwc %3,%0"                                \
00801           : "=r" ((USItype)(sh)), "=&r" ((USItype)(sl))        \
00802           : "%0" ((USItype)(ah)), "g" ((USItype)(bh)),                \
00803             "%1" ((USItype)(al)), "g" ((USItype)(bl)))
00804 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
00805   __asm__ ("subw %5,%1\n\tsubwb %3,%0"                                \
00806           : "=r" ((USItype)(sh)), "=&r" ((USItype)(sl))        \
00807           : "0" ((USItype)(ah)), "g" ((USItype)(bh)),                 \
00808             "1" ((USItype)(al)), "g" ((USItype)(bl)))
00809 /* This insn works on Pyramids with AP, XP, or MI CPUs, but not with SP.  */
00810 #define umul_ppmm(w1, w0, u, v) \
00811   ({union {UDItype __ll;                                       \
00812           struct {USItype __h, __l;} __i;                      \
00813          } __x;                                                \
00814   __asm__ ("movw %1,%R0\n\tuemul %2,%0"                               \
00815           : "=&r" (__x.__ll)                                          \
00816           : "g" ((USItype) (u)), "g" ((USItype)(v)));                 \
00817   (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
00818 #endif /* __pyr__ */
00819 
00820 #if defined (__ibm032__) /* RT/ROMP */  && W_TYPE_SIZE == 32
00821 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
00822   __asm__ ("a %1,%5\n\tae %0,%3"                               \
00823           : "=r" ((USItype)(sh)), "=&r" ((USItype)(sl))        \
00824           : "%0" ((USItype)(ah)), "r" ((USItype)(bh)),                \
00825             "%1" ((USItype)(al)), "r" ((USItype)(bl)))
00826 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
00827   __asm__ ("s %1,%5\n\tse %0,%3"                               \
00828           : "=r" ((USItype)(sh)), "=&r" ((USItype)(sl))        \
00829           : "0" ((USItype)(ah)), "r" ((USItype)(bh)),                 \
00830             "1" ((USItype)(al)), "r" ((USItype)(bl)))
00831 #define smul_ppmm(ph, pl, m0, m1) \
00832   __asm__ (                                                    \
00833        "s     r2,r2\n"                                         \
00834 "      mts r10,%2\n"                                           \
00835 "      m      r2,%3\n"                                         \
00836 "      m      r2,%3\n"                                         \
00837 "      m      r2,%3\n"                                         \
00838 "      m      r2,%3\n"                                         \
00839 "      m      r2,%3\n"                                         \
00840 "      m      r2,%3\n"                                         \
00841 "      m      r2,%3\n"                                         \
00842 "      m      r2,%3\n"                                         \
00843 "      m      r2,%3\n"                                         \
00844 "      m      r2,%3\n"                                         \
00845 "      m      r2,%3\n"                                         \
00846 "      m      r2,%3\n"                                         \
00847 "      m      r2,%3\n"                                         \
00848 "      m      r2,%3\n"                                         \
00849 "      m      r2,%3\n"                                         \
00850 "      m      r2,%3\n"                                         \
00851 "      cas    %0,r2,r0\n"                                      \
00852 "      mfs    r10,%1"                                                 \
00853           : "=r" ((USItype)(ph)), "=r" ((USItype)(pl))                \
00854           : "%r" ((USItype)(m0)), "r" ((USItype)(m1))                 \
00855           : "r2")
00856 #define UMUL_TIME 20
00857 #define UDIV_TIME 200
00858 #define count_leading_zeros(count, x) \
00859   do {                                                         \
00860     if ((x) >= 0x10000)                                               \
00861       __asm__ ("clz  %0,%1"                                    \
00862               : "=r" ((USItype)(count)) : "r" ((USItype)(x) >> 16));  \
00863     else                                                       \
00864       {                                                               \
00865        __asm__ ("clz %0,%1"                                    \
00866                : "=r" ((USItype)(count)) : "r" ((USItype)(x)));       \
00867        (count) += 16;                                                 \
00868       }                                                               \
00869   } while (0)
00870 #endif /* RT/ROMP */
00871 
00872 #if defined (__sh2__) && W_TYPE_SIZE == 32
00873 #define umul_ppmm(w1, w0, u, v) \
00874   __asm__ ("dmulu.l %2,%3\n\tsts macl,%1\n\tsts mach,%0"              \
00875           : "=r" (w1), "=r" (w0) : "r" (u), "r" (v) : "macl", "mach")
00876 #define UMUL_TIME 5
00877 #endif
00878 
00879 #if defined (__sparc__) && W_TYPE_SIZE == 32
00880 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
00881   __asm__ ("addcc %r4,%5,%1\n\taddx %r2,%3,%0"                        \
00882           : "=r" (sh), "=&r" (sl)                              \
00883           : "%rJ" (ah), "rI" (bh),"%rJ" (al), "rI" (bl)        \
00884           __CLOBBER_CC)
00885 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
00886   __asm__ ("subcc %r4,%5,%1\n\tsubx %r2,%3,%0"                        \
00887           : "=r" (sh), "=&r" (sl)                              \
00888           : "rJ" (ah), "rI" (bh), "rJ" (al), "rI" (bl)  \
00889           __CLOBBER_CC)
00890 #if defined (__sparc_v9__) || defined (__sparcv9)
00891 /* Perhaps we should use floating-point operations here?  */
00892 #if 0
00893 /* Triggers a bug making mpz/tests/t-gcd.c fail.
00894    Perhaps we simply need to explicitly zero-extend the inputs?  */
00895 #define umul_ppmm(w1, w0, u, v) \
00896   __asm__ ("mulx %2,%3,%%g1; srl %%g1,0,%1; srlx %%g1,32,%0" :        \
00897           "=r" (w1), "=r" (w0) : "r" (u), "r" (v) : "g1")
00898 #else
00899 /* Use v8 umul until above bug is fixed.  */
00900 #define umul_ppmm(w1, w0, u, v) \
00901   __asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))
00902 #endif
00903 /* Use a plain v8 divide for v9.  */
00904 #define udiv_qrnnd(q, r, n1, n0, d) \
00905   do {                                                         \
00906     USItype __q;                                               \
00907     __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0"                   \
00908             : "=r" (__q) : "r" (n1), "r" (n0), "r" (d));              \
00909     (r) = (n0) - __q * (d);                                    \
00910     (q) = __q;                                                        \
00911   } while (0)
00912 #else
00913 #if defined (__sparc_v8__)
00914 /* Don't match an immediate range because (1) it is not often useful, and
00915    (2) the 'I' constraint treats the range as a 13 bit signed interval,
00916    while we want to match a 13 bit interval that is sign extended to 32 bits
00917    but INTERPRETED AS UNSIGNED.  */
00918 #define umul_ppmm(w1, w0, u, v) \
00919   __asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))
00920 #define UMUL_TIME 5
00921 #ifndef SUPERSPARC   /* SuperSPARC's udiv only handles 53 bit dividends */
00922 #define udiv_qrnnd(q, r, n1, n0, d) \
00923   do {                                                         \
00924     USItype __q;                                               \
00925     __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0"                   \
00926             : "=r" (__q) : "r" (n1), "r" (n0), "r" (d));              \
00927     (r) = (n0) - __q * (d);                                    \
00928     (q) = __q;                                                        \
00929   } while (0)
00930 #define UDIV_TIME 25
00931 #else
00932 #define UDIV_TIME 60        /* SuperSPARC timing */
00933 #endif /* SUPERSPARC */
00934 #else /* ! __sparc_v8__ */
00935 #if defined (__sparclite__)
00936 /* This has hardware multiply but not divide.  It also has two additional
00937    instructions scan (ffs from high bit) and divscc.  */
00938 #define umul_ppmm(w1, w0, u, v) \
00939   __asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))
00940 #define UMUL_TIME 5
00941 #define udiv_qrnnd(q, r, n1, n0, d) \
00942   __asm__ ("! Inlined udiv_qrnnd\n"                                   \
00943 "      wr     %%g0,%2,%%y   ! Not a delayed write for sparclite\n"    \
00944 "      tst    %%g0\n"                                                 \
00945 "      divscc %3,%4,%%g1\n"                                    \
00946 "      divscc %%g1,%4,%%g1\n"                                         \
00947 "      divscc %%g1,%4,%%g1\n"                                         \
00948 "      divscc %%g1,%4,%%g1\n"                                         \
00949 "      divscc %%g1,%4,%%g1\n"                                         \
00950 "      divscc %%g1,%4,%%g1\n"                                         \
00951 "      divscc %%g1,%4,%%g1\n"                                         \
00952 "      divscc %%g1,%4,%%g1\n"                                         \
00953 "      divscc %%g1,%4,%%g1\n"                                         \
00954 "      divscc %%g1,%4,%%g1\n"                                         \
00955 "      divscc %%g1,%4,%%g1\n"                                         \
00956 "      divscc %%g1,%4,%%g1\n"                                         \
00957 "      divscc %%g1,%4,%%g1\n"                                         \
00958 "      divscc %%g1,%4,%%g1\n"                                         \
00959 "      divscc %%g1,%4,%%g1\n"                                         \
00960 "      divscc %%g1,%4,%%g1\n"                                         \
00961 "      divscc %%g1,%4,%%g1\n"                                         \
00962 "      divscc %%g1,%4,%%g1\n"                                         \
00963 "      divscc %%g1,%4,%%g1\n"                                         \
00964 "      divscc %%g1,%4,%%g1\n"                                         \
00965 "      divscc %%g1,%4,%%g1\n"                                         \
00966 "      divscc %%g1,%4,%%g1\n"                                         \
00967 "      divscc %%g1,%4,%%g1\n"                                         \
00968 "      divscc %%g1,%4,%%g1\n"                                         \
00969 "      divscc %%g1,%4,%%g1\n"                                         \
00970 "      divscc %%g1,%4,%%g1\n"                                         \
00971 "      divscc %%g1,%4,%%g1\n"                                         \
00972 "      divscc %%g1,%4,%%g1\n"                                         \
00973 "      divscc %%g1,%4,%%g1\n"                                         \
00974 "      divscc %%g1,%4,%%g1\n"                                         \
00975 "      divscc %%g1,%4,%%g1\n"                                         \
00976 "      divscc %%g1,%4,%0\n"                                    \
00977 "      rd     %%y,%1\n"                                        \
00978 "      bl,a 1f\n"                                              \
00979 "      add    %1,%4,%1\n"                                      \
00980 "1:    ! End of inline udiv_qrnnd"                             \
00981           : "=r" (q), "=r" (r) : "r" (n1), "r" (n0), "rI" (d)         \
00982           : "%g1" __AND_CLOBBER_CC)
00983 #define UDIV_TIME 37
00984 #define count_leading_zeros(count, x) \
00985   __asm__ ("scan %1,0,%0" : "=r" (count) : "r" (x))
00986 /* Early sparclites return 63 for an argument of 0, but they warn that future
00987    implementations might change this.  Therefore, leave COUNT_LEADING_ZEROS_0
00988    undefined.  */
00989 #endif /* __sparclite__ */
00990 #endif /* __sparc_v8__ */
00991 #endif /* __sparc_v9__ */
00992 /* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd.  */
00993 #ifndef umul_ppmm
00994 #define umul_ppmm(w1, w0, u, v) \
00995   __asm__ ("! Inlined umul_ppmm\n"                             \
00996 "      wr     %%g0,%2,%%y   ! SPARC has 0-3 delay insn after a wr\n" \
00997 "      sra    %3,31,%%g2    ! Don't move this insn\n"          \
00998 "      and    %2,%%g2,%%g2  ! Don't move this insn\n"          \
00999 "      andcc  %%g0,0,%%g1   ! Don't move this insn\n"          \
01000 "      mulscc %%g1,%3,%%g1\n"                                         \
01001 "      mulscc %%g1,%3,%%g1\n"                                         \
01002 "      mulscc %%g1,%3,%%g1\n"                                         \
01003 "      mulscc %%g1,%3,%%g1\n"                                         \
01004 "      mulscc %%g1,%3,%%g1\n"                                         \
01005 "      mulscc %%g1,%3,%%g1\n"                                         \
01006 "      mulscc %%g1,%3,%%g1\n"                                         \
01007 "      mulscc %%g1,%3,%%g1\n"                                         \
01008 "      mulscc %%g1,%3,%%g1\n"                                         \
01009 "      mulscc %%g1,%3,%%g1\n"                                         \
01010 "      mulscc %%g1,%3,%%g1\n"                                         \
01011 "      mulscc %%g1,%3,%%g1\n"                                         \
01012 "      mulscc %%g1,%3,%%g1\n"                                         \
01013 "      mulscc %%g1,%3,%%g1\n"                                         \
01014 "      mulscc %%g1,%3,%%g1\n"                                         \
01015 "      mulscc %%g1,%3,%%g1\n"                                         \
01016 "      mulscc %%g1,%3,%%g1\n"                                         \
01017 "      mulscc %%g1,%3,%%g1\n"                                         \
01018 "      mulscc %%g1,%3,%%g1\n"                                         \
01019 "      mulscc %%g1,%3,%%g1\n"                                         \
01020 "      mulscc %%g1,%3,%%g1\n"                                         \
01021 "      mulscc %%g1,%3,%%g1\n"                                         \
01022 "      mulscc %%g1,%3,%%g1\n"                                         \
01023 "      mulscc %%g1,%3,%%g1\n"                                         \
01024 "      mulscc %%g1,%3,%%g1\n"                                         \
01025 "      mulscc %%g1,%3,%%g1\n"                                         \
01026 "      mulscc %%g1,%3,%%g1\n"                                         \
01027 "      mulscc %%g1,%3,%%g1\n"                                         \
01028 "      mulscc %%g1,%3,%%g1\n"                                         \
01029 "      mulscc %%g1,%3,%%g1\n"                                         \
01030 "      mulscc %%g1,%3,%%g1\n"                                         \
01031 "      mulscc %%g1,%3,%%g1\n"                                         \
01032 "      mulscc %%g1,0,%%g1\n"                                          \
01033 "      add    %%g1,%%g2,%0\n"                                         \
01034 "      rd     %%y,%1"                                                 \
01035           : "=r" (w1), "=r" (w0) : "%rI" (u), "r" (v)                 \
01036           : "%g1", "%g2" __AND_CLOBBER_CC)
01037 #define UMUL_TIME 39        /* 39 instructions */
01038 #endif
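/* A compiled-out sketch of why the final "add %g1,%g2,%0" above yields the
   unsigned high word.  On this reading, the mulscc chain forms the product of
   u (streamed out of %y) with v taken as a signed multiplicand, and the
   sra/and into %g2 adds u back whenever v's sign bit is set.  The check below
   restates that identity with plain C99 types for 32-bit words; it is an
   interpretation of the sequence, not part of the header.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void check_mulscc_correction (uint32_t u, uint32_t v)
{
  /* High word of u * v with v treated as signed (what the mulscc steps
     accumulate), kept as a two's complement 64-bit value.  */
  int64_t  sprod  = (int64_t) (int32_t) v * (int64_t) u;
  uint32_t high_s = (uint32_t) ((uint64_t) sprod >> 32);

  /* The %g2 correction: add u exactly when v is negative as a signed word,
     because reinterpreting v as unsigned adds 2^32, i.e. u extra in bits
     32..63 of the product.  */
  uint32_t corr = ((int32_t) v < 0) ? u : 0;

  uint32_t high_u = (uint32_t) (((uint64_t) u * v) >> 32);
  assert (high_u == (uint32_t) (high_s + corr));
}
#endif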
01039 #ifndef udiv_qrnnd
01040 #define LONGLONG_STANDALONE /* <----------- PLTSCHEME: Avoid extern */
01041 #ifndef LONGLONG_STANDALONE
01042 #define XXX_udiv_qrnnd(q, r, n1, n0, d) \
01043   do { USItype __r;                                            \
01044     (q) = __MPN(udiv_qrnnd) (&__r, (n1), (n0), (d));                  \
01045     (r) = __r;                                                        \
01046   } while (0)
01047 extern USItype __MPN(udiv_qrnnd) _PROTO ((USItype *, USItype, USItype, USItype));
01048 #ifndef UDIV_TIME
01049 #define UDIV_TIME 140
01050 #endif
01051 #endif /* LONGLONG_STANDALONE */
01052 #endif /* udiv_qrnnd */
01053 #endif /* __sparc__ */
01054 
01055 #if defined (__vax__) && W_TYPE_SIZE == 32
01056 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
01057   __asm__ ("addl2 %5,%1\n\tadwc %3,%0"                                \
01058           : "=g" ((USItype)(sh)), "=&g" ((USItype)(sl))        \
01059           : "%0" ((USItype)(ah)), "g" ((USItype)(bh)),                \
01060             "%1" ((USItype)(al)), "g" ((USItype)(bl)))
01061 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
01062   __asm__ ("subl2 %5,%1\n\tsbwc %3,%0"                                \
01063           : "=g" ((USItype)(sh)), "=&g" ((USItype)(sl))        \
01064           : "0" ((USItype)(ah)), "g" ((USItype)(bh)),                 \
01065             "1" ((USItype)(al)), "g" ((USItype)(bl)))
01066 #define smul_ppmm(xh, xl, m0, m1) \
01067   do {                                                         \
01068     union {UDItype __ll;                                       \
01069           struct {USItype __l, __h;} __i;                      \
01070          } __x;                                                \
01071     USItype __m0 = (m0), __m1 = (m1);                                 \
01072     __asm__ ("emul %1,%2,$0,%0"                                       \
01073             : "=g" (__x.__ll) : "g" (__m0), "g" (__m1));              \
01074     (xh) = __x.__i.__h; (xl) = __x.__i.__l;                           \
01075   } while (0)
01076 #define sdiv_qrnnd(q, r, n1, n0, d) \
01077   do {                                                         \
01078     union {DItype __ll;                                               \
01079           struct {SItype __l, __h;} __i;                       \
01080          } __x;                                                \
01081     __x.__i.__h = n1; __x.__i.__l = n0;                               \
01082     __asm__ ("ediv %3,%2,%0,%1"                                       \
01083             : "=g" (q), "=g" (r) : "g" (__x.__ll), "g" (d));          \
01084   } while (0)
01085 #endif /* __vax__ */
01086 
01087 #if defined (__z8000__) && W_TYPE_SIZE == 16
01088 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
01089   __asm__ ("add      %H1,%H5\n\tadc       %H0,%H3"                           \
01090           : "=r" ((unsigned int)(sh)), "=&r" ((unsigned int)(sl))     \
01091           : "%0" ((unsigned int)(ah)), "r" ((unsigned int)(bh)),      \
01092             "%1" ((unsigned int)(al)), "rQR" ((unsigned int)(bl)))
01093 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
01094   __asm__ ("sub      %H1,%H5\n\tsbc       %H0,%H3"                           \
01095           : "=r" ((unsigned int)(sh)), "=&r" ((unsigned int)(sl))     \
01096           : "0" ((unsigned int)(ah)), "r" ((unsigned int)(bh)),       \
01097             "1" ((unsigned int)(al)), "rQR" ((unsigned int)(bl)))
01098 #define umul_ppmm(xh, xl, m0, m1) \
01099   do {                                                         \
01100     union {long int __ll;                                      \
01101           struct {unsigned int __h, __l;} __i;                        \
01102          } __x;                                                \
01103     unsigned int __m0 = (m0), __m1 = (m1);                            \
01104     __asm__ ("mult   %S0,%H3"                                  \
01105             : "=r" (__x.__i.__h), "=r" (__x.__i.__l)                  \
01106             : "%1" (__m0), "rQR" (__m1));                               \
01107     (xh) = __x.__i.__h; (xl) = __x.__i.__l;                           \
01108     (xh) += ((((signed int) __m0 >> 15) & __m1)                       \
01109             + (((signed int) __m1 >> 15) & __m0));                    \
01110   } while (0)
01111 #endif /* __z8000__ */
01112 
01113 #endif /* __GNUC__ */
01114 
01115 #if !defined (umul_ppmm) && defined (__umulsidi3)
01116 #define umul_ppmm(ph, pl, m0, m1) \
01117   {                                                            \
01118     UDWtype __ll = __umulsidi3 (m0, m1);                       \
01119     ph = (UWtype) (__ll >> W_TYPE_SIZE);                       \
01120     pl = (UWtype) __ll;                                               \
01121   }
01122 #endif
01123 
01124 #if !defined (__umulsidi3)
01125 #define __umulsidi3(u, v) \
01126   ({UWtype __hi, __lo;                                                \
01127     umul_ppmm (__hi, __lo, u, v);                              \
01128     ((UDWtype) __hi << W_TYPE_SIZE) | __lo; })
01129 #endif
01130 
01131 
01132 /* Note the prototypes are under !defined (umul_ppmm) etc. too, since the HPPA
01133    versions above are different and we don't want to conflict.  */
01134 
01135 #if ! defined (umul_ppmm) && HAVE_NATIVE_mpn_umul_ppmm
01136 #define mpn_umul_ppmm  __MPN(umul_ppmm)
01137 extern mp_limb_t mpn_umul_ppmm _PROTO ((mp_limb_t *, mp_limb_t, mp_limb_t));
01138 #define umul_ppmm(wh, wl, u, v)                                 \
01139   do {                                                          \
01140     mp_limb_t __umul_ppmm__p0;                                  \
01141     (wh) = __MPN(umul_ppmm) (&__umul_ppmm__p0,                  \
01142                              (mp_limb_t) (u), (mp_limb_t) (v)); \
01143     (wl) = __umul_ppmm__p0;                                     \
01144   } while (0)
01145 #endif
01146 
01147 #if ! defined (udiv_qrnnd) && HAVE_NATIVE_mpn_udiv_qrnnd
01148 #define mpn_udiv_qrnnd  __MPN(udiv_qrnnd)
01149 extern mp_limb_t mpn_udiv_qrnnd _PROTO ((mp_limb_t *,
01150                                          mp_limb_t, mp_limb_t, mp_limb_t));
01151 #define udiv_qrnnd(q, r, n1, n0, d)                                           \
01152   do {                                                                        \
01153     mp_limb_t __udiv_qrnnd__r;                                                \
01154     (q) = mpn_udiv_qrnnd (&__udiv_qrnnd__r,                                   \
01155                           (mp_limb_t) (n1), (mp_limb_t) (n0), (mp_limb_t) (d)); \
01156     (r) = __udiv_qrnnd__r;                                                    \
01157   } while (0)
01158 #endif
01159 
01160 #endif /* !MZ_GMP_NO_ASM */
01161 
01162 
01163 /* If this machine has no inline assembler, use C macros.  */
01164 
01165 #if !defined (add_ssaaaa)
01166 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
01167   do {                                                         \
01168     UWtype __x;                                                       \
01169     __x = (al) + (bl);                                                \
01170     (sh) = (ah) + (bh) + (__x < (al));                                \
01171     (sl) = __x;                                                       \
01172   } while (0)
01173 #endif
01174 
01175 #if !defined (sub_ddmmss)
01176 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
01177   do {                                                         \
01178     UWtype __x;                                                       \
01179     __x = (al) - (bl);                                                \
01180     (sh) = (ah) - (bh) - (__x > (al));                                \
01181     (sl) = __x;                                                       \
01182   } while (0)
01183 #endif
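/* The two generic macros above detect carry and borrow without any flags:
   UWtype arithmetic wraps modulo 2^W, so the wrapped low-word sum is smaller
   than an addend exactly when a carry occurred, and the wrapped low-word
   difference is larger than the minuend exactly when a borrow occurred.
   A compiled-out sketch checking both against 64-bit arithmetic, assuming
   32-bit words (the function name is illustrative only).  */
#if 0
#include <assert.h>
#include <stdint.h>

static void check_carry_and_borrow (uint32_t ah, uint32_t al,
                                    uint32_t bh, uint32_t bl)
{
  /* add_ssaaaa style: (sl < al) is the carry out of the low word.  */
  uint32_t sl = al + bl;
  uint32_t sh = ah + bh + (sl < al);
  uint64_t sref = (((uint64_t) ah << 32) | al) + (((uint64_t) bh << 32) | bl);
  assert (sl == (uint32_t) sref && sh == (uint32_t) (sref >> 32));

  /* sub_ddmmss style: (dl > al) is the borrow out of the low word.  */
  uint32_t dl = al - bl;
  uint32_t dh = ah - bh - (dl > al);
  uint64_t dref = (((uint64_t) ah << 32) | al) - (((uint64_t) bh << 32) | bl);
  assert (dl == (uint32_t) dref && dh == (uint32_t) (dref >> 32));
}
#endif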
01184 
01185 /* If we lack umul_ppmm but have smul_ppmm, define umul_ppmm in terms of
01186    smul_ppmm.  */
01187 #if !defined (umul_ppmm) && defined (smul_ppmm)
01188 #define umul_ppmm(w1, w0, u, v)                                       \
01189   do {                                                         \
01190     UWtype __w1;                                               \
01191     UWtype __xm0 = (u), __xm1 = (v);                                  \
01192     smul_ppmm (__w1, w0, __xm0, __xm1);                               \
01193     (w1) = __w1 + (-(__xm0 >> (W_TYPE_SIZE - 1)) & __xm1)             \
01194               + (-(__xm1 >> (W_TYPE_SIZE - 1)) & __xm0);              \
01195   } while (0)
01196 #endif
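/* The correction above comes from reinterpretation: reading a W-bit word as
   unsigned adds 2^W whenever its signed value is negative, so the unsigned
   high word equals the signed high word plus (u if v is negative) plus
   (v if u is negative), all modulo 2^W; the low words agree, so no extra
   carry is involved.  (The smul_ppmm fallback further below subtracts the
   same two terms to go the other way.)  A compiled-out check for 32-bit
   words, using the same mask trick as the macro.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void check_unsigned_from_signed_high (uint32_t u, uint32_t v)
{
  uint32_t high_u = (uint32_t) (((uint64_t) u * v) >> 32);

  int64_t  sprod  = (int64_t) (int32_t) u * (int64_t) (int32_t) v;
  uint32_t high_s = (uint32_t) ((uint64_t) sprod >> 32);

  /* -(u >> 31) is an all-ones mask when u's sign bit is set.  */
  uint32_t corr = (-(u >> 31) & v) + (-(v >> 31) & u);

  assert (high_u == (uint32_t) (high_s + corr));
}
#endif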
01197 
01198 /* If we still don't have umul_ppmm, define it using plain C.  */
01199 #if !defined (umul_ppmm)
01200 #define umul_ppmm(w1, w0, u, v)                                       \
01201   do {                                                         \
01202     UWtype __x0, __x1, __x2, __x3;                             \
01203     UHWtype __ul, __vl, __uh, __vh;                                   \
01204     UWtype __u = (u), __v = (v);                               \
01205                                                                \
01206     __ul = __ll_lowpart (__u);                                        \
01207     __uh = __ll_highpart (__u);                                       \
01208     __vl = __ll_lowpart (__v);                                        \
01209     __vh = __ll_highpart (__v);                                       \
01210                                                                \
01211     __x0 = (UWtype) __ul * __vl;                               \
01212     __x1 = (UWtype) __ul * __vh;                               \
01213     __x2 = (UWtype) __uh * __vl;                               \
01214     __x3 = (UWtype) __uh * __vh;                               \
01215                                                                \
01216     __x1 += __ll_highpart (__x0);/* this can't give carry */          \
01217     __x1 += __x2;           /* but this indeed can */          \
01218     if (__x1 < __x2)        /* did we get a carry? */          \
01219       __x3 += __ll_B;              /* yes, add it in the proper pos. */      \
01220                                                                \
01221     (w1) = __x3 + __ll_highpart (__x1);                               \
01222     (w0) = (__x1 << W_TYPE_SIZE/2) + __ll_lowpart (__x0);             \
01223   } while (0)
01224 #endif
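/* A compiled-out sketch of the same half-word scheme with concrete types
   (32-bit words, 16-bit halves), checked against a 64-bit multiply.  The
   only delicate step is the carry out of __x1 + __x2, which is worth
   __ll_B (here 1 << 16) in the high word.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void umul_ppmm_by_halves (uint32_t *w1, uint32_t *w0,
                                 uint32_t u, uint32_t v)
{
  uint32_t ul = u & 0xffff, uh = u >> 16;
  uint32_t vl = v & 0xffff, vh = v >> 16;

  uint32_t x0 = ul * vl;
  uint32_t x1 = ul * vh;
  uint32_t x2 = uh * vl;
  uint32_t x3 = uh * vh;

  x1 += x0 >> 16;           /* cannot overflow: x1 <= (2^16-1)^2, x0>>16 < 2^16 */
  x1 += x2;                 /* may wrap ...  */
  if (x1 < x2)              /* ... and if it did, the lost 2^32 is 2^16 here */
    x3 += 1u << 16;

  *w1 = x3 + (x1 >> 16);
  *w0 = (x1 << 16) + (x0 & 0xffff);
}

static void check_umul_by_halves (uint32_t u, uint32_t v)
{
  uint32_t w1, w0;
  umul_ppmm_by_halves (&w1, &w0, u, v);
  uint64_t ref = (uint64_t) u * v;
  assert (w1 == (uint32_t) (ref >> 32) && w0 == (uint32_t) ref);
}
#endif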
01225 
01226 /* If we don't have smul_ppmm, define it using umul_ppmm (which surely will
01227    exist in one form or another).  */
01228 #if !defined (smul_ppmm)
01229 #define smul_ppmm(w1, w0, u, v)                                       \
01230   do {                                                         \
01231     UWtype __w1;                                               \
01232     UWtype __xm0 = (u), __xm1 = (v);                                  \
01233     umul_ppmm (__w1, w0, __xm0, __xm1);                               \
01234     (w1) = __w1 - (-(__xm0 >> (W_TYPE_SIZE - 1)) & __xm1)             \
01235               - (-(__xm1 >> (W_TYPE_SIZE - 1)) & __xm0);              \
01236   } while (0)
01237 #endif
01238 
01239 /* Define this unconditionally, so it can be used for debugging.  */
01240 #define __udiv_qrnnd_c(q, r, n1, n0, d) \
01241   do {                                                         \
01242     UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m;                   \
01243     __d1 = __ll_highpart (d);                                         \
01244     __d0 = __ll_lowpart (d);                                          \
01245                                                                \
01246     __q1 = (n1) / __d1;                                               \
01247     __r1 = (n1) - __q1 * __d1;                                        \
01248     __m = (UWtype) __q1 * __d0;                                       \
01249     __r1 = __r1 * __ll_B | __ll_highpart (n0);                        \
01250     if (__r1 < __m)                                            \
01251       {                                                               \
01252        __q1--, __r1 += (d);                                    \
01253        if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
01254          if (__r1 < __m)                                       \
01255            __q1--, __r1 += (d);                                \
01256       }                                                               \
01257     __r1 -= __m;                                               \
01258                                                                \
01259     __q0 = __r1 / __d1;                                               \
01260     __r0 = __r1  - __q0 * __d1;                                       \
01261     __m = (UWtype) __q0 * __d0;                                       \
01262     __r0 = __r0 * __ll_B | __ll_lowpart (n0);                         \
01263     if (__r0 < __m)                                            \
01264       {                                                               \
01265        __q0--, __r0 += (d);                                    \
01266        if (__r0 >= (d))                                        \
01267          if (__r0 < __m)                                       \
01268            __q0--, __r0 += (d);                                \
01269       }                                                               \
01270     __r0 -= __m;                                               \
01271                                                                \
01272     (q) = (UWtype) __q1 * __ll_B | __q0;                       \
01273     (r) = __r0;                                                       \
01274   } while (0)
01275 
01276 /* If the processor has no udiv_qrnnd but does have sdiv_qrnnd, go through
01277    __udiv_w_sdiv (defined in libgcc or elsewhere).  */
01278 #if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
01279 #define udiv_qrnnd(q, r, nh, nl, d) \
01280   do {                                                         \
01281     UWtype __r;                                                       \
01282     (q) = __MPN(udiv_w_sdiv) (&__r, nh, nl, d);                       \
01283     (r) = __r;                                                        \
01284   } while (0)
01285 #endif
01286 
01287 /* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c.  */
01288 #if !defined (udiv_qrnnd)
01289 #define UDIV_NEEDS_NORMALIZATION 1
01290 #define udiv_qrnnd __udiv_qrnnd_c
01291 #endif
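/* __udiv_qrnnd_c estimates each quotient half by dividing by the high half
   of d and then corrects the estimate (at most twice), which is only
   guaranteed when d is normalized, i.e. has its most significant bit set --
   hence UDIV_NEEDS_NORMALIZATION above.  A compiled-out sketch of how a
   caller normalizes: shift d and the two-word numerator left by the
   leading-zero count of d, divide, and shift the remainder back.  The
   division itself is done with 64-bit arithmetic here purely as a
   reference; in the header it would be udiv_qrnnd.  Assumes 32-bit words
   and the usual preconditions d != 0, n1 < d.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void udiv_qrnnd_with_normalization (uint32_t *q, uint32_t *r,
                                           uint32_t n1, uint32_t n0, uint32_t d)
{
  int lz = 0;
  while (!(d & 0x80000000u))            /* count_leading_zeros (lz, d) by hand */
    d <<= 1, lz++;

  /* Shift the numerator (n1:n0) left by the same amount.  */
  uint32_t nn1 = (lz == 0) ? n1 : (n1 << lz) | (n0 >> (32 - lz));
  uint32_t nn0 = n0 << lz;

  uint64_t n = ((uint64_t) nn1 << 32) | nn0;
  *q = (uint32_t) (n / d);              /* quotient is unchanged by the scaling */
  *r = (uint32_t) (n % d) >> lz;        /* remainder must be shifted back */
}

static void check_udiv_normalized (uint32_t n1, uint32_t n0, uint32_t d)
{
  if (d == 0 || n1 >= d)
    return;                             /* outside the macro's preconditions */
  uint32_t q, r;
  udiv_qrnnd_with_normalization (&q, &r, n1, n0, d);
  uint64_t n = ((uint64_t) n1 << 32) | n0;
  assert (q == (uint32_t) (n / d) && r == (uint32_t) (n % d));
}
#endif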
01292 
01293 #if !defined (count_leading_zeros)
01294 extern
01295 #if __STDC__
01296 const
01297 #endif
01298 unsigned char __clz_tab[];
01299 #define count_leading_zeros(count, x) \
01300   do {                                                         \
01301     UWtype __xr = (x);                                                \
01302     UWtype __a;                                                       \
01303                                                                \
01304     if (W_TYPE_SIZE <= 32)                                     \
01305       {                                                               \
01306        __a = __xr < ((UWtype) 1 << 2*__BITS4)                         \
01307          ? (__xr < ((UWtype) 1 << __BITS4) ? 0 : __BITS4)             \
01308          : (__xr < ((UWtype) 1 << 3*__BITS4) ?  2*__BITS4 : 3*__BITS4);\
01309       }                                                               \
01310     else                                                       \
01311       {                                                               \
01312        for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8)                 \
01313          if (((__xr >> __a) & 0xff) != 0)                      \
01314            break;                                              \
01315       }                                                               \
01316                                                                \
01317     (count) = W_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a);           \
01318   } while (0)
01319 /* This version gives a well-defined value for zero. */
01320 #define COUNT_LEADING_ZEROS_0 W_TYPE_SIZE
01321 #define COUNT_LEADING_ZEROS_NEED_CLZ_TAB
01322 #endif
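/* The generic count_leading_zeros first locates the highest nonzero byte
   (by three comparisons when W_TYPE_SIZE <= 32, by a byte loop otherwise)
   and then maps that byte to its bit length through __clz_tab, so that
   count = W_TYPE_SIZE - (bit length + shift); with a table entry of 0 for a
   zero byte this also gives COUNT_LEADING_ZEROS_0 == W_TYPE_SIZE.  A
   compiled-out sketch of the scheme for 32-bit words; it builds a local
   bit-length table rather than relying on the exact contents of the
   library's __clz_tab.  */
#if 0
#include <assert.h>
#include <stdint.h>

static unsigned char bitlen_tab[256];   /* bitlen_tab[i] = bits needed for i, 0 for i == 0 */

static void init_bitlen_tab (void)      /* call once before clz32_by_table */
{
  for (int i = 1; i < 256; i++)
    {
      int b = 0;
      for (int t = i; t != 0; t >>= 1)
        b++;
      bitlen_tab[i] = (unsigned char) b;
    }
}

static int clz32_by_table (uint32_t x)
{
  /* Shift of the highest nonzero byte, as the __BITS4 comparisons pick it.  */
  int a = (x < ((uint32_t) 1 << 16))
            ? (x < ((uint32_t) 1 << 8) ? 0 : 8)
            : (x < ((uint32_t) 1 << 24) ? 16 : 24);
  return 32 - (bitlen_tab[x >> a] + a);
}

static void check_clz (uint32_t x)
{
  int ref = 32;
  for (uint32_t t = x; t != 0; t >>= 1)
    ref--;                              /* ref = 32 - bit length of x */
  assert (clz32_by_table (x) == ref);
}
#endif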
01323 
01324 #if !defined (count_trailing_zeros)
01325 /* Define count_trailing_zeros using count_leading_zeros.  The latter might be
01326    defined in asm, but if it is not, the C version above is good enough.  */
01327 #define count_trailing_zeros(count, x) \
01328   do {                                                         \
01329     UWtype __ctz_x = (x);                                      \
01330     UWtype __ctz_c;                                            \
01331     count_leading_zeros (__ctz_c, __ctz_x & -__ctz_x);                \
01332     (count) = W_TYPE_SIZE - 1 - __ctz_c;                       \
01333   } while (0)
01334 #endif
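/* The identity behind count_trailing_zeros: x & -x isolates the lowest set
   bit of x, and a bit at position p from the bottom sits W_TYPE_SIZE-1-p
   from the top, so ctz(x) = W_TYPE_SIZE - 1 - clz(x & -x) for nonzero x
   (zero is outside the macro's domain).  A compiled-out check for 32-bit
   words, with loop-based reference counts.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void check_ctz_identity (uint32_t x)
{
  if (x == 0)
    return;                                   /* identity needs a set bit */

  int ctz = 0;                                /* reference trailing-zero count */
  for (uint32_t t = x; (t & 1) == 0; t >>= 1)
    ctz++;

  uint32_t low = x & -x;                      /* only the lowest set bit survives */
  int clz = 0;
  for (uint32_t t = low; (t & 0x80000000u) == 0; t <<= 1)
    clz++;

  assert (ctz == 32 - 1 - clz);
}
#endif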
01335 
01336 #ifndef UDIV_NEEDS_NORMALIZATION
01337 #define UDIV_NEEDS_NORMALIZATION 0
01338 #endif
01339 
01340 /* Give defaults for UMUL_TIME and UDIV_TIME.  */
01341 #ifndef UMUL_TIME
01342 #define UMUL_TIME 1
01343 #endif
01344 
01345 #ifndef UDIV_TIME
01346 #define UDIV_TIME UMUL_TIME
01347 #endif
01348 
01349 /* count_trailing_zeros is often on the slow side, so default it to a fairly high cycle count */
01350 #ifndef COUNT_TRAILING_ZEROS_TIME
01351 #define COUNT_TRAILING_ZEROS_TIME  15  /* cycles */
01352 #endif
01353 
01354