Back to index

plt-scheme  4.2.1
Classes | Defines | Typedefs | Enumerations | Functions | Variables
gmp.c File Reference
#include "../../sconfig.h"
#include "mzconfig.h"
#include "gmp.h"
#include "gmp-impl.h"
#include "gmplonglong.h"

Go to the source code of this file.

Classes

struct  powers

Defines

#define _FORCE_INLINES
#define _EXTERN_INLINE   /* empty */
#define MALLOC(amt)   scheme_malloc_gmp(amt, &mem_pool)
#define FREE(p, s)   scheme_free_gmp(p, &mem_pool)
#define GMP_NAIL_BITS   0
#define GMP_LIMB_BITS   BITS_PER_MP_LIMB
#define GMP_NUMB_BITS   BITS_PER_MP_LIMB
#define GMP_LIMB_HIGHBIT   ((mp_limb_t)1 << (BITS_PER_MP_LIMB - 1))
#define GMP_NUMB_HIGHBIT   GMP_LIMB_HIGHBIT
#define NULL   0L
#define MP_BASES_CHARS_PER_LIMB_10   9
#define MP_BASES_BIG_BASE_10   CNST_LIMB(0x3b9aca00)
#define MP_BASES_BIG_BASE_INVERTED_10   CNST_LIMB(0x12e0be82)
#define MP_BASES_NORMALIZATION_STEPS_10   2
#define GMP_NUMB_MASK   0xFFFFFFFF
#define MPN_DIVREM_OR_PREINV_DIVREM_1(qp, xsize, ap, size, d, dinv, shift)   mpn_divrem_1 (qp, xsize, ap, size, d)
#define ABOVE_THRESHOLD(size, thresh)
#define BELOW_THRESHOLD(size, thresh)   (! ABOVE_THRESHOLD (size, thresh))
#define POW2_P(n)   (((n) & ((n) - 1)) == 0)
#define __GMP_ALLOCATE_FUNC_LIMBS(n)   TMP_ALLOC(n * sizeof(mp_limb_t))
#define __GMP_FREE_FUNC_LIMBS(p, n)   /* */
#define SCHEME_BIGNUM_USE_FUEL(n)   scheme_bignum_use_fuel(n)
#define INVERSE_3   ((MP_LIMB_T_MAX / 3) * 2 + 1)
#define USE_MORE_MPN
#define TOOM3_MUL_REC(p, a, b, n, ws)
#define TOOM3_SQR_REC(p, a, n, ws)
#define udiv_qrnd_unnorm(q, r, n, d)
#define GET_STR_DC_THRESHOLD   15
#define GET_STR_PRECOMPUTE_THRESHOLD   30
#define BUF_ALLOC   (GET_STR_PRECOMPUTE_THRESHOLD * BITS_PER_MP_LIMB * 7 / 11)
#define ALLOC_SIZE   (2 * un + 30)
#define SET_STR_THRESHOLD   4000
#define SET_STR_BLOCK_SIZE   1 /* Must be a power of 2. */
#define BZ_THRESHOLD   (7 * KARATSUBA_MUL_THRESHOLD)
#define SHL(h, l, cnt)   ((h << cnt) | ((l >> 1) >> ((~cnt) & (BITS_PER_MP_LIMB - 1))))
#define HALF_NAIL   (GMP_NAIL_BITS / 2)
#define Prec   (GMP_NUMB_BITS >> 1)
#define PREINVERT_VIABLE   (UDIV_TIME > 2 * UMUL_TIME + 6 /* && ! TARGET_REGISTER_STARVED */)
#define SQR_KARATSUBA_THRESHOLD   KARATSUBA_SQR_THRESHOLD
#define INVERSE_3   ((MP_LIMB_T_MAX / 3) * 2 + 1)
#define MPN_MOD_OR_MODEXACT_1_ODD(src, size, divisor)   mpn_mod_1 (src, size, divisor)
#define MOD_1_NORM_THRESHOLD   0
#define MOD_1_UNNORM_THRESHOLD   0
#define GCD_ACCEL_THRESHOLD   5
#define HSIZ   ((sizeof (tmp_stack) + __TMP_ALIGN - 1) & -__TMP_ALIGN)

Typedefs

typedef struct powers powers_t
typedef struct tmp_stack tmp_stack

Enumerations

enum  { BMOD_THRESHOLD = GMP_NUMB_BITS/2 }

Functions

void *scheme_malloc_gmp (unsigned long, void **mem_pool)
void scheme_free_gmp (void *, void **mem_pool)
void scheme_bignum_use_fuel (long n)
int mpn_cmp (mp_srcptr op1_ptr, mp_srcptr op2_ptr, mp_size_t size)
mp_limb_t mpn_sub_n (mp_ptr res_ptr, mp_srcptr s1_ptr, mp_srcptr s2_ptr, mp_size_t size)
mp_limb_t mpn_add_n (mp_ptr res_ptr, mp_srcptr s1_ptr, mp_srcptr s2_ptr, mp_size_t size)
static void evaluate3 _PROTO ((mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_srcptr, mp_srcptr, mp_srcptr, mp_size_t, mp_size_t))
static void interpolate3 _PROTO ((mp_srcptr, mp_ptr, mp_ptr, mp_ptr, mp_srcptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_size_t))
static mp_limb_t add2Times _PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t))
void mpn_kara_mul_n (mp_ptr p, mp_srcptr a, mp_srcptr b, mp_size_t n, mp_ptr ws)
void mpn_kara_sqr_n (mp_ptr p, mp_srcptr a, mp_size_t n, mp_ptr ws)
static mp_limb_t add2Times (mp_ptr z, mp_srcptr x, mp_srcptr y, mp_size_t n)
static void evaluate3 (mp_ptr ph, mp_ptr p1, mp_ptr p2, mp_ptr pth, mp_ptr pt1, mp_ptr pt2, mp_srcptr A, mp_srcptr B, mp_srcptr C, mp_size_t len, mp_size_t len2)
static void interpolate3 (mp_srcptr A, mp_ptr B, mp_ptr C, mp_ptr D, mp_srcptr E, mp_ptr ptb, mp_ptr ptc, mp_ptr ptd, mp_size_t len, mp_size_t len2)
void mpn_toom3_mul_n (mp_ptr p, mp_srcptr a, mp_srcptr b, mp_size_t n, mp_ptr ws)
void mpn_toom3_sqr_n (mp_ptr p, mp_srcptr a, mp_size_t n, mp_ptr ws)
void mpn_mul_n (mp_ptr p, mp_srcptr a, mp_srcptr b, mp_size_t n)
mp_limb_t mpn_rshift (mp_ptr wp, mp_srcptr up, mp_size_t usize, unsigned int cnt)
mp_limb_t mpn_lshift (mp_ptr wp, mp_srcptr up, mp_size_t usize, unsigned int cnt)
static unsigned char * mpn_sb_get_str (unsigned char *str, size_t len, mp_ptr up, mp_size_t un, const powers_t *powtab)
static unsigned char * mpn_dc_get_str (unsigned char *str, size_t len, mp_ptr up, mp_size_t un, const powers_t *powtab)
size_t mpn_get_str (unsigned char *str, int base, mp_ptr up, mp_size_t un)
static mp_size_t convert_blocks (mp_ptr dp, const unsigned char *str, size_t str_len, int base)
mp_size_t mpn_set_str (mp_ptr rp, const unsigned char *str, size_t str_len, int base)
void mpn_tdiv_qr (mp_ptr qp, mp_ptr rp, mp_size_t qxn, mp_srcptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn)
static mp_size_t mpn_sqrtrem1 (mp_ptr sp, mp_ptr rp, mp_srcptr np)
static mp_limb_t mpn_sqrtrem2 (mp_ptr sp, mp_ptr rp, mp_srcptr np)
static mp_limb_t mpn_dc_sqrtrem (mp_ptr sp, mp_ptr np, mp_size_t n)
mp_size_t mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr np, mp_size_t nn)
static mp_limb_t
mpn_bz_div_3_halves_by_2 
_PROTO ((mp_ptr qp, mp_ptr np, mp_srcptr dp, mp_size_t n))
mp_limb_t mpn_bz_divrem_n (mp_ptr qp, mp_ptr np, mp_srcptr dp, mp_size_t n)
static mp_limb_t mpn_bz_div_3_halves_by_2 (mp_ptr qp, mp_ptr np, mp_srcptr dp, mp_size_t n)
mp_limb_t mpn_sb_divrem_mn (mp_ptr qp, mp_ptr np, mp_size_t nsize, mp_srcptr dp, mp_size_t dsize)
static mp_limb_t __gmpn_divmod_1_internal (mp_ptr quot_ptr, mp_srcptr dividend_ptr, mp_size_t dividend_size, mp_limb_t divisor_limb)
mp_limb_t mpn_divrem_1 (mp_ptr qp, mp_size_t qxn, mp_srcptr np, mp_size_t nn, mp_limb_t d)
mp_limb_t mpn_divrem_2 (mp_ptr qp, mp_size_t qxn, mp_ptr np, mp_size_t nsize, mp_srcptr dp)
void mpn_mul_basecase (mp_ptr prodp, mp_srcptr up, mp_size_t usize, mp_srcptr vp, mp_size_t vsize)
void mpn_sqr_basecase (mp_ptr prodp, mp_srcptr up, mp_size_t n)
mp_limb_t mpn_mul_1 (mp_ptr res_ptr, mp_srcptr s1_ptr, mp_size_t s1_size, mp_limb_t s2_limb)
mp_limb_t mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
mp_limb_t mpn_submul_1 (mp_ptr res_ptr, mp_srcptr s1_ptr, mp_size_t s1_size, mp_limb_t s2_limb)
mp_limb_t mpn_divexact_by3c (mp_ptr dst, mp_srcptr src, mp_size_t size, mp_limb_t c)
void mpn_sqr_n (mp_ptr prodp, mp_srcptr up, mp_size_t un)
mp_limb_t mpn_mul (mp_ptr prodp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn)
mp_limb_t mpn_divrem (mp_ptr qp, mp_size_t qxn, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn)
mp_limb_t mpn_mod_1 (mp_srcptr up, mp_size_t un, mp_limb_t d)
mp_limb_t mpn_gcd_1 (mp_srcptr up, mp_size_t size, mp_limb_t vlimb)
static mp_size_t gcd_2 (mp_ptr vp, mp_srcptr up)
static mp_limb_t find_a (mp_srcptr cp)
mp_size_t mpn_gcd (mp_ptr gp, mp_ptr up, mp_size_t usize, mp_ptr vp, mp_size_t vsize)
mp_limb_t mpn_bdivmod (mp_ptr qp, mp_ptr up, mp_size_t usize, mp_srcptr vp, mp_size_t vsize, unsigned long int d)
static int __gmp_assert_fail (char *filename, int linenum, char *expr)
void *__gmp_tmp_alloc (unsigned long size)
void __gmp_tmp_mark (tmp_marker *mark)
void __gmp_tmp_free (tmp_marker *mark)
void scheme_gmp_tls_init (long *s)
void *scheme_gmp_tls_load (long *s)
void scheme_gmp_tls_unload (long *s, void *data)
void scheme_gmp_tls_snapshot (long *s, long *save)
void scheme_gmp_tls_restore_snapshot (long *s, void *data, long *save, int do_free)

Variables

static void *mem_pool = 0
static const unsigned char approx_tab [192]
const unsigned char modlimb_invert_table [128]
const unsigned char __clz_tab []
static unsigned long max_total_allocation = 0
static unsigned long current_total_allocation = 0
static tmp_stack xxx = {&xxx, &xxx, 0}
static tmp_stack *current = &xxx

Class Documentation

struct powers

Definition at line 1774 of file gmp.c.

Class Members
int base
size_t digits_in_base
mp_size_t n
mp_ptr p

Define Documentation

#define __GMP_ALLOCATE_FUNC_LIMBS (   n)    TMP_ALLOC(n * sizeof(mp_limb_t))

Definition at line 74 of file gmp.c.

#define __GMP_FREE_FUNC_LIMBS (   p,
  n 
)    /* */

Definition at line 75 of file gmp.c.

#define _EXTERN_INLINE   /* empty */

Definition at line 22 of file gmp.c.

#define _FORCE_INLINES

Definition at line 21 of file gmp.c.

#define ABOVE_THRESHOLD (   size,
  thresh 
)
Value:
((thresh) == 0                        \
   || ((thresh) != MP_SIZE_T_MAX        \
       && (size) >= (thresh)))

Definition at line 63 of file gmp.c.

#define ALLOC_SIZE   (2 * un + 30)
#define BELOW_THRESHOLD (   size,
  thresh 
)    (! ABOVE_THRESHOLD (size, thresh))

Definition at line 67 of file gmp.c.

Definition at line 2486 of file gmp.c.

#define FREE (   p,
  s 
)    scheme_free_gmp(p, &mem_pool)

Definition at line 28 of file gmp.c.

#define GCD_ACCEL_THRESHOLD   5

Definition at line 4599 of file gmp.c.

#define GET_STR_DC_THRESHOLD   15

Definition at line 1766 of file gmp.c.

#define GET_STR_PRECOMPUTE_THRESHOLD   30

Definition at line 1771 of file gmp.c.

#define GMP_LIMB_BITS   BITS_PER_MP_LIMB

Definition at line 37 of file gmp.c.

#define GMP_LIMB_HIGHBIT   ((mp_limb_t)1 << (BITS_PER_MP_LIMB - 1))

Definition at line 39 of file gmp.c.

#define GMP_NAIL_BITS   0

Definition at line 36 of file gmp.c.

#define GMP_NUMB_BITS   BITS_PER_MP_LIMB

Definition at line 38 of file gmp.c.

#define GMP_NUMB_HIGHBIT   GMP_LIMB_HIGHBIT

Definition at line 40 of file gmp.c.

#define GMP_NUMB_MASK   0xFFFFFFFF

Definition at line 51 of file gmp.c.

#define HALF_NAIL   (GMP_NAIL_BITS / 2)

Definition at line 2881 of file gmp.c.

#define HSIZ   ((sizeof (tmp_stack) + __TMP_ALIGN - 1) & -__TMP_ALIGN)

Definition at line 5698 of file gmp.c.

#define INVERSE_3   ((MP_LIMB_T_MAX / 3) * 2 + 1)

Definition at line 4032 of file gmp.c.

#define INVERSE_3   ((MP_LIMB_T_MAX / 3) * 2 + 1)

Definition at line 4032 of file gmp.c.

#define MALLOC (   amt)    scheme_malloc_gmp(amt, &mem_pool)

Definition at line 27 of file gmp.c.

#define MOD_1_NORM_THRESHOLD   0

Definition at line 4341 of file gmp.c.

#define MOD_1_UNNORM_THRESHOLD   0

Definition at line 4344 of file gmp.c.

#define MP_BASES_BIG_BASE_10   CNST_LIMB(0x3b9aca00)

Definition at line 48 of file gmp.c.

#define MP_BASES_BIG_BASE_INVERTED_10   CNST_LIMB(0x12e0be82)

Definition at line 49 of file gmp.c.

#define MP_BASES_CHARS_PER_LIMB_10   9

Definition at line 47 of file gmp.c.

#define MP_BASES_NORMALIZATION_STEPS_10   2

Definition at line 50 of file gmp.c.

#define MPN_DIVREM_OR_PREINV_DIVREM_1 (   qp,
  xsize,
  ap,
  size,
  d,
  dinv,
  shift 
)    mpn_divrem_1 (qp, xsize, ap, size, d)

Definition at line 60 of file gmp.c.

#define MPN_MOD_OR_MODEXACT_1_ODD (   src,
  size,
  divisor 
)    mpn_mod_1 (src, size, divisor)

Definition at line 4327 of file gmp.c.

#define NULL   0L

Definition at line 43 of file gmp.c.

#define POW2_P (   n)    (((n) & ((n) - 1)) == 0)

Definition at line 72 of file gmp.c.

#define Prec   (GMP_NUMB_BITS >> 1)

Definition at line 2943 of file gmp.c.

#define PREINVERT_VIABLE   (UDIV_TIME > 2 * UMUL_TIME + 6 /* && ! TARGET_REGISTER_STARVED */)

Definition at line 3231 of file gmp.c.

#define SCHEME_BIGNUM_USE_FUEL (   n)    scheme_bignum_use_fuel(n)

Definition at line 82 of file gmp.c.

#define SET_STR_BLOCK_SIZE   1 /* Must be a power of 2. */

Definition at line 2154 of file gmp.c.

#define SET_STR_THRESHOLD   4000

Definition at line 2146 of file gmp.c.

#define SHL (   h,
  l,
  cnt 
)    ((h << cnt) | ((l >> 1) >> ((~cnt) & (BITS_PER_MP_LIMB - 1))))

Definition at line 2490 of file gmp.c.

#define SQR_KARATSUBA_THRESHOLD   KARATSUBA_SQR_THRESHOLD

Definition at line 3794 of file gmp.c.

#define TOOM3_MUL_REC (   p,
  a,
  b,
  n,
  ws 
)
Value:
do {                                                    \
    if (n < KARATSUBA_MUL_THRESHOLD)                           \
      mpn_mul_basecase (p, a, n, b, n);                        \
    else if (n < TOOM3_MUL_THRESHOLD)                          \
      mpn_kara_mul_n (p, a, b, n, ws);                         \
    else							\
      mpn_toom3_mul_n (p, a, b, n, ws);                        \
  } while (0)

Definition at line 1268 of file gmp.c.

#define TOOM3_SQR_REC (   p,
  a,
  n,
  ws 
)
Value:
do {                                                    \
    if (n < KARATSUBA_SQR_THRESHOLD)                           \
      mpn_sqr_basecase (p, a, n);                       \
    else if (n < TOOM3_SQR_THRESHOLD)                          \
      mpn_kara_sqr_n (p, a, n, ws);                            \
    else							\
      mpn_toom3_sqr_n (p, a, n, ws);                           \
  } while (0)

Definition at line 1379 of file gmp.c.

#define udiv_qrnd_unnorm (   q,
  r,
  n,
  d 
)
Value:
do {                                  \
    mp_limb_t  __q = (n) / (d);         \
    mp_limb_t  __r = (n) - __q*(d);     \
    (q) = __q;                          \
    (r) = __r;                          \
  } while (0)

Definition at line 1755 of file gmp.c.

#define USE_MORE_MPN

Definition at line 211 of file gmp.c.


Typedef Documentation

typedef struct powers powers_t

Definition at line 1781 of file gmp.c.

typedef struct tmp_stack tmp_stack

Definition at line 5689 of file gmp.c.


Enumeration Type Documentation

anonymous enum
Enumerator:
BMOD_THRESHOLD 

Definition at line 4605 of file gmp.c.


Function Documentation

static int __gmp_assert_fail ( char *  filename,
int  linenum,
char *  expr 
) [static]

Definition at line 5647 of file gmp.c.

{
#if 0
  if (filename != NULL && filename[0] != '\0')
    {
      fprintf (stderr, "%s:", filename);
      if (linenum != -1)
        fprintf (stderr, "%d: ", linenum);
    }

  fprintf (stderr, "GNU MP assertion failed: %s\n", expr);
  abort();
#endif

  /*NOTREACHED*/
  return 0;
}
void* __gmp_tmp_alloc ( unsigned long  size)

Definition at line 5706 of file gmp.c.

{
  void *that;

  if (size > (char *) current->end - (char *) current->alloc_point)
    {
      void *chunk;
      tmp_stack *header;
      unsigned long chunk_size;
      unsigned long now;

      /* Allocate a chunk that makes the total current allocation somewhat
        larger than the maximum allocation ever.  If size is very large, we
        allocate that much.  */

      now = current_total_allocation + size;
      if (now > max_total_allocation)
       {
         /* We need more temporary memory than ever before.  Increase
            for future needs.  */
         now = now * 3 / 2;
         chunk_size = now - current_total_allocation + HSIZ;
         current_total_allocation = now;
         max_total_allocation = current_total_allocation;
       }
      else
       {
         chunk_size = max_total_allocation - current_total_allocation + HSIZ;
         current_total_allocation = max_total_allocation;
       }

      chunk = MALLOC (chunk_size);
      header = (tmp_stack *) chunk;
      header->end = (char *) chunk + chunk_size;
      header->alloc_point = (char *) chunk + HSIZ;
      header->prev = current;
      current = header;
    }

  that = current->alloc_point;
  current->alloc_point = (char *) that + size;
  return that;
}

void __gmp_tmp_free ( tmp_marker *  mark)

Definition at line 5772 of file gmp.c.

{
  while (mark->which_chunk != current)
    {
      tmp_stack *tmp;

      tmp = current;
      current = tmp->prev;
      current_total_allocation -= (((char *) (tmp->end) - (char *) tmp) - HSIZ);
      FREE (tmp, (char *) tmp->end - (char *) tmp);
    }
  current->alloc_point = mark->alloc_point;
}

void __gmp_tmp_mark ( tmp_marker *  mark)

Definition at line 5759 of file gmp.c.

{
  mark->which_chunk = current;
  mark->alloc_point = current->alloc_point;
}
static mp_limb_t __gmpn_divmod_1_internal ( mp_ptr  quot_ptr,
mp_srcptr  dividend_ptr,
mp_size_t  dividend_size,
mp_limb_t  divisor_limb 
) [static]

Definition at line 3414 of file gmp.c.

{
  mp_size_t i;
  mp_limb_t n1, n0, r;

  /* ??? Should this be handled at all?  Rely on callers?  */
  if (dividend_size == 0)
    return 0;

  /* If multiplication is much faster than division, and the
     dividend is large, pre-invert the divisor, and use
     only multiplications in the inner loop.  */

  /* This test should be read:
       Does it ever help to use udiv_qrnnd_preinv?
        && Does what we save compensate for the inversion overhead?  */
  if (UDIV_TIME > (2 * UMUL_TIME + 6)
      && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME)
    {
      int normalization_steps;

      count_leading_zeros (normalization_steps, divisor_limb);
      if (normalization_steps != 0)
       {
         mp_limb_t divisor_limb_inverted;

         divisor_limb <<= normalization_steps;
         invert_limb (divisor_limb_inverted, divisor_limb);

         n1 = dividend_ptr[dividend_size - 1];
         r = n1 >> (BITS_PER_MP_LIMB - normalization_steps);

         /* Possible optimization:
            if (r == 0
            && divisor_limb > ((n1 << normalization_steps)
                          | (dividend_ptr[dividend_size - 2] >> ...)))
            ...one division less... */

         for (i = dividend_size - 2; i >= 0; i--)
           {
             n0 = dividend_ptr[i];
             udiv_qrnnd_preinv (quot_ptr[i + 1], r, r,
                             ((n1 << normalization_steps)
                              | (n0 >> (BITS_PER_MP_LIMB - normalization_steps))),
                             divisor_limb, divisor_limb_inverted);
             n1 = n0;
           }
         udiv_qrnnd_preinv (quot_ptr[0], r, r,
                          n1 << normalization_steps,
                          divisor_limb, divisor_limb_inverted);
         return r >> normalization_steps;
       }
      else
       {
         mp_limb_t divisor_limb_inverted;

         invert_limb (divisor_limb_inverted, divisor_limb);

         i = dividend_size - 1;
         r = dividend_ptr[i];

         if (r >= divisor_limb)
           r = 0;
         else
           {
             quot_ptr[i] = 0;
             i--;
           }

         for (; i >= 0; i--)
           {
             n0 = dividend_ptr[i];
             udiv_qrnnd_preinv (quot_ptr[i], r, r,
                             n0, divisor_limb, divisor_limb_inverted);
           }
         return r;
       }
    }
  else
    {
      if (UDIV_NEEDS_NORMALIZATION)
       {
         int normalization_steps;

         count_leading_zeros (normalization_steps, divisor_limb);
         if (normalization_steps != 0)
           {
             divisor_limb <<= normalization_steps;

             n1 = dividend_ptr[dividend_size - 1];
             r = n1 >> (BITS_PER_MP_LIMB - normalization_steps);

             /* Possible optimization:
               if (r == 0
               && divisor_limb > ((n1 << normalization_steps)
                             | (dividend_ptr[dividend_size - 2] >> ...)))
               ...one division less... */

             for (i = dividend_size - 2; i >= 0; i--)
              {
                n0 = dividend_ptr[i];
                udiv_qrnnd (quot_ptr[i + 1], r, r,
                           ((n1 << normalization_steps)
                            | (n0 >> (BITS_PER_MP_LIMB - normalization_steps))),
                           divisor_limb);
                n1 = n0;
              }
             udiv_qrnnd (quot_ptr[0], r, r,
                       n1 << normalization_steps,
                       divisor_limb);
             return r >> normalization_steps;
           }
       }
      /* No normalization needed, either because udiv_qrnnd doesn't require
        it, or because DIVISOR_LIMB is already normalized.  */

      i = dividend_size - 1;
      r = dividend_ptr[i];

      if (r >= divisor_limb)
       r = 0;
      else
       {
         quot_ptr[i] = 0;
         i--;
       }

      for (; i >= 0; i--)
       {
         n0 = dividend_ptr[i];
         udiv_qrnnd (quot_ptr[i], r, r, n0, divisor_limb);
       }
      return r;
    }
}

Here is the caller graph for this function:

static mp_limb_t mpn_bz_div_3_halves_by_2 _PROTO ( (mp_ptr qp, mp_ptr np, mp_srcptr dp, mp_size_t n)  ) [static]
static mp_limb_t add2Times ( mp_ptr  z,
mp_srcptr  x,
mp_srcptr  y,
mp_size_t  n 
) [inline, static]

Definition at line 686 of file gmp.c.

{
  mp_ptr t;
  mp_limb_t c;
  TMP_DECL (marker);
  TMP_MARK (marker);
  t = (mp_ptr) TMP_ALLOC (n * BYTES_PER_MP_LIMB);
  c = mpn_lshift (t, y, n, 1);
  c += mpn_add_n (z, x, t, n);
  TMP_FREE (marker);
  return c;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static mp_size_t convert_blocks ( mp_ptr  dp,
const unsigned char *  str,
size_t  str_len,
int  base 
) [static]

Definition at line 2424 of file gmp.c.

{
  int chars_per_limb;
  mp_size_t i;
  int j;
  int ds;
  mp_size_t dsize;
  mp_limb_t res_digit;

  chars_per_limb = __mp_bases[base].chars_per_limb;

  dsize = str_len / chars_per_limb;
  ds = str_len % chars_per_limb;

  if (ds != 0)
    {
      res_digit = *str++;
      for (j = ds - 1; j != 0; j--)
       res_digit = res_digit * base + *str++;
      dp[dsize] = res_digit;
    }

  if (base == 10)
    {
      for (i = dsize - 1; i >= 0; i--)
       {
         res_digit = *str++;
         for (j = MP_BASES_CHARS_PER_LIMB_10 - 1; j != 0; j--)
           res_digit = res_digit * 10 + *str++;
         dp[i] = res_digit;
       }
    }
  else
    {
      for (i = dsize - 1; i >= 0; i--)
       {
         res_digit = *str++;
         for (j = chars_per_limb - 1; j != 0; j--)
           res_digit = res_digit * base + *str++;
         dp[i] = res_digit;
       }
    }

  return dsize + (ds != 0);
}

Here is the caller graph for this function:

static void evaluate3 ( mp_ptr  ph,
mp_ptr  p1,
mp_ptr  p2,
mp_ptr  pth,
mp_ptr  pt1,
mp_ptr  pt2,
mp_srcptr  A,
mp_srcptr  B,
mp_srcptr  C,
mp_size_t  len,
mp_size_t  len2 
) [static]

Definition at line 761 of file gmp.c.

{
  mp_limb_t c, d, e;
  
  ASSERT (len - len2 <= 2);

  e = mpn_lshift (p1, B, len, 1);

  c = mpn_lshift (ph, A, len, 2);
  c += e + mpn_add_n (ph, ph, p1, len);
  d = mpn_add_n (ph, ph, C, len2);
  if (len2 == len) c += d; else c += mpn_add_1 (ph + len2, ph + len2, len-len2, d);
  ASSERT (c < 7);
  *pth = c;

  c = mpn_lshift (p2, C, len2, 2);
#if 1
  if (len2 != len) { p2[len-1] = 0; p2[len2] = c; c = 0; }
  c += e + mpn_add_n (p2, p2, p1, len);
#else
  d = mpn_add_n (p2, p2, p1, len2);
  c += d;
  if (len2 != len) c = mpn_add_1 (p2+len2, p1+len2, len-len2, c);
  c += e;
#endif
  c += mpn_add_n (p2, p2, A, len);
  ASSERT (c < 7);
  *pt2 = c;

  c = mpn_add_n (p1, A, B, len);
  d = mpn_add_n (p1, p1, C, len2);
  if (len2 == len) c += d;
  else c += mpn_add_1 (p1+len2, p1+len2, len-len2, d);
  ASSERT (c < 3);
  *pt1 = c;

}

Here is the call graph for this function:

Here is the caller graph for this function:

static mp_limb_t find_a ( mp_srcptr  cp) [inline, static]

Definition at line 4678 of file gmp.c.

{
  unsigned long int leading_zero_bits = 0;

  mp_limb_t n1_l = cp[0];   /* N1 == n1_h * 2^GMP_NUMB_BITS + n1_l.  */
  mp_limb_t n1_h = cp[1];

  mp_limb_t n2_l = (-n1_l & GMP_NUMB_MASK);      /* N2 == n2_h * 2^GMP_NUMB_BITS + n2_l.  */
  mp_limb_t n2_h = (~n1_h & GMP_NUMB_MASK);

  /* Main loop.  */
  while (n2_h != 0)         /* While N2 >= 2^GMP_NUMB_BITS.  */
    {
      /* N1 <-- N1 % N2.  */
      if (((GMP_NUMB_HIGHBIT >> leading_zero_bits) & n2_h) == 0)
       {
         unsigned long int i;
         count_leading_zeros (i, n2_h);
         i -= GMP_NAIL_BITS;
         i -= leading_zero_bits;
         leading_zero_bits += i;
         n2_h = ((n2_h << i) & GMP_NUMB_MASK) | (n2_l >> (GMP_NUMB_BITS - i));
         n2_l = (n2_l << i) & GMP_NUMB_MASK;
         do
           {
             if (n1_h > n2_h || (n1_h == n2_h && n1_l >= n2_l))
              {
                n1_h -= n2_h + (n1_l < n2_l);
                n1_l = (n1_l - n2_l) & GMP_NUMB_MASK;
              }
             n2_l = (n2_l >> 1) | ((n2_h << (GMP_NUMB_BITS - 1)) & GMP_NUMB_MASK);
             n2_h >>= 1;
             i -= 1;
           }
         while (i != 0);
       }
      if (n1_h > n2_h || (n1_h == n2_h && n1_l >= n2_l))
       {
         n1_h -= n2_h + (n1_l < n2_l);
         n1_l = (n1_l - n2_l) & GMP_NUMB_MASK;
       }

      MP_LIMB_T_SWAP (n1_h, n2_h);
      MP_LIMB_T_SWAP (n1_l, n2_l);
    }

  return n2_l;
}

Here is the caller graph for this function:

static mp_size_t gcd_2 ( mp_ptr  vp,
mp_srcptr  up 
) [inline, static]

Definition at line 4614 of file gmp.c.

{
  mp_limb_t u0, u1, v0, v1;
  mp_size_t vsize;

  u0 = up[0];
  u1 = up[1];
  v0 = vp[0];
  v1 = vp[1];

  while (u1 != v1 && u0 != v0)
    {
      unsigned long int r;
      if (u1 > v1)
       {
         u1 -= v1 + (u0 < v0);
         u0 = (u0 - v0) & GMP_NUMB_MASK;
         count_trailing_zeros (r, u0);
         u0 = ((u1 << (GMP_NUMB_BITS - r)) & GMP_NUMB_MASK) | (u0 >> r);
         u1 >>= r;
       }
      else  /* u1 < v1.  */
       {
         v1 -= u1 + (v0 < u0);
         v0 = (v0 - u0) & GMP_NUMB_MASK;
         count_trailing_zeros (r, v0);
         v0 = ((v1 << (GMP_NUMB_BITS - r)) & GMP_NUMB_MASK) | (v0 >> r);
         v1 >>= r;
       }
    }

  vp[0] = v0, vp[1] = v1, vsize = 1 + (v1 != 0);

  /* If U == V == GCD, done.  Otherwise, compute GCD (V, |U - V|).  */
  if (u1 == v1 && u0 == v0)
    return vsize;

  v0 = (u0 == v0) ? (u1 > v1) ? u1-v1 : v1-u1 : (u0 > v0) ? u0-v0 : v0-u0;
  vp[0] = mpn_gcd_1 (vp, vsize, v0);

  return 1;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void interpolate3 ( mp_srcptr  A,
mp_ptr  B,
mp_ptr  C,
mp_ptr  D,
mp_srcptr  E,
mp_ptr  ptb,
mp_ptr  ptc,
mp_ptr  ptd,
mp_size_t  len,
mp_size_t  len2 
) [static]

Definition at line 927 of file gmp.c.

{
  mp_ptr ws;
  mp_limb_t t, tb,tc,td;
  TMP_DECL (marker);
  TMP_MARK (marker);

  ASSERT (len - len2 == 0 || len - len2 == 2 || len - len2 == 4);

  /* Let x1, x2, x3 be the values to interpolate.  We have:
   *         b = 16*a + 8*x1 + 4*x2 + 2*x3 +    e
   *         c =    a +   x1 +   x2 +   x3 +    e
   *         d =    a + 2*x1 + 4*x2 + 8*x3 + 16*e
   */

  ws = (mp_ptr) TMP_ALLOC (len * BYTES_PER_MP_LIMB);

  tb = *ptb; tc = *ptc; td = *ptd;


  /* b := b - 16*a -    e
   * c := c -    a -    e
   * d := d -    a - 16*e
   */

  t = mpn_lshift (ws, A, len, 4);
  tb -= t + mpn_sub_n (B, B, ws, len);
  t = mpn_sub_n (B, B, E, len2);
  if (len2 == len) tb -= t;
  else tb -= mpn_sub_1 (B+len2, B+len2, len-len2, t);

  tc -= mpn_sub_n (C, C, A, len);
  t = mpn_sub_n (C, C, E, len2);
  if (len2 == len) tc -= t;
  else tc -= mpn_sub_1 (C+len2, C+len2, len-len2, t);

  t = mpn_lshift (ws, E, len2, 4);
  t += mpn_add_n (ws, ws, A, len2);
#if 1
  if (len2 != len) t = mpn_add_1 (ws+len2, A+len2, len-len2, t);
  td -= t + mpn_sub_n (D, D, ws, len);
#else
  t += mpn_sub_n (D, D, ws, len2);
  if (len2 != len) {
    t = mpn_sub_1 (D+len2, D+len2, len-len2, t);
    t += mpn_sub_n (D+len2, D+len2, A+len2, len-len2);
  } /* end if/else */
  td -= t;
#endif


  /* b, d := b + d, b - d */

#ifdef HAVE_MPN_ADD_SUB_N
  /* #error TO DO ... */
#else
  t = tb + td + mpn_add_n (ws, B, D, len);  
  td = tb - td - mpn_sub_n (D, B, D, len);
  tb = t;
  MPN_COPY (B, ws, len);
#endif
  
  /* b := b-8*c */
  t = 8 * tc + mpn_lshift (ws, C, len, 3);
  tb -= t + mpn_sub_n (B, B, ws, len);

  /* c := 2*c - b */
  tc = 2 * tc + mpn_lshift (C, C, len, 1);
  tc -= tb + mpn_sub_n (C, C, B, len);

  /* d := d/3 */
  td = (td - mpn_divexact_by3 (D, D, len)) * INVERSE_3;

  /* b, d := b + d, b - d */
#ifdef HAVE_MPN_ADD_SUB_N
  /* #error TO DO ... */
#else
  t = tb + td + mpn_add_n (ws, B, D, len);  
  td = tb - td - mpn_sub_n (D, B, D, len);
  tb = t;
  MPN_COPY (B, ws, len);
#endif

      /* Now:
       *       b = 4*x1
       *       c = 2*x2
       *       d = 4*x3
       */

  ASSERT(!(*B & 3));
  mpn_rshift (B, B, len, 2);
  B[len-1] |= tb<<(BITS_PER_MP_LIMB-2);
  ASSERT((long)tb >= 0);
  tb >>= 2;

  ASSERT(!(*C & 1));
  mpn_rshift (C, C, len, 1);
  C[len-1] |= tc<<(BITS_PER_MP_LIMB-1);
  ASSERT((long)tc >= 0);
  tc >>= 1;

  ASSERT(!(*D & 3));
  mpn_rshift (D, D, len, 2);
  D[len-1] |= td<<(BITS_PER_MP_LIMB-2);
  ASSERT((long)td >= 0);
  td >>= 2;

#if WANT_ASSERT
  ASSERT (tb < 2);
  if (len == len2)
    {
      ASSERT (tc < 3);
      ASSERT (td < 2);
    }
  else
    {
      ASSERT (tc < 2);
      ASSERT (!td);
    }
#endif

  *ptb = tb;
  *ptc = tc;
  *ptd = td;

  TMP_FREE (marker);
}

Here is the call graph for this function:

Here is the caller graph for this function:

mp_limb_t mpn_add_n ( mp_ptr  res_ptr,
mp_srcptr  s1_ptr,
mp_srcptr  s2_ptr,
mp_size_t  size 
)

Definition at line 166 of file gmp.c.

{
  register mp_limb_t x, y, cy;
  register mp_size_t j;

  /* The loop counter and index J goes from -SIZE to -1.  This way
     the loop becomes faster.  */
  j = -size;

  /* Offset the base pointers to compensate for the negative indices.  */
  s1_ptr -= j;
  s2_ptr -= j;
  res_ptr -= j;

  cy = 0;
  do
    {
      y = s2_ptr[j];
      x = s1_ptr[j];
      y += cy;                     /* add previous carry to one addend */
      cy = (y < cy);        /* get out carry from that addition */
      y = x + y;            /* add other addend */
      cy = (y < x) + cy;    /* get out carry from that add, combine */
      res_ptr[j] = y;
    }
  while (++j != 0);

  return cy;
}

Here is the caller graph for this function:

mp_limb_t mpn_addmul_1 ( mp_ptr  rp,
mp_srcptr  up,
mp_size_t  n,
mp_limb_t  vl 
)

Definition at line 3960 of file gmp.c.

{
  mp_limb_t ul, cl, hpl, lpl, rl;
 
  SCHEME_BIGNUM_USE_FUEL(n);
 
  cl = 0;
  do
    {
      ul = *up++;
      umul_ppmm (hpl, lpl, ul, vl);
                                                                                
      lpl += cl;
      cl = (lpl < cl) + hpl;
                                                                                
      rl = *rp;
      lpl = rl + lpl;
      cl += lpl < rl;
      *rp++ = lpl;
    }
  while (--n != 0);
                                                                                
  return cl;
}

Here is the caller graph for this function:

mp_limb_t mpn_bdivmod ( mp_ptr  qp,
mp_ptr  up,
mp_size_t  usize,
mp_srcptr  vp,
mp_size_t  vsize,
unsigned long int  d 
)

Definition at line 5018 of file gmp.c.

{
  mp_limb_t v_inv;

  ASSERT (usize >= 1);
  ASSERT (vsize >= 1);
  ASSERT (usize * GMP_NUMB_BITS >= d);
  ASSERT (! MPN_OVERLAP_P (up, usize, vp, vsize));
  ASSERT (! MPN_OVERLAP_P (qp, d/GMP_NUMB_BITS, vp, vsize));
  ASSERT (MPN_SAME_OR_INCR2_P (qp, d/GMP_NUMB_BITS, up, usize));
  /* ASSERT_MPN (up, usize); */
  /* ASSERT_MPN (vp, vsize); */

  /* 1/V mod 2^GMP_NUMB_BITS. */
  modlimb_invert (v_inv, vp[0]);

  /* Fast code for two cases previously used by the accel part of mpn_gcd.
     (Could probably remove this now it's inlined there.) */
  if (usize == 2 && vsize == 2 &&
      (d == GMP_NUMB_BITS || d == 2*GMP_NUMB_BITS))
    {
      mp_limb_t hi, lo;
      mp_limb_t q = (up[0] * v_inv) & GMP_NUMB_MASK;
      umul_ppmm (hi, lo, q, vp[0] << GMP_NAIL_BITS);
      up[0] = 0;
      up[1] -= hi + q*vp[1];
      qp[0] = q;
      if (d == 2*GMP_NUMB_BITS)
        {
          q = (up[1] * v_inv) & GMP_NUMB_MASK;
          up[1] = 0;
          qp[1] = q;
        }
      return 0;
    }

  /* Main loop.  */
  while (d >= GMP_NUMB_BITS)
    {
      mp_limb_t q = (up[0] * v_inv) & GMP_NUMB_MASK;
      mp_limb_t b = mpn_submul_1 (up, vp, MIN (usize, vsize), q);
      if (usize > vsize)
       mpn_sub_1 (up + vsize, up + vsize, usize - vsize, b);
      d -= GMP_NUMB_BITS;
      up += 1, usize -= 1;
      *qp++ = q;
    }

  if (d)
    {
      mp_limb_t b;
      mp_limb_t q = (up[0] * v_inv) & (((mp_limb_t)1<<d) - 1);
      if (q <= 1)
       {
         if (q == 0)
           return 0;
         else
           b = mpn_sub_n (up, up, vp, MIN (usize, vsize));
       }
      else
       b = mpn_submul_1 (up, vp, MIN (usize, vsize), q);

      if (usize > vsize)
       mpn_sub_1 (up + vsize, up + vsize, usize - vsize, b);
      return q;
    }

  return 0;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static mp_limb_t mpn_bz_div_3_halves_by_2 ( mp_ptr  qp,
mp_ptr  np,
mp_srcptr  dp,
mp_size_t  n 
) [static]

Definition at line 3180 of file gmp.c.

{
  /* Burnikel-Ziegler "3 halves by 2" step: divide the 3n-limb number
     {np, 3n} by the 2n-limb divisor {dp, 2n}, writing an n-limb quotient
     at qp and leaving the remainder in np.  Returns the high quotient
     limb (0 or 1).  */
  mp_size_t twon = n + n; 
  mp_limb_t qhl, cc;
  mp_ptr tmp;
  TMP_DECL (marker);

  TMP_MARK (marker);
  /* Estimate Q by dividing the top 2n limbs of N by the top n limbs of D,
     recursing once n is large enough for the BZ scheme to pay off.  */
  if (n < BZ_THRESHOLD)
    qhl = mpn_sb_divrem_mn (qp, np + n, twon, dp + n, n);
  else
    qhl = mpn_bz_divrem_n (qp, np + n, dp + n, n);
  /* Subtract Q times the low half of D; the estimate may be slightly
     too large, leaving a "negative" remainder flagged in cc.  */
  tmp = (mp_ptr) TMP_ALLOC (twon * BYTES_PER_MP_LIMB);
  mpn_mul_n (tmp, qp, dp, n);
  cc = mpn_sub_n (np, np, tmp, twon);
  TMP_FREE (marker);
  if (qhl) cc += mpn_sub_n (np + n, np + n, dp, n);
  /* Correct: while the remainder is negative, decrement Q and add D.  */
  while (cc)
    {
      qhl -= mpn_sub_1 (qp, qp, n, (mp_limb_t) 1);
      cc -= mpn_add_n (np, np, dp, twon);
    }
  return qhl;
}

Here is the call graph for this function:

Here is the caller graph for this function:

Definition at line 3139 of file gmp.c.

{
  /* NOTE(review): the signature is not visible in this chunk; from the
     recursive calls this is mpn_bz_divrem_n (qp, np, dp, n):
     Burnikel-Ziegler division of {np, 2n} by {dp, n}, quotient to qp,
     remainder left in np, returning the high quotient limb -- confirm
     against the declaration.  */
  mp_limb_t qhl, cc;

  if (n % 2 != 0)
    {
      /* Odd n: divide using the (n-1)-limb divisor dp+1, fix the result
        up for the dropped low limb dp[0], then recover the lowest
        quotient limb with one schoolbook step.  */
      qhl = mpn_bz_divrem_n (qp + 1, np + 2, dp + 1, n - 1);
      cc = mpn_submul_1 (np + 1, qp + 1, n - 1, dp[0]);
      cc = mpn_sub_1 (np + n, np + n, 1, cc);
      if (qhl) cc += mpn_sub_1 (np + n, np + n, 1, dp[0]);
      while (cc)
        {
          qhl -= mpn_sub_1 (qp + 1, qp + 1, n - 1, (mp_limb_t) 1);
          cc -= mpn_add_n (np + 1, np + 1, dp, n);
        }
      qhl += mpn_add_1 (qp + 1, qp + 1, n - 1,
                        mpn_sb_divrem_mn (qp, np, n + 1, dp, n));
    }
  else
    {
      /* Even n: two "3 halves by 2" steps, high half then low half.  */
      mp_size_t n2 = n/2;
      qhl = mpn_bz_div_3_halves_by_2 (qp + n2, np + n2, dp, n2);
      qhl += mpn_add_1 (qp + n2, qp + n2, n2,
                        mpn_bz_div_3_halves_by_2 (qp, np, dp, n2));
    }
  return qhl;
}

Here is the call graph for this function:

Here is the caller graph for this function:

int mpn_cmp ( mp_srcptr  op1_ptr,
mp_srcptr  op2_ptr,
mp_size_t  size 
)

Definition at line 94 of file gmp.c.

{
  /* Compare {op1_ptr, size} with {op2_ptr, size}: return +1, -1 or 0 as
     the first operand is greater than, less than, or equal to the
     second.  Scan from the most significant limb down; the first
     differing limb decides.  */
  mp_size_t idx;

  for (idx = size - 1; idx >= 0; idx--)
    {
      mp_limb_t w1 = op1_ptr[idx];
      mp_limb_t w2 = op2_ptr[idx];
      if (w1 != w2)
        {
          /* Compare the limbs directly; forming w1 - w2 and testing its
             sign would risk signed overflow.  */
          return (w1 > w2) ? 1 : -1;
        }
    }
  return 0;
}

Here is the caller graph for this function:

static unsigned char* mpn_dc_get_str ( unsigned char *  str,
size_t  len,
mp_ptr  up,
mp_size_t  un,
const powers_t *  powtab 
) [static]

Definition at line 1950 of file gmp.c.

{
  /* Divide-and-conquer digit conversion of {up, un} into the buffer at
     str, using the precomputed table of powers of the base (powtab,
     walked downward via powtab - 1).  A non-zero len requests that many
     digit positions, zero-padded at the front.  Returns a pointer just
     past the last digit written.  */
  if (un < GET_STR_DC_THRESHOLD)
    {
      /* Small operand: basecase conversion, or pure zero-fill when the
        operand itself is zero.  */
      if (un != 0)
       str = mpn_sb_get_str (str, len, up, un, powtab);
      else
       {
         while (len != 0)
           {
             *str++ = 0;
             len--;
           }
       }
    }
  else
    {
      mp_ptr pwp, qp, rp;
      mp_size_t pwn, qn;

      pwp = powtab->p;
      pwn = powtab->n;

      if (un < pwn || (un == pwn && mpn_cmp (up, pwp, un) < 0))
       {
         /* Operand is smaller than this power; move to the next smaller
            power without splitting.  */
         str = mpn_dc_get_str (str, len, up, un, powtab - 1);
       }
      else
       {
         TMP_DECL (marker);
         TMP_MARK (marker);
         qp = TMP_ALLOC_LIMBS (un - pwn + 1);
         rp = TMP_ALLOC_LIMBS (pwn);

         /* Split U = Q * power + R, then convert Q (high digits) and R
            (low digits, fixed width powtab->digits_in_base).  */
         mpn_tdiv_qr (qp, rp, 0L, up, un, pwp, pwn);
         qn = un - pwn; qn += qp[qn] != 0;              /* quotient size */
         if (len != 0)
           len = len - powtab->digits_in_base;
         str = mpn_dc_get_str (str, len, qp, qn, powtab - 1);
         str = mpn_dc_get_str (str, powtab->digits_in_base, rp, pwn, powtab - 1);
         TMP_FREE (marker);
       }
    }
  return str;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static mp_limb_t mpn_dc_sqrtrem ( mp_ptr  sp,
mp_ptr  np,
mp_size_t  n 
) [static]

Definition at line 2992 of file gmp.c.

{
  /* Divide-and-conquer square root with remainder (cf. Zimmermann's
     "Karatsuba Square Root"): writes floor(sqrt({np, 2n})) to {sp, n},
     leaves the remainder in the low limbs of np, and returns the
     remainder's carry bit.  The input must be normalized per the ASSERT
     below so the root has full length.  */
  mp_limb_t q;                     /* carry out of {sp, n} */
  int c, b;                 /* carry out of remainder */
  mp_size_t l, h;

  ASSERT (np[2 * n - 1] >= GMP_NUMB_HIGHBIT / 2);

  if (n == 1)
    c = mpn_sqrtrem2 (sp, np, np);
  else
    {
      l = n / 2;
      h = n - l;
      /* Root of the high half, then one division to estimate the low
        half of the root.  */
      q = mpn_dc_sqrtrem (sp + l, np + 2 * l, h);
      if (q != 0)
        mpn_sub_n (np + 2 * l, np + 2 * l, sp + l, h);
      q += mpn_divrem (sp, 0, np + l, n, sp + l, h);
      c = sp[0] & 1;
      mpn_rshift (sp, sp, l, 1);
      sp[l - 1] |= (q << (GMP_NUMB_BITS - 1)) & GMP_NUMB_MASK;
      q >>= 1;
      if (c != 0)
        c = mpn_add_n (np + l, np + l, sp + l, h);
      /* Subtract the square of the low part of the root.  */
      mpn_sqr_n (np + n, sp, l);
      b = q + mpn_sub_n (np, np, np + n, 2 * l);
      c -= (l == h) ? b : mpn_sub_1 (np + 2 * l, np + 2 * l, 1, b);
      q = mpn_add_1 (sp + l, sp + l, h, q);

      /* If the remainder went negative the candidate root was one too
        large: add back 2*S - 1 and decrement S.  */
      if (c < 0)
        {
          c += mpn_addmul_1 (np, sp, n, 2) + 2 * q;
          c -= mpn_sub_1 (np, np, n, 1);
          q -= mpn_sub_1 (sp, sp, n, 1);
        }
    }

  return c;
}

Here is the call graph for this function:

Here is the caller graph for this function:

Definition at line 4049 of file gmp.c.

{
  /* NOTE(review): the signature is outside this chunk; from the use of
     INVERSE_3 this appears to be mpn_divexact_by3c (dst, src, size, c):
     exact division of {src, size} by 3 via multiplication with the
     modular inverse of 3, with incoming borrow c, returning the final
     borrow.  Presumably the dividend must be exactly divisible when the
     returned borrow is to be 0 -- confirm against callers.  */
  mp_size_t  i;

  SCHEME_BIGNUM_USE_FUEL(size);

  ASSERT (size >= 1);

  i = 0;
  do
    {
      mp_limb_t  l, s;

      s = src[i];
      /* Propagate the running borrow into this limb.  */
      l = s - c;
      c = (l > s);

      /* Multiply by 3^{-1} mod 2^BITS_PER_MP_LIMB to get the quotient
        limb.  */
      l *= INVERSE_3;
      dst[i] = l;

      /* The size of the quotient limb tells how many extra borrows of
        2^BITS_PER_MP_LIMB were consumed (0, 1 or 2).  */
      c += (l > MP_LIMB_T_MAX/3);
      c += (l > (MP_LIMB_T_MAX/3)*2);
    }
  while (++i < size);

  return c;
}
mp_limb_t mpn_divrem ( mp_ptr  qp,
mp_size_t  qxn,
mp_ptr  np,
mp_size_t  nn,
mp_srcptr  dp,
mp_size_t  dn 
)

Definition at line 4254 of file gmp.c.

{
  /* Divide {np, nn} by {dp, dn}, producing nn-dn+qxn quotient limbs at qp
     (qxn of them fraction limbs) and leaving the dn-limb remainder in the
     low limbs of np.  Returns the most significant quotient limb.
     Dispatches on divisor size: 1 limb, 2 limbs, or general mpn_tdiv_qr.  */
  SCHEME_BIGNUM_USE_FUEL(dn + nn);

  if (dn == 1)
    {
      mp_limb_t ret;
      mp_ptr q2p;
      mp_size_t qn;
      TMP_DECL (marker);

      TMP_MARK (marker);
      /* Work into a temporary so the quotient can overlap np/qp freely,
        then copy back, returning the top limb separately.  */
      q2p = (mp_ptr) TMP_ALLOC ((nn + qxn) * BYTES_PER_MP_LIMB);

      np[0] = mpn_divrem_1 (q2p, qxn, np, nn, dp[0]);
      qn = nn + qxn - 1;
      MPN_COPY (qp, q2p, qn);
      ret = q2p[qn];

      TMP_FREE (marker);
      return ret;
    }
  else if (dn == 2)
    {
      return mpn_divrem_2 (qp, qxn, np, nn, dp);
    }
  else
    {
      mp_ptr rp, q2p;
      mp_limb_t qhl;
      mp_size_t qn;
      TMP_DECL (marker);

      TMP_MARK (marker);
      if (qxn != 0)
       {
         /* Fraction limbs requested: extend the dividend with qxn low
            zero limbs before the full division.  */
         mp_ptr n2p;
         n2p = (mp_ptr) TMP_ALLOC ((nn + qxn) * BYTES_PER_MP_LIMB);
         MPN_ZERO (n2p, qxn);
         MPN_COPY (n2p + qxn, np, nn);
         q2p = (mp_ptr) TMP_ALLOC ((nn - dn + qxn + 1) * BYTES_PER_MP_LIMB);
         rp = (mp_ptr) TMP_ALLOC (dn * BYTES_PER_MP_LIMB);
         mpn_tdiv_qr (q2p, rp, 0L, n2p, nn + qxn, dp, dn);
         MPN_COPY (np, rp, dn);
         qn = nn - dn + qxn;
         MPN_COPY (qp, q2p, qn);
         qhl = q2p[qn];
       }
      else
       {
         q2p = (mp_ptr) TMP_ALLOC ((nn - dn + 1) * BYTES_PER_MP_LIMB);
         rp = (mp_ptr) TMP_ALLOC (dn * BYTES_PER_MP_LIMB);
         mpn_tdiv_qr (q2p, rp, 0L, np, nn, dp, dn);
         MPN_COPY (np, rp, dn);    /* overwrite np area with remainder */
         qn = nn - dn;
         MPN_COPY (qp, q2p, qn);
         qhl = q2p[qn];
       }
      TMP_FREE (marker);
      return qhl;
    }
}

Here is the call graph for this function:

Here is the caller graph for this function:

mp_limb_t mpn_divrem_1 ( mp_ptr  qp,
mp_size_t  qxn,
mp_srcptr  np,
mp_size_t  nn,
mp_limb_t  d 
)

Definition at line 3563 of file gmp.c.

{
  /* Divide {np, nn} by the single limb d, storing nn integer quotient
     limbs and qxn fraction quotient limbs at qp (fraction limbs lowest),
     and return the remainder.  */
  mp_limb_t rlimb;
  mp_size_t i;

  /* Develop integer part of quotient.  */
  rlimb = __gmpn_divmod_1_internal (qp + qxn, np, nn, d);

  /* Develop fraction part of quotient.  This is not as fast as it should;
     the preinvert stuff from __gmpn_divmod_1_internal ought to be used here
     too.  */
  if (UDIV_NEEDS_NORMALIZATION)
    {
      int normalization_steps;

      /* udiv_qrnnd requires a normalized (high-bit-set) divisor on some
        CPUs; shift d up and compensate the remainder afterwards.  */
      count_leading_zeros (normalization_steps, d);
      if (normalization_steps != 0)
       {
         d <<= normalization_steps;
         rlimb <<= normalization_steps;

         for (i = qxn - 1; i >= 0; i--)
           udiv_qrnnd (qp[i], rlimb, rlimb, 0, d);

         return rlimb >> normalization_steps;
       }
      else
       /* fall out */
       ;
    }

  /* Each fraction limb comes from dividing (r, 0) by d.  */
  for (i = qxn - 1; i >= 0; i--)
    udiv_qrnnd (qp[i], rlimb, rlimb, 0, d);

  return rlimb;
}

Here is the call graph for this function:

Here is the caller graph for this function:

mp_limb_t mpn_divrem_2 ( mp_ptr  qp,
mp_size_t  qxn,
mp_ptr  np,
mp_size_t  nsize,
mp_srcptr  dp 
)

Definition at line 3631 of file gmp.c.

{
  /* Divide {np, nsize} by the normalized 2-limb divisor {dp, 2}, writing
     nsize-2+qxn quotient limbs at qp (qxn fraction limbs lowest) and the
     2-limb remainder back into np.  Returns the most significant quotient
     limb (0 or 1).  */
  mp_limb_t most_significant_q_limb = 0;
  mp_size_t i;
  mp_limb_t n1, n0, n2;
  mp_limb_t d1, d0;
  mp_limb_t d1inv = 0;
  int have_preinv;

  np += nsize - 2;
  d1 = dp[1];
  d0 = dp[0];
  n1 = np[1];
  n0 = np[0];

  /* If N's top two limbs are >= D, the leading quotient limb is 1.  */
  if (n1 >= d1 && (n1 > d1 || n0 >= d0))
    {
      sub_ddmmss (n1, n0, n1, n0, d1, d0);
      most_significant_q_limb = 1;
    }

  /* If multiplication is much faster than division, preinvert the most 
     significant divisor limb before entering the loop.  */
  if (UDIV_TIME > 2 * UMUL_TIME + 6)
    {
      have_preinv = 0;
      if ((UDIV_TIME - (2 * UMUL_TIME + 6)) * (nsize - 2) > UDIV_TIME)
       {
         invert_limb (d1inv, d1);
         have_preinv = 1;
       }
    }

  /* One quotient limb per iteration; indexes i < qxn develop fraction
     limbs from implicit zero dividend limbs.  */
  for (i = qxn + nsize - 2 - 1; i >= 0; i--)
    {
      mp_limb_t q;
      mp_limb_t r;

      if (i >= qxn)
       np--;
      else
       np[0] = 0;

      if (n1 == d1)
       {
         /* Q should be either 111..111 or 111..110.  Need special treatment
            of this rare case as normal division would give overflow.  */
         q = ~(mp_limb_t) 0;

         r = n0 + d1;
         if (r < d1) /* Carry in the addition? */
           {
             add_ssaaaa (n1, n0, r - d0, np[0], 0, d0);
             qp[i] = q;
             continue;
           }
         n1 = d0 - (d0 != 0);
         n0 = -d0;
       }
      else
       {
         /* Estimate q from the top limbs, then compute q*d0 for the
            correction test below.  */
         if (UDIV_TIME > 2 * UMUL_TIME + 6 && have_preinv)
           udiv_qrnnd_preinv (q, r, n1, n0, d1, d1inv);
         else
           udiv_qrnnd (q, r, n1, n0, d1);
         umul_ppmm (n1, n0, d0, q);
       }

      n2 = np[0];

    q_test:
      if (n1 > r || (n1 == r && n0 > n2))
       {
         /* The estimated Q was too large.  */
         q--;

         sub_ddmmss (n1, n0, n1, n0, 0, d0);
         r += d1;
         if (r >= d1)       /* If not carry, test Q again.  */
           goto q_test;
       }

      qp[i] = q;
      sub_ddmmss (n1, n0, r, n2, n1, n0);
    }
  /* Store the final 2-limb remainder.  */
  np[1] = n1;
  np[0] = n0;

  return most_significant_q_limb;
}

Here is the caller graph for this function:

mp_size_t mpn_gcd ( mp_ptr  gp,
mp_ptr  up,
mp_size_t  usize,
mp_ptr  vp,
mp_size_t  vsize 
)

Definition at line 4730 of file gmp.c.

{
  /* Compute gcd({up, usize}, {vp, vsize}) into {gp, vsize'} and return
     the result size.  Preconditions (see ASSERTs): usize >= vsize >= 1,
     V odd, and U not smaller than V bit-wise.  For large V an accelerated
     phase (bmod and kary reductions, cf. Weber's accelerated GCD) shrinks
     the operands first; a binary GCD phase finishes, running twice after
     the accelerated phase to fold ORIG_V back in.  Both up and vp are
     clobbered.  */
  mp_ptr orig_vp = vp;
  mp_size_t orig_vsize = vsize;
  int binary_gcd_ctr;              /* Number of times binary gcd will execute.  */
  TMP_DECL (marker);

  ASSERT (usize >= 1);
  ASSERT (vsize >= 1);
  ASSERT (usize >= vsize);
  ASSERT (vp[0] & 1);
  ASSERT (up[usize - 1] != 0);
  ASSERT (vp[vsize - 1] != 0);
#if WANT_ASSERT
  if (usize == vsize)
    {
      int  uzeros, vzeros;
      count_leading_zeros (uzeros, up[usize - 1]);
      count_leading_zeros (vzeros, vp[vsize - 1]);
      ASSERT (uzeros <= vzeros);
    }
#endif
  ASSERT (! MPN_OVERLAP_P (up, usize, vp, vsize));
  ASSERT (MPN_SAME_OR_SEPARATE2_P (gp, vsize, up, usize));
  ASSERT (MPN_SAME_OR_SEPARATE2_P (gp, vsize, vp, vsize));

  TMP_MARK (marker);

  /* Use accelerated algorithm if vsize is over GCD_ACCEL_THRESHOLD.
     Two EXTRA limbs for U and V are required for kary reduction.  */
  if (vsize >= GCD_ACCEL_THRESHOLD)
    {
      unsigned long int vbitsize, d;
      mp_ptr orig_up = up;
      mp_size_t orig_usize = usize;
      mp_ptr anchor_up = (mp_ptr) TMP_ALLOC ((usize + 2) * BYTES_PER_MP_LIMB);

      MPN_COPY (anchor_up, orig_up, usize);
      up = anchor_up;

      /* d = bitsize(U) - bitsize(V) + 1, the number of low bits to clear
        with the first bmod reduction.  */
      count_leading_zeros (d, up[usize - 1]);
      d -= GMP_NAIL_BITS;
      d = usize * GMP_NUMB_BITS - d;
      count_leading_zeros (vbitsize, vp[vsize - 1]);
      vbitsize -= GMP_NAIL_BITS;
      vbitsize = vsize * GMP_NUMB_BITS - vbitsize;
      ASSERT (d >= vbitsize);
      d = d - vbitsize + 1;

      /* Use bmod reduction to quickly discover whether V divides U.  */
      up[usize++] = 0;                           /* Insert leading zero.  */
      mpn_bdivmod (up, up, usize, vp, vsize, d);

      /* Now skip U/V mod 2^d and any low zero limbs.  */
      d /= GMP_NUMB_BITS, up += d, usize -= d;
      while (usize != 0 && up[0] == 0)
       up++, usize--;

      if (usize == 0)                            /* GCD == ORIG_V.  */
       goto done;

      vp = (mp_ptr) TMP_ALLOC ((vsize + 2) * BYTES_PER_MP_LIMB);
      MPN_COPY (vp, orig_vp, vsize);

      do                                  /* Main loop.  */
       {
         /* mpn_com_n can't be used here because anchor_up and up may
            partially overlap */
         if ((up[usize - 1] & GMP_NUMB_HIGHBIT) != 0)  /* U < 0; take twos' compl. */
           {
             mp_size_t i;
             anchor_up[0] = -up[0] & GMP_NUMB_MASK;
             for (i = 1; i < usize; i++)
              anchor_up[i] = (~up[i] & GMP_NUMB_MASK);
             up = anchor_up;
           }

         MPN_NORMALIZE_NOT_ZERO (up, usize);

         if ((up[0] & 1) == 0)                   /* Result even; remove twos. */
           {
             unsigned int r;
             count_trailing_zeros (r, up[0]);
             mpn_rshift (anchor_up, up, usize, r);
             usize -= (anchor_up[usize - 1] == 0);
           }
         else if (anchor_up != up)
           MPN_COPY_INCR (anchor_up, up, usize);

         MPN_PTR_SWAP (anchor_up,usize, vp,vsize);
         up = anchor_up;

         if (vsize <= 2)           /* Kary can't handle < 2 limbs and  */
           break;                  /* isn't efficient for == 2 limbs.  */

         d = vbitsize;
         count_leading_zeros (vbitsize, vp[vsize - 1]);
         vbitsize -= GMP_NAIL_BITS;
         vbitsize = vsize * GMP_NUMB_BITS - vbitsize;
         d = d - vbitsize + 1;

         if (d > BMOD_THRESHOLD)   /* Bmod reduction.  */
           {
             up[usize++] = 0;
             mpn_bdivmod (up, up, usize, vp, vsize, d);
             d /= GMP_NUMB_BITS, up += d, usize -= d;
           }
         else                      /* Kary reduction.  */
           {
             mp_limb_t bp[2], cp[2];

             /* C <-- V/U mod 2^(2*GMP_NUMB_BITS).  */
             {
              mp_limb_t u_inv, hi, lo;
              modlimb_invert (u_inv, up[0]);
              cp[0] = (vp[0] * u_inv) & GMP_NUMB_MASK;
              umul_ppmm (hi, lo, cp[0], up[0] << GMP_NAIL_BITS);
              lo >>= GMP_NAIL_BITS;
              cp[1] = (vp[1] - hi - cp[0] * up[1]) * u_inv & GMP_NUMB_MASK;
             }

             /* U <-- find_a (C)  *  U.  */
             up[usize] = mpn_mul_1 (up, up, usize, find_a (cp));
             usize++;

             /* B <-- A/C == U/V mod 2^(GMP_NUMB_BITS + 1).
                bp[0] <-- U/V mod 2^GMP_NUMB_BITS and
                bp[1] <-- ( (U - bp[0] * V)/2^GMP_NUMB_BITS ) / V mod 2

                Like V/U above, but simplified because only the low bit of
                bp[1] is wanted. */
             {
              mp_limb_t  v_inv, hi, lo;
              modlimb_invert (v_inv, vp[0]);
              bp[0] = (up[0] * v_inv) & GMP_NUMB_MASK;
              umul_ppmm (hi, lo, bp[0], vp[0] << GMP_NAIL_BITS);
              lo >>= GMP_NAIL_BITS;
              bp[1] = (up[1] + hi + (bp[0] & vp[1])) & 1;
             }

             up[usize++] = 0;
             if (bp[1] != 0)       /* B < 0: U <-- U + (-B)  * V.  */
              {
                 mp_limb_t c = mpn_addmul_1 (up, vp, vsize, -bp[0] & GMP_NUMB_MASK);
                 mpn_add_1 (up + vsize, up + vsize, usize - vsize, c);
              }
             else           /* B >= 0:  U <-- U - B * V.  */
              {
                mp_limb_t b = mpn_submul_1 (up, vp, vsize, bp[0]);
                mpn_sub_1 (up + vsize, up + vsize, usize - vsize, b);
              }

             up += 2, usize -= 2;  /* At least two low limbs are zero.  */
           }

         /* Must remove low zero limbs before complementing.  */
         while (usize != 0 && up[0] == 0)
           up++, usize--;
       }
      while (usize != 0);

      /* Compute GCD (ORIG_V, GCD (ORIG_U, V)).  Binary will execute twice.  */
      up = orig_up, usize = orig_usize;
      binary_gcd_ctr = 2;
    }
  else
    binary_gcd_ctr = 1;

  /* Finish up with the binary algorithm.  Executes once or twice.  */
  for ( ; binary_gcd_ctr--; up = orig_vp, usize = orig_vsize)
    {
      if (usize > 2)        /* First make U close to V in size.  */
       {
         unsigned long int vbitsize, d;
         count_leading_zeros (d, up[usize - 1]);
         d -= GMP_NAIL_BITS;
         d = usize * GMP_NUMB_BITS - d;
         count_leading_zeros (vbitsize, vp[vsize - 1]);
         vbitsize -= GMP_NAIL_BITS;
         vbitsize = vsize * GMP_NUMB_BITS - vbitsize;
         d = d - vbitsize - 1;
         if (d != -(unsigned long int)1 && d > 2)
           {
             mpn_bdivmod (up, up, usize, vp, vsize, d);  /* Result > 0.  */
             d /= (unsigned long int)GMP_NUMB_BITS, up += d, usize -= d;
           }
       }

      /* Start binary GCD.  */
      do
       {
         mp_size_t zeros;

         /* Make sure U is odd.  */
         MPN_NORMALIZE (up, usize);
         while (up[0] == 0)
           up += 1, usize -= 1;
         if ((up[0] & 1) == 0)
           {
             unsigned int r;
             count_trailing_zeros (r, up[0]);
             mpn_rshift (up, up, usize, r);
             usize -= (up[usize - 1] == 0);
           }

         /* Keep usize >= vsize.  */
         if (usize < vsize)
           MPN_PTR_SWAP (up, usize, vp, vsize);

         if (usize <= 2)                         /* Double precision. */
           {
             if (vsize == 1)
              vp[0] = mpn_gcd_1 (up, usize, vp[0]);
             else
              vsize = gcd_2 (vp, up);
             break;                              /* Binary GCD done.  */
           }

         /* Count number of low zero limbs of U - V.  */
         for (zeros = 0; up[zeros] == vp[zeros] && ++zeros != vsize; )
           continue;

         /* If U < V, swap U and V; in any case, subtract V from U.  */
         if (zeros == vsize)                            /* Subtract done.  */
           up += zeros, usize -= zeros;
         else if (usize == vsize)
           {
             mp_size_t size = vsize;
             do
              size--;
             while (up[size] == vp[size]);
             if (up[size] < vp[size])                   /* usize == vsize.  */
              MP_PTR_SWAP (up, vp);
             up += zeros, usize = size + 1 - zeros;
             mpn_sub_n (up, up, vp + zeros, usize);
           }
         else
           {
             mp_size_t size = vsize - zeros;
             up += zeros, usize -= zeros;
             if (mpn_sub_n (up, up, vp + zeros, size))
              {
                while (up[size] == 0)                   /* Propagate borrow. */
                  up[size++] = -(mp_limb_t)1;
                up[size] -= 1;
              }
           }
       }
      while (usize);                             /* End binary GCD.  */
    }

done:
  if (vp != gp)
    MPN_COPY_INCR (gp, vp, vsize);
  TMP_FREE (marker);
  return vsize;
}

Here is the call graph for this function:

Here is the caller graph for this function:

mp_limb_t mpn_gcd_1 ( mp_srcptr  up,
mp_size_t  size,
mp_limb_t  vlimb 
)

Definition at line 4486 of file gmp.c.

{
  /* gcd of {up, size} and the single non-zero limb vlimb.  Common factors
     of two are tracked in zero_bits and restored at the end.  Multi-limb
     U is first reduced mod vlimb; a single-limb pair is then handled with
     a division shortcut plus binary (subtract-and-shift) GCD.  Note the
     strip_u_maybe label jumps INTO the do-while below to strip low zero
     bits of a freshly reduced U -- do not restructure casually.  */
  mp_limb_t      ulimb;
  unsigned long  zero_bits, u_low_zero_bits;

  ASSERT (size >= 1);
  ASSERT (vlimb != 0);
  /* ASSERT_MPN_NONZERO_P (up, size); */

  ulimb = up[0];

  /* Need vlimb odd for modexact, want it odd to get common zeros. */
  count_trailing_zeros (zero_bits, vlimb);
  vlimb >>= zero_bits;

  if (size > 1)
    {
      /* Must get common zeros before the mod reduction.  If ulimb==0 then
        vlimb already gives the common zeros.  */
      if (ulimb != 0)
       {
         count_trailing_zeros (u_low_zero_bits, ulimb);
         zero_bits = MIN (zero_bits, u_low_zero_bits);
       }

      ulimb = MPN_MOD_OR_MODEXACT_1_ODD (up, size, vlimb);
      if (ulimb == 0)
       goto done;

      goto strip_u_maybe;
    }

  /* size==1, so up[0]!=0 */
  count_trailing_zeros (u_low_zero_bits, ulimb);
  ulimb >>= u_low_zero_bits;
  zero_bits = MIN (zero_bits, u_low_zero_bits);

  /* make u bigger */
  if (vlimb > ulimb)
    MP_LIMB_T_SWAP (ulimb, vlimb);

  /* if u is much bigger than v, reduce using a division rather than
     chipping away at it bit-by-bit */
  if ((ulimb >> 16) > vlimb)
    {
      ulimb %= vlimb;
      if (ulimb == 0)
       goto done;
      goto strip_u_maybe;
    }

  /* Binary GCD: both operands odd at the top of each iteration.  */
  while (ulimb != vlimb)
    {
      ASSERT (ulimb & 1);
      ASSERT (vlimb & 1);

      if (ulimb > vlimb)
       {
         ulimb -= vlimb;
         do
           {
             ulimb >>= 1;
             ASSERT (ulimb != 0);
           strip_u_maybe:
             ;
           }
         while ((ulimb & 1) == 0);
       }
      else /*  vlimb > ulimb.  */
       {
         vlimb -= ulimb;
         do
           {
             vlimb >>= 1;
             ASSERT (vlimb != 0);
           }
         while ((vlimb & 1) == 0);
       }
    }

 done:
  /* Restore the common factors of two stripped at the start.  */
  return vlimb << zero_bits;
}

Here is the caller graph for this function:

size_t mpn_get_str ( unsigned char *  str,
int  base,
mp_ptr  up,
mp_size_t  un 
)

Definition at line 2002 of file gmp.c.

{
  /* Convert {up, un} to a digit string (values 0..base-1, NOT ASCII) at
     str and return the number of digits produced.  Power-of-2 bases are
     extracted with shifts and masks; other bases use a precomputed table
     of powers of big_base and divide-and-conquer via mpn_dc_get_str.
     up is clobbered in the general case.  */
  mp_ptr powtab_mem, powtab_mem_ptr;
  mp_limb_t big_base;
  size_t digits_in_base;
  powers_t powtab[30];
  int pi;
  mp_size_t n;
  mp_ptr p, t;
  size_t out_len;

  /* Special case zero, as the code below doesn't handle it.  */
  if (un == 0)
    {
      str[0] = 0;
      return 1;
    }

  if (POW2_P (base))
    {
      /* The base is a power of 2.  Convert from most significant end.  */
      mp_limb_t n1, n0;
      int bits_per_digit = __mp_bases[base].big_base;
      int cnt;
      int bit_pos;
      mp_size_t i;
      unsigned char *s = str;

      n1 = up[un - 1];
      count_leading_zeros (cnt, n1);

      /* BIT_POS should be R when input ends in least significant nibble,
        R + bits_per_digit * n when input ends in nth least significant
        nibble. */

      {
       unsigned long bits;

       bits = GMP_NUMB_BITS * un - cnt + GMP_NAIL_BITS;
       cnt = bits % bits_per_digit;
       if (cnt != 0)
         bits += bits_per_digit - cnt;
       bit_pos = bits - (un - 1) * GMP_NUMB_BITS;
      }

      /* Fast loop for bit output.  */
      i = un - 1;
      for (;;)
       {
         bit_pos -= bits_per_digit;
         while (bit_pos >= 0)
           {
             *s++ = (n1 >> bit_pos) & ((1 << bits_per_digit) - 1);
             bit_pos -= bits_per_digit;
           }
         i--;
         if (i < 0)
           break;
         /* Digit straddling a limb boundary: combine the bits left in n1
            with the top bits of the next limb.  */
         n0 = (n1 << -bit_pos) & ((1 << bits_per_digit) - 1);
         n1 = up[i];
         bit_pos += GMP_NUMB_BITS;
         *s++ = n0 | (n1 >> bit_pos);

         if (!(i & 0xFF)) SCHEME_BIGNUM_USE_FUEL(1);
       }

      *s = 0;

      return s - str;
    }

  /* General case.  The base is not a power of 2.  */

  if (un < GET_STR_PRECOMPUTE_THRESHOLD)
    {
      struct powers ptab[1];
      ptab[0].base = base;
      return mpn_sb_get_str (str, (size_t) 0, up, un, ptab) - str;
    }

  /* Allocate one large block for the powers of big_base.  With the current
     scheme, we need to allocate twice as much as would be possible if a
     minimal set of powers were generated.  */
#define ALLOC_SIZE (2 * un + 30)
 {
   TMP_DECL (marker);
   TMP_MARK (marker);

   powtab_mem = __GMP_ALLOCATE_FUNC_LIMBS (ALLOC_SIZE);
   powtab_mem_ptr = powtab_mem;

  /* Compute a table of powers: big_base^1, big_base^2, big_base^4, ...,
     big_base^(2^k), for k such that the biggest power is between U and
     sqrt(U).  */

  big_base = __mp_bases[base].big_base;
  digits_in_base = __mp_bases[base].chars_per_limb;

  powtab[0].base = base; /* FIXME: hack for getting base to mpn_sb_get_str */
  powtab[1].p = &big_base;
  powtab[1].n = 1;
  powtab[1].digits_in_base = digits_in_base;
  powtab[1].base = base;
  powtab[2].p = &big_base;
  powtab[2].n = 1;
  powtab[2].digits_in_base = digits_in_base;
  powtab[2].base = base;
  n = 1;
  pi = 2;
  p = &big_base;
  for (;;)
    {
      /* Square the previous power; trim a possible high zero limb.  */
      ++pi;
      t = powtab_mem_ptr;
      powtab_mem_ptr += 2 * n;
      mpn_sqr_n (t, p, n);
      n *= 2; n -= t[n - 1] == 0;
      digits_in_base *= 2;
      p = t;
      powtab[pi].p = p;
      powtab[pi].n = n;
      powtab[pi].digits_in_base = digits_in_base;
      powtab[pi].base = base;

      if (2 * n > un)
       break;
    }
  ASSERT_ALWAYS (ALLOC_SIZE > powtab_mem_ptr - powtab_mem);

  /* Using our precomputed powers, now in powtab[], convert our number.  */
  out_len = mpn_dc_get_str (str, 0, up, un, powtab + pi) - str;

  __GMP_FREE_FUNC_LIMBS (powtab_mem, ALLOC_SIZE);
  
  TMP_FREE(marker);
 }

  return out_len;
}

Here is the call graph for this function:

Here is the caller graph for this function:

void mpn_kara_mul_n ( mp_ptr  p,
mp_srcptr  a,
mp_srcptr  b,
mp_size_t  n,
mp_ptr  ws 
)

Definition at line 240 of file gmp.c.

{
  /* Karatsuba multiplication: p[0..2n-1] = {a, n} * {b, n}, using
     {ws, ...} as scratch.  Splits each operand in halves, forms the
     absolute differences |a_hi - a_lo| and |b_hi - b_lo| (tracking the
     combined sign in `sign`), computes three half-size products, and
     interpolates.  Odd and even n take separate paths because the halves
     differ in length by one limb when n is odd.  */
  mp_limb_t i, sign, w, w0, w1;
  mp_size_t n2;
  mp_srcptr x, y;

  n2 = n >> 1;
  ASSERT (n2 > 0);

  SCHEME_BIGNUM_USE_FUEL(n);

  if (n & 1)
    {
      /* Odd length. */
      mp_size_t n1, n3, nm1;

      n3 = n - n2;

      /* |a_hi - a_lo| into p[0..n2], scanning limbs to decide the sign
        when the extra high limb is zero.  */
      sign = 0;
      w = a[n2];
      if (w != 0)
       w -= mpn_sub_n (p, a, a + n3, n2);
      else
       {
         i = n2;
         do
           {
             --i;
             w0 = a[i];
             w1 = a[n3+i];
           }
         while (w0 == w1 && i != 0);
         if (w0 < w1)
           {
             x = a + n3;
             y = a;
             sign = 1;
           }
         else
           {
             x = a;
             y = a + n3;
           }
         mpn_sub_n (p, x, y, n2);
       }
      p[n2] = w;

      /* |b_hi - b_lo| into p[n3..], same scheme.  */
      w = b[n2];
      if (w != 0)
       w -= mpn_sub_n (p + n3, b, b + n3, n2);
      else
       {
         i = n2;
         do 
           {
             --i;
             w0 = b[i]; 
             w1 = b[n3+i];
           }
         while (w0 == w1 && i != 0);
         if (w0 < w1)
           {
             x = b + n3;
             y = b;
             sign ^= 1;
           }
         else
           {
             x = b;
             y = b + n3;
           }
         mpn_sub_n (p + n3, x, y, n2);
       }
      p[n] = w;

      /* Three pointwise products: the middle product into ws, the low
        and high products into p, recursing above the basecase cutoff.  */
      n1 = n + 1;
      if (n2 < KARATSUBA_MUL_THRESHOLD)
       {
         if (n3 < KARATSUBA_MUL_THRESHOLD)
           {
             mpn_mul_basecase (ws, p, n3, p + n3, n3);
             mpn_mul_basecase (p, a, n3, b, n3);
           }
         else
           {
             mpn_kara_mul_n (ws, p, p + n3, n3, ws + n1);
             mpn_kara_mul_n (p, a, b, n3, ws + n1);
           }
         mpn_mul_basecase (p + n1, a + n3, n2, b + n3, n2);
       }
      else
       {
         mpn_kara_mul_n (ws, p, p + n3, n3, ws + n1);
         mpn_kara_mul_n (p, a, b, n3, ws + n1);
         mpn_kara_mul_n (p + n1, a + n3, b + n3, n2, ws + n1);
       }

      /* Interpolate: combine the middle product with the outer two,
        adding or subtracting according to the recorded sign.  */
      if (sign)
       mpn_add_n (ws, p, ws, n1);
      else
       mpn_sub_n (ws, p, ws, n1);

      nm1 = n - 1;
      if (mpn_add_n (ws, p + n1, ws, nm1))
       {
         mp_limb_t x = ws[nm1] + 1;
         ws[nm1] = x;
         if (x == 0)
           ++ws[n];
       }
      if (mpn_add_n (p + n3, p + n3, ws, n1))
       {
         mp_limb_t x;
         i = n1 + n3;
         do
           {
             x = p[i] + 1;
             p[i] = x;
             ++i;
           } while (x == 0);
       }
    }
  else
    {
      /* Even length. */
      mp_limb_t t;

      /* |a_hi - a_lo| into p[0..n2-1].  */
      i = n2;
      do
       {
         --i;
         w0 = a[i];
         w1 = a[n2+i];
       }
      while (w0 == w1 && i != 0);
      sign = 0;
      if (w0 < w1)
       {
         x = a + n2;
         y = a;
         sign = 1;
       }
      else
       {
         x = a;
         y = a + n2;
       }
      mpn_sub_n (p, x, y, n2);

      /* |b_hi - b_lo| into p[n2..n-1].  */
      i = n2;
      do 
       {
         --i;
         w0 = b[i];
         w1 = b[n2+i];
       }
      while (w0 == w1 && i != 0);
      if (w0 < w1)
       {
         x = b + n2;
         y = b;
         sign ^= 1;
       }
      else
       {
         x = b;
         y = b + n2;
       }
      mpn_sub_n (p + n2, x, y, n2);

      /* Pointwise products. */
      if (n2 < KARATSUBA_MUL_THRESHOLD)
       {
         mpn_mul_basecase (ws, p, n2, p + n2, n2);
         mpn_mul_basecase (p, a, n2, b, n2);
         mpn_mul_basecase (p + n, a + n2, n2, b + n2, n2);
       }
      else
       {
         mpn_kara_mul_n (ws, p, p + n2, n2, ws + n);
         mpn_kara_mul_n (p, a, b, n2, ws + n);
         mpn_kara_mul_n (p + n, a + n2, b + n2, n2, ws + n);
       }

      /* Interpolate. */
      if (sign)
       w = mpn_add_n (ws, p, ws, n);
      else
       w = -mpn_sub_n (ws, p, ws, n);
      w += mpn_add_n (ws, p + n, ws, n);
      w += mpn_add_n (p + n2, p + n2, ws, n);
      /* TO DO: could put "if (w) { ... }" here.
       * Less work but badly predicted branch.
       * No measurable difference in speed on Alpha.
       */
      i = n + n2;
      t = p[i] + w;
      p[i] = t;
      if (t < w)
       {
         do
           {
             ++i;
             w = p[i] + 1;
             p[i] = w;
           }
         while (w == 0);
       }
    }
}

Here is the call graph for this function:

Here is the caller graph for this function:

void mpn_kara_sqr_n ( mp_ptr  p,
mp_srcptr  a,
mp_size_t  n,
mp_ptr  ws 
)

Definition at line 461 of file gmp.c.

{
  /* Karatsuba squaring: p[0..2n-1] = {a, n}^2, with {ws, ...} as scratch.
     Same structure as mpn_kara_mul_n with b == a: the half-difference is
     computed twice (into p and p + n3/n2) so the three pointwise products
     can use the same layout; `sign` is tracked the same way even though
     for a square the middle product's sign is effectively fixed.  */
  mp_limb_t i, sign, w, w0, w1;
  mp_size_t n2;
  mp_srcptr x, y;

  n2 = n >> 1;
  ASSERT (n2 > 0);

  SCHEME_BIGNUM_USE_FUEL(n);

  if (n & 1)
    {
      /* Odd length. */
      mp_size_t n1, n3, nm1;

      n3 = n - n2;

      /* |a_hi - a_lo| into p[0..n2].  */
      sign = 0;
      w = a[n2];
      if (w != 0)
       w -= mpn_sub_n (p, a, a + n3, n2);
      else
       {
         i = n2;
         do
           {
             --i;
             w0 = a[i];
             w1 = a[n3+i];
           }
         while (w0 == w1 && i != 0);
         if (w0 < w1)
           {
             x = a + n3;
             y = a;
             sign = 1;
           }
         else
           {
             x = a;
             y = a + n3;
           }
         mpn_sub_n (p, x, y, n2);
       }
      p[n2] = w;

      /* Second copy of |a_hi - a_lo| into p[n3..].  */
      w = a[n2];
      if (w != 0)
       w -= mpn_sub_n (p + n3, a, a + n3, n2);
      else
       {
         i = n2;
         do 
           {
             --i;
             w0 = a[i]; 
             w1 = a[n3+i];
           }
         while (w0 == w1 && i != 0);
         if (w0 < w1)
           {
             x = a + n3;
             y = a;
             sign ^= 1;
           }
         else
           {
             x = a;
             y = a + n3;
           }
         mpn_sub_n (p + n3, x, y, n2);
       }
      p[n] = w;

      /* Three pointwise squares, recursing above the basecase cutoff.  */
      n1 = n + 1;
      if (n2 < KARATSUBA_SQR_THRESHOLD)
       {
         if (n3 < KARATSUBA_SQR_THRESHOLD)
           {
             mpn_sqr_basecase (ws, p, n3);
             mpn_sqr_basecase (p, a, n3);
           }
         else
           {
             mpn_kara_sqr_n (ws, p, n3, ws + n1);
             mpn_kara_sqr_n (p, a, n3, ws + n1);
           }
         mpn_sqr_basecase (p + n1, a + n3, n2);
       }
      else
       {
         mpn_kara_sqr_n (ws, p, n3, ws + n1);
         mpn_kara_sqr_n (p, a, n3, ws + n1);
         mpn_kara_sqr_n (p + n1, a + n3, n2, ws + n1);
       }

      /* Interpolate.  */
      if (sign)
       mpn_add_n (ws, p, ws, n1);
      else
       mpn_sub_n (ws, p, ws, n1);

      nm1 = n - 1;
      if (mpn_add_n (ws, p + n1, ws, nm1))
       {
         mp_limb_t x = ws[nm1] + 1;
         ws[nm1] = x;
         if (x == 0)
           ++ws[n];
       }
      if (mpn_add_n (p + n3, p + n3, ws, n1))
       {
         mp_limb_t x;
         i = n1 + n3;
         do
           {
             x = p[i] + 1;
             p[i] = x;
             ++i;
           } while (x == 0);
       }
    }
  else
    {
      /* Even length. */
      mp_limb_t t;

      /* |a_hi - a_lo| into p[0..n2-1].  */
      i = n2;
      do
       {
         --i;
         w0 = a[i];
         w1 = a[n2+i];
       }
      while (w0 == w1 && i != 0);
      sign = 0;
      if (w0 < w1)
       {
         x = a + n2;
         y = a;
         sign = 1;
       }
      else
       {
         x = a;
         y = a + n2;
       }
      mpn_sub_n (p, x, y, n2);

      /* Second copy into p[n2..n-1].  */
      i = n2;
      do 
       {
         --i;
         w0 = a[i];
         w1 = a[n2+i];
       }
      while (w0 == w1 && i != 0);
      if (w0 < w1)
       {
         x = a + n2;
         y = a;
         sign ^= 1;
       }
      else
       {
         x = a;
         y = a + n2;
       }
      mpn_sub_n (p + n2, x, y, n2);

      /* Pointwise products. */
      if (n2 < KARATSUBA_SQR_THRESHOLD)
       {
         mpn_sqr_basecase (ws, p, n2);
         mpn_sqr_basecase (p, a, n2);
         mpn_sqr_basecase (p + n, a + n2, n2);
       }
      else
       {
         mpn_kara_sqr_n (ws, p, n2, ws + n);
         mpn_kara_sqr_n (p, a, n2, ws + n);
         mpn_kara_sqr_n (p + n, a + n2, n2, ws + n);
       }

      /* Interpolate. */
      if (sign)
       w = mpn_add_n (ws, p, ws, n);
      else
       w = -mpn_sub_n (ws, p, ws, n);
      w += mpn_add_n (ws, p + n, ws, n);
      w += mpn_add_n (p + n2, p + n2, ws, n);
      /* TO DO: could put "if (w) { ... }" here.
       * Less work but badly predicted branch.
       * No measurable difference in speed on Alpha.
       */
      i = n + n2;
      t = p[i] + w;
      p[i] = t;
      if (t < w)
       {
         do
           {
             ++i;
             w = p[i] + 1;
             p[i] = w;
           }
         while (w == 0);
       }
    }
}

Here is the call graph for this function:

Here is the caller graph for this function:

mp_limb_t mpn_lshift ( mp_ptr  wp,
mp_srcptr  up,
mp_size_t  usize,
unsigned int  cnt 
)

Definition at line 1611 of file gmp.c.

/* Shift {up, usize} left by cnt bits (1 <= cnt < BITS_PER_MP_LIMB),
   storing the result at {wp, usize}.  Returns the cnt bits shifted out
   of the most significant limb.  The copy runs from the high end down,
   so wp may equal up (or overlap suitably) for in-place shifting.  */
{
  mp_limb_t out_bits, cur, prev;
  unsigned lsh, rsh;
  mp_size_t idx;

#ifdef DEBUG
  if (usize == 0 || cnt == 0)
    abort ();
#endif

  lsh = cnt;
  rsh = BITS_PER_MP_LIMB - lsh;

  /* Bits that fall off the top limb become the return value.  */
  prev = up[usize - 1];
  out_bits = prev >> rsh;

  /* Walk from the most significant limb downward; each result limb
     combines the high bits of one source limb with the low bits of
     the next one up.  */
  for (idx = usize - 1; idx > 0; idx--)
    {
      cur = up[idx - 1];
      wp[idx] = (prev << lsh) | (cur >> rsh);
      prev = cur;
    }
  wp[0] = prev << lsh;

  return out_bits;
}

Here is the caller graph for this function:

Definition at line 4359 of file gmp.c.

/* Return {up, un} mod d for a single-limb divisor d.
   Two main paths: if d already has its high bit set no normalization is
   needed; otherwise d is shifted left by `norm` bits and the remainder
   shifted back at the end.  Within each path, operands above the
   MOD_1_*_THRESHOLD sizes use a precomputed inverse of d
   (udiv_qrnnd_preinv) instead of a hardware division per limb.  */
{
  mp_size_t  i;
  mp_limb_t  n1, n0, r;
  mp_limb_t  dummy;  /* quotient limb from udiv_qrnnd; discarded */

  ASSERT (un >= 0);
  ASSERT (d != 0);

  /* Botch: Should this be handled at all?  Rely on callers?
     But note un==0 is currently required by mpz/fdiv_r_ui.c and possibly
     other places.  */
  if (un == 0)
    return 0;

  d <<= GMP_NAIL_BITS;

  if ((d & GMP_LIMB_HIGHBIT) != 0)
    {
      /* High limb is initial remainder, possibly with one subtract of
        d to get r<d.  */
      r = up[un - 1] << GMP_NAIL_BITS;
      if (r >= d)
       r -= d;
      r >>= GMP_NAIL_BITS;
      un--;
      if (un == 0)
       return r;

      if (BELOW_THRESHOLD (un, MOD_1_NORM_THRESHOLD))
       {
       plain:
         /* One hardware division per limb, most significant first.
            Also reached by goto from the unnormalized path when
            UDIV_NEEDS_NORMALIZATION is false.  */
         for (i = un - 1; i >= 0; i--)
           {
             n0 = up[i] << GMP_NAIL_BITS;
             udiv_qrnnd (dummy, r, r, n0, d);
             r >>= GMP_NAIL_BITS;
           }
         return r;
       }
      else
       {
         /* Amortize a single invert_limb over the whole loop; each
            division becomes multiplications via udiv_qrnnd_preinv.  */
         mp_limb_t  inv;
         invert_limb (inv, d);
         for (i = un - 1; i >= 0; i--)
           {
             n0 = up[i] << GMP_NAIL_BITS;
             udiv_qrnnd_preinv (dummy, r, r, n0, d, inv);
             r >>= GMP_NAIL_BITS;
           }
         return r;
       }
    }
  else
    {
      int norm;

      /* Skip a division if high < divisor.  Having the test here before
        normalizing will still skip as often as possible.  */
      r = up[un - 1] << GMP_NAIL_BITS;
      if (r < d)
       {
         r >>= GMP_NAIL_BITS;
         un--;
         if (un == 0)
           return r;
       }
      else
       r = 0;

      /* If udiv_qrnnd doesn't need a normalized divisor, can use the simple
        code above. */
      if (! UDIV_NEEDS_NORMALIZATION
         && BELOW_THRESHOLD (un, MOD_1_UNNORM_THRESHOLD))
       goto plain;

      /* Normalize: shift d so its high bit is set, and fold the same
         shift into the running remainder.  */
      count_leading_zeros (norm, d);
      d <<= norm;

      n1 = up[un - 1] << GMP_NAIL_BITS;
      r = (r << norm) | (n1 >> (GMP_LIMB_BITS - norm));

      if (UDIV_NEEDS_NORMALIZATION
         && BELOW_THRESHOLD (un, MOD_1_UNNORM_THRESHOLD))
       {
         /* Shifted schoolbook loop: each step divides the pair
            (r, next shifted limb) by the normalized d.  */
         for (i = un - 2; i >= 0; i--)
           {
             n0 = up[i] << GMP_NAIL_BITS;
             udiv_qrnnd (dummy, r, r,
                       (n1 << norm) | (n0 >> (GMP_NUMB_BITS - norm)),
                       d);
             r >>= GMP_NAIL_BITS;
             n1 = n0;
           }
         udiv_qrnnd (dummy, r, r, n1 << norm, d);
         r >>= GMP_NAIL_BITS;
         /* Undo the normalization shift before returning.  */
         return r >> norm;
       }
      else
       {
         mp_limb_t inv;
         invert_limb (inv, d);

         for (i = un - 2; i >= 0; i--)
           {
             n0 = up[i] << GMP_NAIL_BITS;
             udiv_qrnnd_preinv (dummy, r, r,
                             (n1 << norm) | (n0 >> (GMP_NUMB_BITS - norm)),
                             d, inv);
             r >>= GMP_NAIL_BITS;
             n1 = n0;
           }
         udiv_qrnnd_preinv (dummy, r, r, n1 << norm, d, inv);
         r >>= GMP_NAIL_BITS;
         /* Undo the normalization shift before returning.  */
         return r >> norm;
       }
    }
}
mp_limb_t mpn_mul ( mp_ptr  prodp,
mp_srcptr  up,
mp_size_t  un,
mp_srcptr  vp,
mp_size_t  vn 
)

Definition at line 4150 of file gmp.c.

/* Multiply {up, un} by {vp, vn} (un >= vn required by convention in GMP's
   mpn layer), writing the un+vn limb product to {prodp, un+vn}.  Returns
   the most significant limb of the product.  Squaring is special-cased;
   small vn goes to the basecase; otherwise the product is built from
   vn-by-vn sub-multiplications of u, accumulated into prodp.  */
{
  mp_size_t l;
  mp_limb_t c;

  /* Identical operands: use the (faster) squaring path.  */
  if (up == vp && un == vn)
    {
      mpn_sqr_n (prodp, up, un);
      return prodp[2 * un - 1];
    }

  if (vn < KARATSUBA_MUL_THRESHOLD)
    { /* long multiplication */
      mpn_mul_basecase (prodp, up, un, vp, vn);
      return prodp[un + vn - 1];
    }

  /* First vn x vn chunk written directly; remaining chunks of u are
     multiplied by v and added in at increasing offsets.  */
  mpn_mul_n (prodp, up, vp, vn);
  if (un != vn)
    { mp_limb_t t;
      mp_ptr ws;
      TMP_DECL (marker);
      TMP_MARK (marker);

      prodp += vn;
      l = vn;
      up += vn;
      un -= vn;

      if (un < vn) 
       {
         /* Swap u's and v's. */
          MPN_SRCPTR_SWAP (up,un, vp,vn);
       }

      ws = (mp_ptr) TMP_ALLOC (((vn >= KARATSUBA_MUL_THRESHOLD ? vn : un) + vn)
                            * BYTES_PER_MP_LIMB);

      /* t carries the running overflow out of the accumulated region.  */
      t = 0;
      while (vn >= KARATSUBA_MUL_THRESHOLD)
       {
         mpn_mul_n (ws, up, vp, vn);
         if (l <= 2*vn) 
           {
             t += mpn_add_n (prodp, prodp, ws, l);
             if (l != 2*vn)
              {
                t = mpn_add_1 (prodp + l, ws + l, 2*vn - l, t);
                l = 2*vn;
              }
           }
         else
           {
             c = mpn_add_n (prodp, prodp, ws, 2*vn);
             t += mpn_add_1 (prodp + 2*vn, prodp + 2*vn, l - 2*vn, c);
           }
         prodp += vn;
         l -= vn;
         up += vn;
         un -= vn;
         if (un < vn) 
           {
             /* Swap u's and v's. */
              MPN_SRCPTR_SWAP (up,un, vp,vn);
           }
       }

      /* Final ragged chunk, if any, via the basecase multiply.  */
      if (vn)
       {
         mpn_mul_basecase (ws, up, un, vp, vn);
         if (l <= un + vn) 
           {
             t += mpn_add_n (prodp, prodp, ws, l);
             if (l != un + vn)
              t = mpn_add_1 (prodp + l, ws + l, un + vn - l, t);
           } 
         else
           {
             c = mpn_add_n (prodp, prodp, ws, un + vn);
             t += mpn_add_1 (prodp + un + vn, prodp + un + vn, l - un - vn, c);
           }
       }

    TMP_FREE (marker);
  }
  return prodp[un + vn - 1];
}

Here is the call graph for this function:

Here is the caller graph for this function:

mp_limb_t mpn_mul_1 ( mp_ptr  res_ptr,
mp_srcptr  s1_ptr,
mp_size_t  s1_size,
mp_limb_t  s2_limb 
)

Definition at line 3923 of file gmp.c.

/* Multiply {s1_ptr, s1_size} by the single limb s2_limb, storing the low
   limb of each product (plus propagated carry) at {res_ptr, s1_size}.
   Returns the final carry limb.  s1_size must be >= 1.  */
{
  mp_limb_t carry, hi, lo;
  mp_size_t idx;

  SCHEME_BIGNUM_USE_FUEL(s1_size);

  carry = 0;
  for (idx = 0; idx < s1_size; idx++)
    {
      /* Full double-limb product of one source limb.  */
      umul_ppmm (hi, lo, s1_ptr[idx], s2_limb);

      lo += carry;
      /* If the low-word addition wrapped, propagate one into the carry.  */
      carry = (lo < carry) + hi;

      res_ptr[idx] = lo;
    }

  return carry;
}

Here is the caller graph for this function:

void mpn_mul_basecase ( mp_ptr  prodp,
mp_srcptr  up,
mp_size_t  usize,
mp_srcptr  vp,
mp_size_t  vsize 
)

Definition at line 3743 of file gmp.c.

/* Schoolbook multiplication: {prodp, usize+vsize} = {up, usize} * {vp, vsize}.
   The first row is stored with mpn_mul_1/mpn_mul_2 (no pre-zeroing needed);
   each subsequent row is accumulated with mpn_addmul_1/mpn_addmul_2.
   Which variants are used depends on HAVE_NATIVE_* assembly support.  */
{
  /* We first multiply by the low order one or two limbs, as the result can
     be stored, not added, to PROD.  We also avoid a loop for zeroing this
     way.  */
#if HAVE_NATIVE_mpn_mul_2
  if (vsize >= 2)
    {
      prodp[usize + 1] = mpn_mul_2 (prodp, up, usize, vp[0], vp[1]);
      prodp += 2, vp += 2, vsize -= 2;
    }
  else
    {
      prodp[usize] = mpn_mul_1 (prodp, up, usize, vp[0]);
      return;
    }
#else
  prodp[usize] = mpn_mul_1 (prodp, up, usize, vp[0]);
  prodp += 1, vp += 1, vsize -= 1;
#endif

#if HAVE_NATIVE_mpn_addmul_2
  /* Consume v two limbs at a time, with a single trailing limb if odd.  */
  while (vsize >= 2)
    {
      prodp[usize + 1] = mpn_addmul_2 (prodp, up, usize, vp[0], vp[1]);
      prodp += 2, vp += 2, vsize -= 2;
    }
  if (vsize != 0)
    prodp[usize] = mpn_addmul_1 (prodp, up, usize, vp[0]);
#else
  /* For each iteration in the loop, multiply U with one limb from V, and
     add the result to PROD.  */
  while (vsize != 0)
    {
      prodp[usize] = mpn_addmul_1 (prodp, up, usize, vp[0]);
      prodp += 1, vp += 1, vsize -= 1;
    }
#endif
}

Here is the call graph for this function:

Here is the caller graph for this function:

void mpn_mul_n ( mp_ptr  p,
mp_srcptr  a,
mp_srcptr  b,
mp_size_t  n 
)

Definition at line 1481 of file gmp.c.

/* Multiply two n-limb operands: {p, 2n} = {a, n} * {b, n}.
   Dispatches by size: basecase below KARATSUBA_MUL_THRESHOLD, Karatsuba
   below TOOM3_MUL_THRESHOLD, Toom-3 above that, and (when WANT_FFT)
   Schoenhage-Strassen FFT above FFT_MUL_THRESHOLD.  */
{
  if (n < KARATSUBA_MUL_THRESHOLD)
    mpn_mul_basecase (p, a, n, b, n);
  else if (n < TOOM3_MUL_THRESHOLD)
    {
      /* Allocate workspace of fixed size on stack: fast! */
#if TUNE_PROGRAM_BUILD
      mp_limb_t ws[2 * (TOOM3_MUL_THRESHOLD_LIMIT-1) + 2 * BITS_PER_MP_LIMB];
#else
      mp_limb_t ws[2 * (TOOM3_MUL_THRESHOLD-1) + 2 * BITS_PER_MP_LIMB];
#endif
      mpn_kara_mul_n (p, a, b, n, ws);
    }
#if WANT_FFT || TUNE_PROGRAM_BUILD
  else if (n < FFT_MUL_THRESHOLD)
#else
  else
#endif
    {
      /* Use workspace of unknown size in heap, as stack space may
       * be limited.  Since n is at least TOOM3_MUL_THRESHOLD, the
       * multiplication will take much longer than malloc()/free().  */
      mp_limb_t wsLen, *ws;
      TMP_DECL (marker);
      TMP_MARK (marker);

      wsLen = 2 * n + 3 * BITS_PER_MP_LIMB;
      ws = (mp_ptr) TMP_ALLOC ((size_t) wsLen * sizeof (mp_limb_t));
      mpn_toom3_mul_n (p, a, b, n, ws);
      TMP_FREE (marker);
    }
#if WANT_FFT || TUNE_PROGRAM_BUILD
  else
    {
      mpn_mul_fft_full (p, a, n, b, n);      
    }
#endif
}

Here is the call graph for this function:

Here is the caller graph for this function:

mp_limb_t mpn_rshift ( mp_ptr  wp,
mp_srcptr  up,
mp_size_t  usize,
unsigned int  cnt 
)

Definition at line 1544 of file gmp.c.

/* Shift {up, usize} right by cnt bits (1 <= cnt < BITS_PER_MP_LIMB),
   storing the result at {wp, usize}.  Returns the cnt bits shifted out
   of the least significant limb, left-justified in a limb.  The copy
   runs from the low end up, so wp may equal up for in-place shifting.  */
{
  mp_limb_t out_bits, cur, next;
  unsigned rsh, lsh;
  mp_size_t idx;

#ifdef DEBUG
  if (usize == 0 || cnt == 0)
    abort ();
#endif

  rsh = cnt;
  lsh = BITS_PER_MP_LIMB - rsh;

  /* Bits that fall off the bottom limb become the return value.  */
  cur = up[0];
  out_bits = cur << lsh;

  /* Walk from the least significant limb upward; each result limb
     combines the low bits of one source limb with the high bits of
     the next one up.  */
  for (idx = 0; idx < usize - 1; idx++)
    {
      next = up[idx + 1];
      wp[idx] = (cur >> rsh) | (next << lsh);
      cur = next;
    }
  wp[usize - 1] = cur >> rsh;

  return out_bits;
}

Here is the caller graph for this function:

mp_limb_t mpn_sb_divrem_mn ( mp_ptr  qp,
mp_ptr  np,
mp_size_t  nsize,
mp_srcptr  dp,
mp_size_t  dsize 
)

Definition at line 3240 of file gmp.c.

/* Schoolbook division of {np, nsize} by {dp, dsize} (dsize > 2), writing
   nsize-dsize quotient limbs to qp and leaving the remainder in the low
   dsize limbs of np.  The divisor must be normalized (high bit of
   dp[dsize-1] set, per mpn convention).  Returns the "extra" most
   significant quotient limb (0 or 1).  Each loop iteration estimates one
   quotient limb from the top two divisor limbs, then corrects by at most
   one add-back of the divisor.  */
{
  mp_limb_t most_significant_q_limb = 0;
  mp_size_t i;
  mp_limb_t dx, d1, n0;
  mp_limb_t dxinv = 0;
  int have_preinv;

  ASSERT_ALWAYS (dsize > 2);

  np += nsize - dsize;
  dx = dp[dsize - 1];  /* most significant divisor limb */
  d1 = dp[dsize - 2];  /* second most significant divisor limb */
  n0 = np[dsize - 1];

  /* If the dividend's top limbs are >= the divisor, subtract once and
     record a leading quotient limb of 1.  */
  if (n0 >= dx)
    {
      if (n0 > dx || mpn_cmp (np, dp, dsize - 1) >= 0)
       {
         mpn_sub_n (np, np, dp, dsize);
         most_significant_q_limb = 1;
       }
    }

  /* If multiplication is much faster than division, preinvert the
     most significant divisor limb before entering the loop.  */
  if (PREINVERT_VIABLE)
    {
      have_preinv = 0;
      if ((UDIV_TIME - (2 * UMUL_TIME + 6)) * (nsize - dsize) > UDIV_TIME)
       {
         invert_limb (dxinv, dx);
         have_preinv = 1;
       }
    }

  for (i = nsize - dsize - 1; i >= 0; i--)
    {
      mp_limb_t q;
      mp_limb_t nx;
      mp_limb_t cy_limb;

      nx = np[dsize - 1];
      np--;

      if (nx == dx)
       {
         /* This might over-estimate q, but it's probably not worth
            the extra code here to find out.  */
         q = ~(mp_limb_t) 0;

#if 1
         cy_limb = mpn_submul_1 (np, dp, dsize, q);
#else
         /* This should be faster on many machines */
         cy_limb = mpn_sub_n (np + 1, np + 1, dp, dsize);
         cy = mpn_add_n (np, np, dp, dsize);
         np[dsize] += cy;
#endif

         /* Over-estimated by one: add the divisor back.  */
         if (nx != cy_limb)
           {
             mpn_add_n (np, np, dp, dsize);
             q--;
           }

         qp[i] = q;
       }
      else
       {
         mp_limb_t rx, r1, r0, p1, p0;

          /* "workaround" avoids a problem with gcc 2.7.2.3 i386 register
             usage when np[dsize-1] is used in an asm statement like
             umul_ppmm in udiv_qrnnd_preinv.  The symptom is seg faults due
             to registers being clobbered.  gcc 2.95 i386 doesn't have the
             problem. */
          {
            mp_limb_t  workaround = np[dsize - 1];
            if (PREINVERT_VIABLE && have_preinv)
              udiv_qrnnd_preinv (q, r1, nx, workaround, dx, dxinv);
            else
              udiv_qrnnd (q, r1, nx, workaround, dx);
          }
         /* Refine the estimate using the second divisor limb d1.  */
         umul_ppmm (p1, p0, d1, q);

         r0 = np[dsize - 2];
         rx = 0;
         if (r1 < p1 || (r1 == p1 && r0 < p0))
           {
             p1 -= p0 < d1;
             p0 -= d1;
             q--;
             r1 += dx;
             rx = r1 < dx;
           }

         p1 += r0 < p0;     /* cannot carry! */
         rx -= r1 < p1;     /* may become 11..1 if q is still too large */
         r1 -= p1;
         r0 -= p0;

         /* Subtract q times the remaining (lower) divisor limbs.  */
         cy_limb = mpn_submul_1 (np, dp, dsize - 2, q);

         {
           mp_limb_t cy1, cy2;
           cy1 = r0 < cy_limb;
           r0 -= cy_limb;
           cy2 = r1 < cy1;
           r1 -= cy1;
           np[dsize - 1] = r1;
           np[dsize - 2] = r0;
           /* Borrow mismatch means q was one too large: add back.  */
           if (cy2 != rx)
             {
              mpn_add_n (np, np, dp, dsize);
              q--;
             }
         }
         qp[i] = q;
       }
    }

  /* ______ ______ ______
    |__rx__|__r1__|__r0__|         partial remainder
           ______ ______
        - |__p1__|__p0__|          partial product to subtract
           ______ ______
        - |______|cylimb|          

     rx is -1, 0 or 1.  If rx=1, then q is correct (it should match
     carry out).  If rx=-1 then q is too large.  If rx=0, then q might
     be too large, but it is most likely correct.
  */

  return most_significant_q_limb;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static unsigned char* mpn_sb_get_str ( unsigned char *  str,
size_t  len,
mp_ptr  up,
mp_size_t  un,
const powers_t *  powtab 
) [static]

Definition at line 1789 of file gmp.c.

/* Convert {up, un} to a string of base-`powtab->base` digit values
   (not ASCII) written into str, zero-padding on the left up to len.
   Returns a pointer just past the last byte written.  Works by
   repeatedly dividing by big_base (the largest power of the base
   fitting in one limb), then extracting chars_per_limb digits from
   each remainder by fixed-point multiplication.  Base 10 has a
   hand-specialized path.  */
{
  mp_limb_t rl, ul;
  unsigned char *s;
  int base;
  size_t l;
  /* Allocate memory for largest possible string, given that we only get here
     for operands with un < GET_STR_PRECOMPUTE_THRESHOLD and that the smallest
     base is 3.  7/11 is an approximation to 1/log2(3).  */
#if TUNE_PROGRAM_BUILD
#define BUF_ALLOC (GET_STR_THRESHOLD_LIMIT * BITS_PER_MP_LIMB * 7 / 11)
#else
#define BUF_ALLOC (GET_STR_PRECOMPUTE_THRESHOLD * BITS_PER_MP_LIMB * 7 / 11)
#endif
  unsigned char buf[BUF_ALLOC];
#if TUNE_PROGRAM_BUILD
  mp_limb_t rp[GET_STR_THRESHOLD_LIMIT];
#else
  mp_limb_t rp[GET_STR_PRECOMPUTE_THRESHOLD];
#endif

  base = powtab->base;
  if (base == 10)
    {
      /* Special case code for base==10 so that the compiler has a chance to
        optimize things.  */

      MPN_COPY (rp + 1, up, un);

      /* Digits are generated backwards from the end of buf.  */
      s = buf + BUF_ALLOC;
      while (un > 1)
       {
         int i;
         mp_limb_t frac, digit;
         MPN_DIVREM_OR_PREINV_DIVREM_1 (rp, (mp_size_t) 1, rp + 1, un,
                                    MP_BASES_BIG_BASE_10,
                                    MP_BASES_BIG_BASE_INVERTED_10,
                                    MP_BASES_NORMALIZATION_STEPS_10);
         un -= rp[un] == 0;
         /* frac is a fixed-point fraction; each umul_ppmm by 10 pops
            the next decimal digit out of the high word.  */
         frac = (rp[0] + 1) << GMP_NAIL_BITS;
         s -= MP_BASES_CHARS_PER_LIMB_10;
#if HAVE_HOST_CPU_FAMILY_x86
         /* The code below turns out to be a bit slower for x86 using gcc.
            Use plain code.  */
         i = MP_BASES_CHARS_PER_LIMB_10;
         do
           {
             umul_ppmm (digit, frac, frac, 10);
             *s++ = digit;
           }
         while (--i);
#else
         /* Use the fact that 10 in binary is 1010, with the lowest bit 0.
            After a few umul_ppmm, we will have accumulated enough low zeros
            to use a plain multiply.  */
         if (MP_BASES_NORMALIZATION_STEPS_10 == 0)
           {
             umul_ppmm (digit, frac, frac, 10);
             *s++ = digit;
           }
         if (MP_BASES_NORMALIZATION_STEPS_10 <= 1)
           {
             umul_ppmm (digit, frac, frac, 10);
             *s++ = digit;
           }
         if (MP_BASES_NORMALIZATION_STEPS_10 <= 2)
           {
             umul_ppmm (digit, frac, frac, 10);
             *s++ = digit;
           }
         if (MP_BASES_NORMALIZATION_STEPS_10 <= 3)
           {
             umul_ppmm (digit, frac, frac, 10);
             *s++ = digit;
           }
         i = MP_BASES_CHARS_PER_LIMB_10 - (4-MP_BASES_NORMALIZATION_STEPS_10);
         frac = (frac + 0xf) >> 4;
         do
           {
             frac *= 10;
             digit = frac >> (BITS_PER_MP_LIMB - 4);
             *s++ = digit;
             frac &= (~(mp_limb_t) 0) >> 4;
           }
         while (--i);
#endif
         s -= MP_BASES_CHARS_PER_LIMB_10;
       }

      /* Last limb: plain division, no leading-zero padding.  */
      ul = rp[1];
      while (ul != 0)
       {
         udiv_qrnd_unnorm (ul, rl, ul, 10);
         *--s = rl;
       }
    }
  else /* not base 10 */
    {
      unsigned chars_per_limb;
      mp_limb_t big_base, big_base_inverted;
      unsigned normalization_steps;

      chars_per_limb = __mp_bases[base].chars_per_limb;
      big_base = __mp_bases[base].big_base;
      big_base_inverted = __mp_bases[base].big_base_inverted;
      count_leading_zeros (normalization_steps, big_base);

      MPN_COPY (rp + 1, up, un);

      /* Digits are generated backwards from the end of buf.  */
      s = buf + BUF_ALLOC;
      while (un > 1)
       {
         int i;
         mp_limb_t frac;
         MPN_DIVREM_OR_PREINV_DIVREM_1 (rp, (mp_size_t) 1, rp + 1, un,
                                    big_base, big_base_inverted,
                                    normalization_steps);
         un -= rp[un] == 0;
         frac = (rp[0] + 1) << GMP_NAIL_BITS;
         s -= chars_per_limb;
         i = chars_per_limb;
         do
           {
             mp_limb_t digit;
             umul_ppmm (digit, frac, frac, base);
             *s++ = digit;
           }
         while (--i);
         s -= chars_per_limb;
       }

      /* Last limb: plain division, no leading-zero padding.  */
      ul = rp[1];
      while (ul != 0)
       {
         udiv_qrnd_unnorm (ul, rl, ul, base);
         *--s = rl;
       }
    }

  /* Left-pad with zero digits up to the requested length, then copy
     the generated digits out.  */
  l = buf + BUF_ALLOC - s;
  while (l < len)
    {
      *str++ = 0;
      len--;
    }
  while (l != 0)
    {
      *str++ = *s++;
      l--;
    }
  return str;
}

Here is the caller graph for this function:

mp_size_t mpn_set_str ( mp_ptr  rp,
const unsigned char *  str,
size_t  str_len,
int  base 
)

Definition at line 2170 of file gmp.c.

/* Convert a string of str_len base-`base` digit values (not ASCII) at str,
   most significant first, into limbs at rp.  Returns the number of limbs
   stored.  Three paths: a shift-and-mask loop for power-of-2 bases, a
   quadratic multiply-and-add loop for short strings, and a sub-quadratic
   divide-and-conquer scheme (repeated squaring of big_base) for long ones.  */
{
  mp_size_t size;
  mp_limb_t big_base;
  int chars_per_limb;
  mp_limb_t res_digit;

  ASSERT (base >= 2);
  ASSERT (base < numberof (__mp_bases));
  ASSERT (str_len >= 1);

  big_base = __mp_bases[base].big_base;
  chars_per_limb = __mp_bases[base].chars_per_limb;

  size = 0;

  if (POW2_P (base))
    {
      /* The base is a power of 2.  Read the input string from least to most
        significant character/digit.  */

      const unsigned char *s;
      int next_bitpos;
      /* For power-of-2 bases, big_base holds the bits per input digit.  */
      int bits_per_indigit = big_base;

      res_digit = 0;
      next_bitpos = 0;

      for (s = str + str_len - 1; s >= str; s--)
       {
         int inp_digit = *s;

         res_digit |= ((mp_limb_t) inp_digit << next_bitpos) & GMP_NUMB_MASK;
         next_bitpos += bits_per_indigit;
         if (next_bitpos >= GMP_NUMB_BITS)
           {
             /* Limb full: store it and keep the overflowed digit bits.  */
             rp[size++] = res_digit;
             next_bitpos -= GMP_NUMB_BITS;
             res_digit = inp_digit >> (bits_per_indigit - next_bitpos);
           }

         if (!((long)s & 0xFF)) SCHEME_BIGNUM_USE_FUEL(1);
       }

      if (res_digit != 0)
       rp[size++] = res_digit;
      return size;
    }
  else
    {
      /* General case.  The base is not a power of 2.  */

      if (str_len < SET_STR_THRESHOLD)
       {
         size_t i;
         int j;
         mp_limb_t cy_limb;

         /* Quadratic loop: fold chars_per_limb digits into one limb,
            then multiply the accumulated number by big_base and add.  */
         for (i = chars_per_limb; i < str_len; i += chars_per_limb)
           {
             res_digit = *str++;
             if (base == 10)
              { /* This is a common case.
                   Help the compiler to avoid multiplication.  */
                for (j = MP_BASES_CHARS_PER_LIMB_10 - 1; j != 0; j--)
                  res_digit = res_digit * 10 + *str++;
              }
             else
              {
                for (j = chars_per_limb - 1; j != 0; j--)
                  res_digit = res_digit * base + *str++;
              }

             if (size == 0)
              {
                if (res_digit != 0)
                  {
                    rp[0] = res_digit;
                    size = 1;
                  }
              }
             else
              {
#if HAVE_NATIVE_mpn_mul_1c
                cy_limb = mpn_mul_1c (rp, rp, size, big_base, res_digit);
#else
                cy_limb = mpn_mul_1 (rp, rp, size, big_base);
                cy_limb += mpn_add_1 (rp, rp, size, res_digit);
#endif
                if (cy_limb != 0)
                  rp[size++] = cy_limb;
              }
           }

         /* Trailing partial group of fewer than chars_per_limb digits;
            big_base is rebuilt as base^(group length).  */
         big_base = base;
         res_digit = *str++;
         if (base == 10)
           { /* This is a common case.
               Help the compiler to avoid multiplication.  */
             for (j = str_len - (i - MP_BASES_CHARS_PER_LIMB_10) - 1; j > 0; j--)
              {
                res_digit = res_digit * 10 + *str++;
                big_base *= 10;
              }
           }
         else
           {
             for (j = str_len - (i - chars_per_limb) - 1; j > 0; j--)
              {
                res_digit = res_digit * base + *str++;
                big_base *= base;
              }
           }

         if (size == 0)
           {
             if (res_digit != 0)
              {
                rp[0] = res_digit;
                size = 1;
              }
           }
         else
           {
#if HAVE_NATIVE_mpn_mul_1c
             cy_limb = mpn_mul_1c (rp, rp, size, big_base, res_digit);
#else
             cy_limb = mpn_mul_1 (rp, rp, size, big_base);
             cy_limb += mpn_add_1 (rp, rp, size, res_digit);
#endif
             if (cy_limb != 0)
              rp[size++] = cy_limb;
           }
         return size;
       }
      else
       {
         /* Sub-quadratic code.  */

         mp_ptr dp;
         mp_size_t dsize;
         mp_ptr xp, tp;
         mp_size_t step;
         mp_size_t i;
         size_t alloc;
         mp_size_t n;
         mp_ptr pow_mem;
         TMP_DECL (marker);
         TMP_MARK (marker);

         alloc = (str_len + chars_per_limb - 1) / chars_per_limb;
         alloc = 2 * alloc;
         dp = __GMP_ALLOCATE_FUNC_LIMBS (alloc);

#if SET_STR_BLOCK_SIZE == 1
         dsize = convert_blocks (dp, str, str_len, base);
#else
         {
           const unsigned char *s;
           mp_ptr ddp = dp;

           s = str + str_len;
           while (s - str >  SET_STR_BLOCK_SIZE * chars_per_limb)
             {
              s -= SET_STR_BLOCK_SIZE * chars_per_limb;
              mpn_set_str (ddp, s, SET_STR_BLOCK_SIZE * chars_per_limb, base);
              ddp += SET_STR_BLOCK_SIZE;
             }
           ddp += mpn_set_str (ddp, str, s - str, base);
           dsize = ddp - dp;
         }
#endif

         /* Allocate space for powers of big_base.  Could trim this in two
            ways:
            1. Only really need 2^ceil(log2(dsize)) bits for the largest
              power.
            2. Only the variable to get the largest power need that much
              memory.  The other variable needs half as much.  Need just
              figure out which of xp and tp will hold the last one.
            Net space savings would be in the range 1/4 to 5/8 of current
            allocation, depending on how close to the next power of 2 that
            dsize is.  */
         pow_mem = __GMP_ALLOCATE_FUNC_LIMBS (2 * alloc);
         xp = pow_mem;
         tp = pow_mem + alloc;

         xp[0] = big_base;
         n = 1;
         step = 1;
#if SET_STR_BLOCK_SIZE != 1
         for (i = SET_STR_BLOCK_SIZE; i > 1; i >>= 1)
           {
             mpn_sqr_n (tp, xp, n);
             n = 2 * n;
             n -= tp[n - 1] == 0;

             step = 2 * step;
             MP_PTR_SWAP (tp, xp);
           }
#endif

         /* Multiply every second limb block, each `step' limbs large by the
            base power currently in xp[], then add this to the adjacent block.
            We thereby convert from dsize blocks in base big_base, to dsize/2
            blocks in base big_base^2, then to dsize/4 blocks in base
            big_base^4, etc, etc.  */

         if (step < dsize)
           {
             for (;;)
              {
                for (i = 0; i < dsize - step; i += 2 * step)
                  {
                    mp_ptr bp = dp + i;
                    mp_size_t m = dsize - i - step;
                    mp_size_t hi;
                    if (n >= m)
                     {
                       mpn_mul (tp, xp, n, bp + step, m);
                       mpn_add (bp, tp, n + m, bp, n);
                       hi = i + n + m;
                       dsize = hi;
                       dsize -= dp[dsize - 1] == 0;
                     }
                    else
                     {
                       mpn_mul_n (tp, xp, bp + step, n);
                       mpn_add (bp, tp, n + n, bp, n);
                     }
                  }

                step = 2 * step;
                if (! (step < dsize))
                  break;

                /* Square the base power for the next merging round.  */
                mpn_sqr_n (tp, xp, n);
                n = 2 * n;
                n -= tp[n - 1] == 0;
                MP_PTR_SWAP (tp, xp);
              }
           }

         MPN_NORMALIZE (dp, dsize);
         MPN_COPY (rp, dp, dsize);
         __GMP_FREE_FUNC_LIMBS (pow_mem, 2 * alloc);
         __GMP_FREE_FUNC_LIMBS (dp, alloc);
         TMP_FREE (marker);
         return dsize;
       }
    }
}

Here is the call graph for this function:

Here is the caller graph for this function:

void mpn_sqr_basecase ( mp_ptr  prodp,
mp_srcptr  up,
mp_size_t  n 
)

Definition at line 3797 of file gmp.c.

/* Basecase squaring: {prodp, 2n} = {up, n}^2, for n below the Karatsuba
   threshold.  Computes the off-diagonal cross products once into a
   temporary, squares the diagonal limbs in place, then doubles the
   cross products (one left shift) and adds them in.  */
{
  mp_size_t i;

  ASSERT (n >= 1);
  ASSERT (! MPN_OVERLAP_P (prodp, 2*n, up, n));

  {
    /* N.B.!  We need the superfluous indirection through argh to work around
       a reloader bug in GCC 2.7.*.  */
#if GMP_NAIL_BITS == 0
    mp_limb_t ul, argh;
    ul = up[0];
    umul_ppmm (argh, prodp[0], ul, ul);
    prodp[1] = argh;
#else
    mp_limb_t ul, lpl;
    ul = up[0];
    umul_ppmm (prodp[1], lpl, ul, ul << GMP_NAIL_BITS);
    prodp[0] = lpl >> GMP_NAIL_BITS;
#endif
  }
  if (n > 1)
    {
      mp_limb_t tarr[2 * SQR_KARATSUBA_THRESHOLD];
      mp_ptr tp = tarr;
      mp_limb_t cy;

      /* must fit 2*n limbs in tarr */
      ASSERT (n <= SQR_KARATSUBA_THRESHOLD);

      /* Off-diagonal products: row i is up[i..n-1] * up[i-1].  */
      cy = mpn_mul_1 (tp, up + 1, n - 1, up[0]);
      tp[n - 1] = cy;
      for (i = 2; i < n; i++)
       {
         mp_limb_t cy;
         cy = mpn_addmul_1 (tp + 2 * i - 2, up + i, n - i, up[i - 1]);
         tp[n + i - 2] = cy;
       }
#if HAVE_NATIVE_mpn_sqr_diagonal
      mpn_sqr_diagonal (prodp + 2, up + 1, n - 1);
#else
      /* Diagonal products up[i]^2 go straight into the result.  */
      for (i = 1; i < n; i++)
       {
#if GMP_NAIL_BITS == 0
         mp_limb_t ul;
         ul = up[i];
         umul_ppmm (prodp[2 * i + 1], prodp[2 * i], ul, ul);
#else
         mp_limb_t ul, lpl;
         ul = up[i];
         umul_ppmm (prodp[2 * i + 1], lpl, ul, ul << GMP_NAIL_BITS);
         prodp[2 * i] = lpl >> GMP_NAIL_BITS;
#endif
       }
#endif
      {
       /* Double the cross products and fold them into the result.  */
       mp_limb_t cy;
       cy = mpn_lshift (tp, tp, 2 * n - 2, 1);
       cy += mpn_add_n (prodp + 1, prodp + 1, tp, 2 * n - 2);
       prodp[2 * n - 1] += cy;
      }
    }
}

Here is the call graph for this function:

Here is the caller graph for this function:

void mpn_sqr_n ( mp_ptr  prodp,
mp_srcptr  up,
mp_size_t  un 
)

Definition at line 4101 of file gmp.c.

/* Square an un-limb operand: {prodp, 2*un} = {up, un}^2.
   Dispatches by size, mirroring mpn_mul_n: basecase, Karatsuba,
   Toom-3, and (when WANT_FFT) FFT squaring.  */
{
  if (un < KARATSUBA_SQR_THRESHOLD)
    { /* plain schoolbook multiplication */
      if (un == 0)
       return;
      mpn_sqr_basecase (prodp, up, un);
    }
  else if (un < TOOM3_SQR_THRESHOLD)
    { /* karatsuba multiplication */
      mp_ptr tspace;
      TMP_DECL (marker);
      TMP_MARK (marker);
      tspace = (mp_ptr) TMP_ALLOC (2 * (un + BITS_PER_MP_LIMB) * BYTES_PER_MP_LIMB);
      mpn_kara_sqr_n (prodp, up, un, tspace);
      TMP_FREE (marker);
    }
#if WANT_FFT || TUNE_PROGRAM_BUILD
  else if (un < FFT_SQR_THRESHOLD)
#else
  else
#endif
    { /* toom3 multiplication */
      mp_ptr tspace;
      TMP_DECL (marker);
      TMP_MARK (marker);
      tspace = (mp_ptr) TMP_ALLOC (2 * (un + BITS_PER_MP_LIMB) * BYTES_PER_MP_LIMB);
      mpn_toom3_sqr_n (prodp, up, un, tspace);
      TMP_FREE (marker);
    }
#if WANT_FFT || TUNE_PROGRAM_BUILD
  else
    {
      /* schoenhage multiplication */
      mpn_mul_fft_full (prodp, up, un, up, un);
    }
#endif
}

Here is the call graph for this function:

Here is the caller graph for this function:

mp_size_t mpn_sqrtrem ( mp_ptr  sp,
mp_ptr  rp,
mp_srcptr  np,
mp_size_t  nn 
)

Definition at line 3034 of file gmp.c.

/* Compute the square root S and remainder R of N = {np, nn}:
   sp gets ceil(nn/2) limbs of S, rp (if non-NULL) gets R, and the
   return value is the limb size of R (0 if N is a perfect square).
   The operand is normalized (shifted left by an even bit count 2c)
   so mpn_dc_sqrtrem sees a full-sized input, then the root and
   remainder are adjusted back.  */
{
  mp_limb_t *tp, s0[1], cc, high, rl;
  int c;
  mp_size_t rn, tn;
  TMP_DECL (marker);

  ASSERT (nn >= 0);

  /* If OP is zero, both results are zero.  */
  if (nn == 0)
    return 0;

  ASSERT (np[nn - 1] != 0);
  ASSERT (rp == NULL || MPN_SAME_OR_SEPARATE_P (np, rp, nn));
  ASSERT (rp == NULL || ! MPN_OVERLAP_P (sp, (nn + 1) / 2, rp, nn));
  ASSERT (! MPN_OVERLAP_P (sp, (nn + 1) / 2, np, nn));

  high = np[nn - 1];
  /* Single already-normalized limb: direct one-limb root.  */
  if (nn == 1 && (high & GMP_NUMB_HIGHBIT))
    return mpn_sqrtrem1 (sp, rp, np);
  count_leading_zeros (c, high);
  c -= GMP_NAIL_BITS;

  c = c / 2; /* we have to shift left by 2c bits to normalize {np, nn} */
  tn = (nn + 1) / 2; /* 2*tn is the smallest even integer >= nn */

  TMP_MARK (marker);
  if (nn % 2 != 0 || c > 0)
    {
      /* Normalization needed: work on a shifted copy, then correct
        S and R for the shift afterwards.  */
      tp = TMP_ALLOC_LIMBS (2 * tn);
      tp[0] = 0;          /* needed only when 2*tn > nn, but saves a test */
      if (c != 0)
       mpn_lshift (tp + 2 * tn - nn, np, nn, 2 * c);
      else
       MPN_COPY (tp + 2 * tn - nn, np, nn);
      rl = mpn_dc_sqrtrem (sp, tp, tn);
      /* We have 2^(2k)*N = S^2 + R where k = c + (2tn-nn)*GMP_NUMB_BITS/2,
        thus 2^(2k)*N = (S-s0)^2 + 2*S*s0 - s0^2 + R where s0=S mod 2^k */
      c += (nn % 2) * GMP_NUMB_BITS / 2;         /* c now represents k */
      s0[0] = sp[0] & (((mp_limb_t) 1 << c) - 1);       /* S mod 2^k */
      rl += mpn_addmul_1 (tp, sp, tn, 2 * s0[0]);       /* R = R + 2*s0*S */
      cc = mpn_submul_1 (tp, s0, 1, s0[0]);
      rl -= (tn > 1) ? mpn_sub_1 (tp + 1, tp + 1, tn - 1, cc) : cc;
      mpn_rshift (sp, sp, tn, c);
      tp[tn] = rl;
      if (rp == NULL)
       rp = tp;
      /* Shift the remainder right by 2k bits, limb-stepping first.  */
      c = c << 1;
      if (c < GMP_NUMB_BITS)
       tn++;
      else
       {
         tp++;
         c -= GMP_NUMB_BITS;
       }
      if (c != 0)
       mpn_rshift (rp, tp, tn, c);
      else
       MPN_COPY_INCR (rp, tp, tn);
      rn = tn;
    }
  else
    {
      /* Already normalized: run the divide-and-conquer root in place.  */
      if (rp == NULL)
       rp = TMP_ALLOC_LIMBS (nn);
      if (rp != np)
       MPN_COPY (rp, np, nn);
      rn = tn + (rp[tn] = mpn_dc_sqrtrem (sp, rp, tn));
    }

  MPN_NORMALIZE (rp, rn);

  TMP_FREE (marker);
  return rn;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static mp_size_t mpn_sqrtrem1 ( mp_ptr  sp,
mp_ptr  rp,
mp_srcptr  np 
) [static]

Definition at line 2885 of file gmp.c.

{
  /* Square root of the single normalized limb np[0]: writes the root
     to sp[0] and (if rp is non-NULL) the remainder to rp[0]; returns
     1 when the remainder is nonzero, else 0.  An 8-bit root is read
     from approx_tab, then each loop iteration doubles the precision
     until the full limb width is reached.  */
  mp_limb_t np0, s, r, q, u;
  int prec;

  ASSERT (np[0] >= GMP_NUMB_HIGHBIT / 2);
  ASSERT (GMP_LIMB_BITS >= 16);

  /* first compute a 8-bit approximation from the high 8-bits of np[0] */
  np0 = np[0] << GMP_NAIL_BITS;
  q = np0 >> (GMP_LIMB_BITS - 8);
  /* 2^6 = 64 <= q < 256 = 2^8 */
  s = approx_tab[q - 64];                        /* 128 <= s < 255 */
  r = (np0 >> (GMP_LIMB_BITS - 16)) - s * s;            /* r <= 2*s */
  if (r > 2 * s)
    {
      /* table estimate was one too small; bump s and fix r */
      r -= 2 * s + 1;
      s++;
    }

  prec = 8;
  np0 <<= 2 * prec;
  while (2 * prec < GMP_LIMB_BITS)
    {
      /* invariant: s has prec bits, and r <= 2*s */
      r = (r << prec) + (np0 >> (GMP_LIMB_BITS - prec));
      np0 <<= prec;
      u = 2 * s;
      q = r / u;
      u = r - q * u;
      s = (s << prec) + q;
      u = (u << prec) + (np0 >> (GMP_LIMB_BITS - prec));
      q = q * q;
      r = u - q;
      if (u < q)
       {
         /* candidate root overshot by one; step s back and fix r */
         r += 2 * s - 1;
         s --;
       }
      np0 <<= prec;
      prec = 2 * prec;
    }

  ASSERT (2 * prec == GMP_LIMB_BITS); /* GMP_LIMB_BITS must be a power of 2 */

  /* normalize back, assuming GMP_NAIL_BITS is even */
  ASSERT (GMP_NAIL_BITS % 2 == 0);
  sp[0] = s >> HALF_NAIL;
  u = s - (sp[0] << HALF_NAIL); /* s mod 2^HALF_NAIL */
  r += u * ((sp[0] << (HALF_NAIL + 1)) + u);
  r = r >> GMP_NAIL_BITS;

  if (rp != NULL)
    rp[0] = r;
  return r != 0 ? 1 : 0;
}

Here is the caller graph for this function:

static mp_limb_t mpn_sqrtrem2 ( mp_ptr  sp,
mp_ptr  rp,
mp_srcptr  np 
) [static]

Definition at line 2948 of file gmp.c.

{
  /* Square root of the normalized 2-limb operand {np, 2}: the 1-limb
     root goes to sp[0], the low limb of the remainder to rp[0], and
     the remainder's high (carry) part is the return value.  Starts
     from a 1-limb root of the high limb (mpn_sqrtrem1) and refines it
     with one division step.  */
  mp_limb_t qhl, q, u, np0;
  int cc;

  ASSERT (np[1] >= GMP_NUMB_HIGHBIT / 2);

  np0 = np[0];
  mpn_sqrtrem1 (sp, rp, np + 1);
  /* reduce the remainder below sp[0], counting the excess in qhl */
  qhl = 0;
  while (rp[0] >= sp[0])
    {
      qhl++;
      rp[0] -= sp[0];
    }
  /* now rp[0] < sp[0] < 2^Prec */
  rp[0] = (rp[0] << Prec) + (np0 >> Prec);
  u = 2 * sp[0];
  q = rp[0] / u;
  u = rp[0] - q * u;
  q += (qhl & 1) << (Prec - 1);
  qhl >>= 1; /* if qhl=1, necessarily q=0 as qhl*2^Prec + q <= 2^Prec */
  /* now we have (initial rp[0])<<Prec + np0>>Prec = (qhl<<Prec + q) * (2sp[0]) + u */
  sp[0] = ((sp[0] + qhl) << Prec) + q;
  cc = u >> Prec;
  rp[0] = ((u << Prec) & GMP_NUMB_MASK) + (np0 & (((mp_limb_t) 1 << Prec) - 1));
  /* subtract q * q or qhl*2^(2*Prec) from rp */
  cc -= mpn_sub_1 (rp, rp, 1, q * q) + qhl;
  /* now subtract 2*q*2^Prec + 2^(2*Prec) if qhl is set */
  if (cc < 0)
    {
      /* root was one too large: decrement it and add 2*s+1 back into
         the remainder (done as two 1-limb additions) */
      cc += sp[0] != 0 ? mpn_add_1 (rp, rp, 1, sp[0]) : 1;
      cc += mpn_add_1 (rp, rp, 1, --sp[0]);
    }

  return cc;
}

Here is the call graph for this function:

Here is the caller graph for this function:

mp_limb_t mpn_sub_n ( mp_ptr  res_ptr,
mp_srcptr  s1_ptr,
mp_srcptr  s2_ptr,
mp_size_t  size 
)

Definition at line 125 of file gmp.c.

{
  /* Subtract {s2_ptr, size} from {s1_ptr, size}, storing the limb-wise
     difference in {res_ptr, size}, least significant limb first.
     Returns the final borrow out of the most significant limb (0 or 1).  */
  mp_limb_t borrow, a, b, t;
  mp_size_t i;

  borrow = 0;
  for (i = 0; i < size; i++)
    {
      a = s1_ptr[i];
      b = s2_ptr[i];
      t = b + borrow;            /* fold previous borrow into subtrahend */
      borrow = (t < borrow);     /* carry out of that addition */
      t = a - t;                 /* main subtract */
      borrow += (t > a);         /* borrow out of the subtract, combined */
      res_ptr[i] = t;
    }

  return borrow;
}

Here is the caller graph for this function:

mp_limb_t mpn_submul_1 ( mp_ptr  res_ptr,
mp_srcptr  s1_ptr,
mp_size_t  s1_size,
mp_limb_t  s2_limb 
)

Definition at line 3992 of file gmp.c.

{
  /* Multiply {s1_ptr, s1_size} by the single limb s2_limb and subtract
     the product from {res_ptr, s1_size} in place, least significant
     limb first.  Returns the most significant limb of the product plus
     any borrow propagated out of the subtraction.  */
  mp_limb_t carry, prod_hi, prod_lo, prev;
  mp_size_t i;

  SCHEME_BIGNUM_USE_FUEL(s1_size);

  carry = 0;
  for (i = 0; i < s1_size; i++)
    {
      /* prod_hi:prod_lo = s1_ptr[i] * s2_limb (double-limb product) */
      umul_ppmm (prod_hi, prod_lo, s1_ptr[i], s2_limb);

      prod_lo += carry;                     /* add running carry */
      carry = (prod_lo < carry) + prod_hi;  /* wrap + high product limb */

      prev = res_ptr[i];
      prod_lo = prev - prod_lo;             /* subtract from destination */
      carry += (prod_lo > prev);            /* collect borrow out */
      res_ptr[i] = prod_lo;
    }

  return carry;
}

Here is the caller graph for this function:

void mpn_tdiv_qr ( mp_ptr  qp,
mp_ptr  rp,
mp_size_t  qxn,
mp_srcptr  np,
mp_size_t  nn,
mp_srcptr  dp,
mp_size_t  dn 
)

Definition at line 2498 of file gmp.c.

{
  /* Truncating division of {np, nn} by {dp, dn}: the quotient
     (nn-dn+1 limbs) is stored at qp and the remainder (dn limbs) at
     rp.  qxn is currently unsupported (see FIXME).  Dispatches on the
     divisor size: 1- and 2-limb divisors are special-cased, larger
     divisors use schoolbook division (mpn_sb_divrem_mn) below
     BZ_THRESHOLD and block divide-and-conquer division
     (mpn_bz_divrem_n) above it.  */
  /* FIXME:
     1. qxn
     2. pass allocated storage in additional parameter?
  */
#ifdef DEBUG
  if (qxn != 0)
    abort ();
#endif

  switch (dn)
    {
    case 0:
#if 0
      DIVIDE_BY_ZERO;
#endif
      return;

    /* Single-limb divisor: one pass of mpn_divmod_1.  */
    case 1:
      {
       rp[0] = mpn_divmod_1 (qp, np, nn, dp[0]);
       return;
      }

    /* Two-limb divisor: normalize so the divisor's high bit is set,
       then use mpn_divrem_2 and shift the remainder back down.  */
    case 2:
      {
       int cnt;
       mp_ptr n2p, d2p;
       mp_limb_t qhl, cy;
       TMP_DECL (marker);
       TMP_MARK (marker);
       count_leading_zeros (cnt, dp[dn - 1]);
       if (cnt != 0)
         {
           d2p = (mp_ptr) TMP_ALLOC (dn * BYTES_PER_MP_LIMB);
           mpn_lshift (d2p, dp, dn, cnt);
           n2p = (mp_ptr) TMP_ALLOC ((nn + 1) * BYTES_PER_MP_LIMB);
           cy = mpn_lshift (n2p, np, nn, cnt);
           n2p[nn] = cy;
           qhl = mpn_divrem_2 (qp, 0L, n2p, nn + (cy != 0), d2p);
           if (cy == 0)
             qp[nn - 2] = qhl;     /* always store nn-dn+1 quotient limbs */
         }
       else
         {
           d2p = (mp_ptr) dp;
           n2p = (mp_ptr) TMP_ALLOC (nn * BYTES_PER_MP_LIMB);
           MPN_COPY (n2p, np, nn);
           qhl = mpn_divrem_2 (qp, 0L, n2p, nn, d2p);
           qp[nn - 2] = qhl;       /* always store nn-dn+1 quotient limbs */
         }

       if (cnt != 0)
         mpn_rshift (rp, n2p, dn, cnt);
       else
         MPN_COPY (rp, n2p, dn);
       TMP_FREE (marker);
       return;
      }

    /* General case: divisor has 3 or more limbs.  */
    default:
      {
       int adjust;
       TMP_DECL (marker);
       TMP_MARK (marker);
       adjust = np[nn - 1] >= dp[dn - 1]; /* conservative tests for quotient size */
       /* Dividend at least twice as long as the divisor: divide with
          the full normalized divisor.  */
       if (nn + adjust >= 2 * dn)
         {
           mp_ptr n2p, d2p;
           mp_limb_t cy;
           int cnt;
           count_leading_zeros (cnt, dp[dn - 1]);

           qp[nn - dn] = 0;               /* zero high quotient limb */
           if (cnt != 0)                  /* normalize divisor if needed */
             {
              d2p = (mp_ptr) TMP_ALLOC (dn * BYTES_PER_MP_LIMB);
              mpn_lshift (d2p, dp, dn, cnt);
              n2p = (mp_ptr) TMP_ALLOC ((nn + 1) * BYTES_PER_MP_LIMB);
              cy = mpn_lshift (n2p, np, nn, cnt);
              n2p[nn] = cy;
              nn += adjust;
             }
           else
             {
              d2p = (mp_ptr) dp;
              n2p = (mp_ptr) TMP_ALLOC ((nn + 1) * BYTES_PER_MP_LIMB);
              MPN_COPY (n2p, np, nn);
              n2p[nn] = 0;
              nn += adjust;
             }

           if (dn == 2)
             mpn_divrem_2 (qp, 0L, n2p, nn, d2p);
           else if (dn < BZ_THRESHOLD)
             mpn_sb_divrem_mn (qp, n2p, nn, d2p, dn);
           else
             {
              /* Perform 2*dn / dn limb divisions as long as the limbs
                 in np last.  */
              mp_ptr q2p = qp + nn - 2 * dn;
              n2p += nn - 2 * dn;
              mpn_bz_divrem_n (q2p, n2p, d2p, dn);
              nn -= dn;
              while (nn >= 2 * dn)
                {
                  mp_limb_t c;
                  q2p -= dn;  n2p -= dn;
                  c = mpn_bz_divrem_n (q2p, n2p, d2p, dn);
                  ASSERT_ALWAYS (c == 0);
                  nn -= dn;
                }

              if (nn != dn)
                {
                  n2p -= nn - dn;
                  /* In theory, we could fall out to the cute code below
                     since we now have exactly the situation that code
                     is designed to handle.  We botch this badly and call
                     the basic mpn_sb_divrem_mn!  */
                  if (dn == 2)
                    mpn_divrem_2 (qp, 0L, n2p, nn, d2p);
                  else
                    mpn_sb_divrem_mn (qp, n2p, nn, d2p, dn);
                }
             }


           /* Undo the normalization shift on the remainder.  */
           if (cnt != 0)
             mpn_rshift (rp, n2p, dn, cnt);
           else
             MPN_COPY (rp, n2p, dn);
           TMP_FREE (marker);
           return;
         }

       /* When we come here, the numerator/partial remainder is less
          than twice the size of the denominator.  */

         {
           /* Problem:

              Divide a numerator N with nn limbs by a denominator D with dn
              limbs forming a quotient of nn-dn+1 limbs.  When qn is small
              compared to dn, conventional division algorithms perform poorly.
              We want an algorithm that has an expected running time that is
              dependent only on qn.  It is assumed that the most significant
              limb of the numerator is smaller than the most significant limb
              of the denominator.

              Algorithm (very informally stated):

              1) Divide the 2 x qn most significant limbs from the numerator
                by the qn most significant limbs from the denominator.  Call
                the result qest.  This is either the correct quotient, but
                might be 1 or 2 too large.  Compute the remainder from the
                division.  (This step is implemented by a mpn_divrem call.)

              2) Is the most significant limb from the remainder < p, where p
                is the product of the most significant limb from the quotient
                and the next(d).  (Next(d) denotes the next ignored limb from
                the denominator.)  If it is, decrement qest, and adjust the
                remainder accordingly.

              3) Is the remainder >= qest?  If it is, qest is the desired
                quotient.  The algorithm terminates.

              4) Subtract qest x next(d) from the remainder.  If there is
                borrow out, decrement qest, and adjust the remainder
                accordingly.

              5) Skip one word from the denominator (i.e., let next(d) denote
                the next less significant limb.  */

           mp_size_t qn;
           mp_ptr n2p, d2p;
           mp_ptr tp;
           mp_limb_t cy;
           mp_size_t in, rn;
           mp_limb_t quotient_too_large;
           int cnt;

           qn = nn - dn;
           qp[qn] = 0;                           /* zero high quotient limb */
           qn += adjust;                  /* qn cannot become bigger */

           if (qn == 0)
             {
              /* quotient is zero: the remainder is the numerator itself */
              MPN_COPY (rp, np, dn);
              TMP_FREE (marker);
              return;
             }

           in = dn - qn;           /* (at least partially) ignored # of limbs in ops */
           /* Normalize denominator by shifting it to the left such that its
              most significant bit is set.  Then shift the numerator the same
              amount, to mathematically preserve quotient.  */
           count_leading_zeros (cnt, dp[dn - 1]);
           if (cnt != 0)
             {
              d2p = (mp_ptr) TMP_ALLOC (qn * BYTES_PER_MP_LIMB);

              mpn_lshift (d2p, dp + in, qn, cnt);
              d2p[0] |= dp[in - 1] >> (BITS_PER_MP_LIMB - cnt);

              n2p = (mp_ptr) TMP_ALLOC ((2 * qn + 1) * BYTES_PER_MP_LIMB);
              cy = mpn_lshift (n2p, np + nn - 2 * qn, 2 * qn, cnt);
              if (adjust)
                {
                  n2p[2 * qn] = cy;
                  n2p++;
                }
              else
                {
                  n2p[0] |= np[nn - 2 * qn - 1] >> (BITS_PER_MP_LIMB - cnt);
                }
             }
           else
             {
              d2p = (mp_ptr) dp + in;

              n2p = (mp_ptr) TMP_ALLOC ((2 * qn + 1) * BYTES_PER_MP_LIMB);
              MPN_COPY (n2p, np + nn - 2 * qn, 2 * qn);
              if (adjust)
                {
                  n2p[2 * qn] = 0;
                  n2p++;
                }
             }

           /* Get an approximate quotient using the extracted operands.  */
           if (qn == 1)
             {
              mp_limb_t q0, r0;
              mp_limb_t gcc272bug_n1, gcc272bug_n0, gcc272bug_d0;
              /* Due to a gcc 2.7.2.3 reload pass bug, we have to use some
                 temps here.  This doesn't hurt code quality on any machines
                 so we do it unconditionally.  */
              gcc272bug_n1 = n2p[1];
              gcc272bug_n0 = n2p[0];
              gcc272bug_d0 = d2p[0];
              udiv_qrnnd (q0, r0, gcc272bug_n1, gcc272bug_n0, gcc272bug_d0);
              n2p[0] = r0;
              qp[0] = q0;
             }
           else if (qn == 2)
             mpn_divrem_2 (qp, 0L, n2p, 4L, d2p);
           else if (qn < BZ_THRESHOLD)
             mpn_sb_divrem_mn (qp, n2p, qn * 2, d2p, qn);
           else
             mpn_bz_divrem_n (qp, n2p, d2p, qn);

           rn = qn;
           /* Multiply the first ignored divisor limb by the most significant
              quotient limb.  If that product is > the partial remainder's
              most significant limb, we know the quotient is too large.  This
              test quickly catches most cases where the quotient is too large;
              it catches all cases where the quotient is 2 too large.  */
           {
             mp_limb_t dl, x;
             mp_limb_t h, l;

             if (in - 2 < 0)
              dl = 0;
             else
              dl = dp[in - 2];

             x = SHL (dp[in - 1], dl, cnt);
             umul_ppmm (h, l, x, qp[qn - 1]);

             if (n2p[qn - 1] < h)
              {
                mp_limb_t cy;

                mpn_decr_u (qp, (mp_limb_t) 1);
                cy = mpn_add_n (n2p, n2p, d2p, qn);
                if (cy)
                  {
                    /* The partial remainder is safely large.  */
                    n2p[qn] = cy;
                    ++rn;
                  }
              }
           }

           quotient_too_large = 0;
           if (cnt != 0)
             {
              mp_limb_t cy1, cy2;

              /* Append partially used numerator limb to partial remainder.  */
              cy1 = mpn_lshift (n2p, n2p, rn, BITS_PER_MP_LIMB - cnt);
              n2p[0] |= np[in - 1] & (~(mp_limb_t) 0 >> cnt);

              /* Update partial remainder with partially used divisor limb.  */
              cy2 = mpn_submul_1 (n2p, qp, qn, dp[in - 1] & (~(mp_limb_t) 0 >> cnt));
              if (qn != rn)
                {
#ifdef DEBUG
                  if (n2p[qn] < cy2)
                    abort ();
#endif
                  n2p[qn] -= cy2;
                }
              else
                {
                  n2p[qn] = cy1 - cy2;

                  quotient_too_large = (cy1 < cy2);
                  ++rn;
                }
              --in;
             }
           /* True: partial remainder now is neutral, i.e., it is not shifted up.  */

           tp = (mp_ptr) TMP_ALLOC (dn * BYTES_PER_MP_LIMB);

           if (in < qn)
             {
              if (in == 0)
                {
                  MPN_COPY (rp, n2p, rn);
#ifdef DEBUG
                  if (rn != dn)
                    abort ();
#endif
                  goto foo;
                }
              mpn_mul (tp, qp, qn, dp, in);
             }
           else
             mpn_mul (tp, dp, in, qp, qn);

           cy = mpn_sub (n2p, n2p, rn, tp + in, qn);
           MPN_COPY (rp + in, n2p, dn - in);
           quotient_too_large |= cy;
           cy = mpn_sub_n (rp, np, tp, in);
           cy = mpn_sub_1 (rp + in, rp + in, rn, cy);
           quotient_too_large |= cy;
         foo:
           /* Final correction: if the estimated quotient turned out one
              too large, decrement it and add the divisor back into the
              remainder.  */
           if (quotient_too_large)
             {
              mpn_decr_u (qp, (mp_limb_t) 1);
              mpn_add_n (rp, rp, dp, dn);
             }
         }
       TMP_FREE (marker);
       return;
      }
    }
}

Here is the call graph for this function:

Here is the caller graph for this function:

void mpn_toom3_mul_n ( mp_ptr  p,
mp_srcptr  a,
mp_srcptr  b,
mp_size_t  n,
mp_ptr  ws 
)

First stage: evaluation at points 0, 1/2, 1, 2, oo.

Second stage: pointwise multiplies.

Third stage: interpolation.

Final stage: add up the coefficients.

Definition at line 1282 of file gmp.c.

{
  /* Toom-3 multiplication: compute the 2n-limb product of {a, n} and
     {b, n} into p, using ws as scratch.  The operands are split into
     three chunks, evaluated at five points (0, 1/2, 1, 2, oo),
     multiplied pointwise, interpolated, and recombined.  */
  mp_limb_t cB,cC,cD, dB,dC,dD, tB,tC,tD;
  mp_limb_t *A,*B,*C,*D,*E, *W;
  mp_size_t l,l2,l3,l4,l5,ls;

  SCHEME_BIGNUM_USE_FUEL(n);

  /* Break n words into chunks of size l, l and ls.
   * n = 3*k   => l = k,   ls = k
   * n = 3*k+1 => l = k+1, ls = k-1
   * n = 3*k+2 => l = k+1, ls = k
   */
  {
    mp_limb_t m;

    ASSERT (n >= TOOM3_MUL_THRESHOLD);
    l = ls = n / 3;
    m = n - l * 3;
    if (m != 0)
      ++l;
    if (m == 1)
      --ls;

    l2 = l * 2;
    l3 = l * 3;
    l4 = l * 4;
    l5 = l * 5;
    A = p;
    B = ws;
    C = p + l2;
    D = ws + l2;
    E = p + l4;
    W = ws + l4;
  }

  /* First stage: evaluation at points 0, 1/2, 1, 2, oo.  */
  evaluate3 (A, B, C, &cB, &cC, &cD, a, a + l, a + l2, l, ls);
  evaluate3 (A + l, B + l, C + l, &dB, &dC, &dD, b, b + l, b + l2, l, ls);

  /* Second stage: pointwise multiplies, folding the carry limbs
     (cX/dX) of each evaluation into the high half of each product.  */
  TOOM3_MUL_REC(D, C, C + l, l, W);
  tD = cD*dD;
  if (cD) tD += mpn_addmul_1 (D + l, C + l, l, cD);
  if (dD) tD += mpn_addmul_1 (D + l, C, l, dD);
  ASSERT (tD < 49);
  TOOM3_MUL_REC(C, B, B + l, l, W);
  tC = cC*dC;
  /* TO DO: choose one of the following alternatives. */
#if 0
  if (cC) tC += mpn_addmul_1 (C + l, B + l, l, cC);
  if (dC) tC += mpn_addmul_1 (C + l, B, l, dC);
#else
  if (cC)
    {
      if (cC == 1) tC += mpn_add_n (C + l, C + l, B + l, l);
      else tC += add2Times (C + l, C + l, B + l, l);
    }
  if (dC)
    {
      if (dC == 1) tC += mpn_add_n (C + l, C + l, B, l);
      else tC += add2Times (C + l, C + l, B, l);
    }
#endif
  ASSERT (tC < 9);
  TOOM3_MUL_REC(B, A, A + l, l, W);
  tB = cB*dB;
  if (cB) tB += mpn_addmul_1 (B + l, A + l, l, cB);
  if (dB) tB += mpn_addmul_1 (B + l, A, l, dB);
  ASSERT (tB < 49);
  TOOM3_MUL_REC(A, a, b, l, W);
  TOOM3_MUL_REC(E, a + l2, b + l2, ls, W);

  /* Third stage: interpolation.  */
  interpolate3 (A, B, C, D, E, &tB, &tC, &tD, l2, ls << 1);

  /* Final stage: add up the coefficients into p.  */
  {
    /* mp_limb_t i, x, y; */
    tB += mpn_add_n (p + l, p + l, B, l2);
    tD += mpn_add_n (p + l3, p + l3, D, l2);
    mpn_incr_u (p + l3, tB);
    mpn_incr_u (p + l4, tC);
    mpn_incr_u (p + l5, tD);
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

void mpn_toom3_sqr_n ( mp_ptr  p,
mp_srcptr  a,
mp_size_t  n,
mp_ptr  ws 
)

First stage: evaluation at points 0, 1/2, 1, 2, oo.

Second stage: pointwise multiplies.

Third stage: interpolation.

Final stage: add up the coefficients.

Definition at line 1393 of file gmp.c.

{
  /* Toom-3 squaring: compute the 2n-limb square of {a, n} into p,
     using ws as scratch.  Same structure as mpn_toom3_mul_n, but with
     a single operand so only one evaluation pass and squared
     pointwise products are needed.  */
  mp_limb_t cB,cC,cD, tB,tC,tD;
  mp_limb_t *A,*B,*C,*D,*E, *W;
  mp_size_t l,l2,l3,l4,l5,ls;

  SCHEME_BIGNUM_USE_FUEL(n);

  /* Break n words into chunks of size l, l and ls.
   * n = 3*k   => l = k,   ls = k
   * n = 3*k+1 => l = k+1, ls = k-1
   * n = 3*k+2 => l = k+1, ls = k
   */
  {
    mp_limb_t m;

    ASSERT (n >= TOOM3_MUL_THRESHOLD);
    l = ls = n / 3;
    m = n - l * 3;
    if (m != 0)
      ++l;
    if (m == 1)
      --ls;

    l2 = l * 2;
    l3 = l * 3;
    l4 = l * 4;
    l5 = l * 5;
    A = p;
    B = ws;
    C = p + l2;
    D = ws + l2;
    E = p + l4;
    W = ws + l4;
  }

  /* First stage: evaluation at points 0, 1/2, 1, 2, oo.  */
  evaluate3 (A, B, C, &cB, &cC, &cD, a, a + l, a + l2, l, ls);

  /* Second stage: pointwise squarings, folding each evaluation's
     carry limb (cX) into the high half of the corresponding square.  */
  TOOM3_SQR_REC(D, C, l, W);
  tD = cD*cD;
  if (cD) tD += mpn_addmul_1 (D + l, C, l, 2*cD);
  ASSERT (tD < 49);
  TOOM3_SQR_REC(C, B, l, W);
  tC = cC*cC;
  /* TO DO: choose one of the following alternatives. */
#if 0
  if (cC) tC += mpn_addmul_1 (C + l, B, l, 2*cC);
#else
  if (cC >= 1)
    {
      tC += add2Times (C + l, C + l, B, l);
      if (cC == 2)
        tC += add2Times (C + l, C + l, B, l);
    }
#endif
  ASSERT (tC < 9);
  TOOM3_SQR_REC(B, A, l, W);
  tB = cB*cB;
  if (cB) tB += mpn_addmul_1 (B + l, A, l, 2*cB);
  ASSERT (tB < 49);
  TOOM3_SQR_REC(A, a, l, W);
  TOOM3_SQR_REC(E, a + l2, ls, W);

  /* Third stage: interpolation.  */
  interpolate3 (A, B, C, D, E, &tB, &tC, &tD, l2, ls << 1);

  /* Final stage: add up the coefficients into p.  */
  {
    /* mp_limb_t i, x, y; */
    tB += mpn_add_n (p + l, p + l, B, l2);
    tD += mpn_add_n (p + l3, p + l3, D, l2);
    mpn_incr_u (p + l3, tB);
    mpn_incr_u (p + l4, tC);
    mpn_incr_u (p + l5, tD);
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

Definition at line 1780 of file bignum.c.

{
  /* Consume scheme "fuel" proportional to n via SCHEME_USE_FUEL;
     presumably lets the scheduler interrupt long bignum computations —
     NOTE(review): purpose inferred from the macro name, confirm.  */
#ifdef MZ_PRECISE_GC
# ifndef GC_STACK_CALLEE_RESTORE
  char *stupid; /* forces __gc_var_stack__ */
# endif
#endif

  SCHEME_USE_FUEL(n);

#ifdef MZ_PRECISE_GC
# ifndef GC_STACK_CALLEE_RESTORE
  /* Restore variable stack. */
  /* NOTE(review): `stupid` is read uninitialized on purpose — the
     test exists only to keep the variable (and hence the GC's
     __gc_var_stack__ frame) live; its value is indeterminate.  */
  if (!stupid)
    GC_variable_stack = (void **)__gc_var_stack__[0];
# endif
#endif
}
void scheme_free_gmp ( void *  p,
void **  mem_pool 
)

Definition at line 7602 of file thread.c.

{
  /* GMP allocations are tracked as a list of raw pairs rooted at
     *mem_pool; a free is only valid for the block at the head of that
     list, so anything else is reported as fatal.  */
  void *head = SCHEME_CAR(*mem_pool);

  if (head != p)
    scheme_log(NULL,
               SCHEME_LOG_FATAL,
               0,
               "bad GMP memory free");

  /* Pop the head pair off the pool list.  */
  *mem_pool = SCHEME_CDR(*mem_pool);
}
void scheme_gmp_tls_init ( long *  s)

Definition at line 5788 of file gmp.c.

{
  /* Initialize a per-thread GMP state record s: zeroed allocation
     counters (slots 0-1, cf. scheme_gmp_tls_load/unload), the dummy
     tmp_stack `xxx` as the current stack (slot 2), and a tmp_marker
     (slots 3+) pointing at that same dummy stack.  */
  tmp_marker *tm = (tmp_marker *)(s + 3);

  s[0] = 0;                /* current_total_allocation */
  s[1] = 0;                /* max_total_allocation */
  s[2] = (long)&xxx;       /* current tmp_stack */
  tm->which_chunk = &xxx;
  tm->alloc_point = &xxx;
}

Here is the caller graph for this function:

void* scheme_gmp_tls_load ( long *  s)

Definition at line 5797 of file gmp.c.

{
  /* Capture the GMP allocator's global state — allocation counters and
     the current tmp_stack — into the record s, and return the current
     mem_pool so the caller can save that as well.  Counterpart of
     scheme_gmp_tls_unload.  */
  s[0] = (long)current_total_allocation;
  s[1] = (long)max_total_allocation;
  s[2] = (long)current;
  return mem_pool;
}

Here is the caller graph for this function:

void scheme_gmp_tls_restore_snapshot ( long *  s,
void *  data,
long *  save,
int  do_free 
)

Definition at line 5821 of file gmp.c.

{
  /* Roll a thread's GMP state back to a snapshot taken by
     scheme_gmp_tls_snapshot.  do_free != 0 frees tmp allocations made
     since the snapshot; do_free == 2 additionally means s is not the
     currently installed state, so the live state is swapped out into
     `other` first and swapped back in at the end.  `save`, if
     non-NULL, holds the tmp-marker slots recorded by the snapshot.  */
  long other[6];
  void *other_data;

  if (do_free == 2) {
    /* Temporarily install s as the live state.  */
    other_data = scheme_gmp_tls_load(other);
    scheme_gmp_tls_unload(s, data);
  } else
    other_data = NULL;

  if (do_free)
    /* Release tmp allocations made after the snapshot's mark.  */
    __gmp_tmp_free((tmp_marker *)(s + 3));  

  if (save) {
    /* Restore the saved tmp marker into slots 3 and 4.  */
    s[3] = save[0];
    s[4] = save[1];
    
  }

  if (do_free == 2) {
    /* Swap the original live state back in.  */
    data = scheme_gmp_tls_load(s);
    scheme_gmp_tls_unload(other, other_data);
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

void scheme_gmp_tls_snapshot ( long *  s,
long *  save 
)

Definition at line 5814 of file gmp.c.

{
  /* Record a snapshot of state s's tmp-marker slots (s[3], s[4]) into
     save[0..1], and mark the tmp allocation area so a later
     scheme_gmp_tls_restore_snapshot can free back to this point.  */
  save[0] = s[3];
  save[1] = s[4];
  __gmp_tmp_mark((tmp_marker *)(s + 3));
}

Here is the call graph for this function:

Here is the caller graph for this function:

void scheme_gmp_tls_unload ( long *  s,
void *  data 
)

Definition at line 5805 of file gmp.c.

{
  /* Install a previously captured GMP allocator state from s (see
     scheme_gmp_tls_load) along with its mem_pool (`data`).  s[0] is
     cleared afterwards so the record no longer claims an allocation
     total.  */
  current_total_allocation = (unsigned long)s[0];
  max_total_allocation = (unsigned long)s[1];
  current = (tmp_stack *)s[2];
  s[0] = 0;
  mem_pool = data;
}

Here is the caller graph for this function:

void* scheme_malloc_gmp ( unsigned long  amt,
void **  mem_pool 
)

Definition at line 7585 of file thread.c.

{
  /* Allocate amt bytes of atomic (pointer-free) memory for GMP and
     record the block on the pool's list of live GMP allocations so it
     can be released later (see scheme_free_gmp).  */
  void *block;

#ifdef MZ_PRECISE_GC      
  /* Under precise GC, round the request up so the collector keeps the
     block in place (per GC_malloc_stays_put_threshold's name).  */
  if (amt < GC_malloc_stays_put_threshold())
    amt = GC_malloc_stays_put_threshold();
#endif

  block = scheme_malloc_atomic(amt);

  /* Push the new block onto the pool list as a raw pair.  */
  *mem_pool = scheme_make_raw_pair(block, *mem_pool);

  return block;
}

Here is the call graph for this function:


Variable Documentation

const unsigned char __clz_tab[]
Initial value:
{
  0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
}

Definition at line 5672 of file gmp.c.

const unsigned char approx_tab[192] [static]
Initial value:
  {
    128,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,
    143,144,144,145,146,147,148,149,150,150,151,152,153,154,155,155,
    156,157,158,159,160,160,161,162,163,163,164,165,166,167,167,168,
    169,170,170,171,172,173,173,174,175,176,176,177,178,178,179,180,
    181,181,182,183,183,184,185,185,186,187,187,188,189,189,190,191,
    192,192,193,193,194,195,195,196,197,197,198,199,199,200,201,201,
    202,203,203,204,204,205,206,206,207,208,208,209,209,210,211,211,
    212,212,213,214,214,215,215,216,217,217,218,218,219,219,220,221,
    221,222,222,223,224,224,225,225,226,226,227,227,228,229,229,230,
    230,231,231,232,232,233,234,234,235,235,236,236,237,237,238,238,
    239,240,240,241,241,242,242,243,243,244,244,245,245,246,246,247,
    247,248,248,249,249,250,250,251,251,252,252,253,253,254,254,255
  }

Definition at line 2865 of file gmp.c.

tmp_stack* current = &xxx [static]

Definition at line 5695 of file gmp.c.

unsigned long current_total_allocation = 0 [static]

Definition at line 5692 of file gmp.c.

unsigned long max_total_allocation = 0 [static]

Definition at line 5691 of file gmp.c.

void* mem_pool = 0 [static]

Definition at line 26 of file gmp.c.

const unsigned char modlimb_invert_table[128]
Initial value:
 {
  0x01, 0xAB, 0xCD, 0xB7, 0x39, 0xA3, 0xC5, 0xEF,
  0xF1, 0x1B, 0x3D, 0xA7, 0x29, 0x13, 0x35, 0xDF,
  0xE1, 0x8B, 0xAD, 0x97, 0x19, 0x83, 0xA5, 0xCF,
  0xD1, 0xFB, 0x1D, 0x87, 0x09, 0xF3, 0x15, 0xBF,
  0xC1, 0x6B, 0x8D, 0x77, 0xF9, 0x63, 0x85, 0xAF,
  0xB1, 0xDB, 0xFD, 0x67, 0xE9, 0xD3, 0xF5, 0x9F,
  0xA1, 0x4B, 0x6D, 0x57, 0xD9, 0x43, 0x65, 0x8F,
  0x91, 0xBB, 0xDD, 0x47, 0xC9, 0xB3, 0xD5, 0x7F,
  0x81, 0x2B, 0x4D, 0x37, 0xB9, 0x23, 0x45, 0x6F,
  0x71, 0x9B, 0xBD, 0x27, 0xA9, 0x93, 0xB5, 0x5F,
  0x61, 0x0B, 0x2D, 0x17, 0x99, 0x03, 0x25, 0x4F,
  0x51, 0x7B, 0x9D, 0x07, 0x89, 0x73, 0x95, 0x3F,
  0x41, 0xEB, 0x0D, 0xF7, 0x79, 0xE3, 0x05, 0x2F,
  0x31, 0x5B, 0x7D, 0xE7, 0x69, 0x53, 0x75, 0x1F,
  0x21, 0xCB, 0xED, 0xD7, 0x59, 0xC3, 0xE5, 0x0F,
  0x11, 0x3B, 0x5D, 0xC7, 0x49, 0x33, 0x55, 0xFF
}

Definition at line 5093 of file gmp.c.

tmp_stack xxx = {&xxx, &xxx, 0} [static]

Definition at line 5694 of file gmp.c.