diff --git a/CMake/cmake_config.h.in b/CMake/cmake_config.h.in index 216f07ce47..696927d56a 100644 --- a/CMake/cmake_config.h.in +++ b/CMake/cmake_config.h.in @@ -20,6 +20,9 @@ #cmakedefine01 FLINT_KNOW_STRONG_ORDER +#cmakedefine FLINT_BITS @FLINT_BITS@ +#cmakedefine01 FLINT_LONG_LONG + /* NOTE: Here we assume this is how it works. */ #if defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__) # define HAVE__ALIGNED_MALLOC 1 diff --git a/CMakeLists.txt b/CMakeLists.txt index 6692a31082..bd94f6f78b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -321,14 +321,18 @@ check_c_source_compiles([[#include #ifndef _LONG_LONG_LIMB # error mp_limb_t != unsigned long long limb #endif - void main(){};]] LONG_LONG_LIMB) + void main(){};]] FLINT_LONG_LONG) -if(LONG_LONG_LIMB) - set(ULONG "unsigned long long int") - set(SLONG "long long int") +check_c_source_compiles([[#include + #if GMP_LIMB_BITS == 32 + # error + #endif + void main(){};]] FLINT64) + +if(FLINT64) + set(FLINT_BITS 64) else() - set(ULONG "unsigned long int") - set(SLONG "long int") + set(FLINT_BITS 32) endif() # Populate headers @@ -345,7 +349,7 @@ configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/src/fmpz/fmpz.c COPYONLY ) -if(LONG_LONG_LIMB) +if(FLINT_LONG_LONG) configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/src/gmpcompat-longlong.h.in ${CMAKE_CURRENT_SOURCE_DIR}/src/gmpcompat.h diff --git a/code_conventions.md b/code_conventions.md index 5550d617b4..4623486ebd 100644 --- a/code_conventions.md +++ b/code_conventions.md @@ -27,11 +27,11 @@ the following primitive datatype should be used: | | | |:--------------------------------------:|:------------------------------:| | bit counts up to a single limb | `ulong` | -| bit counts, multiprecision | `mp_bitcnt_t` | +| bit counts, multiprecision | `flint_bitcnt_t` | | byte counts (strings) | `size_t` | -| limb counts in multiprecision integers | `mp_size_t` | -| limbs (unsigned/signed) | `mp_limb_t`/`mp_limb_signed_t` | -| `mp_limb_t` arrays | `mp_ptr`/`mp_srcptr` | +| limb counts in multiprecision integers | `slong` | +| limbs (unsigned/signed) | `ulong`/`slong` | +| `ulong` arrays | `nn_ptr`/`nn_srcptr` | | ui/si function constants | `ulong`/`slong` | | exponents (unsigned/signed) | `ulong`/`slong` | | polynomial lengths | `slong` | @@ -43,11 +43,11 @@ The typical definitions of these in terms of primitive types are: | | | |:-------------:|:---------------------------------------:| -| `mp_bitcnt_t` | `unsigned long` or `unsigned long long` | -| `mp_size_t` | `long` or `long long` | -| `mp_limb_t` | `unsigned long` or `unsigned long long` | -| `mp_ptr` | `mp_limb_t *` | -| `mp_srcptr` | `const mp_limb_t *` | +| `flint_bitcnt_t` | `unsigned long` or `unsigned long long` | +| `slong` | `long` or `long long` | +| `ulong` | `unsigned long` or `unsigned long long` | +| `nn_ptr` | `ulong *` | +| `nn_srcptr` | `const ulong *` | | `slong` | `long` or `long long` | | `ulong` | `unsigned long` or `unsigned long long` | diff --git a/configure.ac b/configure.ac index 06d83213f0..76b90392ae 100644 --- a/configure.ac +++ b/configure.ac @@ -936,6 +936,11 @@ FLINT_GMP_LONG_LONG_LIMB([SLONG="long long int" AC_SUBST(SLONG) AC_SUBST(ULONG) +if test "$flint_cv_gmp_long_long_limb" = "yes"; +then + AC_DEFINE(FLINT_LONG_LONG,1,[Define to use long long limbs]) +fi + AC_CONFIG_LINKS([src/gmpcompat.h:src/$gmpcompat_h_in],[],[gmpcompat_h_in="$gmpcompat_h_in"]) AC_SUBST(GMPCOMPAT_H_IN, $gmpcompat_h_in) @@ -951,6 +956,8 @@ FLINT_CHECK_MPFR_H(4,1,0) FLINT_ABI +AC_DEFINE_UNQUOTED(FLINT_BITS,$flint_cv_abi,[Define 
according to the ABI FLINT was compiled with]) + ################################################################################ # check headers ################################################################################ diff --git a/dev/gen_mul_basecase.py b/dev/gen_mul_basecase.py index 5833138dcb..b3d08e32eb 100644 --- a/dev/gen_mul_basecase.py +++ b/dev/gen_mul_basecase.py @@ -12,9 +12,9 @@ """ Generic version of mul_n: -void flint_mpn_mul_n_basecase(mp_ptr res, mp_srcptr u, mp_srcptr v, mp_size_t n) +void flint_mpn_mul_n_basecase(nn_ptr res, nn_srcptr u, nn_srcptr v, slong n) { - mp_limb_t b, a; + ulong b, a; slong i; NN_MUL_1X1(a, res[0], u[0], v[0]); @@ -40,9 +40,9 @@ """ def mul1v(n): - print("mp_limb_t flint_mpn_mul_%i_1v(mp_ptr res, mp_srcptr u, mp_limb_t v0)" % n) + print("ulong flint_mpn_mul_%i_1v(nn_ptr res, nn_srcptr u, ulong v0)" % n) print("{") - print(" mp_limb_t a;") + print(" ulong a;") print(" NN_MUL_1X1(a, res[0], u[0], v0);") for i in range(1, n-1): print(" NN_ADDMUL_S2_A2_1X1(a, res[%i], 0, a, u[%i], v0);" % (i, i)) @@ -52,9 +52,9 @@ def mul1v(n): def mulnm(n, m): if m == 1: - print("mp_limb_t flint_mpn_mul_%i_1(mp_ptr res, mp_srcptr u, mp_srcptr v)" % n) + print("ulong flint_mpn_mul_%i_1(nn_ptr res, nn_srcptr u, nn_srcptr v)" % n) print("{") - print(" mp_limb_t a, v0 = v[0];") + print(" ulong a, v0 = v[0];") print(" NN_MUL_1X1(a, res[0], u[0], v0);") for i in range(1, n-1): print(" NN_ADDMUL_S2_A2_1X1(a, res[%i], 0, a, u[%i], v0);" % (i, i)) @@ -62,10 +62,10 @@ def mulnm(n, m): print(" return res[%i];" % n) print("}") elif m == 2: - print("mp_limb_t flint_mpn_mul_%i_%i(mp_ptr res, mp_srcptr u, mp_srcptr v)" % (n, m)) + print("ulong flint_mpn_mul_%i_%i(nn_ptr res, nn_srcptr u, nn_srcptr v)" % (n, m)) print("{") - print(" mp_limb_t b, a;") - print(" mp_limb_t w[2];") + print(" ulong b, a;") + print(" ulong w[2];") print(" w[0] = v[0];") print(" w[1] = v[1];") print(" NN_MUL_1X1(a, res[0], u[0], w[0]);") @@ -80,9 +80,9 @@ def mulnm(n, m): print(" return res[%i];" % (n + m - 1)) print("}") else: - print("mp_limb_t flint_mpn_mul_%i_%i(mp_ptr res, mp_srcptr u, mp_srcptr v)" % (n, m)) + print("ulong flint_mpn_mul_%i_%i(nn_ptr res, nn_srcptr u, nn_srcptr v)" % (n, m)) print("{") - print(" mp_limb_t b, a;") + print(" ulong b, a;") print(" NN_MUL_1X1(a, res[0], u[0], v[0]);") print(" NN_DOTREV_S3_A3_1X1(b, a, res[1], 0, 0, a, u, v, 2);") for i in range(2, m): @@ -98,7 +98,7 @@ def mulnm(n, m): for n in range(2, 8): for m in range(1, n+1): if n >= 8 and m >= 5: - print("void flint_mpn_mul_%i_%i(mp_ptr res, mp_srcptr u, mp_srcptr v)" % (n, m)) + print("void flint_mpn_mul_%i_%i(nn_ptr res, nn_srcptr u, nn_srcptr v)" % (n, m)) print("{") print(" __gmpn_mul_basecase(res, u, %i, v, %i);" % (n, m)) print("}") @@ -114,9 +114,9 @@ def mulnm(n, m): #print() def mulhigh_n(n): - print("mp_limb_t flint_mpn_mulhigh_%i_generic(mp_ptr res, mp_srcptr u, mp_srcptr v)" % n) + print("ulong flint_mpn_mulhigh_%i_generic(nn_ptr res, nn_srcptr u, nn_srcptr v)" % n) print("{") - print(" mp_limb_t b, a, low;") + print(" ulong b, a, low;") print(" NN_DOTREV_S3_1X1_HIGH(b, a, u, v, %i);" % (n - 1)) print(" NN_DOTREV_S3_A3_1X1(b, a, low, 0, b, a, u, v, %i);" % n) for i in range(n - 2): diff --git a/doc/source/arb.rst b/doc/source/arb.rst index 14bf55f7f1..0dd829f8d9 100644 --- a/doc/source/arb.rst +++ b/doc/source/arb.rst @@ -1649,9 +1649,9 @@ Other special functions Internals for computing elementary functions ------------------------------------------------------------------------------- -.. 
function:: void _arb_atan_taylor_naive(mp_ptr y, mp_limb_t * error, mp_srcptr x, mp_size_t xn, ulong N, int alternating) +.. function:: void _arb_atan_taylor_naive(nn_ptr y, ulong * error, nn_srcptr x, slong xn, ulong N, int alternating) -.. function:: void _arb_atan_taylor_rs(mp_ptr y, mp_limb_t * error, mp_srcptr x, mp_size_t xn, ulong N, int alternating) +.. function:: void _arb_atan_taylor_rs(nn_ptr y, ulong * error, nn_srcptr x, slong xn, ulong N, int alternating) Computes an approximation of `y = \sum_{k=0}^{N-1} x^{2k+1} / (2k+1)` (if *alternating* is 0) or `y = \sum_{k=0}^{N-1} (-1)^k x^{2k+1} / (2k+1)` @@ -1663,9 +1663,9 @@ Internals for computing elementary functions The input *x* and output *y* are fixed-point numbers with *xn* fractional limbs. A bound for the ulp error is written to *error*. -.. function:: void _arb_exp_taylor_naive(mp_ptr y, mp_limb_t * error, mp_srcptr x, mp_size_t xn, ulong N) +.. function:: void _arb_exp_taylor_naive(nn_ptr y, ulong * error, nn_srcptr x, slong xn, ulong N) -.. function:: void _arb_exp_taylor_rs(mp_ptr y, mp_limb_t * error, mp_srcptr x, mp_size_t xn, ulong N) +.. function:: void _arb_exp_taylor_rs(nn_ptr y, ulong * error, nn_srcptr x, slong xn, ulong N) Computes an approximation of `y = \sum_{k=0}^{N-1} x^k / k!`. Used internally for computing exponentials. The *naive* version uses the forward recurrence, @@ -1678,9 +1678,9 @@ Internals for computing elementary functions A bound for the ulp error is written to *error*. -.. function:: void _arb_sin_cos_taylor_naive(mp_ptr ysin, mp_ptr ycos, mp_limb_t * error, mp_srcptr x, mp_size_t xn, ulong N) +.. function:: void _arb_sin_cos_taylor_naive(nn_ptr ysin, nn_ptr ycos, ulong * error, nn_srcptr x, slong xn, ulong N) -.. function:: void _arb_sin_cos_taylor_rs(mp_ptr ysin, mp_ptr ycos, mp_limb_t * error, mp_srcptr x, mp_size_t xn, ulong N, int sinonly, int alternating) +.. function:: void _arb_sin_cos_taylor_rs(nn_ptr ysin, nn_ptr ycos, ulong * error, nn_srcptr x, slong xn, ulong N, int sinonly, int alternating) Computes approximations of `y_s = \sum_{k=0}^{N-1} (-1)^k x^{2k+1} / (2k+1)!` and `y_c = \sum_{k=0}^{N-1} (-1)^k x^{2k} / (2k)!`. @@ -1698,13 +1698,13 @@ Internals for computing elementary functions the hyperbolic sine is computed (this is currently only intended to be used together with *sinonly*). -.. function:: int _arb_get_mpn_fixed_mod_log2(mp_ptr w, fmpz_t q, mp_limb_t * error, const arf_t x, mp_size_t wn) +.. function:: int _arb_get_mpn_fixed_mod_log2(nn_ptr w, fmpz_t q, ulong * error, const arf_t x, slong wn) Attempts to write `w = x - q \log(2)` with `0 \le w < \log(2)`, where *w* is a fixed-point number with *wn* limbs and ulp error *error*. Returns success. -.. function:: int _arb_get_mpn_fixed_mod_pi4(mp_ptr w, fmpz_t q, int * octant, mp_limb_t * error, const arf_t x, mp_size_t wn) +.. function:: int _arb_get_mpn_fixed_mod_pi4(nn_ptr w, fmpz_t q, int * octant, ulong * error, const arf_t x, slong wn) Attempts to write `w = |x| - q \pi/4` with `0 \le w < \pi/4`, where *w* is a fixed-point number with *wn* limbs and ulp error *error*. diff --git a/doc/source/arf.rst b/doc/source/arf.rst index 969a4819c5..e35b3d23d2 100644 --- a/doc/source/arf.rst +++ b/doc/source/arf.rst @@ -731,7 +731,7 @@ Complex arithmetic Low-level methods ------------------------------------------------------------------------------- -.. function:: int _arf_get_integer_mpn(mp_ptr y, mp_srcptr xp, mp_size_t xn, slong exp) +.. 
function:: int _arf_get_integer_mpn(nn_ptr y, nn_srcptr xp, slong xn, slong exp) Given a floating-point number *x* represented by *xn* limbs at *xp* and an exponent *exp*, writes the integer part of *x* to @@ -741,7 +741,7 @@ Low-level methods Assumes that ``xp[0]`` is nonzero and that the top bit of ``xp[xn-1]`` is set. -.. function:: int _arf_set_mpn_fixed(arf_t z, mp_srcptr xp, mp_size_t xn, mp_size_t fixn, int negative, slong prec, arf_rnd_t rnd) +.. function:: int _arf_set_mpn_fixed(arf_t z, nn_srcptr xp, slong xn, slong fixn, int negative, slong prec, arf_rnd_t rnd) Sets *z* to the fixed-point number having *xn* total limbs and *fixn* fractional limbs, negated if *negative* is set, rounding *z* to *prec* @@ -755,7 +755,7 @@ Low-level methods Sets *z* to the integer *x*, negated if *sgnbit* is 1, rounded to *prec* bits in the direction specified by *rnd*. There are no assumptions on *x*. -.. function:: int _arf_set_round_uiui(arf_t z, slong * fix, mp_limb_t hi, mp_limb_t lo, int sgnbit, slong prec, arf_rnd_t rnd) +.. function:: int _arf_set_round_uiui(arf_t z, slong * fix, ulong hi, ulong lo, int sgnbit, slong prec, arf_rnd_t rnd) Sets the mantissa of *z* to the two-limb mantissa given by *hi* and *lo*, negated if *sgnbit* is 1, rounded to *prec* bits in the direction specified @@ -763,7 +763,7 @@ Low-level methods Writes the exponent shift to *fix* without writing the exponent of *z* directly. -.. function:: int _arf_set_round_mpn(arf_t z, slong * exp_shift, mp_srcptr x, mp_size_t xn, int sgnbit, slong prec, arf_rnd_t rnd) +.. function:: int _arf_set_round_mpn(arf_t z, slong * exp_shift, nn_srcptr x, slong xn, int sgnbit, slong prec, arf_rnd_t rnd) Sets the mantissa of *z* to the mantissa given by the *xn* limbs in *x*, negated if *sgnbit* is 1, rounded to *prec* bits in the direction diff --git a/doc/source/arith.rst b/doc/source/arith.rst index d1980e6264..536b079107 100644 --- a/doc/source/arith.rst +++ b/doc/source/arith.rst @@ -150,7 +150,7 @@ Bell numbers running in time `O(n^2 \log^{O(1)} n)`. The default version chooses an algorithm automatically. -.. function:: mp_limb_t arith_bell_number_nmod(ulong n, nmod_t mod) +.. function:: ulong arith_bell_number_nmod(ulong n, nmod_t mod) Computes the Bell number `B_n` modulo an integer given by ``mod``. @@ -170,10 +170,10 @@ Bell numbers calling ``arith_bell_number_nmod_vec`` and reading the last coefficient. -.. function:: void arith_bell_number_nmod_vec(mp_ptr b, slong n, nmod_t mod) - void arith_bell_number_nmod_vec_recursive(mp_ptr b, slong n, nmod_t mod) - void arith_bell_number_nmod_vec_ogf(mp_ptr b, slong n, nmod_t mod) - int arith_bell_number_nmod_vec_series(mp_ptr b, slong n, nmod_t mod) +.. function:: void arith_bell_number_nmod_vec(nn_ptr b, slong n, nmod_t mod) + void arith_bell_number_nmod_vec_recursive(nn_ptr b, slong n, nmod_t mod) + void arith_bell_number_nmod_vec_ogf(nn_ptr b, slong n, nmod_t mod) + int arith_bell_number_nmod_vec_series(nn_ptr b, slong n, nmod_t mod) Sets `b` to the vector of Bell numbers `B_0, B_1, \ldots, B_{n-1}` inclusive modulo an integer given by ``mod``. @@ -433,7 +433,7 @@ Number of partitions Computes first ``len`` values of the partition function `p(n)` starting with `p(0)`. Uses inversion of Euler's pentagonal series. -.. function:: void arith_number_of_partitions_nmod_vec(mp_ptr res, slong len, nmod_t mod) +.. 
function:: void arith_number_of_partitions_nmod_vec(nn_ptr res, slong len, nmod_t mod) Computes first ``len`` values of the partition function `p(n)` starting with `p(0)`, modulo the modulus defined by ``mod``. @@ -443,7 +443,7 @@ Number of partitions Initializes ``prod``. This is an inline function only. -.. function:: void arith_hrr_expsum_factored(trig_prod_t prod, mp_limb_t k, mp_limb_t n) +.. function:: void arith_hrr_expsum_factored(trig_prod_t prod, ulong k, ulong n) Symbolically evaluates the exponential sum diff --git a/doc/source/fft_small.rst b/doc/source/fft_small.rst index fb48bd9ac9..2bfda79400 100644 --- a/doc/source/fft_small.rst +++ b/doc/source/fft_small.rst @@ -31,7 +31,7 @@ Integer multiplication frees the cache. .. function:: void mpn_ctx_mpn_mul(mpn_ctx_t R, ulong * r1, const ulong * i1, ulong n1, const ulong * i2, ulong n2) - void mpn_mul_default_mpn_ctx(mp_ptr r1, mp_srcptr i1, mp_size_t n1, mp_srcptr i2, mp_size_t n2) + void mpn_mul_default_mpn_ctx(nn_ptr r1, nn_srcptr i1, slong n1, nn_srcptr i2, slong n2) Writes to ``r1`` the product of the integers ``(i1, n1)`` and ``(i2, n2)``. Assumes that `n_1 \ge n_2 \ge 1`, respectively using a given context @@ -41,7 +41,7 @@ Polynomial arithmetic --------------------------------------------------------------------------------- .. function:: void _nmod_poly_mul_mid_mpn_ctx(ulong * z, ulong zl, ulong zh, const ulong * a, ulong an, const ulong * b, ulong bn, nmod_t mod, mpn_ctx_t R) - void _nmod_poly_mul_mid_default_mpn_ctx(mp_ptr res, slong zl, slong zh, mp_srcptr a, slong an, mp_srcptr b, slong bn, nmod_t mod) + void _nmod_poly_mul_mid_default_mpn_ctx(nn_ptr res, slong zl, slong zh, nn_srcptr a, slong an, nn_srcptr b, slong bn, nmod_t mod) Writes to ``z`` the middle product containing coefficients in the range `[zl, zh)` of the product of the polynomials ``(a, an)`` and ``(b, bn)``, diff --git a/doc/source/flint.rst b/doc/source/flint.rst index 99aa6a60b3..6237c9f8b1 100644 --- a/doc/source/flint.rst +++ b/doc/source/flint.rst @@ -8,121 +8,129 @@ Macros The file ``flint.h`` contains various useful macros. -The macro constant ``FLINT_BITS`` is set at compile time to be the -number of bits per limb on the machine. FLINT requires it to be either -32 or 64 bits. Other architectures are not currently supported. - -The macro constant ``FLINT_D_BITS`` is set at compile time to be the -number of bits per double on the machine or one less than the number of -bits per limb, whichever is smaller. This will have the value `53` or `31` -on currently supported architectures. Numerous internal functions using -precomputed inverses only support operands up to ``FLINT_D_BITS`` bits, -hence the macro. - -The macro ``FLINT_ABS(x)`` returns the absolute value of `x` -for primitive signed numerical types. It might fail for least negative -values such as ``INT_MIN`` and ``WORD_MIN``. - -The macro ``FLINT_MIN(x, y)`` returns the minimum of `x` and -`y` for primitive signed or unsigned numerical types. This macro -is only safe to use when `x` and `y` are of the same type, -to avoid problems with integer promotion. - -Similar to the previous macro, ``FLINT_MAX(x, y)`` returns the -maximum of `x` and `y`. - -The macro ``FLINT_SWAP(T, x, y)`` swaps ``x`` and ``y``, where ``x`` and ``y`` -are of type ``T``. For instance, with ``x`` and ``y`` of type ``fmpz_poly_t`` , -one can write ``FLINT_SWAP(fmpz_poly_struct, *x, *y)`` to swap the content of -``x`` with the content of ``y``. +.. 
macro:: __FLINT_VERSION + __FLINT_VERSION_MINOR + __FLINT_VERSION_PATCHLEVEL -.. macro:: FLINT_SGN(x) + The major, minor and patch for current version of FLINT. - Returns the sign of `x` where `x` is interpreted as a :type:`slong`, that - is, returns `-1` if `x < 0`, `0` if `x = 0` and `1` if `x > 0`. +.. macro:: __FLINT_RELEASE -.. function:: mp_limb_t FLINT_BIT_COUNT(mp_limb_t x) + Equivalent to ``10000 * __FLINT_VERSION + 100 * __FLINT_VERSION_MINOR + + __FLINT_VERSION_PATCHLEVEL``. - Returns the number of binary bits required to represent an ``ulong x``. If - `x` is zero, returns `0`. +.. macro:: FLINT_VERSION -Derived from this there are the two macros ``FLINT_FLOG2(x)`` and -``FLINT_CLOG2(x)`` which, for any `x \geq 1`, compute `\lfloor \log_2 x \rfloor` -and `\lceil \log_2 x \rceil`. + A static text string giving the version number, e.g. ``3.1.0`` or ``3.2.0-dev``. -To determine the current FLINT version a number of macros are available. -For example, if the current FLINT version is ``2.4.0`` then -``__FLINT_VERSION`` will have the value `2`, ``__FLINT_MINOR`` -will have the value `4` and ``__FLINT_PATCHLEVEL`` will have the value -`0`. +.. macro:: FLINT_BITS -The ``__FLINT_RELEASE`` macro gives a single number representing the FLINT -version. For example, it will have the value ``20400`` for version ``2.4.0``. + The constant defining how many bits per limb on the machine. We require this + to be either 32 or 64. This constant is set during the configuration. -The ``FLINT_VERSION`` macro is a static text string giving the version -number, e.g. "2.4" or "2.4.1". Note that if the final digit is a zero -it is suppressed. +.. macro:: FLINT_D_BITS -Integer types ------------------------------------------------ + A constant set at compile time to be the number of bits per double on the + machine or one less than the number of bits per limb, whichever is smaller. + This will have the value *31* on 32-bit systems and *53* on 64-bit systems. + Numerous internal functions using precomputed inverses only support operands + up to ``FLINT_D_BITS`` bits, hence the macro. -The *char*, *short* and *int* types are assumed to be two's complement -types with exactly 8, 16 and 32 bits. This is not technically guaranteed -by the C standard, but it is true on mainstream platforms. +.. macro:: FLINT_ABS(x) -Since the C types *long* and *unsigned long* do not have a standardized size -in practice, FLINT defines *slong* and *ulong* types which are guaranteed -to be 32 bits on a 32-bit system and 64 bits on a 64-bit system. -They are also guaranteed to have the same size as GMP's :type:`mp_limb_t`. -GMP builds with a different limb size configuration are not supported at all. -For convenience, the macro *FLINT_BITS* specifies the word length (32 or 64) -of the system. + Returns the absolute value of *x* for primitive signed numerical types. It + might fail for least negative values such as *INT_MIN* and *LONG_MIN*. -.. type:: slong +.. macro:: FLINT_MIN(x, y) + FLINT_MAX(x, y) + + Returns the minimum or maximum of *x* and *y* for primitive types. This + macro is only safe to use when *x* and *y* are of the same type, to avoid + problems with integer promotion. + +.. macro:: FLINT_SWAP(T, x, y) + + Swaps *x* and *y*, both of types *T*. For instance, with *x* and *y* of type + ``fmpz_poly_t``, one can write ``FLINT_SWAP(fmpz_poly_struct, *x, *y)`` to + swap the content of *x* with the content of *y*. 
- The *slong* type is used for precisions, bit counts, loop indices, - array sizes, and the like, even when those values are known to be - nonnegative. It is also used for small integer-valued coefficients. - In method names, an *slong* parameter is denoted by *si*, for example - :func:`arb_add_si`. +.. macro:: FLINT_SGN(x) + + Returns the sign of `x` where `x` is interpreted as a :type:`slong`, that + is, returns `-1` if `x < 0`, `0` if `x = 0` and `1` if `x > 0`. + +.. function:: flint_bitcnt_t FLINT_BIT_COUNT(ulong x) - The constants *WORD_MIN* and *WORD_MAX* give the range of this type. - This type can be printed with *flint_printf* using the format string ``%wd``. + Returns the number of binary bits required to represent *x*. If *x* is zero + it returns *0*. This is an inline-function only. + +.. macro:: FLINT_FLOG2(x) + FLINT_CLOG2(x) + + For `x \ge 1`, it returns `\lfloor \log_2 x \rfloor` + and `\lceil \log_2 x \rceil`, respectively. + +Integer types +----------------------------------------------- + +The *char*, *short* and *int* types are assumed to be two's complement types +with exactly 8, 16 and 32 bits. Although this is not guaranteed prior to C23, it +is true on all mainstream platforms prior to this. + +Since the C types *long* and *unsigned long* do not have a standardised size in +practice, FLINT defines *slong* and *ulong* types which are guaranteed to be 32 +bits on a 32-bit system and 64 bits on a 64-bit system. They are also guaranteed +to have the same size as GMP's *mp_limb_t*. GMP builds with a different limb +size configuration are not supported at all. .. type:: ulong - The *ulong* type is used for integer-valued coefficients - that are known to be unsigned, and for values that require the - full 32-bit or 64-bit range. + The *ulong* type is used for integer-valued coefficients that are known to + be unsigned, and for values that require the full 32-bit or 64-bit range. In method names, a *ulong* parameter is denoted by *ui*, for example :func:`arb_add_ui`. The constant *UWORD_MAX* gives the range of this type. This type can be printed with *flint_printf* using the format string ``%wu``. -The following GMP-defined types are used in methods that manipulate the -internal representation of numbers (using limb arrays). - -.. type:: mp_limb_t - - A single limb. + This is equivalent to GMP's *mp_limb_t*. -.. type:: mp_ptr +.. type:: slong - Pointer to a writable array of limbs. + The *slong* type is used for precisions, loop indices, array sizes, and the + like, even when those values are known to be nonnegative. It is also used + for small integer-valued coefficients. In method names, an *slong* parameter + is denoted by *si*, for example :func:`arb_add_si`. -.. type:: mp_srcptr + This type can be printed with *flint_printf* using the format string ``%wd`` + or ``%{slong}``. - Pointer to a read-only array of limbs. + This is equivalent to GMP's *mp_limb_signed_t*. Furthermore, for UNIX-type + systems it is also equivalent to *mp_size_t*. -.. type:: mp_size_t +.. macro:: UWORD_MIN + UWORD_MAX + WORD_MIN + WORD_MAX - A limb count (always nonnegative). + The minimum and maximum values that a *ulong* and *slong* can hold, + respectively. .. type:: flint_bitcnt_t A bit offset within an array of limbs (always nonnegative). +.. type:: nn_ptr + + Pointer to a writable array of limbs. + + This is equivalent to GMP's *mp_ptr*. + +.. type:: nn_srcptr + + Pointer to a read-only array of limbs. + + This is equivalent to GMP's *mp_srcptr*. 
Allocation Functions @@ -130,7 +138,7 @@ Allocation Functions .. function:: void * flint_malloc(size_t size) - Allocate ``size`` bytes of memory. + Allocate *size* bytes of memory. .. function:: void * flint_realloc(void * ptr, size_t size) @@ -139,13 +147,14 @@ Allocation Functions .. function:: void * flint_calloc(size_t num, size_t size) - Allocate ``num`` objects of ``size`` bytes each, and zero the allocated memory. + Allocate *num* objects of *size* bytes each, and zero the allocated memory. .. function:: void flint_free(void * ptr) Free a section of memory allocated by :func:`flint_malloc`, :func:`flint_realloc`, or :func:`flint_calloc`. + Random Numbers ------------------ @@ -162,12 +171,6 @@ Random Numbers Initialises or clears a :type:`flint_rand_t`:. -.. function:: flint_rand_struct * flint_rand_alloc(void) - void flint_rand_free(flint_rand_s * state) - - Allocates or frees a memory block to be used as a heap-allocated - :type:`flint_rand_t`:, such as use in external libraries. The random state - is not initialised, nor is it cleared. Thread functions ----------------------- @@ -237,7 +240,7 @@ Input/Output ``printf``, ``fprintf``, ``vprintf``, and ``vfprintf``. The first extension is the addition of the length modifier ``w``, used for - printing the types :type:`ulong`, :type:`slong` and :type:`mp_limb_t`. As + printing the types :type:`ulong`, :type:`slong` and :type:`ulong`. As these types are either defined as signed and unsigned ``long int`` or ``long long int``, this comes in handy. Just like ``long int`` and ``long long int``, the conversion format specifier are allowed to be ``d``, ``i``, @@ -309,7 +312,7 @@ Input/Output .. code-block:: c slong * vslong; slong vslong_len; - mp_ptr vnmod; slong vnmod_len; /* The base type for nmod is ulong */ + nn_ptr vnmod; slong vnmod_len; /* The base type for nmod is ulong */ fmpz * vfmpz; slong vfmpz_len; /* fmpz_mod vectors are given by the type `fmpz *' */ fmpq * vfmpq; slong vfmpq_len; @@ -414,7 +417,7 @@ Input/Output These are equivalent to the standard library functions ``scanf``, ``fscanf``, and ``sscanf`` with an additional length modifier "w" for - reading an :type:`mp_limb_t` type. + reading an :type:`ulong` type. Exceptions ----------------- diff --git a/doc/source/fmpq_mpoly.rst b/doc/source/fmpq_mpoly.rst index a5b5678638..dd6fa7d4de 100644 --- a/doc/source/fmpq_mpoly.rst +++ b/doc/source/fmpq_mpoly.rst @@ -378,17 +378,17 @@ Random generation -------------------------------------------------------------------------------- -.. function:: void fmpq_mpoly_randtest_bound(fmpq_mpoly_t A, flint_rand_t state, slong length, mp_limb_t coeff_bits, ulong exp_bound, const fmpq_mpoly_ctx_t ctx) +.. function:: void fmpq_mpoly_randtest_bound(fmpq_mpoly_t A, flint_rand_t state, slong length, ulong coeff_bits, ulong exp_bound, const fmpq_mpoly_ctx_t ctx) Generate a random polynomial with length up to *length* and exponents in the range ``[0, exp_bound - 1]``. The exponents of each variable are generated by calls to ``n_randint(state, exp_bound)``. -.. function:: void fmpq_mpoly_randtest_bounds(fmpq_mpoly_t A, flint_rand_t state, slong length, mp_limb_t coeff_bits, ulong * exp_bounds, const fmpq_mpoly_ctx_t ctx) +.. function:: void fmpq_mpoly_randtest_bounds(fmpq_mpoly_t A, flint_rand_t state, slong length, ulong coeff_bits, ulong * exp_bounds, const fmpq_mpoly_ctx_t ctx) Generate a random polynomial with length up to *length* and exponents in the range ``[0, exp_bounds[i] - 1]``. 
The exponents of the variable of index *i* are generated by calls to ``n_randint(state, exp_bounds[i])``. -.. function:: void fmpq_mpoly_randtest_bits(fmpq_mpoly_t A, flint_rand_t state, slong length, mp_limb_t coeff_bits, mp_limb_t exp_bits, const fmpq_mpoly_ctx_t ctx) +.. function:: void fmpq_mpoly_randtest_bits(fmpq_mpoly_t A, flint_rand_t state, slong length, ulong coeff_bits, ulong exp_bits, const fmpq_mpoly_ctx_t ctx) Generate a random polynomial with length up to *length* and exponents whose packed form does not exceed the given bit count. diff --git a/doc/source/fmpz.rst b/doc/source/fmpz.rst index 239e9c666b..48f9d59bc7 100644 --- a/doc/source/fmpz.rst +++ b/doc/source/fmpz.rst @@ -282,13 +282,13 @@ Conversion Returns `f` as an ``ulong``. The result is undefined if `f` does not fit into an ``ulong`` or is negative. -.. function:: void fmpz_get_uiui(mp_limb_t * hi, mp_limb_t * low, const fmpz_t f) +.. function:: void fmpz_get_uiui(ulong * hi, ulong * low, const fmpz_t f) If `f` consists of two limbs, then ``*hi`` and ``*low`` are set to the high and low limbs, otherwise ``*low`` is set to the low limb and ``*hi`` is set to `0`. -.. function:: mp_limb_t fmpz_get_nmod(const fmpz_t f, nmod_t mod) +.. function:: ulong fmpz_get_nmod(const fmpz_t f, nmod_t mod) Returns `f \mod n`. @@ -325,9 +325,9 @@ Conversion Sets the ``mpz_t`` `x` to the same value as `f`. -.. function:: int fmpz_get_mpn(mp_ptr * n, fmpz_t n_in) +.. function:: int fmpz_get_mpn(nn_ptr * n, fmpz_t n_in) - Sets the ``mp_ptr`` `n` to the same value as `n_{in}`. Returned + Sets the ``nn_ptr`` `n` to the same value as `n_{in}`. Returned integer is number of limbs allocated to `n`, minimum number of limbs required to hold the value stored in `n_{in}`. @@ -362,12 +362,12 @@ Conversion Sets `f` to the given ``ulong`` value, and then negates `f`. -.. function:: void fmpz_set_uiui(fmpz_t f, mp_limb_t hi, mp_limb_t lo) +.. function:: void fmpz_set_uiui(fmpz_t f, ulong hi, ulong lo) Sets `f` to ``lo``, plus ``hi`` shifted to the left by ``FLINT_BITS``. -.. function:: void fmpz_neg_uiui(fmpz_t f, mp_limb_t hi, mp_limb_t lo) +.. function:: void fmpz_neg_uiui(fmpz_t f, ulong hi, ulong lo) Sets `f` to ``lo``, plus ``hi`` shifted to the left by ``FLINT_BITS``, and then negates `f`. @@ -427,7 +427,7 @@ Conversion in base `b`. The base `b` can vary between `2` and `62`, inclusive. Returns `0` if the string contains a valid input and `-1` otherwise. -.. function:: void fmpz_set_ui_smod(fmpz_t f, mp_limb_t x, mp_limb_t m) +.. function:: void fmpz_set_ui_smod(fmpz_t f, ulong x, ulong m) Sets `f` to the signed remainder `y \equiv x \bmod m` satisfying `-m/2 < y \leq m/2`, given `x` which is assumed to satisfy @@ -585,7 +585,7 @@ Basic properties and manipulation Returns the number of bits required to store the absolute value of `f`. If `f` is `0` then `0` is returned. -.. function:: mp_size_t fmpz_size(const fmpz_t f) +.. function:: slong fmpz_size(const fmpz_t f) Returns the number of limbs required to store the absolute value of `f`. If `f` is zero then `0` is returned. @@ -634,14 +634,14 @@ Basic properties and manipulation Test bit index `i` of `f` and return `0` or `1`, accordingly. -.. function:: mp_limb_t fmpz_abs_lbound_ui_2exp(slong * exp, const fmpz_t x, int bits) +.. function:: ulong fmpz_abs_lbound_ui_2exp(slong * exp, const fmpz_t x, int bits) For nonzero `x`, returns a mantissa `m` with exactly ``bits`` bits and sets ``exp`` to an exponent `e`, such that `|x| \ge m 2^e`. 
The number of bits must be between 1 and ``FLINT_BITS`` inclusive. The mantissa is guaranteed to be correctly rounded. -.. function:: mp_limb_t fmpz_abs_ubound_ui_2exp(slong * exp, const fmpz_t x, int bits) +.. function:: ulong fmpz_abs_ubound_ui_2exp(slong * exp, const fmpz_t x, int bits) For nonzero `x`, returns a mantissa `m` with exactly ``bits`` bits and sets ``exp`` to an exponent `e`, such that `|x| \le m 2^e`. @@ -1160,7 +1160,7 @@ Bit packing and unpacking -------------------------------------------------------------------------------- -.. function:: int fmpz_bit_pack(mp_limb_t * arr, flint_bitcnt_t shift, flint_bitcnt_t bits, const fmpz_t coeff, int negate, int borrow) +.. function:: int fmpz_bit_pack(ulong * arr, flint_bitcnt_t shift, flint_bitcnt_t bits, const fmpz_t coeff, int negate, int borrow) Shifts the given coefficient to the left by ``shift`` bits and adds it to the integer in ``arr`` in a field of the given number of bits:: @@ -1180,7 +1180,7 @@ Bit packing and unpacking The value of ``coeff`` may also be optionally (and notionally) negated before it is used, by setting the ``negate`` parameter to `-1`. -.. function:: int fmpz_bit_unpack(fmpz_t coeff, mp_limb_t * arr, flint_bitcnt_t shift, flint_bitcnt_t bits, int negate, int borrow) +.. function:: int fmpz_bit_unpack(fmpz_t coeff, ulong * arr, flint_bitcnt_t shift, flint_bitcnt_t bits, int negate, int borrow) A bit field of the given number of bits is extracted from ``arr``, starting after ``shift`` bits, and placed into ``coeff``. An @@ -1190,7 +1190,7 @@ Bit packing and unpacking The value of ``shift`` is expected to be less than ``FLINT_BITS``. -.. function:: void fmpz_bit_unpack_unsigned(fmpz_t coeff, const mp_limb_t * arr, flint_bitcnt_t shift, flint_bitcnt_t bits) +.. function:: void fmpz_bit_unpack_unsigned(fmpz_t coeff, const ulong * arr, flint_bitcnt_t shift, flint_bitcnt_t bits) A bit field of the given number of bits is extracted from ``arr``, starting after ``shift`` bits, and placed into ``coeff``. @@ -1278,7 +1278,7 @@ The ``fmpz_multi_CRT`` class is similar to ``fmpz_multi_CRT_ui`` except that it If sign = 0, it is assumed that `0 \le r_1 < m_1` and `0 \le r_2 < m_2`. Otherwise, it is assumed that `-m_1 \le r_1 < m_1` and `0 \le r_2 < m_2`. -.. function:: void fmpz_multi_mod_ui(mp_limb_t * out, const fmpz_t in, const fmpz_comb_t comb, fmpz_comb_temp_t temp) +.. function:: void fmpz_multi_mod_ui(ulong * out, const fmpz_t in, const fmpz_comb_t comb, fmpz_comb_temp_t temp) Reduces the multiprecision integer ``in`` modulo each of the primes stored in the ``comb`` structure. The array ``out`` will be filled @@ -1286,7 +1286,7 @@ The ``fmpz_multi_CRT`` class is similar to ``fmpz_multi_CRT_ui`` except that it temporary space which must be provided by :func:`fmpz_comb_temp_init` and cleared by :func:`fmpz_comb_temp_clear`. -.. function:: void fmpz_multi_CRT_ui(fmpz_t output, mp_srcptr residues, const fmpz_comb_t comb, fmpz_comb_temp_t ctemp, int sign) +.. function:: void fmpz_multi_CRT_ui(fmpz_t output, nn_srcptr residues, const fmpz_comb_t comb, fmpz_comb_temp_t ctemp, int sign) This function takes a set of residues modulo the list of primes contained in the ``comb`` structure and reconstructs a multiprecision @@ -1299,7 +1299,7 @@ The ``fmpz_multi_CRT`` class is similar to ``fmpz_multi_CRT_ui`` except that it space which must be provided by :func:`fmpz_comb_temp_init` and cleared by :func:`fmpz_comb_temp_clear`. -.. function:: void fmpz_comb_init(fmpz_comb_t comb, mp_srcptr primes, slong num_primes) +.. 
function:: void fmpz_comb_init(fmpz_comb_t comb, nn_srcptr primes, slong num_primes) Initialises a ``comb`` structure for multimodular reduction and recombination. The array ``primes`` is assumed to contain @@ -1427,7 +1427,7 @@ Primality testing composite prime. However in that case an error is printed, as that would be of independent interest. -.. function:: int fmpz_is_prime_pocklington(fmpz_t F, fmpz_t R, const fmpz_t n, mp_ptr pm1, slong num_pm1) +.. function:: int fmpz_is_prime_pocklington(fmpz_t F, fmpz_t R, const fmpz_t n, nn_ptr pm1, slong num_pm1) Applies the Pocklington primality test. The test computes a product `F` of prime powers which divide `n - 1`. @@ -1454,7 +1454,7 @@ Primality testing Requires `n` to be odd. -.. function:: void _fmpz_nm1_trial_factors(const fmpz_t n, mp_ptr pm1, slong * num_pm1, ulong limit) +.. function:: void _fmpz_nm1_trial_factors(const fmpz_t n, nn_ptr pm1, slong * num_pm1, ulong limit) Trial factors `n - 1` up to the given limit (approximately) and stores the factors in an array ``pm1`` whose length is written out to @@ -1464,7 +1464,7 @@ Primality testing be produced (and hence on the length of the array that needs to be supplied). -.. function:: int fmpz_is_prime_morrison(fmpz_t F, fmpz_t R, const fmpz_t n, mp_ptr pp1, slong num_pp1) +.. function:: int fmpz_is_prime_morrison(fmpz_t F, fmpz_t R, const fmpz_t n, nn_ptr pp1, slong num_pp1) Applies the Morrison `p + 1` primality test. The test computes a product `F` of primes which divide `n + 1`. @@ -1492,7 +1492,7 @@ Primality testing Requires `n` to be odd and non-square. -.. function:: void _fmpz_np1_trial_factors(const fmpz_t n, mp_ptr pp1, slong * num_pp1, ulong limit) +.. function:: void _fmpz_np1_trial_factors(const fmpz_t n, nn_ptr pp1, slong * num_pp1, ulong limit) Trial factors `n + 1` up to the given limit (approximately) and stores the factors in an array ``pp1`` whose length is written out to diff --git a/doc/source/fmpz_extras.rst b/doc/source/fmpz_extras.rst index 3b16b32940..a42090066f 100644 --- a/doc/source/fmpz_extras.rst +++ b/doc/source/fmpz_extras.rst @@ -63,7 +63,7 @@ so these methods should not be used gratuitously. Sets *z* to the sum of *x*, *y*, and *c*. -.. function:: mp_size_t _fmpz_size(const fmpz_t x) +.. function:: slong _fmpz_size(const fmpz_t x) Returns the number of limbs required to represent *x*. @@ -82,7 +82,7 @@ so these methods should not be used gratuitously. Low-level conversions ------------------------------------------------------------------------------- -.. function:: void fmpz_set_mpn_large(fmpz_t z, mp_srcptr src, mp_size_t n, int negative) +.. function:: void fmpz_set_mpn_large(fmpz_t z, nn_srcptr src, slong n, int negative) Sets *z* to the integer represented by the *n* limbs in the array *src*, or minus this value if *negative* is 1. @@ -93,13 +93,13 @@ Low-level conversions Given an *fmpz_t* *zv*, this macro sets *zptr* to a pointer to the limbs of *zv*, *zn* to the number of limbs, and *zsign* to a sign bit (0 if nonnegative, - 1 if negative). The variable *ztmp* must be a single *mp_limb_t*, which is + 1 if negative). The variable *ztmp* must be a single *ulong*, which is used as a buffer. If *zv* is a small value, *zv* itself contains no limb array that *zptr* could point to, so the single limb is copied to *ztmp* and *zptr* is set to point to *ztmp*. The case where *zv* is zero is not handled specially, and *zn* is set to 1. -.. function:: void fmpz_lshift_mpn(fmpz_t z, mp_srcptr src, mp_size_t n, int negative, flint_bitcnt_t shift) +.. 
function:: void fmpz_lshift_mpn(fmpz_t z, nn_srcptr src, slong n, int negative, flint_bitcnt_t shift) Sets *z* to the integer represented by the *n* limbs in the array *src*, or minus this value if *negative* is 1, shifted left by *shift* bits. diff --git a/doc/source/fmpz_factor.rst b/doc/source/fmpz_factor.rst index 9e6301c218..b8bca58aa2 100644 --- a/doc/source/fmpz_factor.rst +++ b/doc/source/fmpz_factor.rst @@ -28,7 +28,7 @@ A separate ``int`` field holds the sign, which may be `-1`, `0` or `1`. Clears an ``fmpz_factor_t`` structure. -.. function:: void _fmpz_factor_append_ui(fmpz_factor_t factor, mp_limb_t p, ulong exp) +.. function:: void _fmpz_factor_append_ui(fmpz_factor_t factor, ulong p, ulong exp) Append a factor `p` to the given exponent to the ``fmpz_factor_t`` structure ``factor``. @@ -145,7 +145,7 @@ A separate ``int`` field holds the sign, which may be `-1`, `0` or `1`. smooth for any prime factors `p` of `n` then the function will not ever succeed). -.. function:: int fmpz_factor_pollard_brent_single(fmpz_t p_factor, fmpz_t n_in, fmpz_t yi, fmpz_t ai, mp_limb_t max_iters) +.. function:: int fmpz_factor_pollard_brent_single(fmpz_t p_factor, fmpz_t n_in, fmpz_t yi, fmpz_t ai, ulong max_iters) Pollard Rho algorithm for integer factorization. Assumes that the `n` is not prime. ``factor`` is set as the factor if found. Takes as input the initial @@ -158,7 +158,7 @@ A separate ``int`` field holds the sign, which may be `-1`, `0` or `1`. If the algorithm fails to find a non trivial factor in one call, it tries again (this time with a different set of random values). -.. function:: int fmpz_factor_pollard_brent(fmpz_t factor, flint_rand_t state, fmpz_t n, mp_limb_t max_tries, mp_limb_t max_iters) +.. function:: int fmpz_factor_pollard_brent(fmpz_t factor, flint_rand_t state, fmpz_t n, ulong max_tries, ulong max_iters) Pollard Rho algorithm for integer factorization. Assumes that the `n` is not prime. ``factor`` is set as the factor if found. It is not assured that the @@ -196,7 +196,7 @@ Elliptic curve (ECM) method Factoring of ``fmpz`` integers using ECM -.. function:: void fmpz_factor_ecm_init(ecm_t ecm_inf, mp_limb_t sz) +.. function:: void fmpz_factor_ecm_init(ecm_t ecm_inf, ulong sz) Initializes the ``ecm_t`` struct. This is needed in some functions and carries data between subsequent calls. @@ -205,7 +205,7 @@ Factoring of ``fmpz`` integers using ECM Clears the ``ecm_t`` struct. -.. function:: void fmpz_factor_ecm_double(mp_ptr x, mp_ptr z, mp_ptr x0, mp_ptr z0, mp_ptr n, ecm_t ecm_inf) +.. function:: void fmpz_factor_ecm_double(nn_ptr x, nn_ptr z, nn_ptr x0, nn_ptr z0, nn_ptr n, ecm_t ecm_inf) Sets the point `(x : z)` to two times `(x_0 : z_0)` modulo `n` according to the formula @@ -218,11 +218,11 @@ Factoring of ``fmpz`` integers using ECM z = 4 x_0 z_0 \left((x_0 - z_0)^2 + 4a_{24}x_0z_0\right) \mod n. - ``ecm_inf`` is used just to use temporary ``mp_ptr``'s in the + ``ecm_inf`` is used just to use temporary ``nn_ptr``'s in the structure. This group doubling is valid only for points expressed in Montgomery projective coordinates. -.. function:: void fmpz_factor_ecm_add(mp_ptr x, mp_ptr z, mp_ptr x1, mp_ptr z1, mp_ptr x2, mp_ptr z2, mp_ptr x0, mp_ptr z0, mp_ptr n, ecm_t ecm_inf) +.. 
function:: void fmpz_factor_ecm_add(nn_ptr x, nn_ptr z, nn_ptr x1, nn_ptr z1, nn_ptr x2, nn_ptr z2, nn_ptr x0, nn_ptr z0, nn_ptr n, ecm_t ecm_inf) Sets the point `(x : z)` to the sum of `(x_1 : z_1)` and `(x_2 : z_2)` modulo `n`, given the difference `(x_0 : z_0)` according to the formula @@ -231,21 +231,21 @@ Factoring of ``fmpz`` integers using ECM x = 4z_0(x_1x_2 - z_1z_2)^2 \mod n, \\ z = 4x_0(x_2z_1 - x_1z_2)^2 \mod n. - ``ecm_inf`` is used just to use temporary ``mp_ptr``'s in the + ``ecm_inf`` is used just to use temporary ``nn_ptr``'s in the structure. This group addition is valid only for points expressed in Montgomery projective coordinates. -.. function:: void fmpz_factor_ecm_mul_montgomery_ladder(mp_ptr x, mp_ptr z, mp_ptr x0, mp_ptr z0, mp_limb_t k, mp_ptr n, ecm_t ecm_inf) +.. function:: void fmpz_factor_ecm_mul_montgomery_ladder(nn_ptr x, nn_ptr z, nn_ptr x0, nn_ptr z0, ulong k, nn_ptr n, ecm_t ecm_inf) Montgomery ladder algorithm for scalar multiplication of elliptic points. Sets the point `(x : z)` to `k(x_0 : z_0)` modulo `n`. - ``ecm_inf`` is used just to use temporary ``mp_ptr``'s in the + ``ecm_inf`` is used just to use temporary ``nn_ptr``'s in the structure. Valid only for points expressed in Montgomery projective coordinates. -.. function:: int fmpz_factor_ecm_select_curve(mp_ptr f, mp_ptr sigma, mp_ptr n, ecm_t ecm_inf) +.. function:: int fmpz_factor_ecm_select_curve(nn_ptr f, nn_ptr sigma, nn_ptr n, ecm_t ecm_inf) Selects a random elliptic curve given a random integer ``sigma``, according to Suyama's parameterization. If the factor is found while @@ -262,7 +262,7 @@ Factoring of ``fmpz`` integers using ECM The curve selected is of Montgomery form, the points selected satisfy the curve and are projective coordinates. -.. function:: int fmpz_factor_ecm_stage_I(mp_ptr f, const mp_limb_t * prime_array, mp_limb_t num, mp_limb_t B1, mp_ptr n, ecm_t ecm_inf) +.. function:: int fmpz_factor_ecm_stage_I(nn_ptr f, const ulong * prime_array, ulong num, ulong B1, nn_ptr n, ecm_t ecm_inf) Stage I implementation of the ECM algorithm. @@ -273,7 +273,7 @@ Factoring of ``fmpz`` integers using ECM If the factor is found, number of words required to store the factor is returned, otherwise `0`. -.. function:: int fmpz_factor_ecm_stage_II(mp_ptr f, mp_limb_t B1, mp_limb_t B2, mp_limb_t P, mp_ptr n, ecm_t ecm_inf) +.. function:: int fmpz_factor_ecm_stage_II(nn_ptr f, ulong B1, ulong B2, ulong P, nn_ptr n, ecm_t ecm_inf) Stage II implementation of the ECM algorithm. @@ -284,7 +284,7 @@ Factoring of ``fmpz`` integers using ECM If the factor is found, number of words required to store the factor is returned, otherwise `0`. -.. function:: int fmpz_factor_ecm(fmpz_t f, mp_limb_t curves, mp_limb_t B1, mp_limb_t B2, flint_rand_t state, const fmpz_t n_in) +.. function:: int fmpz_factor_ecm(fmpz_t f, ulong curves, ulong B1, ulong B2, flint_rand_t state, const fmpz_t n_in) Outer wrapper function for the ECM algorithm. In case ``f`` can fit in a single unsigned word, a call to ``n_factor_ecm`` is made. diff --git a/doc/source/fmpz_mat.rst b/doc/source/fmpz_mat.rst index 7226c487b7..f5d197f080 100644 --- a/doc/source/fmpz_mat.rst +++ b/doc/source/fmpz_mat.rst @@ -1000,7 +1000,7 @@ allowed between arguments. Aliasing between input and output matrices is allowed. -.. function:: void _fmpz_mat_solve_dixon_den(fmpz_mat_t X, fmpz_t den, const fmpz_mat_t A, const fmpz_mat_t B, const nmod_mat_t Ainv, mp_limb_t p, const fmpz_t N, const fmpz_t D) +.. 
function:: void _fmpz_mat_solve_dixon_den(fmpz_mat_t X, fmpz_t den, const fmpz_mat_t A, const fmpz_mat_t B, const nmod_mat_t Ainv, ulong p, const fmpz_t N, const fmpz_t D) Solves the equation `AX = B` for nonsingular `A`. More precisely, computes (``X``, ``den``) such that `AX = B \times \operatorname{den}` using a diff --git a/doc/source/fmpz_mod_mpoly.rst b/doc/source/fmpz_mod_mpoly.rst index 890723c5f5..46823749f4 100644 --- a/doc/source/fmpz_mod_mpoly.rst +++ b/doc/source/fmpz_mod_mpoly.rst @@ -353,7 +353,7 @@ Random generation Generate a random polynomial with length up to *length* and exponents in the range ``[0, exp_bounds[i] - 1]``. The exponents of the variable of index *i* are generated by calls to ``n_randint(state, exp_bounds[i])``. -.. function:: void fmpz_mod_mpoly_randtest_bits(fmpz_mod_mpoly_t A, flint_rand_t state, slong length, mp_limb_t exp_bits, const fmpz_mod_mpoly_ctx_t ctx) +.. function:: void fmpz_mod_mpoly_randtest_bits(fmpz_mod_mpoly_t A, flint_rand_t state, slong length, ulong exp_bits, const fmpz_mod_mpoly_ctx_t ctx) Generate a random polynomial with length up to *length* and exponents whose packed form does not exceed the given bit count. diff --git a/doc/source/fmpz_mpoly.rst b/doc/source/fmpz_mpoly.rst index 4b0b39501e..c8ad7e0e1d 100644 --- a/doc/source/fmpz_mpoly.rst +++ b/doc/source/fmpz_mpoly.rst @@ -391,17 +391,17 @@ Random generation -------------------------------------------------------------------------------- -.. function:: void fmpz_mpoly_randtest_bound(fmpz_mpoly_t A, flint_rand_t state, slong length, mp_limb_t coeff_bits, ulong exp_bound, const fmpz_mpoly_ctx_t ctx) +.. function:: void fmpz_mpoly_randtest_bound(fmpz_mpoly_t A, flint_rand_t state, slong length, ulong coeff_bits, ulong exp_bound, const fmpz_mpoly_ctx_t ctx) Generate a random polynomial with length up to *length* and exponents in the range ``[0, exp_bound - 1]``. The exponents of each variable are generated by calls to ``n_randint(state, exp_bound)``. -.. function:: void fmpz_mpoly_randtest_bounds(fmpz_mpoly_t A, flint_rand_t state, slong length, mp_limb_t coeff_bits, ulong * exp_bounds, const fmpz_mpoly_ctx_t ctx) +.. function:: void fmpz_mpoly_randtest_bounds(fmpz_mpoly_t A, flint_rand_t state, slong length, ulong coeff_bits, ulong * exp_bounds, const fmpz_mpoly_ctx_t ctx) Generate a random polynomial with length up to *length* and exponents in the range ``[0, exp_bounds[i] - 1]``. The exponents of the variable of index *i* are generated by calls to ``n_randint(state, exp_bounds[i])``. -.. function:: void fmpz_mpoly_randtest_bits(fmpz_mpoly_t A, flint_rand_t state, slong length, mp_limb_t coeff_bits, mp_limb_t exp_bits, const fmpz_mpoly_ctx_t ctx) +.. function:: void fmpz_mpoly_randtest_bits(fmpz_mpoly_t A, flint_rand_t state, slong length, ulong coeff_bits, ulong exp_bits, const fmpz_mpoly_ctx_t ctx) Generate a random polynomial with length up to the given length and exponents whose packed form does not exceed the given bit count. @@ -765,7 +765,7 @@ Internal Functions ``fmpz_mpoly_div_monagan_pearce`` below may be much faster if the quotient is known to be exact. -.. function:: slong _fmpz_mpoly_divides_monagan_pearce(fmpz ** poly1, ulong ** exp1, slong * alloc, const fmpz * poly2, const ulong * exp2, slong len2, const fmpz * poly3, const ulong * exp3, slong len3, ulong bits, slong N, const mp_limb_t * cmpmask) +.. 
function:: slong _fmpz_mpoly_divides_monagan_pearce(fmpz ** poly1, ulong ** exp1, slong * alloc, const fmpz * poly2, const ulong * exp2, slong len2, const fmpz * poly3, const ulong * exp3, slong len3, ulong bits, slong N, const ulong * cmpmask) Set ``(poly1, exp1, alloc)`` to ``(poly2, exp3, len2)`` divided by ``(poly3, exp3, len3)`` and return 1 if the quotient is exact. Otherwise @@ -800,7 +800,7 @@ Internal Functions Hence, you may find it easier to use this function instead if the C preprocessor is not available. -.. function:: slong _fmpz_mpoly_div_monagan_pearce(fmpz ** polyq, ulong ** expq, slong * allocq, const fmpz * poly2, const ulong * exp2, slong len2, const fmpz * poly3, const ulong * exp3, slong len3, slong bits, slong N, const mp_limb_t * cmpmask) +.. function:: slong _fmpz_mpoly_div_monagan_pearce(fmpz ** polyq, ulong ** expq, slong * allocq, const fmpz * poly2, const ulong * exp2, slong len2, const fmpz * poly3, const ulong * exp3, slong len3, slong bits, slong N, const ulong * cmpmask) Set ``(polyq, expq, allocq)`` to the quotient of ``(poly2, exp2, len2)`` by ``(poly3, exp3, len3)`` discarding @@ -822,7 +822,7 @@ Internal Functions Monagan and Roman Pearce. This function is exceptionally efficient if the division is known to be exact. -.. function:: slong _fmpz_mpoly_divrem_monagan_pearce(slong * lenr, fmpz ** polyq, ulong ** expq, slong * allocq, fmpz ** polyr, ulong ** expr, slong * allocr, const fmpz * poly2, const ulong * exp2, slong len2, const fmpz * poly3, const ulong * exp3, slong len3, slong bits, slong N, const mp_limb_t * cmpmask) +.. function:: slong _fmpz_mpoly_divrem_monagan_pearce(slong * lenr, fmpz ** polyq, ulong ** expq, slong * allocq, fmpz ** polyr, ulong ** expr, slong * allocr, const fmpz * poly2, const ulong * exp2, slong len2, const fmpz * poly3, const ulong * exp3, slong len3, slong bits, slong N, const ulong * cmpmask) Set ``(polyq, expq, allocq)`` and ``(polyr, expr, allocr)`` to the quotient and remainder of ``(poly2, exp2, len2)`` by @@ -875,7 +875,7 @@ Internal Functions ``poly3`` is zero or if an exponent overflow occurs. -.. function:: slong _fmpz_mpoly_divrem_ideal_monagan_pearce(fmpz_mpoly_struct ** polyq, fmpz ** polyr, ulong ** expr, slong * allocr, const fmpz * poly2, const ulong * exp2, slong len2, fmpz_mpoly_struct * const * poly3, ulong * const * exp3, slong len, slong N, slong bits, const fmpz_mpoly_ctx_t ctx, const mp_limb_t * cmpmask) +.. function:: slong _fmpz_mpoly_divrem_ideal_monagan_pearce(fmpz_mpoly_struct ** polyq, fmpz ** polyr, ulong ** expr, slong * allocr, const fmpz * poly2, const ulong * exp2, slong len2, fmpz_mpoly_struct * const * poly3, ulong * const * exp3, slong len, slong N, slong bits, const fmpz_mpoly_ctx_t ctx, const ulong * cmpmask) This function is as per ``_fmpz_mpoly_divrem_monagan_pearce`` except that it takes an array of divisor polynomials ``poly3`` and an array of diff --git a/doc/source/fmpz_mpoly_q.rst b/doc/source/fmpz_mpoly_q.rst index 44211a0077..74a1eb0520 100644 --- a/doc/source/fmpz_mpoly_q.rst +++ b/doc/source/fmpz_mpoly_q.rst @@ -143,7 +143,7 @@ The variable strings in *x* start with the variable of most significance at inde Random generation ------------------------------------------------------------------------------- -.. function:: void fmpz_mpoly_q_randtest(fmpz_mpoly_q_t res, flint_rand_t state, slong length, mp_limb_t coeff_bits, slong exp_bound, const fmpz_mpoly_ctx_t ctx) +.. 
function:: void fmpz_mpoly_q_randtest(fmpz_mpoly_q_t res, flint_rand_t state, slong length, ulong coeff_bits, slong exp_bound, const fmpz_mpoly_ctx_t ctx) Sets *res* to a random rational function where both numerator and denominator have up to *length* terms, coefficients up to size *coeff_bits*, and diff --git a/doc/source/fmpz_poly.rst b/doc/source/fmpz_poly.rst index 74cbba5272..ab5e3a759d 100644 --- a/doc/source/fmpz_poly.rst +++ b/doc/source/fmpz_poly.rst @@ -308,9 +308,9 @@ Randomisation length is up to ``len`` and where each coefficient has up to the given number of bits. -.. function:: void fmpz_poly_randtest_irreducible1(fmpz_poly_t pol, flint_rand_t state, slong len, mp_bitcnt_t bits) - void fmpz_poly_randtest_irreducible2(fmpz_poly_t pol, flint_rand_t state, slong len, mp_bitcnt_t bits) - void fmpz_poly_randtest_irreducible(fmpz_poly_t pol, flint_rand_t state, slong len, mp_bitcnt_t bits) +.. function:: void fmpz_poly_randtest_irreducible1(fmpz_poly_t pol, flint_rand_t state, slong len, flint_bitcnt_t bits) + void fmpz_poly_randtest_irreducible2(fmpz_poly_t pol, flint_rand_t state, slong len, flint_bitcnt_t bits) + void fmpz_poly_randtest_irreducible(fmpz_poly_t pol, flint_rand_t state, slong len, flint_bitcnt_t bits) Sets ``p`` to a random irreducible polynomial, whose length is up to ``len`` and where each coefficient has up to the @@ -586,13 +586,13 @@ Bit packing -------------------------------------------------------------------------------- -.. function:: void _fmpz_poly_bit_pack(mp_ptr arr, const fmpz * poly, slong len, flint_bitcnt_t bit_size, int negate) +.. function:: void _fmpz_poly_bit_pack(nn_ptr arr, const fmpz * poly, slong len, flint_bitcnt_t bit_size, int negate) Packs the coefficients of ``poly`` into bitfields of the given ``bit_size``, negating the coefficients before packing if ``negate`` is set to `-1`. -.. function:: int _fmpz_poly_bit_unpack(fmpz * poly, slong len, mp_srcptr arr, flint_bitcnt_t bit_size, int negate) +.. function:: int _fmpz_poly_bit_unpack(fmpz * poly, slong len, nn_srcptr arr, flint_bitcnt_t bit_size, int negate) Unpacks the polynomial of given length from the array as packed into fields of the given ``bit_size``, finally negating the coefficients @@ -600,7 +600,7 @@ Bit packing leading term with coefficient `\pm1` should be added at position ``len`` of ``poly``. -.. function:: void _fmpz_poly_bit_unpack_unsigned(fmpz * poly, slong len, mp_srcptr arr, flint_bitcnt_t bit_size) +.. function:: void _fmpz_poly_bit_unpack_unsigned(fmpz * poly, slong len, nn_srcptr arr, flint_bitcnt_t bit_size) Unpacks the polynomial of given length from the array as packed into fields of the given ``bit_size``. The coefficients are assumed to @@ -1181,7 +1181,7 @@ Bit sizes and norms integer square root of the sum of the squares of the coefficients of ``poly``. -.. function:: mp_limb_t _fmpz_poly_2norm_normalised_bits(const fmpz * poly, slong len) +.. function:: ulong _fmpz_poly_2norm_normalised_bits(const fmpz * poly, slong len) Returns an upper bound on the number of bits of the normalised Euclidean norm of ``(poly, len)``, i.e. the number of bits of @@ -2283,14 +2283,14 @@ Evaluation Evaluates the polynomial `f` at the rational `a`, and sets ``res`` to the result. -.. function:: mp_limb_t _fmpz_poly_evaluate_mod(const fmpz * poly, slong len, mp_limb_t a, mp_limb_t n, mp_limb_t ninv) +.. 
function:: ulong _fmpz_poly_evaluate_mod(const fmpz * poly, slong len, ulong a, ulong n, ulong ninv) Evaluates ``(poly, len)`` at the value `a` modulo `n` and returns the result. The last argument ``ninv`` must be set to the precomputed inverse of `n`, which can be obtained using the function :func:`n_preinvert_limb`. -.. function:: mp_limb_t fmpz_poly_evaluate_mod(const fmpz_poly_t poly, mp_limb_t a, mp_limb_t n) +.. function:: ulong fmpz_poly_evaluate_mod(const fmpz_poly_t poly, ulong a, ulong n) Evaluates ``poly`` at the value `a` modulo `n` and returns the result. @@ -3087,7 +3087,7 @@ Modular reduction and reconstruction Sets the coefficients of ``A`` to the residues in ``Amod``, normalised to the interval `0 \le r < m` where `m` is the modulus. -.. function:: void _fmpz_poly_CRT_ui_precomp(fmpz * res, const fmpz * poly1, slong len1, const fmpz_t m1, mp_srcptr poly2, slong len2, mp_limb_t m2, mp_limb_t m2inv, fmpz_t m1m2, mp_limb_t c, int sign) +.. function:: void _fmpz_poly_CRT_ui_precomp(fmpz * res, const fmpz * poly1, slong len1, const fmpz_t m1, nn_srcptr poly2, slong len2, ulong m2, ulong m2inv, fmpz_t m1m2, ulong c, int sign) Sets the coefficients in ``res`` to the CRT reconstruction modulo `m_1m_2` of the residues ``(poly1, len1)`` and ``(poly2, len2)`` @@ -3103,7 +3103,7 @@ Modular reduction and reconstruction Coefficients of ``res`` are written up to the maximum of ``len1`` and ``len2``. -.. function:: void _fmpz_poly_CRT_ui(fmpz * res, const fmpz * poly1, slong len1, const fmpz_t m1, mp_srcptr poly2, slong len2, mp_limb_t m2, mp_limb_t m2inv, int sign) +.. function:: void _fmpz_poly_CRT_ui(fmpz * res, const fmpz * poly1, slong len1, const fmpz_t m1, nn_srcptr poly2, slong len2, ulong m2, ulong m2inv, int sign) This function is identical to ``_fmpz_poly_CRT_ui_precomp``, apart from automatically computing `m_1m_2` and `c`. It also @@ -3206,7 +3206,7 @@ Minimal polynomials -------------------------------------------------------------------------------- -.. function:: void _fmpz_poly_cyclotomic(fmpz * a, ulong n, mp_ptr factors, slong num_factors, ulong phi) +.. function:: void _fmpz_poly_cyclotomic(fmpz * a, ulong n, nn_ptr factors, slong num_factors, ulong phi) Sets ``a`` to the lower half of the cyclotomic polynomial `\Phi_n(x)`, given `n \ge 3` which must be squarefree. diff --git a/doc/source/fmpz_vec.rst b/doc/source/fmpz_vec.rst index 1c7f3d8c78..04f4e2437e 100644 --- a/doc/source/fmpz_vec.rst +++ b/doc/source/fmpz_vec.rst @@ -55,7 +55,7 @@ Bit sizes and norms the elements of ``vec``. Sets ``maxabs`` to the bit count of the maximum of the absolute values of the elements of ``vec``. -.. function:: mp_size_t _fmpz_vec_max_limbs(const fmpz * vec, slong len) +.. function:: slong _fmpz_vec_max_limbs(const fmpz * vec, slong len) Returns the maximum number of limbs needed to store the absolute value of any entry in ``(vec, len)``. If all entries are zero, returns @@ -130,25 +130,25 @@ Conversions -------------------------------------------------------------------------------- -.. function:: void _fmpz_vec_get_nmod_vec(mp_ptr res, const fmpz * poly, slong len, nmod_t mod) +.. function:: void _fmpz_vec_get_nmod_vec(nn_ptr res, const fmpz * poly, slong len, nmod_t mod) Reduce the coefficients of ``(poly, len)`` modulo the given modulus and set ``(res, len)`` to the result. -.. function:: void _fmpz_vec_set_nmod_vec(fmpz * res, mp_srcptr poly, slong len, nmod_t mod) +.. 
function:: void _fmpz_vec_set_nmod_vec(fmpz * res, nn_srcptr poly, slong len, nmod_t mod) Set the coefficients of ``(res, len)`` to the symmetric modulus of the coefficients of ``(poly, len)``, i.e. convert the given coefficients modulo the given modulus `n` to their signed integer representatives in the range `[-n/2, n/2)`. -.. function:: void _fmpz_vec_get_fft(mp_limb_t ** coeffs_f, const fmpz * coeffs_m, slong l, slong length) +.. function:: void _fmpz_vec_get_fft(ulong ** coeffs_f, const fmpz * coeffs_m, slong l, slong length) Convert the vector of coeffs ``coeffs_m`` to an fft vector ``coeffs_f`` of the given ``length`` with ``l`` limbs per coefficient with an additional limb for overflow. -.. function:: void _fmpz_vec_set_fft(fmpz * coeffs_m, slong length, const mp_ptr * coeffs_f, slong limbs, slong sign) +.. function:: void _fmpz_vec_set_fft(fmpz * coeffs_m, slong length, const nn_ptr * coeffs_f, slong limbs, slong sign) Convert an fft vector ``coeffs_f`` of fully reduced Fermat numbers of the given ``length`` to a vector of ``fmpz``'s. Each is assumed to be the given diff --git a/doc/source/fmpzi.rst b/doc/source/fmpzi.rst index f214787805..ebe9fd3c16 100644 --- a/doc/source/fmpzi.rst +++ b/doc/source/fmpzi.rst @@ -51,7 +51,7 @@ Input and output Random number generation ------------------------------------------------------------------------------- -.. function:: void fmpzi_randtest(fmpzi_t res, flint_rand_t state, mp_bitcnt_t bits) +.. function:: void fmpzi_randtest(fmpzi_t res, flint_rand_t state, flint_bitcnt_t bits) Properties ------------------------------------------------------------------------------- diff --git a/doc/source/fq_nmod.rst b/doc/source/fq_nmod.rst index c2c1a4ce22..0e34f03f23 100644 --- a/doc/source/fq_nmod.rst +++ b/doc/source/fq_nmod.rst @@ -144,17 +144,17 @@ Memory management Clears the element ``rop``. -.. function:: void _fq_nmod_sparse_reduce(mp_limb_t * R, slong lenR, const fq_nmod_ctx_t ctx) +.. function:: void _fq_nmod_sparse_reduce(ulong * R, slong lenR, const fq_nmod_ctx_t ctx) Reduces ``(R, lenR)`` modulo the polynomial `f` given by the modulus of ``ctx``. -.. function:: void _fq_nmod_dense_reduce(mp_limb_t * R, slong lenR, const fq_nmod_ctx_t ctx) +.. function:: void _fq_nmod_dense_reduce(ulong * R, slong lenR, const fq_nmod_ctx_t ctx) Reduces ``(R, lenR)`` modulo the polynomial `f` given by the modulus of ``ctx`` using Newton division. -.. function:: void _fq_nmod_reduce(mp_limb_t * r, slong lenR, const fq_nmod_ctx_t ctx) +.. function:: void _fq_nmod_reduce(ulong * r, slong lenR, const fq_nmod_ctx_t ctx) Reduces ``(R, lenR)`` modulo the polynomial `f` given by the modulus of ``ctx``. Does either sparse or dense reduction @@ -211,7 +211,7 @@ Basic arithmetic Sets ``rop`` to the square of ``op``, reducing the output in the given context. -.. function:: void _fq_nmod_inv(mp_ptr * rop, mp_srcptr * op, slong len, const fq_nmod_ctx_t ctx) +.. function:: void _fq_nmod_inv(nn_ptr * rop, nn_srcptr * op, slong len, const fq_nmod_ctx_t ctx) Sets ``(rop, d)`` to the inverse of the non-zero element ``(op, len)``. @@ -226,7 +226,7 @@ Basic arithmetic of ``ctx``. If ``op`` is not invertible, then ``f`` is set to a factor of the modulus; otherwise, it is set to one. -.. function:: void _fq_nmod_pow(mp_limb_t * rop, const mp_limb_t * op, slong len, const fmpz_t e, const fq_nmod_ctx_t ctx) +.. 
function:: void _fq_nmod_pow(ulong * rop, const ulong * op, slong len, const fmpz_t e, const fq_nmod_ctx_t ctx) Sets ``(rop, 2*d-1)`` to ``(op,len)`` raised to the power `e`, reduced modulo `f(X)`, the modulus of ``ctx``. @@ -445,7 +445,7 @@ Special functions -------------------------------------------------------------------------------- -.. function:: void _fq_nmod_trace(fmpz_t rop, const mp_limb_t * op, slong len, const fq_nmod_ctx_t ctx) +.. function:: void _fq_nmod_trace(fmpz_t rop, const ulong * op, slong len, const fq_nmod_ctx_t ctx) Sets ``rop`` to the trace of the non-zero element ``(op, len)`` in `\mathbf{F}_{q}`. @@ -461,7 +461,7 @@ Special functions `a` is equal to `\sum_{i=0}^{d-1} \Sigma^i (a)`, where `d = \log_{p} q`. -.. function:: void _fq_nmod_norm(fmpz_t rop, const mp_limb_t * op, slong len, const fq_nmod_ctx_t ctx) +.. function:: void _fq_nmod_norm(fmpz_t rop, const ulong * op, slong len, const fq_nmod_ctx_t ctx) Sets ``rop`` to the norm of the non-zero element ``(op, len)`` in `\mathbf{F}_{q}`. @@ -479,7 +479,7 @@ Special functions Algorithm selection is automatic depending on the input. -.. function:: void _fq_nmod_frobenius(mp_limb_t * rop, const mp_limb_t * op, slong len, slong e, const fq_nmod_ctx_t ctx) +.. function:: void _fq_nmod_frobenius(ulong * rop, const ulong * op, slong len, slong e, const fq_nmod_ctx_t ctx) Sets ``(rop, 2d-1)`` to the image of ``(op, len)`` under the Frobenius operator raised to the e-th power, assuming that neither diff --git a/doc/source/fq_nmod_mpoly.rst b/doc/source/fq_nmod_mpoly.rst index 72fd49eb9e..567e93491d 100644 --- a/doc/source/fq_nmod_mpoly.rst +++ b/doc/source/fq_nmod_mpoly.rst @@ -347,7 +347,7 @@ Random generation Generate a random polynomial with length up to *length* and exponents in the range ``[0, exp_bounds[i] - 1]``. The exponents of the variable of index *i* are generated by calls to ``n_randint(state, exp_bounds[i])``. -.. function:: void fq_nmod_mpoly_randtest_bits(fq_nmod_mpoly_t A, flint_rand_t state, slong length, mp_limb_t exp_bits, const fq_nmod_mpoly_ctx_t ctx) +.. function:: void fq_nmod_mpoly_randtest_bits(fq_nmod_mpoly_t A, flint_rand_t state, slong length, ulong exp_bits, const fq_nmod_mpoly_ctx_t ctx) Generate a random polynomial with length up to *length* and exponents whose packed form does not exceed the given bit count. diff --git a/doc/source/fq_nmod_poly.rst b/doc/source/fq_nmod_poly.rst index 3093683f9d..a38a702f99 100644 --- a/doc/source/fq_nmod_poly.rst +++ b/doc/source/fq_nmod_poly.rst @@ -966,7 +966,7 @@ Euclidean division be invertible modulo the modulus of ``Q``. An exception is raised if this is not the case or if ``n = 0``. -.. function:: void _fq_nmod_poly_div_series(fq_nmod_struct * Q, const fq_nmod_struct * A, mp_limb_signed_t Alen, const fq_nmod_struct * B, mp_limb_signed_t Blen, mp_limb_signed_t n, const fq_nmod_ctx_t ctx) +.. function:: void _fq_nmod_poly_div_series(fq_nmod_struct * Q, const fq_nmod_struct * A, slong Alen, const fq_nmod_struct * B, slong Blen, slong n, const fq_nmod_ctx_t ctx) Set ``(Q, n)`` to the quotient of the series ``(A, Alen``) and ``(B, Blen)`` assuming ``Alen, Blen <= n``. We assume the bottom diff --git a/doc/source/fq_zech.rst b/doc/source/fq_zech.rst index 271c2fab26..efc1e6f17c 100644 --- a/doc/source/fq_zech.rst +++ b/doc/source/fq_zech.rst @@ -7,7 +7,7 @@ We represent an element of the finite field as a power of a generator for the multiplicative group of the finite field. 
In particular, we use a root of `f(X)`, where `f(X) \in \mathbf{F}_p[X]` is a monic, irreducible polynomial of degree `n`, as a polynomial in `\mathbf{F}_p[X]` of degree less than `n`. The -underlying data structure is just an ``mp_limb_t``. +underlying data structure is just an ``ulong``. The default choice for `f(X)` is the Conway polynomial for the pair `(p,n)`, enabled by Frank Lübeck's data base of Conway polynomials using the @@ -15,7 +15,7 @@ enabled by Frank Lübeck's data base of Conway polynomials using the then a random irreducible polynomial will be chosen for `f(X)`. Additionally, the user is able to supply their own `f(X)`. -We required that the order of the field fits inside of an ``mp_limb_t``; +We require that the order of the field fits inside an ``ulong``; however, it is recommended that `p^n < 2^{20}` due to the time and memory needed to compute the Zech logarithm table. @@ -147,7 +147,7 @@ Context Management Sets `f` to be the size of the finite field. -.. function:: mp_limb_t fq_zech_ctx_order_ui(const fq_zech_ctx_t ctx) +.. function:: ulong fq_zech_ctx_order_ui(const fq_zech_ctx_t ctx) Returns the size of the finite field. @@ -178,17 +178,17 @@ Memory management Clears the element ``rop``. -.. function:: void _fq_zech_sparse_reduce(mp_ptr R, slong lenR, const fq_zech_ctx_t ctx) +.. function:: void _fq_zech_sparse_reduce(nn_ptr R, slong lenR, const fq_zech_ctx_t ctx) Reduces ``(R, lenR)`` modulo the polynomial `f` given by the modulus of ``ctx``. -.. function:: void _fq_zech_dense_reduce(mp_ptr R, slong lenR, const fq_zech_ctx_t ctx) +.. function:: void _fq_zech_dense_reduce(nn_ptr R, slong lenR, const fq_zech_ctx_t ctx) Reduces ``(R, lenR)`` modulo the polynomial `f` given by the modulus of ``ctx`` using Newton division. -.. function:: void _fq_zech_reduce(mp_ptr r, slong lenR, const fq_zech_ctx_t ctx) +.. function:: void _fq_zech_reduce(nn_ptr r, slong lenR, const fq_zech_ctx_t ctx) Reduces ``(R, lenR)`` modulo the polynomial `f` given by the modulus of ``ctx``. Does either sparse or dense reduction @@ -250,7 +250,7 @@ Basic arithmetic Sets ``rop`` to the quotient of ``op1`` and ``op2``, reducing the output in the given context. -.. function:: void _fq_zech_inv(mp_ptr * rop, mp_srcptr * op, slong len, const fq_zech_ctx_t ctx) +.. function:: void _fq_zech_inv(nn_ptr * rop, nn_srcptr * op, slong len, const fq_zech_ctx_t ctx) Sets ``(rop, d)`` to the inverse of the non-zero element ``(op, len)``. diff --git a/doc/source/long_extras.rst b/doc/source/long_extras.rst index adf752be9e..a9f90d511c 100644 --- a/doc/source/long_extras.rst +++ b/doc/source/long_extras.rst @@ -28,7 +28,7 @@ Random functions -------------------------------------------------------------------------------- -.. function:: mp_limb_signed_t z_randtest(flint_rand_t state) +.. function:: slong z_randtest(flint_rand_t state) Returns a pseudo random number with a random number of bits, from `0` to ``FLINT_BITS``. The probability of the special values `0`, @@ -37,11 +37,11 @@ Random functions This random function is mainly used for testing purposes. -.. function:: mp_limb_signed_t z_randtest_not_zero(flint_rand_t state) +.. function:: slong z_randtest_not_zero(flint_rand_t state) As for ``z_randtest(state)``, but does not return `0`. -.. function:: mp_limb_signed_t z_randint(flint_rand_t state, mp_limb_t limit) +.. function:: slong z_randint(flint_rand_t state, ulong limit) Returns a pseudo random number of absolute value less than ``limit``.
If ``limit`` is zero or exceeds ``WORD_MAX``, diff --git a/doc/source/memory.rst b/doc/source/memory.rst index 2bf4d682c0..44f0aab9a6 100644 --- a/doc/source/memory.rst +++ b/doc/source/memory.rst @@ -60,7 +60,7 @@ allocate two different arrays. void myfun(void) { /* other variable declarations */ - mp_ptr a, b; + nn_ptr a, b; TMP_INIT; /* arbitrary code */ @@ -69,8 +69,8 @@ allocate two different arrays. /* arbitrary code */ - a = TMP_ALLOC(32*sizeof(mp_limb_t)); - b = TMP_ALLOC(64*sizeof(mp_limb_t)); + a = TMP_ALLOC(32*sizeof(ulong)); + b = TMP_ALLOC(64*sizeof(ulong)); /* arbitrary code */ diff --git a/doc/source/mpn_mod.rst b/doc/source/mpn_mod.rst index 0255447dbc..c56f0934e0 100644 --- a/doc/source/mpn_mod.rst +++ b/doc/source/mpn_mod.rst @@ -7,7 +7,7 @@ This module provides efficient arithmetic in rings `R = \mathbb{Z} / n \mathbb{Z}` for medium-sized `n`. Given an `\ell`-limb modulus `2^{\beta (\ell-1)} \le n < 2^{\beta \ell}` where `\beta` is ``FLINT_BITS`` (32 or 64), -elements are represented as `\ell`-limb arrays (i.e. ``mp_limb_t[l]``), +elements are represented as `\ell`-limb arrays (i.e. ``ulong[l]``), zero-padded for values that happen to fit in less than `\ell` limbs, which can be stack-allocated and packed consecutively without indirection or memory allocation overhead. @@ -43,10 +43,10 @@ Context objects ------------------------------------------------------------------------------- .. function:: int gr_ctx_init_mpn_mod(gr_ctx_t ctx, const fmpz_t n) - int _gr_ctx_init_mpn_mod(gr_ctx_t ctx, mp_srcptr n, mp_size_t nlimbs) + int _gr_ctx_init_mpn_mod(gr_ctx_t ctx, nn_srcptr n, slong nlimbs) Initializes *ctx* to the ring `\mathbb{Z}/n\mathbb{Z}` - of integers modulo *n* where elements are ``mp_limb_t`` arrays with + of integers modulo *n* where elements are ``ulong`` arrays with the same number of limbs as *n*. This constructor does no initialization and returns ``GR_DOMAIN`` if the modulus is nonpositive, or ``GR_UNABLE`` if the modulus is not in bounds. @@ -97,49 +97,49 @@ Basic operations and arithmetic .. 
function:: int mpn_mod_ctx_write(gr_stream_t out, gr_ctx_t ctx) void mpn_mod_ctx_clear(gr_ctx_t ctx) truth_t mpn_mod_ctx_is_field(gr_ctx_t ctx) - void mpn_mod_init(mp_ptr x, gr_ctx_t ctx) - void mpn_mod_clear(mp_ptr x, gr_ctx_t ctx) - void mpn_mod_swap(mp_ptr x, mp_ptr y, gr_ctx_t ctx) - int mpn_mod_set(mp_ptr res, mp_srcptr x, gr_ctx_t ctx) - int mpn_mod_zero(mp_ptr res, gr_ctx_t ctx) - int mpn_mod_one(mp_ptr res, gr_ctx_t ctx) - int mpn_mod_set_ui(mp_ptr res, ulong x, gr_ctx_t ctx) - int mpn_mod_set_si(mp_ptr res, slong x, gr_ctx_t ctx) - int mpn_mod_neg_one(mp_ptr res, gr_ctx_t ctx) - int mpn_mod_set_mpn(mp_ptr res, mp_srcptr x, mp_size_t xn, gr_ctx_t ctx) - int mpn_mod_set_fmpz(mp_ptr res, const fmpz_t x, gr_ctx_t ctx) - int mpn_mod_set_other(mp_ptr res, gr_ptr v, gr_ctx_t v_ctx, gr_ctx_t ctx) - int mpn_mod_randtest(mp_ptr res, flint_rand_t state, gr_ctx_t ctx) - int mpn_mod_write(gr_stream_t out, mp_srcptr x, gr_ctx_t ctx) - int mpn_mod_get_fmpz(fmpz_t res, mp_srcptr x, gr_ctx_t ctx) - truth_t mpn_mod_is_zero(mp_srcptr x, gr_ctx_t ctx) - truth_t mpn_mod_is_one(mp_srcptr x, gr_ctx_t ctx) + void mpn_mod_init(nn_ptr x, gr_ctx_t ctx) + void mpn_mod_clear(nn_ptr x, gr_ctx_t ctx) + void mpn_mod_swap(nn_ptr x, nn_ptr y, gr_ctx_t ctx) + int mpn_mod_set(nn_ptr res, nn_srcptr x, gr_ctx_t ctx) + int mpn_mod_zero(nn_ptr res, gr_ctx_t ctx) + int mpn_mod_one(nn_ptr res, gr_ctx_t ctx) + int mpn_mod_set_ui(nn_ptr res, ulong x, gr_ctx_t ctx) + int mpn_mod_set_si(nn_ptr res, slong x, gr_ctx_t ctx) + int mpn_mod_neg_one(nn_ptr res, gr_ctx_t ctx) + int mpn_mod_set_mpn(nn_ptr res, nn_srcptr x, slong xn, gr_ctx_t ctx) + int mpn_mod_set_fmpz(nn_ptr res, const fmpz_t x, gr_ctx_t ctx) + int mpn_mod_set_other(nn_ptr res, gr_ptr v, gr_ctx_t v_ctx, gr_ctx_t ctx) + int mpn_mod_randtest(nn_ptr res, flint_rand_t state, gr_ctx_t ctx) + int mpn_mod_write(gr_stream_t out, nn_srcptr x, gr_ctx_t ctx) + int mpn_mod_get_fmpz(fmpz_t res, nn_srcptr x, gr_ctx_t ctx) + truth_t mpn_mod_is_zero(nn_srcptr x, gr_ctx_t ctx) + truth_t mpn_mod_is_one(nn_srcptr x, gr_ctx_t ctx) truth_t mpn_mod_is_neg_one(gr_srcptr x, gr_ctx_t ctx) - truth_t mpn_mod_equal(mp_srcptr x, mp_srcptr y, gr_ctx_t ctx) - int mpn_mod_neg(mp_ptr res, mp_srcptr x, gr_ctx_t ctx) - int mpn_mod_add(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx) - int mpn_mod_sub(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx) - int mpn_mod_add_ui(mp_ptr res, mp_srcptr x, ulong y, gr_ctx_t ctx) - int mpn_mod_sub_ui(mp_ptr res, mp_srcptr x, ulong y, gr_ctx_t ctx) - int mpn_mod_add_si(mp_ptr res, mp_srcptr x, slong y, gr_ctx_t ctx) - int mpn_mod_sub_si(mp_ptr res, mp_srcptr x, slong y, gr_ctx_t ctx) - int mpn_mod_add_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx) - int mpn_mod_sub_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx) - int mpn_mod_mul(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx) - int mpn_mod_mul_ui(mp_ptr res, mp_srcptr x, ulong y, gr_ctx_t ctx) - int mpn_mod_mul_si(mp_ptr res, mp_srcptr x, slong y, gr_ctx_t ctx) - int mpn_mod_mul_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx) - int mpn_mod_addmul(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx) - int mpn_mod_addmul_ui(mp_ptr res, mp_srcptr x, ulong y, gr_ctx_t ctx) - int mpn_mod_addmul_si(mp_ptr res, mp_srcptr x, slong y, gr_ctx_t ctx) - int mpn_mod_addmul_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx) - int mpn_mod_submul(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx) - int mpn_mod_submul_ui(mp_ptr res, mp_srcptr x, ulong y, gr_ctx_t ctx) - int 
mpn_mod_submul_si(mp_ptr res, mp_srcptr x, slong y, gr_ctx_t ctx) - int mpn_mod_submul_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx) - int mpn_mod_sqr(mp_ptr res, mp_srcptr x, gr_ctx_t ctx) - int mpn_mod_inv(mp_ptr res, mp_srcptr x, gr_ctx_t ctx) - int mpn_mod_div(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx) + truth_t mpn_mod_equal(nn_srcptr x, nn_srcptr y, gr_ctx_t ctx) + int mpn_mod_neg(nn_ptr res, nn_srcptr x, gr_ctx_t ctx) + int mpn_mod_add(nn_ptr res, nn_srcptr x, nn_srcptr y, gr_ctx_t ctx) + int mpn_mod_sub(nn_ptr res, nn_srcptr x, nn_srcptr y, gr_ctx_t ctx) + int mpn_mod_add_ui(nn_ptr res, nn_srcptr x, ulong y, gr_ctx_t ctx) + int mpn_mod_sub_ui(nn_ptr res, nn_srcptr x, ulong y, gr_ctx_t ctx) + int mpn_mod_add_si(nn_ptr res, nn_srcptr x, slong y, gr_ctx_t ctx) + int mpn_mod_sub_si(nn_ptr res, nn_srcptr x, slong y, gr_ctx_t ctx) + int mpn_mod_add_fmpz(nn_ptr res, nn_srcptr x, const fmpz_t y, gr_ctx_t ctx) + int mpn_mod_sub_fmpz(nn_ptr res, nn_srcptr x, const fmpz_t y, gr_ctx_t ctx) + int mpn_mod_mul(nn_ptr res, nn_srcptr x, nn_srcptr y, gr_ctx_t ctx) + int mpn_mod_mul_ui(nn_ptr res, nn_srcptr x, ulong y, gr_ctx_t ctx) + int mpn_mod_mul_si(nn_ptr res, nn_srcptr x, slong y, gr_ctx_t ctx) + int mpn_mod_mul_fmpz(nn_ptr res, nn_srcptr x, const fmpz_t y, gr_ctx_t ctx) + int mpn_mod_addmul(nn_ptr res, nn_srcptr x, nn_srcptr y, gr_ctx_t ctx) + int mpn_mod_addmul_ui(nn_ptr res, nn_srcptr x, ulong y, gr_ctx_t ctx) + int mpn_mod_addmul_si(nn_ptr res, nn_srcptr x, slong y, gr_ctx_t ctx) + int mpn_mod_addmul_fmpz(nn_ptr res, nn_srcptr x, const fmpz_t y, gr_ctx_t ctx) + int mpn_mod_submul(nn_ptr res, nn_srcptr x, nn_srcptr y, gr_ctx_t ctx) + int mpn_mod_submul_ui(nn_ptr res, nn_srcptr x, ulong y, gr_ctx_t ctx) + int mpn_mod_submul_si(nn_ptr res, nn_srcptr x, slong y, gr_ctx_t ctx) + int mpn_mod_submul_fmpz(nn_ptr res, nn_srcptr x, const fmpz_t y, gr_ctx_t ctx) + int mpn_mod_sqr(nn_ptr res, nn_srcptr x, gr_ctx_t ctx) + int mpn_mod_inv(nn_ptr res, nn_srcptr x, gr_ctx_t ctx) + int mpn_mod_div(nn_ptr res, nn_srcptr x, nn_srcptr y, gr_ctx_t ctx) Basic functionality for the ``gr`` method table. These methods are interchangeable with their ``gr`` counterparts. @@ -151,19 +151,19 @@ Basic operations and arithmetic Vector functions ------------------------------------------------------------------------------- -.. function:: int _mpn_mod_vec_zero(mp_ptr res, slong len, gr_ctx_t ctx) - int _mpn_mod_vec_clear(mp_ptr res, slong len, gr_ctx_t ctx) - int _mpn_mod_vec_set(mp_ptr res, mp_srcptr x, slong len, gr_ctx_t ctx) - void _mpn_mod_vec_swap(mp_ptr vec1, mp_ptr vec2, slong len, gr_ctx_t ctx) - int _mpn_mod_vec_neg(mp_ptr res, mp_srcptr x, slong len, gr_ctx_t ctx) - int _mpn_mod_vec_add(mp_ptr res, mp_srcptr x, mp_srcptr y, slong len, gr_ctx_t ctx) - int _mpn_mod_vec_sub(mp_ptr res, mp_srcptr x, mp_srcptr y, slong len, gr_ctx_t ctx) - int _mpn_mod_vec_mul(mp_ptr res, mp_srcptr x, mp_srcptr y, slong len, gr_ctx_t ctx) - int _mpn_mod_vec_mul_scalar(mp_ptr res, mp_srcptr x, slong len, mp_srcptr y, gr_ctx_t ctx) - int _mpn_mod_scalar_mul_vec(mp_ptr res, mp_srcptr y, mp_srcptr x, slong len, gr_ctx_t ctx) - int _mpn_mod_vec_addmul_scalar(mp_ptr res, mp_srcptr x, slong len, mp_srcptr y, gr_ctx_t ctx) - int _mpn_mod_vec_dot(mp_ptr res, mp_srcptr initial, int subtract, mp_srcptr vec1, mp_srcptr vec2, slong len, gr_ctx_t ctx) - int _mpn_mod_vec_dot_rev(mp_ptr res, mp_srcptr initial, int subtract, mp_srcptr vec1, mp_srcptr vec2, slong len, gr_ctx_t ctx) +.. 
function:: int _mpn_mod_vec_zero(nn_ptr res, slong len, gr_ctx_t ctx) + int _mpn_mod_vec_clear(nn_ptr res, slong len, gr_ctx_t ctx) + int _mpn_mod_vec_set(nn_ptr res, nn_srcptr x, slong len, gr_ctx_t ctx) + void _mpn_mod_vec_swap(nn_ptr vec1, nn_ptr vec2, slong len, gr_ctx_t ctx) + int _mpn_mod_vec_neg(nn_ptr res, nn_srcptr x, slong len, gr_ctx_t ctx) + int _mpn_mod_vec_add(nn_ptr res, nn_srcptr x, nn_srcptr y, slong len, gr_ctx_t ctx) + int _mpn_mod_vec_sub(nn_ptr res, nn_srcptr x, nn_srcptr y, slong len, gr_ctx_t ctx) + int _mpn_mod_vec_mul(nn_ptr res, nn_srcptr x, nn_srcptr y, slong len, gr_ctx_t ctx) + int _mpn_mod_vec_mul_scalar(nn_ptr res, nn_srcptr x, slong len, nn_srcptr y, gr_ctx_t ctx) + int _mpn_mod_scalar_mul_vec(nn_ptr res, nn_srcptr y, nn_srcptr x, slong len, gr_ctx_t ctx) + int _mpn_mod_vec_addmul_scalar(nn_ptr res, nn_srcptr x, slong len, nn_srcptr y, gr_ctx_t ctx) + int _mpn_mod_vec_dot(nn_ptr res, nn_srcptr initial, int subtract, nn_srcptr vec1, nn_srcptr vec2, slong len, gr_ctx_t ctx) + int _mpn_mod_vec_dot_rev(nn_ptr res, nn_srcptr initial, int subtract, nn_srcptr vec1, nn_srcptr vec2, slong len, gr_ctx_t ctx) Overrides for generic ``gr`` vector operations with inlined or partially inlined code for reduced overhead. @@ -207,7 +207,7 @@ used by higher-level generic routines. Dispatches between classical, delayed-reduction and recursive LU factorization. -.. function:: int mpn_mod_mat_det(mp_ptr res, const gr_mat_t A, gr_ctx_t ctx) +.. function:: int mpn_mod_mat_det(nn_ptr res, const gr_mat_t A, gr_ctx_t ctx) Dispatches to an appropriate generic algorithm for computing the determinant. @@ -224,15 +224,15 @@ Multiplication All multiplication algorithms optimize for squaring. -.. function:: int _mpn_mod_poly_mullow_classical(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) +.. function:: int _mpn_mod_poly_mullow_classical(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) Polynomial multiplication using the schoolbook algorithm. -.. function:: int _mpn_mod_poly_mullow_KS(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) +.. function:: int _mpn_mod_poly_mullow_KS(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) Polynomial multiplication using Kronecker substitution (bit packing). -.. function:: int _mpn_mod_poly_mullow_karatsuba(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, slong cutoff, gr_ctx_t ctx) +.. function:: int _mpn_mod_poly_mullow_karatsuba(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong len, slong cutoff, gr_ctx_t ctx) Polynomial multiplication using the Karatsuba algorithm, implemented without intermediate modular reductions. @@ -243,33 +243,33 @@ All multiplication algorithms optimize for squaring. Currently a full product is computed internally regardless of *len*; truncation only skips the modular reductions. -.. function:: int _mpn_mod_poly_mullow_fft_small(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) +.. function:: int _mpn_mod_poly_mullow_fft_small(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) Polynomial multiplication using the small-prime FFT. Returns ``GR_UNABLE`` if the small-prime FFT is not available or if the coefficients are too large to use this implementation. -.. 
function:: int _mpn_mod_poly_mullow(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) +.. function:: int _mpn_mod_poly_mullow(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) Polynomial multiplication with automatic algorithm selection. Division .............. -.. function:: int _mpn_mod_poly_inv_series(mp_ptr Q, mp_srcptr B, slong lenB, slong len, gr_ctx_t ctx) - int _mpn_mod_poly_div_series(mp_ptr Q, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, slong len, gr_ctx_t ctx) +.. function:: int _mpn_mod_poly_inv_series(nn_ptr Q, nn_srcptr B, slong lenB, slong len, gr_ctx_t ctx) + int _mpn_mod_poly_div_series(nn_ptr Q, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, slong len, gr_ctx_t ctx) Power series inversion and division with automatic selection between basecase and Newton algorithms. -.. function:: int _mpn_mod_poly_divrem_basecase_preinv1(mp_ptr Q, mp_ptr R, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, mp_srcptr invL, gr_ctx_t ctx) - int _mpn_mod_poly_divrem_basecase(mp_ptr Q, mp_ptr R, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, gr_ctx_t ctx) +.. function:: int _mpn_mod_poly_divrem_basecase_preinv1(nn_ptr Q, nn_ptr R, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nn_srcptr invL, gr_ctx_t ctx) + int _mpn_mod_poly_divrem_basecase(nn_ptr Q, nn_ptr R, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, gr_ctx_t ctx) Polynomial division with remainder implemented using the basecase algorithm with delayed reductions. -.. function:: int _mpn_mod_poly_divrem(mp_ptr Q, mp_ptr R, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, gr_ctx_t ctx) - int _mpn_mod_poly_div(mp_ptr Q, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, gr_ctx_t ctx) +.. function:: int _mpn_mod_poly_divrem(nn_ptr Q, nn_ptr R, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, gr_ctx_t ctx) + int _mpn_mod_poly_div(nn_ptr Q, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, gr_ctx_t ctx) Polynomial division with remainder with automatic selection between basecase and Newton algorithms. @@ -277,12 +277,12 @@ Division GCD .............. -.. function:: int _mpn_mod_poly_gcd(mp_ptr G, slong * lenG, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, gr_ctx_t ctx) +.. function:: int _mpn_mod_poly_gcd(nn_ptr G, slong * lenG, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, gr_ctx_t ctx) Polynomial GCD with automatic selection between basecase and HGCD algorithms. -.. function:: int _mpn_mod_poly_xgcd(slong * lenG, mp_ptr G, mp_ptr S, mp_ptr T, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, gr_ctx_t ctx); +.. function:: int _mpn_mod_poly_xgcd(slong * lenG, nn_ptr G, nn_ptr S, nn_ptr T, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, gr_ctx_t ctx); Polynomial extended GCD with automatic selection between basecase and HGCD algorithms. diff --git a/doc/source/nf_elem.rst b/doc/source/nf_elem.rst index db2e4339b9..4ae78c366d 100644 --- a/doc/source/nf_elem.rst +++ b/doc/source/nf_elem.rst @@ -26,7 +26,7 @@ Initialisation Clear resources allocated by the given number field element in the given number field. -.. function:: void nf_elem_randtest(nf_elem_t a, flint_rand_t state, mp_bitcnt_t bits, const nf_t nf) +.. function:: void nf_elem_randtest(nf_elem_t a, flint_rand_t state, flint_bitcnt_t bits, const nf_t nf) Generate a random number field element `a` in the number field ``nf`` whose coefficients have up to the given number of bits.
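A minimal usage sketch of ``nf_elem_randtest`` with the renamed bit-count type, written in C. It is illustrative only: the defining polynomial, the headers ``fmpq_poly.h``/``nf.h``/``nf_elem.h`` and the ``flint_rand_init``/``flint_rand_clear`` spelling of the random-state API are assumptions of the sketch, not part of this patch::

    #include "fmpq_poly.h"
    #include "nf.h"
    #include "nf_elem.h"

    int main(void)
    {
        fmpq_poly_t pol;
        nf_t nf;
        nf_elem_t a;
        flint_rand_t state;

        flint_rand_init(state);               /* assumed FLINT >= 3.1 name */

        /* number field Q[x]/(x^2 - 2) */
        fmpq_poly_init(pol);
        fmpq_poly_set_coeff_si(pol, 2, 1);
        fmpq_poly_set_coeff_si(pol, 0, -2);
        nf_init(nf, pol);

        nf_elem_init(a, nf);

        /* the bit-count argument is now a flint_bitcnt_t */
        nf_elem_randtest(a, state, 64, nf);

        nf_elem_clear(a, nf);
        nf_clear(nf);
        fmpq_poly_clear(pol);
        flint_rand_clear(state);
        return 0;
    }

Since ``flint_bitcnt_t`` has the same underlying width as the old ``mp_bitcnt_t``, existing call sites of the ``randtest`` functions touched by this patch should compile unchanged; only the declared parameter type differs.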
diff --git a/doc/source/nfloat.rst b/doc/source/nfloat.rst index 98cd7012af..d332d7f00f 100644 --- a/doc/source/nfloat.rst +++ b/doc/source/nfloat.rst @@ -230,8 +230,8 @@ These methods are interchangeable with their ``gr`` counterparts. int nfloat_set_si(nfloat_ptr res, slong x, gr_ctx_t ctx) int nfloat_set_fmpz(nfloat_ptr res, const fmpz_t x, gr_ctx_t ctx) -.. function:: int _nfloat_set_mpn_2exp(nfloat_ptr res, mp_srcptr x, mp_size_t xn, slong exp, int xsgnbit, gr_ctx_t ctx) - int nfloat_set_mpn_2exp(nfloat_ptr res, mp_srcptr x, mp_size_t xn, slong exp, int xsgnbit, gr_ctx_t ctx) +.. function:: int _nfloat_set_mpn_2exp(nfloat_ptr res, nn_srcptr x, slong xn, slong exp, int xsgnbit, gr_ctx_t ctx) + int nfloat_set_mpn_2exp(nfloat_ptr res, nn_srcptr x, slong xn, slong exp, int xsgnbit, gr_ctx_t ctx) .. function:: int nfloat_set_arf(nfloat_ptr res, const arf_t x, gr_ctx_t ctx) int nfloat_get_arf(arf_t res, nfloat_srcptr x, gr_ctx_t ctx) @@ -315,7 +315,7 @@ Internal functions .. function:: int _nfloat_cmp(nfloat_srcptr x, nfloat_srcptr y, gr_ctx_t ctx) int _nfloat_cmpabs(nfloat_srcptr x, nfloat_srcptr y, gr_ctx_t ctx) - int _nfloat_add_1(nfloat_ptr res, mp_limb_t x0, slong xexp, int xsgnbit, mp_limb_t y0, slong delta, gr_ctx_t ctx) - int _nfloat_sub_1(nfloat_ptr res, mp_limb_t x0, slong xexp, int xsgnbit, mp_limb_t y0, slong delta, gr_ctx_t ctx) - int _nfloat_add_n(nfloat_ptr res, mp_srcptr xd, slong xexp, int xsgnbit, mp_srcptr yd, slong delta, slong nlimbs, gr_ctx_t ctx) - int _nfloat_sub_n(nfloat_ptr res, mp_srcptr xd, slong xexp, int xsgnbit, mp_srcptr yd, slong delta, slong nlimbs, gr_ctx_t ctx) + int _nfloat_add_1(nfloat_ptr res, ulong x0, slong xexp, int xsgnbit, ulong y0, slong delta, gr_ctx_t ctx) + int _nfloat_sub_1(nfloat_ptr res, ulong x0, slong xexp, int xsgnbit, ulong y0, slong delta, gr_ctx_t ctx) + int _nfloat_add_n(nfloat_ptr res, nn_srcptr xd, slong xexp, int xsgnbit, nn_srcptr yd, slong delta, slong nlimbs, gr_ctx_t ctx) + int _nfloat_sub_n(nfloat_ptr res, nn_srcptr xd, slong xexp, int xsgnbit, nn_srcptr yd, slong delta, slong nlimbs, gr_ctx_t ctx) diff --git a/doc/source/nmod.rst b/doc/source/nmod.rst index 06cfce7359..fcb5921812 100644 --- a/doc/source/nmod.rst +++ b/doc/source/nmod.rst @@ -6,7 +6,7 @@ Modular reduction and arithmetic -------------------------------------------------------------------------------- -.. function:: void nmod_init(nmod_t * mod, mp_limb_t n) +.. function:: void nmod_init(nmod_t * mod, ulong n) Initialises the given ``nmod_t`` structure for reduction modulo `n` with a precomputed inverse. @@ -67,68 +67,68 @@ Modular reduction and arithmetic ``mod`` parameter must be a valid ``nmod_t`` structure. It is assumed that `r`, `a`, `b` are already reduced modulo ``mod.n``. -.. function:: mp_limb_t _nmod_add(mp_limb_t a, mp_limb_t b, nmod_t mod) +.. function:: ulong _nmod_add(ulong a, ulong b, nmod_t mod) Returns `a + b` modulo ``mod.n``. It is assumed that ``mod`` is no more than ``FLINT_BITS - 1`` bits. It is assumed that `a` and `b` are already reduced modulo ``mod.n``. -.. function:: mp_limb_t nmod_add(mp_limb_t a, mp_limb_t b, nmod_t mod) +.. function:: ulong nmod_add(ulong a, ulong b, nmod_t mod) Returns `a + b` modulo ``mod.n``. No assumptions are made about ``mod.n``. It is assumed that `a` and `b` are already reduced modulo ``mod.n``. -.. function:: mp_limb_t _nmod_sub(mp_limb_t a, mp_limb_t b, nmod_t mod) +.. function:: ulong _nmod_sub(ulong a, ulong b, nmod_t mod) Returns `a - b` modulo ``mod.n``. 
It is assumed that ``mod`` is no more than ``FLINT_BITS - 1`` bits. It is assumed that `a` and `b` are already reduced modulo ``mod.n``. -.. function:: mp_limb_t nmod_sub(mp_limb_t a, mp_limb_t b, nmod_t mod) +.. function:: ulong nmod_sub(ulong a, ulong b, nmod_t mod) Returns `a - b` modulo ``mod.n``. No assumptions are made about ``mod.n``. It is assumed that `a` and `b` are already reduced modulo ``mod.n``. -.. function:: mp_limb_t nmod_neg(mp_limb_t a, nmod_t mod) +.. function:: ulong nmod_neg(ulong a, nmod_t mod) Returns `-a` modulo ``mod.n``. It is assumed that `a` is already reduced modulo ``mod.n``, but no assumptions are made about the latter. -.. function:: mp_limb_t nmod_mul(mp_limb_t a, mp_limb_t b, nmod_t mod) +.. function:: ulong nmod_mul(ulong a, ulong b, nmod_t mod) Returns `ab` modulo ``mod.n``. No assumptions are made about ``mod.n``. It is assumed that `a` and `b` are already reduced modulo ``mod.n``. -.. function:: mp_limb_t _nmod_mul_fullword(mp_limb_t a, mp_limb_t b, nmod_t mod) +.. function:: ulong _nmod_mul_fullword(ulong a, ulong b, nmod_t mod) Returns `ab` modulo ``mod.n``. Requires that ``mod.n`` is exactly ``FLINT_BITS`` large. It is assumed that `a` and `b` are already reduced modulo ``mod.n``. -.. function:: mp_limb_t nmod_inv(mp_limb_t a, nmod_t mod) +.. function:: ulong nmod_inv(ulong a, nmod_t mod) Returns `a^{-1}` modulo ``mod.n``. The inverse is assumed to exist. -.. function:: mp_limb_t nmod_div(mp_limb_t a, mp_limb_t b, nmod_t mod) +.. function:: ulong nmod_div(ulong a, ulong b, nmod_t mod) Returns `ab^{-1}` modulo ``mod.n``. The inverse of `b` is assumed to exist. It is assumed that `a` is already reduced modulo ``mod.n``. -.. function:: int nmod_divides(mp_limb_t * a, mp_limb_t b, mp_limb_t c, nmod_t mod) +.. function:: int nmod_divides(ulong * a, ulong b, ulong c, nmod_t mod) If `a\cdot c = b \mod n` has a solution for `a` return `1` and set `a` to such a solution. Otherwise return `0` and leave `a` undefined. -.. function:: mp_limb_t nmod_pow_ui(mp_limb_t a, ulong e, nmod_t mod) +.. function:: ulong nmod_pow_ui(ulong a, ulong e, nmod_t mod) Returns `a^e` modulo ``mod.n``. No assumptions are made about ``mod.n``. It is assumed that `a` is already reduced modulo ``mod.n``. -.. function:: mp_limb_t nmod_pow_fmpz(mp_limb_t a, const fmpz_t e, nmod_t mod) +.. function:: ulong nmod_pow_fmpz(ulong a, const fmpz_t e, nmod_t mod) Returns `a^e` modulo ``mod.n``. No assumptions are made about ``mod.n``. It is assumed that `a` is already reduced @@ -146,16 +146,16 @@ Discrete Logarithms via Pohlig-Hellman Free any space used by ``L``. -.. function:: double nmod_discrete_log_pohlig_hellman_precompute_prime(nmod_discrete_log_pohlig_hellman_t L, mp_limb_t p) +.. function:: double nmod_discrete_log_pohlig_hellman_precompute_prime(nmod_discrete_log_pohlig_hellman_t L, ulong p) Configure ``L`` for discrete logarithms modulo ``p`` to an internally chosen base. It is assumed that ``p`` is prime. The return is an estimate on the number of multiplications needed for one run. -.. function:: mp_limb_t nmod_discrete_log_pohlig_hellman_primitive_root(const nmod_discrete_log_pohlig_hellman_t L) +.. function:: ulong nmod_discrete_log_pohlig_hellman_primitive_root(const nmod_discrete_log_pohlig_hellman_t L) Return the internally stored base. -.. function:: ulong nmod_discrete_log_pohlig_hellman_run(const nmod_discrete_log_pohlig_hellman_t L, mp_limb_t y) +.. 
function:: ulong nmod_discrete_log_pohlig_hellman_run(const nmod_discrete_log_pohlig_hellman_t L, ulong y) Return the logarithm of ``y`` with respect to the internally stored base. ``y`` is expected to be reduced modulo the ``p``. The function is undefined if the logarithm does not exist. diff --git a/doc/source/nmod_mat.rst b/doc/source/nmod_mat.rst index 1c40220535..02c4cee491 100644 --- a/doc/source/nmod_mat.rst +++ b/doc/source/nmod_mat.rst @@ -12,7 +12,7 @@ The :type:`nmod_mat_t` type is defined as an array of parameters of type :type:`nmod_mat_t` by reference. An :type:`nmod_mat_t` internally consists of a single array of -``mp_limb_t``'s, representing a dense matrix in row-major order. This +``ulong``'s, representing a dense matrix in row-major order. This array is only directly indexed during memory allocation and deallocation. A separate array holds pointers to the start of each row, and is used for all indexing. This allows the rows of a matrix to @@ -45,7 +45,7 @@ Memory management -------------------------------------------------------------------------------- -.. function:: void nmod_mat_init(nmod_mat_t mat, slong rows, slong cols, mp_limb_t n) +.. function:: void nmod_mat_init(nmod_mat_t mat, slong rows, slong cols, ulong n) Initialises ``mat`` to a ``rows``-by-``cols`` matrix with coefficients modulo `n`, where `n` can be any nonzero integer that @@ -87,16 +87,16 @@ Basic properties and manipulation indexed from zero. No bounds checking is performed. This macro can be used both for reading and writing coefficients. -.. function:: mp_limb_t nmod_mat_get_entry(const nmod_mat_t mat, slong i, slong j) +.. function:: ulong nmod_mat_get_entry(const nmod_mat_t mat, slong i, slong j) Get the entry at row `i` and column `j` of the matrix ``mat``. -.. function:: mp_limb_t * nmod_mat_entry_ptr(const nmod_mat_t mat, slong i, slong j) +.. function:: ulong * nmod_mat_entry_ptr(const nmod_mat_t mat, slong i, slong j) Return a pointer to the entry at row `i` and column `j` of the matrix ``mat``. -.. function:: void nmod_mat_set_entry(nmod_mat_t mat, slong i, slong j, mp_limb_t x) +.. function:: void nmod_mat_set_entry(nmod_mat_t mat, slong i, slong j, ulong x) Set the entry at row `i` and column `j` of the matrix ``mat`` to ``x``. @@ -193,7 +193,7 @@ Random matrix generation Sets the element to random numbers likely to be close to the modulus of the matrix. This is used to test potential overflow-related bugs. -.. function:: int nmod_mat_randpermdiag(nmod_mat_t mat, flint_rand_t state, mp_srcptr diag, slong n) +.. function:: int nmod_mat_randpermdiag(nmod_mat_t mat, flint_rand_t state, nn_srcptr diag, slong n) Sets ``mat`` to a random permutation of the diagonal matrix with `n` leading entries given by the vector ``diag``. It is @@ -307,12 +307,12 @@ Matrix-scalar arithmetic -------------------------------------------------------------------------------- -.. function:: void nmod_mat_scalar_mul(nmod_mat_t B, const nmod_mat_t A, mp_limb_t c) +.. function:: void nmod_mat_scalar_mul(nmod_mat_t B, const nmod_mat_t A, ulong c) Sets `B = cA`, where the scalar `c` is assumed to be reduced modulo the modulus. Dimensions of `A` and `B` must be identical. -.. function:: void nmod_mat_scalar_addmul_ui(nmod_mat_t dest, const nmod_mat_t X, const nmod_mat_t Y, const mp_limb_t b) +.. function:: void nmod_mat_scalar_addmul_ui(nmod_mat_t dest, const nmod_mat_t X, const nmod_mat_t Y, const ulong b) Sets `dest = X + bY`, where the scalar `b` is assumed to be reduced modulo the modulus. 
Dimensions of dest, X and Y must be identical. @@ -381,15 +381,15 @@ Matrix multiplication Sets `D = C + AB`. `C` and `D` may be aliased with each other but not with `A` or `B`. -.. function:: void nmod_mat_mul_nmod_vec(mp_limb_t * c, const nmod_mat_t A, const mp_limb_t * b, slong blen) - void nmod_mat_mul_nmod_vec_ptr(mp_limb_t * const * c, const nmod_mat_t A, const mp_limb_t * const * b, slong blen) +.. function:: void nmod_mat_mul_nmod_vec(ulong * c, const nmod_mat_t A, const ulong * b, slong blen) + void nmod_mat_mul_nmod_vec_ptr(ulong * const * c, const nmod_mat_t A, const ulong * const * b, slong blen) Compute a matrix-vector product of ``A`` and ``(b, blen)`` and store the result in ``c``. The vector ``(b, blen)`` is either truncated or zero-extended to the number of columns of ``A``. The number of entries written to ``c`` is always equal to the number of rows of ``A``. -.. function:: void nmod_mat_nmod_vec_mul(mp_limb_t * c, const mp_limb_t * a, slong alen, const nmod_mat_t B) - void nmod_mat_nmod_vec_mul_ptr(mp_limb_t * const * c, const mp_limb_t * const * a, slong alen, const nmod_mat_t B) +.. function:: void nmod_mat_nmod_vec_mul(ulong * c, const ulong * a, slong alen, const nmod_mat_t B) + void nmod_mat_nmod_vec_mul_ptr(ulong * const * c, const ulong * const * a, slong alen, const nmod_mat_t B) Compute a vector-matrix product of ``(a, alen)`` and ``B`` and store the result in ``c``. The vector ``(a, alen)`` is either truncated or zero-extended to the number of rows of ``B``. @@ -414,7 +414,7 @@ Trace -------------------------------------------------------------------------------- -.. function:: mp_limb_t nmod_mat_trace(const nmod_mat_t mat) +.. function:: ulong nmod_mat_trace(const nmod_mat_t mat) Computes the trace of the matrix, i.e. the sum of the entries on the main diagonal. The matrix is required to be square. @@ -423,11 +423,11 @@ Determinant and rank -------------------------------------------------------------------------------- -.. function:: mp_limb_t nmod_mat_det_howell(const nmod_mat_t A) +.. function:: ulong nmod_mat_det_howell(const nmod_mat_t A) Returns the determinant of `A`. -.. function:: mp_limb_t nmod_mat_det(const nmod_mat_t A) +.. function:: ulong nmod_mat_det(const nmod_mat_t A) Returns the determinant of `A`. @@ -568,7 +568,7 @@ Nonsingular square solving There are no restrictions on the shape of `A` and it may be singular. -.. function:: int nmod_mat_solve_vec(mp_ptr x, const nmod_mat_t A, mp_srcptr b) +.. function:: int nmod_mat_solve_vec(nn_ptr x, const nmod_mat_t A, nn_srcptr b) Solves the matrix-vector equation `Ax = b` over `\mathbb{Z} / p \mathbb{Z}` where `p` is the modulus of `A` which must be a prime number. diff --git a/doc/source/nmod_mpoly.rst b/doc/source/nmod_mpoly.rst index 5d9030cb5f..7553315112 100644 --- a/doc/source/nmod_mpoly.rst +++ b/doc/source/nmod_mpoly.rst @@ -4,7 +4,7 @@ =============================================================================== The exponents follow the ``mpoly`` interface. - A coefficient may be referenced as a ``mp_limb_t *``. + A coefficient may be referenced as a ``ulong *``. Types, macros and constants ------------------------------------------------------------------------------- @@ -30,7 +30,7 @@ Context object -------------------------------------------------------------------------------- -.. function:: void nmod_mpoly_ctx_init(nmod_mpoly_ctx_t ctx, slong nvars, const ordering_t ord, mp_limb_t n) +..
function:: void nmod_mpoly_ctx_init(nmod_mpoly_ctx_t ctx, slong nvars, const ordering_t ord, ulong n) Initialise a context object for a polynomial ring with the given number of variables and the given ordering. It will have coefficients modulo *n*. Setting `n = 0` will give undefined behavior. @@ -44,7 +44,7 @@ Context object Return the ordering used to initialize the context. -.. function:: mp_limb_t nmod_mpoly_ctx_modulus(const nmod_mpoly_ctx_t ctx) +.. function:: ulong nmod_mpoly_ctx_modulus(const nmod_mpoly_ctx_t ctx) Return the modulus used to initialize the context. @@ -255,7 +255,7 @@ Container operations These functions deal with violations of the internal canonical representation. If a term index is negative or not strictly less than the length of the polynomial, the function will throw. -.. function:: mp_limb_t * nmod_mpoly_term_coeff_ref(nmod_mpoly_t A, slong i, const nmod_mpoly_ctx_t ctx) +.. function:: ulong * nmod_mpoly_term_coeff_ref(nmod_mpoly_t A, slong i, const nmod_mpoly_ctx_t ctx) Return a reference to the coefficient of index *i* of *A*. @@ -352,7 +352,7 @@ Random generation Generate a random polynomial with length up to *length* and exponents in the range ``[0, exp_bounds[i] - 1]``. The exponents of the variable of index *i* are generated by calls to ``n_randint(state, exp_bounds[i])``. -.. function:: void nmod_mpoly_randtest_bits(nmod_mpoly_t A, flint_rand_t state, slong length, mp_limb_t exp_bits, const nmod_mpoly_ctx_t ctx) +.. function:: void nmod_mpoly_randtest_bits(nmod_mpoly_t A, flint_rand_t state, slong length, ulong exp_bits, const nmod_mpoly_ctx_t ctx) Generate a random polynomial with length up to *length* and exponents whose packed form does not exceed the given bit count. diff --git a/doc/source/nmod_poly.rst b/doc/source/nmod_poly.rst index 11a92057d6..4831d02cbd 100644 --- a/doc/source/nmod_poly.rst +++ b/doc/source/nmod_poly.rst @@ -8,7 +8,7 @@ The :type:`nmod_poly_t` data type represents elements of module provides routines for memory management, basic arithmetic and some higher level functions such as GCD, etc. -Each coefficient of an :type:`nmod_poly_t` is of type ``mp_limb_t`` +Each coefficient of an :type:`nmod_poly_t` is of type ``ulong`` and represents an integer reduced modulo the fixed modulus `n`. Unless otherwise specified, all functions in this section permit @@ -79,25 +79,15 @@ Types, macros and constants .. type:: nmod_poly_t -Helper functions --------------------------------------------------------------------------------- - - -.. function:: int signed_mpn_sub_n(mp_ptr res, mp_srcptr op1, mp_srcptr op2, slong n) - - If ``op1 >= op2`` return 0 and set ``res`` to ``op1 - op2`` - else return 1 and set ``res`` to ``op2 - op1``. - Memory management -------------------------------------------------------------------------------- - -.. function:: void nmod_poly_init(nmod_poly_t poly, mp_limb_t n) +.. function:: void nmod_poly_init(nmod_poly_t poly, ulong n) Initialises ``poly``. It will have coefficients modulo `n`. -.. function:: void nmod_poly_init_preinv(nmod_poly_t poly, mp_limb_t n, mp_limb_t ninv) +.. function:: void nmod_poly_init_preinv(nmod_poly_t poly, ulong n, ulong ninv) Initialises ``poly``. It will have coefficients modulo `n`. The caller supplies a precomputed inverse limb generated by @@ -107,12 +97,12 @@ Memory management Initialises ``poly`` using an already initialised modulus ``mod``. -.. function:: void nmod_poly_init2(nmod_poly_t poly, mp_limb_t n, slong alloc) +.. 
function:: void nmod_poly_init2(nmod_poly_t poly, ulong n, slong alloc) Initialises ``poly``. It will have coefficients modulo `n`. Up to ``alloc`` coefficients may be stored in ``poly``. -.. function:: void nmod_poly_init2_preinv(nmod_poly_t poly, mp_limb_t n, mp_limb_t ninv, slong alloc) +.. function:: void nmod_poly_init2_preinv(nmod_poly_t poly, ulong n, ulong ninv, slong alloc) Initialises ``poly``. It will have coefficients modulo `n`. The caller supplies a precomputed inverse limb generated by @@ -157,7 +147,7 @@ Polynomial properties Returns the degree of the polynomial ``poly``. The zero polynomial is deemed to have degree `-1`. -.. function:: mp_limb_t nmod_poly_modulus(const nmod_poly_t poly) +.. function:: ulong nmod_poly_modulus(const nmod_poly_t poly) Returns the modulus of the polynomial ``poly``. This will be a positive integer. @@ -205,7 +195,7 @@ Assignment and basic manipulation Notionally truncate ``poly`` to length ``len`` and set ``res`` to the result. The result is normalised. -.. function:: void _nmod_poly_reverse(mp_ptr output, mp_srcptr input, slong len, slong m) +.. function:: void _nmod_poly_reverse(nn_ptr output, nn_srcptr input, slong len, slong m) Sets ``output`` to the reverse of ``input``, which is of length ``len``, but thinking of it as a polynomial of length ``m``, @@ -438,7 +428,7 @@ Shifting -------------------------------------------------------------------------------- -.. function:: void _nmod_poly_shift_left(mp_ptr res, mp_srcptr poly, slong len, slong k) +.. function:: void _nmod_poly_shift_left(nn_ptr res, nn_srcptr poly, slong len, slong k) Sets ``(res, len + k)`` to ``(poly, len)`` shifted left by ``k`` coefficients. Assumes that ``res`` has space for @@ -449,7 +439,7 @@ Shifting Sets ``res`` to ``poly`` shifted left by ``k`` coefficients, i.e. multiplied by `x^k`. -.. function:: void _nmod_poly_shift_right(mp_ptr res, mp_srcptr poly, slong len, slong k) +.. function:: void _nmod_poly_shift_right(nn_ptr res, nn_srcptr poly, slong len, slong k) Sets ``(res, len - k)`` to ``(poly, len)`` shifted right by ``k`` coefficients. It is assumed that ``k <= len`` and that @@ -467,7 +457,7 @@ Addition and subtraction -------------------------------------------------------------------------------- -.. function:: void _nmod_poly_add(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, nmod_t mod) +.. function:: void _nmod_poly_add(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, nmod_t mod) Sets ``res`` to the sum of ``(poly1, len1)`` and ``(poly2, len2)``. There are no restrictions on the lengths. @@ -481,7 +471,7 @@ Addition and subtraction Notionally truncate ``poly1`` and ``poly2`` to length `n` and set ``res`` to the sum. -.. function:: void _nmod_poly_sub(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, nmod_t mod) +.. function:: void _nmod_poly_sub(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, nmod_t mod) Sets ``res`` to the difference of ``(poly1, len1)`` and ``(poly2, len2)``. There are no restrictions on the lengths. @@ -514,7 +504,7 @@ Scalar multiplication and division Adds ``poly`` multiplied by `c` to ``res``. The element `c` is assumed to be less than the modulus of ``poly``. -..
function:: void _nmod_poly_make_monic(nn_ptr output, nn_srcptr input, slong len, nmod_t mod) Sets ``output`` to be the scalar multiple of ``input`` of length ``len > 0`` that has leading coefficient one, if such a @@ -537,7 +527,7 @@ Bit packing and unpacking -------------------------------------------------------------------------------- -.. function:: void _nmod_poly_bit_pack(mp_ptr res, mp_srcptr poly, slong len, flint_bitcnt_t bits) +.. function:: void _nmod_poly_bit_pack(nn_ptr res, nn_srcptr poly, slong len, flint_bitcnt_t bits) Packs ``len`` coefficients of ``poly`` into fields of the given number of bits in the large integer ``res``, i.e. evaluates @@ -546,7 +536,7 @@ Bit packing and unpacking coefficient of ``poly`` is bigger than ``bits/2`` bits. We also assume ``bits < 3 * FLINT_BITS``. -.. function:: void _nmod_poly_bit_unpack(mp_ptr res, slong len, mp_srcptr mpn, ulong bits, nmod_t mod) +.. function:: void _nmod_poly_bit_unpack(nn_ptr res, slong len, nn_srcptr mpn, ulong bits, nmod_t mod) Unpacks ``len`` coefficients stored in the big integer ``mpn`` in bit fields of the given number of bits, reduces them modulo the @@ -566,32 +556,32 @@ Bit packing and unpacking represented by the integer ``f``. -.. function:: void _nmod_poly_KS2_pack1(mp_ptr res, mp_srcptr op, slong n, slong s, ulong b, ulong k, slong r) +.. function:: void _nmod_poly_KS2_pack1(nn_ptr res, nn_srcptr op, slong n, slong s, ulong b, ulong k, slong r) Same as ``_nmod_poly_KS2_pack``, but requires ``b <= FLINT_BITS``. -.. function:: void _nmod_poly_KS2_pack(mp_ptr res, mp_srcptr op, slong n, slong s, ulong b, ulong k, slong r) +.. function:: void _nmod_poly_KS2_pack(nn_ptr res, nn_srcptr op, slong n, slong s, ulong b, ulong k, slong r) Bit packing routine used by KS2 and KS4 multiplication. -.. function:: void _nmod_poly_KS2_unpack1(mp_ptr res, mp_srcptr op, slong n, ulong b, ulong k) +.. function:: void _nmod_poly_KS2_unpack1(nn_ptr res, nn_srcptr op, slong n, ulong b, ulong k) Same as ``_nmod_poly_KS2_unpack``, but requires ``b <= FLINT_BITS`` (i.e. writes one word per coefficient). -.. function:: void _nmod_poly_KS2_unpack2(mp_ptr res, mp_srcptr op, slong n, ulong b, ulong k) +.. function:: void _nmod_poly_KS2_unpack2(nn_ptr res, nn_srcptr op, slong n, ulong b, ulong k) Same as ``_nmod_poly_KS2_unpack``, but requires ``FLINT_BITS < b <= 2 * FLINT_BITS`` (i.e. writes two words per coefficient). -.. function:: void _nmod_poly_KS2_unpack3(mp_ptr res, mp_srcptr op, slong n, ulong b, ulong k) +.. function:: void _nmod_poly_KS2_unpack3(nn_ptr res, nn_srcptr op, slong n, ulong b, ulong k) Same as ``_nmod_poly_KS2_unpack``, but requires ``2 * FLINT_BITS < b < 3 * FLINT_BITS`` (i.e. writes three words per coefficient). -.. function:: void _nmod_poly_KS2_unpack(mp_ptr res, mp_srcptr op, slong n, ulong b, ulong k) +.. function:: void _nmod_poly_KS2_unpack(nn_ptr res, nn_srcptr op, slong n, ulong b, ulong k) Bit unpacking code used by KS2 and KS4 multiplication. @@ -601,31 +591,31 @@ KS2/KS4 Reduction -------------------------------------------------------------------------------- -.. function:: void _nmod_poly_KS2_reduce(mp_ptr res, slong s, mp_srcptr op, slong n, ulong w, nmod_t mod) +.. function:: void _nmod_poly_KS2_reduce(nn_ptr res, slong s, nn_srcptr op, slong n, ulong w, nmod_t mod) Reduction code used by KS2 and KS4 multiplication. -.. function:: void _nmod_poly_KS2_recover_reduce1(mp_ptr res, slong s, mp_srcptr op1, mp_srcptr op2, slong n, ulong b, nmod_t mod) +.. 
function:: void _nmod_poly_KS2_recover_reduce1(nn_ptr res, slong s, nn_srcptr op1, nn_srcptr op2, slong n, ulong b, nmod_t mod) Same as ``_nmod_poly_KS2_recover_reduce``, but requires ``0 < 2 * b <= FLINT_BITS``. -.. function:: void _nmod_poly_KS2_recover_reduce2(mp_ptr res, slong s, mp_srcptr op1, mp_srcptr op2, slong n, ulong b, nmod_t mod) +.. function:: void _nmod_poly_KS2_recover_reduce2(nn_ptr res, slong s, nn_srcptr op1, nn_srcptr op2, slong n, ulong b, nmod_t mod) Same as ``_nmod_poly_KS2_recover_reduce``, but requires ``FLINT_BITS < 2 * b < 2*FLINT_BITS``. -.. function:: void _nmod_poly_KS2_recover_reduce2b(mp_ptr res, slong s, mp_srcptr op1, mp_srcptr op2, slong n, ulong b, nmod_t mod) +.. function:: void _nmod_poly_KS2_recover_reduce2b(nn_ptr res, slong s, nn_srcptr op1, nn_srcptr op2, slong n, ulong b, nmod_t mod) Same as ``_nmod_poly_KS2_recover_reduce``, but requires ``b == FLINT_BITS``. -.. function:: void _nmod_poly_KS2_recover_reduce3(mp_ptr res, slong s, mp_srcptr op1, mp_srcptr op2, slong n, ulong b, nmod_t mod) +.. function:: void _nmod_poly_KS2_recover_reduce3(nn_ptr res, slong s, nn_srcptr op1, nn_srcptr op2, slong n, ulong b, nmod_t mod) Same as ``_nmod_poly_KS2_recover_reduce``, but requires ``2 * FLINT_BITS < 2 * b <= 3 * FLINT_BITS``. -.. function:: void _nmod_poly_KS2_recover_reduce(mp_ptr res, slong s, mp_srcptr op1, mp_srcptr op2, slong n, ulong b, nmod_t mod) +.. function:: void _nmod_poly_KS2_recover_reduce(nn_ptr res, slong s, nn_srcptr op1, nn_srcptr op2, slong n, ulong b, nmod_t mod) Reduction code used by KS4 multiplication. @@ -635,7 +625,7 @@ Multiplication -------------------------------------------------------------------------------- -.. function:: void _nmod_poly_mul_classical(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, nmod_t mod) +.. function:: void _nmod_poly_mul_classical(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, nmod_t mod) Sets ``(res, len1 + len2 - 1)`` to the product of ``(poly1, len1)`` and ``(poly2, len2)``. Assumes ``len1 >= len2 > 0``. Aliasing of @@ -645,7 +635,7 @@ Multiplication Sets ``res`` to the product of ``poly1`` and ``poly2``. -.. function:: void _nmod_poly_mullow_classical(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong trunc, nmod_t mod) +.. function:: void _nmod_poly_mullow_classical(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong trunc, nmod_t mod) Sets ``res`` to the lower ``trunc`` coefficients of the product of ``(poly1, len1)`` and ``(poly2, len2)``. Assumes that @@ -657,7 +647,7 @@ Multiplication Sets ``res`` to the lower ``trunc`` coefficients of the product of ``poly1`` and ``poly2``. -.. function:: void _nmod_poly_mulhigh_classical(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong start, nmod_t mod) +.. function:: void _nmod_poly_mulhigh_classical(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong start, nmod_t mod) Computes the product of ``(poly1, len1)`` and ``(poly2, len2)`` and writes the coefficients from ``start`` onwards into the high @@ -671,7 +661,7 @@ Multiplication coefficients from ``start`` onwards into the high coefficients of ``res``, the remaining coefficients being arbitrary but reduced. -.. function:: void _nmod_poly_mul_KS(mp_ptr out, mp_srcptr in1, slong len1, mp_srcptr in2, slong len2, flint_bitcnt_t bits, nmod_t mod) +.. 
function:: void _nmod_poly_mul_KS(nn_ptr out, nn_srcptr in1, slong len1, nn_srcptr in2, slong len2, flint_bitcnt_t bits, nmod_t mod) Sets ``res`` to the product of ``in1`` and ``in2`` assuming the output coefficients are at most the given number of @@ -685,7 +675,7 @@ Multiplication bits wide. If ``bits`` is set to `0` an appropriate value is computed automatically. -.. function:: void _nmod_poly_mul_KS2(mp_ptr res, mp_srcptr op1, slong n1, mp_srcptr op2, slong n2, nmod_t mod) +.. function:: void _nmod_poly_mul_KS2(nn_ptr res, nn_srcptr op1, slong n1, nn_srcptr op2, slong n2, nmod_t mod) Sets ``res`` to the product of ``op1`` and ``op2``. Assumes that ``len1 >= len2 > 0``. @@ -694,7 +684,7 @@ Multiplication Sets ``res`` to the product of ``poly1`` and ``poly2``. -.. function:: void _nmod_poly_mul_KS4(mp_ptr res, mp_srcptr op1, slong n1, mp_srcptr op2, slong n2, nmod_t mod) +.. function:: void _nmod_poly_mul_KS4(nn_ptr res, nn_srcptr op1, slong n1, nn_srcptr op2, slong n2, nmod_t mod) Sets ``res`` to the product of ``op1`` and ``op2``. Assumes that ``len1 >= len2 > 0``. @@ -703,7 +693,7 @@ Multiplication Sets ``res`` to the product of ``poly1`` and ``poly2``. -.. function:: void _nmod_poly_mullow_KS(mp_ptr out, mp_srcptr in1, slong len1, mp_srcptr in2, slong len2, flint_bitcnt_t bits, slong n, nmod_t mod) +.. function:: void _nmod_poly_mullow_KS(nn_ptr out, nn_srcptr in1, slong len1, nn_srcptr in2, slong len2, flint_bitcnt_t bits, slong n, nmod_t mod) Sets ``out`` to the low `n` coefficients of ``in1`` of length ``len1`` times ``in2`` of length ``len2``. The output must have @@ -715,7 +705,7 @@ Multiplication Set ``res`` to the low `n` coefficients of ``in1`` of length ``len1`` times ``in2`` of length ``len2``. -.. function:: void _nmod_poly_mul(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, nmod_t mod) +.. function:: void _nmod_poly_mul(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, nmod_t mod) Sets ``res`` to the product of ``poly1`` of length ``len1`` and ``poly2`` of length ``len2``. Assumes ``len1 >= len2 > 0``. @@ -725,7 +715,7 @@ Multiplication Sets ``res`` to the product of ``poly1`` and ``poly2``. -.. function:: void _nmod_poly_mullow(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong n, nmod_t mod) +.. function:: void _nmod_poly_mullow(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong n, nmod_t mod) Sets ``res`` to the first ``n`` coefficients of the product of ``poly1`` of length ``len1`` and ``poly2`` of @@ -738,7 +728,7 @@ Multiplication Sets ``res`` to the first ``trunc`` coefficients of the product of ``poly1`` and ``poly2``. -.. function:: void _nmod_poly_mulhigh(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong n, nmod_t mod) +.. function:: void _nmod_poly_mulhigh(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong n, nmod_t mod) Sets all but the low `n` coefficients of ``res`` to the corresponding coefficients of the product of ``poly1`` of length @@ -753,7 +743,7 @@ Multiplication corresponding coefficients of the product of ``poly1`` and ``poly2``, the remaining coefficients being arbitrary. -.. function:: void _nmod_poly_mulmod(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, mp_srcptr f, slong lenf, nmod_t mod) +.. 
function:: void _nmod_poly_mulmod(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, nn_srcptr f, slong lenf, nmod_t mod) Sets ``res`` to the remainder of the product of ``poly1`` and ``poly2`` upon polynomial division by ``f``. @@ -769,7 +759,7 @@ Multiplication Sets ``res`` to the remainder of the product of ``poly1`` and ``poly2`` upon polynomial division by ``f``. -.. function:: void _nmod_poly_mulmod_preinv(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, mp_srcptr f, slong lenf, mp_srcptr finv, slong lenfinv, nmod_t mod) +.. function:: void _nmod_poly_mulmod_preinv(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, nn_srcptr f, slong lenf, nn_srcptr finv, slong lenfinv, nmod_t mod) Sets ``res`` to the remainder of the product of ``poly1`` and ``poly2`` upon polynomial division by ``f``. @@ -794,7 +784,7 @@ Powering -------------------------------------------------------------------------------- -.. function:: void _nmod_poly_pow_binexp(mp_ptr res, mp_srcptr poly, slong len, ulong e, nmod_t mod) +.. function:: void _nmod_poly_pow_binexp(nn_ptr res, nn_srcptr poly, slong len, ulong e, nmod_t mod) Raises ``poly`` of length ``len`` to the power ``e`` and sets ``res`` to the result. We require that ``res`` has enough space @@ -807,7 +797,7 @@ Powering Raises ``poly`` to the power ``e`` and sets ``res`` to the result. Uses the binary exponentiation method. -.. function:: void _nmod_poly_pow(mp_ptr res, mp_srcptr poly, slong len, ulong e, nmod_t mod) +.. function:: void _nmod_poly_pow(nn_ptr res, nn_srcptr poly, slong len, ulong e, nmod_t mod) Raises ``poly`` of length ``len`` to the power ``e`` and sets ``res`` to the result. We require that ``res`` has enough space @@ -819,7 +809,7 @@ Powering Raises ``poly`` to the power ``e`` and sets ``res`` to the result. -.. function:: void _nmod_poly_pow_trunc_binexp(mp_ptr res, mp_srcptr poly, ulong e, slong trunc, nmod_t mod) +.. function:: void _nmod_poly_pow_trunc_binexp(nn_ptr res, nn_srcptr poly, ulong e, slong trunc, nmod_t mod) Sets ``res`` to the low ``trunc`` coefficients of ``poly`` (assumed to be zero padded if necessary to length ``trunc``) to @@ -835,7 +825,7 @@ Powering to the power ``e``. This is equivalent to doing a powering followed by a truncation. Uses the binary exponentiation method. -.. function:: void _nmod_poly_pow_trunc(mp_ptr res, mp_srcptr poly, ulong e, slong trunc, nmod_t mod) +.. function:: void _nmod_poly_pow_trunc(nn_ptr res, nn_srcptr poly, ulong e, slong trunc, nmod_t mod) Sets ``res`` to the low ``trunc`` coefficients of ``poly`` (assumed to be zero padded if necessary to length ``trunc``) to @@ -850,7 +840,7 @@ Powering to the power ``e``. This is equivalent to doing a powering followed by a truncation. -.. function:: void _nmod_poly_powmod_ui_binexp(mp_ptr res, mp_srcptr poly, ulong e, mp_srcptr f, slong lenf, nmod_t mod) +.. function:: void _nmod_poly_powmod_ui_binexp(nn_ptr res, nn_srcptr poly, ulong e, nn_srcptr f, slong lenf, nmod_t mod) Sets ``res`` to ``poly`` raised to the power ``e`` modulo ``f``, using binary exponentiation. We require ``e > 0``. @@ -865,7 +855,7 @@ Powering Sets ``res`` to ``poly`` raised to the power ``e`` modulo ``f``, using binary exponentiation. We require ``e >= 0``. -.. function:: void _nmod_poly_powmod_fmpz_binexp(mp_ptr res, mp_srcptr poly, fmpz_t e, mp_srcptr f, slong lenf, nmod_t mod) +.. 
function:: void _nmod_poly_powmod_fmpz_binexp(nn_ptr res, nn_srcptr poly, fmpz_t e, nn_srcptr f, slong lenf, nmod_t mod) Sets ``res`` to ``poly`` raised to the power ``e`` modulo ``f``, using binary exponentiation. We require ``e > 0``. @@ -879,7 +869,7 @@ Powering Sets ``res`` to ``poly`` raised to the power ``e`` modulo ``f``, using binary exponentiation. We require ``e >= 0``. -.. function:: void _nmod_poly_powmod_ui_binexp_preinv (mp_ptr res, mp_srcptr poly, ulong e, mp_srcptr f, slong lenf, mp_srcptr finv, slong lenfinv, nmod_t mod) +.. function:: void _nmod_poly_powmod_ui_binexp_preinv (nn_ptr res, nn_srcptr poly, ulong e, nn_srcptr f, slong lenf, nn_srcptr finv, slong lenfinv, nmod_t mod) Sets ``res`` to ``poly`` raised to the power ``e`` modulo ``f``, using binary exponentiation. We require ``e > 0``. @@ -896,7 +886,7 @@ Powering modulo ``f``, using binary exponentiation. We require ``e >= 0``. We require ``finv`` to be the inverse of the reverse of ``f``. -.. function:: void _nmod_poly_powmod_fmpz_binexp_preinv (mp_ptr res, mp_srcptr poly, fmpz_t e, mp_srcptr f, slong lenf, mp_srcptr finv, slong lenfinv, nmod_t mod) +.. function:: void _nmod_poly_powmod_fmpz_binexp_preinv (nn_ptr res, nn_srcptr poly, fmpz_t e, nn_srcptr f, slong lenf, nn_srcptr finv, slong lenfinv, nmod_t mod) Sets ``res`` to ``poly`` raised to the power ``e`` modulo ``f``, using binary exponentiation. We require ``e > 0``. @@ -913,7 +903,7 @@ Powering modulo ``f``, using binary exponentiation. We require ``e >= 0``. We require ``finv`` to be the inverse of the reverse of ``f``. -.. function:: void _nmod_poly_powmod_x_ui_preinv (mp_ptr res, ulong e, mp_srcptr f, slong lenf, mp_srcptr finv, slong lenfinv, nmod_t mod) +.. function:: void _nmod_poly_powmod_x_ui_preinv (nn_ptr res, ulong e, nn_srcptr f, slong lenf, nn_srcptr finv, slong lenfinv, nmod_t mod) Sets ``res`` to ``x`` raised to the power ``e`` modulo ``f``, using sliding window exponentiation. We require ``e > 0``. @@ -929,7 +919,7 @@ Powering ``e >= 0``. We require ``finv`` to be the inverse of the reverse of ``f``. -.. function:: void _nmod_poly_powmod_x_fmpz_preinv (mp_ptr res, fmpz_t e, mp_srcptr f, slong lenf, mp_srcptr finv, slong lenfinv, nmod_t mod) +.. function:: void _nmod_poly_powmod_x_fmpz_preinv (nn_ptr res, fmpz_t e, nn_srcptr f, slong lenf, nn_srcptr finv, slong lenfinv, nmod_t mod) Sets ``res`` to ``x`` raised to the power ``e`` modulo ``f``, using sliding window exponentiation. We require ``e > 0``. @@ -945,7 +935,7 @@ Powering ``e >= 0``. We require ``finv`` to be the inverse of the reverse of ``f``. -.. function:: void _nmod_poly_powers_mod_preinv_naive(mp_ptr * res, mp_srcptr f, slong flen, slong n, mp_srcptr g, slong glen, mp_srcptr ginv, slong ginvlen, const nmod_t mod) +.. function:: void _nmod_poly_powers_mod_preinv_naive(nn_ptr * res, nn_srcptr f, slong flen, slong n, nn_srcptr g, slong glen, nn_srcptr ginv, slong ginvlen, const nmod_t mod) Compute ``f^0, f^1, ..., f^(n-1) mod g``, where ``g`` has length ``glen`` and ``f`` is reduced mod ``g`` and has length ``flen`` (possibly zero @@ -960,7 +950,7 @@ Powering No aliasing is permitted between the entries of ``res`` and either of the inputs. -.. function:: void _nmod_poly_powers_mod_preinv_threaded_pool(mp_ptr * res, mp_srcptr f, slong flen, slong n, mp_srcptr g, slong glen, mp_srcptr ginv, slong ginvlen, const nmod_t mod, thread_pool_handle * threads, slong num_threads) +.. 
function:: void _nmod_poly_powers_mod_preinv_threaded_pool(nn_ptr * res, nn_srcptr f, slong flen, slong n, nn_srcptr g, slong glen, nn_srcptr ginv, slong ginvlen, const nmod_t mod, thread_pool_handle * threads, slong num_threads) Compute ``f^0, f^1, ..., f^(n-1) mod g``, where ``g`` has length ``glen`` and ``f`` is reduced mod ``g`` and has length ``flen`` (possibly zero @@ -969,7 +959,7 @@ Powering ``ginv`` of length ``ginvlen`` is set to the power series inverse of the reverse of ``g``. -.. function:: void _nmod_poly_powers_mod_preinv_threaded(mp_ptr * res, mp_srcptr f, slong flen, slong n, mp_srcptr g, slong glen, mp_srcptr ginv, slong ginvlen, const nmod_t mod) +.. function:: void _nmod_poly_powers_mod_preinv_threaded(nn_ptr * res, nn_srcptr f, slong flen, slong n, nn_srcptr g, slong glen, nn_srcptr ginv, slong ginvlen, const nmod_t mod) Compute ``f^0, f^1, ..., f^(n-1) mod g``, where ``g`` has length ``glen`` and ``f`` is reduced mod ``g`` and has length ``flen`` (possibly zero @@ -988,7 +978,7 @@ Division -------------------------------------------------------------------------------- -.. function:: void _nmod_poly_divrem_basecase(mp_ptr Q, mp_ptr R, mp_srcptr A, slong A_len, mp_srcptr B, slong B_len, nmod_t mod) +.. function:: void _nmod_poly_divrem_basecase(nn_ptr Q, nn_ptr R, nn_srcptr A, slong A_len, nn_srcptr B, slong B_len, nmod_t mod) Finds `Q` and `R` such that `A = B Q + R` with `\operatorname{len}(R) < \operatorname{len}(B)`. If `\operatorname{len}(B) = 0` an exception is raised. We require that ``W`` @@ -1000,7 +990,7 @@ Division Finds `Q` and `R` such that `A = B Q + R` with `\operatorname{len}(R) < \operatorname{len}(B)`. If `\operatorname{len}(B) = 0` an exception is raised. -.. function:: void _nmod_poly_divrem(mp_ptr Q, mp_ptr R, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod) +.. function:: void _nmod_poly_divrem(nn_ptr Q, nn_ptr R, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod) Computes `Q` and `R` such that `A = BQ + R` with `\operatorname{len}(R)` less than ``lenB``, where ``A`` is of length ``lenA`` and ``B`` is of @@ -1011,7 +1001,7 @@ Division Computes `Q` and `R` such that `A = BQ + R` with `\operatorname{len}(R) < \operatorname{len}(B)`. -.. function:: void _nmod_poly_div(mp_ptr Q, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod) +.. function:: void _nmod_poly_div(nn_ptr Q, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod) Notionally computes polynomials `Q` and `R` such that `A = BQ + R` with `\operatorname{len}(R)` less than ``lenB``, where ``A`` is of length ``lenA`` @@ -1022,9 +1012,9 @@ Division Computes the quotient `Q` on polynomial division of `A` and `B`. -.. function:: void _nmod_poly_rem_q1(mp_ptr R, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod) +.. function:: void _nmod_poly_rem_q1(nn_ptr R, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod) -.. function:: void _nmod_poly_rem(mp_ptr R, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod) +.. function:: void _nmod_poly_rem(nn_ptr R, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod) Computes the remainder `R` on polynomial division of `A` by `B`. @@ -1032,13 +1022,13 @@ Division Computes the remainder `R` on polynomial division of `A` by `B`. -.. function:: void _nmod_poly_divexact(mp_ptr Q, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod) +.. 
function:: void _nmod_poly_divexact(nn_ptr Q, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod) void nmod_poly_divexact(nmod_poly_t Q, const nmod_poly_t A, const nmod_poly_t B) Computes the quotient `Q` of `A` and `B` assuming that the division is exact. -.. function:: void _nmod_poly_inv_series_basecase(mp_ptr Qinv, mp_srcptr Q, slong Qlen, slong n, nmod_t mod) +.. function:: void _nmod_poly_inv_series_basecase(nn_ptr Qinv, nn_srcptr Q, slong Qlen, slong n, nmod_t mod) Given ``Q`` of length ``Qlen`` whose leading coefficient is invertible modulo the given modulus, finds a polynomial ``Qinv`` of length ``n`` @@ -1055,7 +1045,7 @@ Division coefficient of ``Q`` must be invertible modulo the modulus of ``Q``. This function can be viewed as inverting a power series. -.. function:: void _nmod_poly_inv_series_newton(mp_ptr Qinv, mp_srcptr Q, slong Qlen, slong n, nmod_t mod) +.. function:: void _nmod_poly_inv_series_newton(nn_ptr Qinv, nn_srcptr Q, slong Qlen, slong n, nmod_t mod) Given ``Q`` of length ``Qlen`` whose constant coefficient is invertible modulo the given modulus, find a polynomial ``Qinv`` of length ``n`` @@ -1071,7 +1061,7 @@ Division the case or if ``n = 0``. This function can be viewed as inverting a power series via Newton iteration. -.. function:: void _nmod_poly_inv_series(mp_ptr Qinv, mp_srcptr Q, slong Qlen, slong n, nmod_t mod) +.. function:: void _nmod_poly_inv_series(nn_ptr Qinv, nn_srcptr Q, slong Qlen, slong n, nmod_t mod) Given ``Q`` of length ``Qlenn`` whose constant coefficient is invertible modulo the given modulus, find a polynomial ``Qinv`` of length ``n`` @@ -1086,7 +1076,7 @@ Division the case or if ``n = 0``. This function can be viewed as inverting a power series. -.. function:: void _nmod_poly_div_series_basecase(mp_ptr Q, mp_srcptr A, slong Alen, mp_srcptr B, slong Blen, slong n, nmod_t mod) +.. function:: void _nmod_poly_div_series_basecase(nn_ptr Q, nn_srcptr A, slong Alen, nn_srcptr B, slong Blen, slong n, nmod_t mod) Given polynomials ``A`` and ``B`` of length ``Alen`` and ``Blen``, finds the @@ -1104,7 +1094,7 @@ Division An exception is raised if ``n == 0`` or the constant coefficient of ``B`` is zero. -.. function:: void _nmod_poly_div_series(mp_ptr Q, mp_srcptr A, slong Alen, mp_srcptr B, slong Blen, slong n, nmod_t mod) +.. function:: void _nmod_poly_div_series(nn_ptr Q, nn_srcptr A, slong Alen, nn_srcptr B, slong Blen, slong n, nmod_t mod) Given polynomials ``A`` and ``B`` of length ``Alen`` and ``Blen``, finds the @@ -1122,7 +1112,7 @@ Division An exception is raised if ``n == 0`` or the constant coefficient of ``B`` is zero. -.. function:: void _nmod_poly_div_newton_n_preinv (mp_ptr Q, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, mp_srcptr Binv, slong lenBinv, nmod_t mod) +.. function:: void _nmod_poly_div_newton_n_preinv (nn_ptr Q, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nn_srcptr Binv, slong lenBinv, nmod_t mod) Notionally computes polynomials `Q` and `R` such that `A = BQ + R` with `\operatorname{len}(R)` less than ``lenB``, where ``A`` is of length ``lenA`` @@ -1149,7 +1139,7 @@ Division The algorithm used is to reverse the polynomials and divide the resulting power series, then reverse the result. -.. function:: void _nmod_poly_divrem_newton_n_preinv (mp_ptr Q, mp_ptr R, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, mp_srcptr Binv, slong lenBinv, nmod_t mod) +.. 
function:: void _nmod_poly_divrem_newton_n_preinv (nn_ptr Q, nn_ptr R, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nn_srcptr Binv, slong lenBinv, nmod_t mod) Computes `Q` and `R` such that `A = BQ + R` with `\operatorname{len}(R)` less than ``lenB``, where `A` is of length ``lenA`` and `B` is of length @@ -1170,14 +1160,14 @@ Division The algorithm used is to call :func:`div_newton_n` and then multiply out and compute the remainder. -.. function:: mp_limb_t _nmod_poly_div_root(mp_ptr Q, mp_srcptr A, slong len, mp_limb_t c, nmod_t mod) +.. function:: ulong _nmod_poly_div_root(nn_ptr Q, nn_srcptr A, slong len, ulong c, nmod_t mod) Sets ``(Q, len-1)`` to the quotient of ``(A, len)`` on division by `(x - c)`, and returns the remainder, equal to the value of `A` evaluated at `c`. `A` and `Q` are allowed to be the same, but may not overlap partially in any other way. -.. function:: mp_limb_t nmod_poly_div_root(nmod_poly_t Q, const nmod_poly_t A, mp_limb_t c) +.. function:: ulong nmod_poly_div_root(nmod_poly_t Q, const nmod_poly_t A, ulong c) Sets `Q` to the quotient of `A` on division by `(x - c)`, and returns the remainder, equal to the value of `A` evaluated at `c`. @@ -1187,7 +1177,7 @@ Divisibility testing -------------------------------------------------------------------------------- -.. function:: int _nmod_poly_divides_classical(mp_ptr Q, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod) +.. function:: int _nmod_poly_divides_classical(nn_ptr Q, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod) Returns `1` if `(B, lenB)` divides `(A, lenA)` and sets `(Q, lenA - lenB + 1)` to the quotient. Otherwise, returns `0` and sets @@ -1198,7 +1188,7 @@ Divisibility testing Returns `1` if `B` divides `A` and sets `Q` to the quotient. Otherwise returns `0` and sets `Q` to zero. -.. function:: int _nmod_poly_divides(mp_ptr Q, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod) +.. function:: int _nmod_poly_divides(nn_ptr Q, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod) Returns `1` if `(B, lenB)` divides `(A, lenA)` and sets `(Q, lenA - lenB + 1)` to the quotient. Otherwise, returns `0` and sets @@ -1219,7 +1209,7 @@ Derivative and integral -------------------------------------------------------------------------------- -.. function:: void _nmod_poly_derivative(mp_ptr x_prime, mp_srcptr x, slong len, nmod_t mod) +.. function:: void _nmod_poly_derivative(nn_ptr x_prime, nn_srcptr x, slong len, nmod_t mod) Sets the first ``len - 1`` coefficients of ``x_prime`` to the derivative of ``x`` which is assumed to be of length ``len``. @@ -1229,7 +1219,7 @@ Derivative and integral Sets ``x_prime`` to the derivative of ``x``. -.. function:: void _nmod_poly_integral(mp_ptr x_int, mp_srcptr x, slong len, nmod_t mod) +.. function:: void _nmod_poly_integral(nn_ptr x_int, nn_srcptr x, slong len, nmod_t mod) Set the first ``len`` coefficients of ``x_int`` to the integral of ``x`` which is assumed to be of length ``len - 1``. @@ -1250,13 +1240,13 @@ Evaluation -------------------------------------------------------------------------------- -.. function:: mp_limb_t _nmod_poly_evaluate_nmod(mp_srcptr poly, slong len, mp_limb_t c, nmod_t mod) +.. function:: ulong _nmod_poly_evaluate_nmod(nn_srcptr poly, slong len, ulong c, nmod_t mod) Evaluates ``poly`` at the value ``c`` and reduces modulo the given modulus of ``poly``. The value ``c`` should be reduced modulo the modulus. The algorithm used is Horner's method. -.. 
function:: mp_limb_t nmod_poly_evaluate_nmod(const nmod_poly_t poly, mp_limb_t c) +.. function:: ulong nmod_poly_evaluate_nmod(const nmod_poly_t poly, ulong c) Evaluates ``poly`` at the value ``c`` and reduces modulo the modulus of ``poly``. The value ``c`` should be reduced modulo @@ -1290,7 +1280,7 @@ Multipoint evaluation -------------------------------------------------------------------------------- -.. function:: void _nmod_poly_evaluate_nmod_vec_iter(mp_ptr ys, mp_srcptr poly, slong len, mp_srcptr xs, slong n, nmod_t mod) +.. function:: void _nmod_poly_evaluate_nmod_vec_iter(nn_ptr ys, nn_srcptr poly, slong len, nn_srcptr xs, slong n, nmod_t mod) Evaluates (``coeffs``, ``len``) at the ``n`` values given in the vector ``xs``, writing the output values @@ -1299,7 +1289,7 @@ Multipoint evaluation Uses Horner's method iteratively. -.. function:: void nmod_poly_evaluate_nmod_vec_iter(mp_ptr ys, const nmod_poly_t poly, mp_srcptr xs, slong n) +.. function:: void nmod_poly_evaluate_nmod_vec_iter(nn_ptr ys, const nmod_poly_t poly, nn_srcptr xs, slong n) Evaluates ``poly`` at the ``n`` values given in the vector ``xs``, writing the output values to ``ys``. The values in @@ -1307,12 +1297,12 @@ Multipoint evaluation Uses Horner's method iteratively. -.. function:: void _nmod_poly_evaluate_nmod_vec_fast_precomp(mp_ptr vs, mp_srcptr poly, slong plen, const mp_ptr * tree, slong len, nmod_t mod) +.. function:: void _nmod_poly_evaluate_nmod_vec_fast_precomp(nn_ptr vs, nn_srcptr poly, slong plen, const nn_ptr * tree, slong len, nmod_t mod) Evaluates (``poly``, ``plen``) at the ``len`` values given by the precomputed subproduct tree ``tree``. -.. function:: void _nmod_poly_evaluate_nmod_vec_fast(mp_ptr ys, mp_srcptr poly, slong len, mp_srcptr xs, slong n, nmod_t mod) +.. function:: void _nmod_poly_evaluate_nmod_vec_fast(nn_ptr ys, nn_srcptr poly, slong len, nn_srcptr xs, slong n, nmod_t mod) Evaluates (``coeffs``, ``len``) at the ``n`` values given in the vector ``xs``, writing the output values @@ -1321,7 +1311,7 @@ Multipoint evaluation Uses fast multipoint evaluation, building a temporary subproduct tree. -.. function:: void nmod_poly_evaluate_nmod_vec_fast(mp_ptr ys, const nmod_poly_t poly, mp_srcptr xs, slong n) +.. function:: void nmod_poly_evaluate_nmod_vec_fast(nn_ptr ys, const nmod_poly_t poly, nn_srcptr xs, slong n) Evaluates ``poly`` at the ``n`` values given in the vector ``xs``, writing the output values to ``ys``. The values in @@ -1330,14 +1320,14 @@ Multipoint evaluation Uses fast multipoint evaluation, building a temporary subproduct tree. -.. function:: void _nmod_poly_evaluate_nmod_vec(mp_ptr ys, mp_srcptr poly, slong len, mp_srcptr xs, slong n, nmod_t mod) +.. function:: void _nmod_poly_evaluate_nmod_vec(nn_ptr ys, nn_srcptr poly, slong len, nn_srcptr xs, slong n, nmod_t mod) Evaluates (``poly``, ``len``) at the ``n`` values given in the vector ``xs``, writing the output values to ``ys``. The values in ``xs`` should be reduced modulo the modulus. -.. function:: void nmod_poly_evaluate_nmod_vec(mp_ptr ys, const nmod_poly_t poly, mp_srcptr xs, slong n) +.. function:: void nmod_poly_evaluate_nmod_vec(nn_ptr ys, const nmod_poly_t poly, nn_srcptr xs, slong n) Evaluates ``poly`` at the ``n`` values given in the vector ``xs``, writing the output values to ``ys``. The values in @@ -1348,7 +1338,7 @@ Interpolation -------------------------------------------------------------------------------- -.. 
function:: void _nmod_poly_interpolate_nmod_vec(mp_ptr poly, mp_srcptr xs, mp_srcptr ys, slong n, nmod_t mod) +.. function:: void _nmod_poly_interpolate_nmod_vec(nn_ptr poly, nn_srcptr xs, nn_srcptr ys, slong n, nmod_t mod) Sets ``poly`` to the unique polynomial of length at most ``n`` that interpolates the ``n`` given evaluation points ``xs`` and @@ -1359,19 +1349,19 @@ Interpolation modulus, and all ``xs`` must be distinct. Aliasing between ``poly`` and ``xs`` or ``ys`` is not allowed. -.. function:: void nmod_poly_interpolate_nmod_vec(nmod_poly_t poly, mp_srcptr xs, mp_srcptr ys, slong n) +.. function:: void nmod_poly_interpolate_nmod_vec(nmod_poly_t poly, nn_srcptr xs, nn_srcptr ys, slong n) Sets ``poly`` to the unique polynomial of length ``n`` that interpolates the ``n`` given evaluation points ``xs`` and values ``ys``. The values in ``xs`` and ``ys`` should be reduced modulo the modulus, and all ``xs`` must be distinct. -.. function:: void _nmod_poly_interpolation_weights(mp_ptr w, const mp_ptr * tree, slong len, nmod_t mod) +.. function:: void _nmod_poly_interpolation_weights(nn_ptr w, const nn_ptr * tree, slong len, nmod_t mod) Sets ``w`` to the barycentric interpolation weights for fast Lagrange interpolation with respect to a given subproduct tree. -.. function:: void _nmod_poly_interpolate_nmod_vec_fast_precomp(mp_ptr poly, mp_srcptr ys, const mp_ptr * tree, mp_srcptr weights, slong len, nmod_t mod) +.. function:: void _nmod_poly_interpolate_nmod_vec_fast_precomp(nn_ptr poly, nn_srcptr ys, const nn_ptr * tree, nn_srcptr weights, slong len, nmod_t mod) Performs interpolation using the fast Lagrange interpolation algorithm, generating a temporary subproduct tree. @@ -1381,34 +1371,34 @@ Interpolation interpolation weights ``weights`` corresponding to the roots. -.. function:: void _nmod_poly_interpolate_nmod_vec_fast(mp_ptr poly, mp_srcptr xs, mp_srcptr ys, slong n, nmod_t mod) +.. function:: void _nmod_poly_interpolate_nmod_vec_fast(nn_ptr poly, nn_srcptr xs, nn_srcptr ys, slong n, nmod_t mod) Performs interpolation using the fast Lagrange interpolation algorithm, generating a temporary subproduct tree. -.. function:: void nmod_poly_interpolate_nmod_vec_fast(nmod_poly_t poly, mp_srcptr xs, mp_srcptr ys, slong n) +.. function:: void nmod_poly_interpolate_nmod_vec_fast(nmod_poly_t poly, nn_srcptr xs, nn_srcptr ys, slong n) Performs interpolation using the fast Lagrange interpolation algorithm, generating a temporary subproduct tree. -.. function:: void _nmod_poly_interpolate_nmod_vec_newton(mp_ptr poly, mp_srcptr xs, mp_srcptr ys, slong n, nmod_t mod) +.. function:: void _nmod_poly_interpolate_nmod_vec_newton(nn_ptr poly, nn_srcptr xs, nn_srcptr ys, slong n, nmod_t mod) Forms the interpolating polynomial in the Newton basis using the method of divided differences and then converts it to monomial form. -.. function:: void nmod_poly_interpolate_nmod_vec_newton(nmod_poly_t poly, mp_srcptr xs, mp_srcptr ys, slong n) +.. function:: void nmod_poly_interpolate_nmod_vec_newton(nmod_poly_t poly, nn_srcptr xs, nn_srcptr ys, slong n) Forms the interpolating polynomial in the Newton basis using the method of divided differences and then converts it to monomial form. -.. function:: void _nmod_poly_interpolate_nmod_vec_barycentric(mp_ptr poly, mp_srcptr xs, mp_srcptr ys, slong n, nmod_t mod) +.. 
function:: void _nmod_poly_interpolate_nmod_vec_barycentric(nn_ptr poly, nn_srcptr xs, nn_srcptr ys, slong n, nmod_t mod) Forms the interpolating polynomial using a naive implementation of the barycentric form of Lagrange interpolation. -.. function:: void nmod_poly_interpolate_nmod_vec_barycentric(nmod_poly_t poly, mp_srcptr xs, mp_srcptr ys, slong n) +.. function:: void nmod_poly_interpolate_nmod_vec_barycentric(nmod_poly_t poly, nn_srcptr xs, nn_srcptr ys, slong n) Forms the interpolating polynomial using a naive implementation of the barycentric form of Lagrange interpolation. @@ -1419,7 +1409,7 @@ Composition -------------------------------------------------------------------------------- -.. function:: void _nmod_poly_compose_horner(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, nmod_t mod) +.. function:: void _nmod_poly_compose_horner(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, nmod_t mod) Composes ``poly1`` of length ``len1`` with ``poly2`` of length ``len2`` and sets ``res`` to the result, i.e. evaluates @@ -1433,7 +1423,7 @@ Composition i.e. evaluates ``poly1`` at ``poly2``. The algorithm used is Horner's algorithm. -.. function:: void _nmod_poly_compose_divconquer(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, nmod_t mod) +.. function:: void _nmod_poly_compose_divconquer(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, nmod_t mod) Composes ``poly1`` of length ``len1`` with ``poly2`` of length ``len2`` and sets ``res`` to the result, i.e. evaluates @@ -1448,7 +1438,7 @@ Composition i.e. evaluates ``poly1`` at ``poly2``. The algorithm used is the divide and conquer algorithm. -.. function:: void _nmod_poly_compose(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, nmod_t mod) +.. function:: void _nmod_poly_compose(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, nmod_t mod) Composes ``poly1`` of length ``len1`` with ``poly2`` of length ``len2`` and sets ``res`` to the result, i.e. evaluates ``poly1`` @@ -1466,33 +1456,33 @@ Taylor shift -------------------------------------------------------------------------------- -.. function:: void _nmod_poly_taylor_shift_horner(mp_ptr poly, mp_limb_t c, slong len, nmod_t mod) +.. function:: void _nmod_poly_taylor_shift_horner(nn_ptr poly, ulong c, slong len, nmod_t mod) Performs the Taylor shift composing ``poly`` by `x+c` in-place. Uses an efficient version Horner's rule. -.. function:: void nmod_poly_taylor_shift_horner(nmod_poly_t g, const nmod_poly_t f, mp_limb_t c) +.. function:: void nmod_poly_taylor_shift_horner(nmod_poly_t g, const nmod_poly_t f, ulong c) Performs the Taylor shift composing ``f`` by `x+c`. -.. function:: void _nmod_poly_taylor_shift_convolution(mp_ptr poly, mp_limb_t c, slong len, nmod_t mod) +.. function:: void _nmod_poly_taylor_shift_convolution(nn_ptr poly, ulong c, slong len, nmod_t mod) Performs the Taylor shift composing ``poly`` by `x+c` in-place. Writes the composition as a single convolution with cost `O(M(n))`. We require that the modulus is a prime at least as large as the length. -.. function:: void nmod_poly_taylor_shift_convolution(nmod_poly_t g, const nmod_poly_t f, mp_limb_t c) +.. function:: void nmod_poly_taylor_shift_convolution(nmod_poly_t g, const nmod_poly_t f, ulong c) Performs the Taylor shift composing ``f`` by `x+c`. Writes the composition as a single convolution with cost `O(M(n))`. We require that the modulus is a prime at least as large as the length. -.. 
function:: void _nmod_poly_taylor_shift(mp_ptr poly, mp_limb_t c, slong len, nmod_t mod) +.. function:: void _nmod_poly_taylor_shift(nn_ptr poly, ulong c, slong len, nmod_t mod) Performs the Taylor shift composing ``poly`` by `x+c` in-place. We require that the modulus is a prime. -.. function:: void nmod_poly_taylor_shift(nmod_poly_t g, const nmod_poly_t f, mp_limb_t c) +.. function:: void nmod_poly_taylor_shift(nmod_poly_t g, const nmod_poly_t f, ulong c) Performs the Taylor shift composing ``f`` by `x+c`. We require that the modulus is a prime. @@ -1502,7 +1492,7 @@ Modular composition -------------------------------------------------------------------------------- -.. function:: void _nmod_poly_compose_mod_horner(mp_ptr res, mp_srcptr f, slong lenf, mp_srcptr g, mp_srcptr h, slong lenh, nmod_t mod) +.. function:: void _nmod_poly_compose_mod_horner(nn_ptr res, nn_srcptr f, slong lenf, nn_srcptr g, nn_srcptr h, slong lenh, nmod_t mod) Sets ``res`` to the composition `f(g)` modulo `h`. We require that `h` is nonzero and that the length of `g` is one less than the @@ -1516,7 +1506,7 @@ Modular composition Sets ``res`` to the composition `f(g)` modulo `h`. We require that `h` is nonzero. The algorithm used is Horner's rule. -.. function:: void _nmod_poly_compose_mod_brent_kung(mp_ptr res, mp_srcptr f, slong lenf, mp_srcptr g, mp_srcptr h, slong lenh, nmod_t mod) +.. function:: void _nmod_poly_compose_mod_brent_kung(nn_ptr res, nn_srcptr f, slong lenf, nn_srcptr g, nn_srcptr h, slong lenh, nmod_t mod) Sets ``res`` to the composition `f(g)` modulo `h`. We require that `h` is nonzero and that the length of `g` is one less than the @@ -1532,7 +1522,7 @@ Modular composition `h` is nonzero and that `f` has smaller degree than `h`. The algorithm used is the Brent-Kung matrix algorithm. -.. function:: void _nmod_poly_compose_mod_brent_kung_preinv(mp_ptr res, mp_srcptr f, slong lenf, mp_srcptr g, mp_srcptr h, slong lenh, mp_srcptr hinv, slong lenhinv, nmod_t mod) +.. function:: void _nmod_poly_compose_mod_brent_kung_preinv(nn_ptr res, nn_srcptr f, slong lenf, nn_srcptr g, nn_srcptr h, slong lenh, nn_srcptr hinv, slong lenhinv, nmod_t mod) Sets ``res`` to the composition `f(g)` modulo `h`. We require that `h` is nonzero and that the length of `g` is one less than the @@ -1561,7 +1551,7 @@ Modular composition Worker function version of ``_nmod_poly_precompute_matrix``. Input/output is stored in ``nmod_poly_matrix_precompute_arg_t``. -.. function:: void _nmod_poly_precompute_matrix (nmod_mat_t A, mp_srcptr f, mp_srcptr g, slong leng, mp_srcptr ginv, slong lenginv, nmod_t mod) +.. function:: void _nmod_poly_precompute_matrix (nmod_mat_t A, nn_srcptr f, nn_srcptr g, slong leng, nn_srcptr ginv, slong lenginv, nmod_t mod) Sets the ith row of ``A`` to `f^i` modulo `g` for `i=1,\ldots,\sqrt{\deg(g)}`. We require `A` to be @@ -1584,7 +1574,7 @@ Modular composition Input/output is stored in ``nmod_poly_compose_mod_precomp_preinv_arg_t``. -.. function:: void _nmod_poly_compose_mod_brent_kung_precomp_preinv(mp_ptr res, mp_srcptr f, slong lenf, const nmod_mat_t A, mp_srcptr h, slong lenh, mp_srcptr hinv, slong lenhinv, nmod_t mod) +.. function:: void _nmod_poly_compose_mod_brent_kung_precomp_preinv(nn_ptr res, nn_srcptr f, slong lenf, const nmod_mat_t A, nn_srcptr h, slong lenh, nn_srcptr hinv, slong lenhinv, nmod_t mod) Sets ``res`` to the composition `f(g)` modulo `h`. We require that `h` is nonzero. 
We require that the ith row of `A` contains `g^i` for @@ -1606,7 +1596,7 @@ Modular composition modular composition is particularly useful if one has to perform several modular composition of the form `f(g)` modulo `h` for fixed `g` and `h`. -.. function:: void _nmod_poly_compose_mod_brent_kung_vec_preinv(nmod_poly_struct * res, const nmod_poly_struct * polys, slong len1, slong l, mp_srcptr g, slong leng, mp_srcptr h, slong lenh, mp_srcptr hinv, slong lenhinv, nmod_t mod) +.. function:: void _nmod_poly_compose_mod_brent_kung_vec_preinv(nmod_poly_struct * res, const nmod_poly_struct * polys, slong len1, slong l, nn_srcptr g, slong leng, nn_srcptr h, slong lenh, nn_srcptr hinv, slong lenhinv, nmod_t mod) Sets ``res`` to the composition `f_i(g)` modulo `h` for `1\leq i \leq l`, where `f_i` are the first ``l`` elements of ``polys``. We require that `h` @@ -1631,7 +1621,7 @@ Modular composition of the reverse of ``h``. No aliasing of ``res`` and ``polys`` is allowed. The algorithm used is the Brent-Kung matrix algorithm. -.. function:: void _nmod_poly_compose_mod_brent_kung_vec_preinv_threaded_pool(nmod_poly_struct * res, const nmod_poly_struct * polys, slong lenpolys, slong l, mp_srcptr g, slong glen, mp_srcptr poly, slong len, mp_srcptr polyinv, slong leninv, nmod_t mod, thread_pool_handle * threads, slong num_threads) +.. function:: void _nmod_poly_compose_mod_brent_kung_vec_preinv_threaded_pool(nmod_poly_struct * res, const nmod_poly_struct * polys, slong lenpolys, slong l, nn_srcptr g, slong glen, nn_srcptr poly, slong len, nn_srcptr polyinv, slong leninv, nmod_t mod, thread_pool_handle * threads, slong num_threads) Multithreaded version of :func:`_nmod_poly_compose_mod_brent_kung_vec_preinv`. Distributing the @@ -1649,7 +1639,7 @@ Modular composition :func:`nmod_poly_compose_mod_brent_kung_vec_preinv`. Distributing the Horner evaluations across :func:`flint_get_num_threads` threads. -.. function:: void _nmod_poly_compose_mod(mp_ptr res, mp_srcptr f, slong lenf, mp_srcptr g, mp_srcptr h, slong lenh, nmod_t mod) +.. function:: void _nmod_poly_compose_mod(nn_ptr res, nn_srcptr f, slong lenf, nn_srcptr g, nn_srcptr h, slong lenh, nmod_t mod) Sets ``res`` to the composition `f(g)` modulo `h`. We require that `h` is nonzero and that the length of `g` is one less than the @@ -1667,7 +1657,7 @@ Greatest common divisor -------------------------------------------------------------------------------- -.. function:: slong _nmod_poly_gcd_euclidean(mp_ptr G, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod) +.. function:: slong _nmod_poly_gcd_euclidean(nn_ptr G, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod) Computes the GCD of `A` of length ``lenA`` and `B` of length ``lenB``, where ``lenA >= lenB > 0``. The length of the GCD `G` @@ -1681,7 +1671,7 @@ Greatest common divisor polynomial `P` is defined to be `P`. Except in the case where the GCD is zero, the GCD `G` is made monic. -.. function:: slong _nmod_poly_hgcd(mp_ptr * M, slong * lenM, mp_ptr A, slong * lenA, mp_ptr B, slong * lenB, mp_srcptr a, slong lena, mp_srcptr b, slong lenb, nmod_t mod) +.. 
function:: slong _nmod_poly_hgcd(nn_ptr * M, slong * lenM, nn_ptr A, slong * lenA, nn_ptr B, slong * lenB, nn_srcptr a, slong lena, nn_srcptr b, slong lenb, nmod_t mod) Computes the HGCD of `a` and `b`, that is, a matrix `M`, a sign `\sigma` and two polynomials `A` and `B` such that @@ -1704,7 +1694,7 @@ Greatest common divisor Assumes that ``M[0]``, ``M[1]``, ``M[2]``, and ``M[3]`` each point to a vector of size at least `\operatorname{len}(a)`. -.. function:: slong _nmod_poly_gcd_hgcd(mp_ptr G, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod) +.. function:: slong _nmod_poly_gcd_hgcd(nn_ptr G, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod) Computes the monic GCD of `A` and `B`, assuming that `\operatorname{len}(A) \geq \operatorname{len}(B) > 0`. @@ -1722,7 +1712,7 @@ Greatest common divisor The time complexity of the algorithm is `\mathcal{O}(n \log^2 n)`. For further details, see [ThullYap1990]_. -.. function:: slong _nmod_poly_gcd(mp_ptr G, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod) +.. function:: slong _nmod_poly_gcd(nn_ptr G, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod) Computes the GCD of `A` of length ``lenA`` and `B` of length ``lenB``, where ``lenA >= lenB > 0``. The length of the GCD `G` @@ -1736,7 +1726,7 @@ Greatest common divisor polynomial `P` is defined to be `P`. Except in the case where the GCD is zero, the GCD `G` is made monic. -.. function:: slong _nmod_poly_xgcd_euclidean(mp_ptr G, mp_ptr S, mp_ptr T, mp_srcptr A, slong A_len, mp_srcptr B, slong B_len, nmod_t mod) +.. function:: slong _nmod_poly_xgcd_euclidean(nn_ptr G, nn_ptr S, nn_ptr T, nn_srcptr A, slong A_len, nn_srcptr B, slong B_len, nmod_t mod) Computes the GCD of `A` and `B` together with cofactors `S` and `T` such that `S A + T B = G`. Returns the length of `G`. @@ -1764,7 +1754,7 @@ Greatest common divisor ``S*A + T*B = G``. The length of ``S`` will be at most ``lenB`` and the length of ``T`` will be at most ``lenA``. -.. function:: slong _nmod_poly_xgcd_hgcd(mp_ptr G, mp_ptr S, mp_ptr T, mp_srcptr A, slong A_len, mp_srcptr B, slong B_len, nmod_t mod) +.. function:: slong _nmod_poly_xgcd_hgcd(nn_ptr G, nn_ptr S, nn_ptr T, nn_srcptr A, slong A_len, nn_srcptr B, slong B_len, nmod_t mod) Computes the GCD of `A` and `B`, where `\operatorname{len}(A) \geq \operatorname{len}(B) > 0`, together with cofactors `S` and `T` such that `S A + T B = G`. Returns @@ -1792,7 +1782,7 @@ Greatest common divisor ``S*A + T*B = G``. The length of ``S`` will be at most ``lenB`` and the length of ``T`` will be at most ``lenA``. -.. function:: slong _nmod_poly_xgcd(mp_ptr G, mp_ptr S, mp_ptr T, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod) +.. function:: slong _nmod_poly_xgcd(nn_ptr G, nn_ptr S, nn_ptr T, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod) Computes the GCD of `A` and `B`, where `\operatorname{len}(A) \geq \operatorname{len}(B) > 0`, together with cofactors `S` and `T` such that `S A + T B = G`. Returns @@ -1818,7 +1808,7 @@ Greatest common divisor ``S*A + T*B = G``. The length of ``S`` will be at most ``lenB`` and the length of ``T`` will be at most ``lenA``. -.. function:: mp_limb_t _nmod_poly_resultant_euclidean(mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, nmod_t mod) +.. function:: ulong _nmod_poly_resultant_euclidean(nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, nmod_t mod) Returns the resultant of ``(poly1, len1)`` and ``(poly2, len2)`` using the Euclidean algorithm. 
@@ -1827,7 +1817,7 @@ Greatest common divisor Assumes that the modulus is prime. -.. function:: mp_limb_t nmod_poly_resultant_euclidean(const nmod_poly_t f, const nmod_poly_t g) +.. function:: ulong nmod_poly_resultant_euclidean(const nmod_poly_t f, const nmod_poly_t g) Computes the resultant of `f` and `g` using the Euclidean algorithm. @@ -1842,7 +1832,7 @@ Greatest common divisor For convenience, we define the resultant to be equal to zero if either of the two polynomials is zero. -.. function:: mp_limb_t _nmod_poly_resultant_hgcd(mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, nmod_t mod) +.. function:: ulong _nmod_poly_resultant_hgcd(nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, nmod_t mod) Returns the resultant of ``(poly1, len1)`` and ``(poly2, len2)`` using the half-gcd algorithm. @@ -1880,7 +1870,7 @@ Greatest common divisor Assumes that the modulus is prime. -.. function:: mp_limb_t nmod_poly_resultant_hgcd(const nmod_poly_t f, const nmod_poly_t g) +.. function:: ulong nmod_poly_resultant_hgcd(const nmod_poly_t f, const nmod_poly_t g) Computes the resultant of `f` and `g` using the half-gcd algorithm. @@ -1897,7 +1887,7 @@ Greatest common divisor For convenience, we define the resultant to be equal to zero if either of the two polynomials is zero. -.. function:: mp_limb_t _nmod_poly_resultant(mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, nmod_t mod) +.. function:: ulong _nmod_poly_resultant(nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, nmod_t mod) Returns the resultant of ``(poly1, len1)`` and ``(poly2, len2)``. @@ -1906,7 +1896,7 @@ Greatest common divisor Assumes that the modulus is prime. -.. function:: mp_limb_t nmod_poly_resultant(const nmod_poly_t f, const nmod_poly_t g) +.. function:: ulong nmod_poly_resultant(const nmod_poly_t f, const nmod_poly_t g) Computes the resultant of `f` and `g`. @@ -1923,7 +1913,7 @@ Greatest common divisor For convenience, we define the resultant to be equal to zero if either of the two polynomials is zero. -.. function:: slong _nmod_poly_gcdinv(mp_limb_t * G, mp_limb_t * S, const mp_limb_t * A, slong lenA, const mp_limb_t * B, slong lenB, const nmod_t mod) +.. function:: slong _nmod_poly_gcdinv(ulong * G, ulong * S, const ulong * A, slong lenA, const ulong * B, slong lenB, const nmod_t mod) Computes ``(G, lenA)``, ``(S, lenB-1)`` such that `G \cong S A \pmod{B}`, returning the actual length of `G`. @@ -1938,7 +1928,7 @@ Greatest common divisor In the case that `A = 0 \pmod{B}`, returns `G = S = 0`. -.. function:: int _nmod_poly_invmod(mp_limb_t * A, const mp_limb_t * B, slong lenB, const mp_limb_t * P, slong lenP, const nmod_t mod) +.. function:: int _nmod_poly_invmod(ulong * A, const ulong * B, slong lenB, const ulong * P, slong lenP, const nmod_t mod) Attempts to set ``(A, lenP-1)`` to the inverse of ``(B, lenB)`` modulo the polynomial ``(P, lenP)``. Returns `1` if ``(B, lenB)`` @@ -1969,11 +1959,11 @@ Discriminant -------------------------------------------------------------------------------- -.. function:: mp_limb_t _nmod_poly_discriminant(mp_srcptr poly, slong len, nmod_t mod) +.. function:: ulong _nmod_poly_discriminant(nn_srcptr poly, slong len, nmod_t mod) Return the discriminant of ``(poly, len)``. Assumes ``len > 1``. -.. function:: mp_limb_t nmod_poly_discriminant(const nmod_poly_t f) +.. function:: ulong nmod_poly_discriminant(const nmod_poly_t f) Return the discriminant of `f`. 
We normalise the discriminant so that @@ -1989,7 +1979,7 @@ Discriminant Power series composition -------------------------------------------------------------------------------- -.. function:: void _nmod_poly_compose_series(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong n, nmod_t mod) +.. function:: void _nmod_poly_compose_series(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong n, nmod_t mod) Sets ``res`` to the composition of ``poly1`` and ``poly2`` modulo `x^n`, where the constant term of ``poly2`` is required @@ -2013,7 +2003,7 @@ Power series composition Power series reversion -------------------------------------------------------------------------------- -.. function:: void _nmod_poly_revert_series(mp_ptr Qinv, mp_srcptr Q, slong Qlen, slong n, nmod_t mod) +.. function:: void _nmod_poly_revert_series(nn_ptr Qinv, nn_srcptr Q, slong Qlen, slong n, nmod_t mod) void nmod_poly_revert_series(nmod_poly_t Qinv, const nmod_poly_t Q, slong n) Sets ``Qinv`` to the compositional inverse or reversion of ``Q`` @@ -2036,7 +2026,7 @@ by means of the generalised binomial theorem It is assumed that `h` has constant term `1` and that the coefficients `2^{-k}` exist in the coefficient ring (i.e. `2` must be invertible). -.. function:: void _nmod_poly_invsqrt_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod) +.. function:: void _nmod_poly_invsqrt_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod) Set the first `n` terms of `g` to the series expansion of `1/\sqrt{h}`. It is assumed that `n > 0`, that `h` has constant term 1. Aliasing is not permitted. @@ -2046,7 +2036,7 @@ It is assumed that `h` has constant term `1` and that the coefficients Set `g` to the series expansion of `1/\sqrt{h}` to order `O(x^n)`. It is assumed that `h` has constant term 1. -.. function:: void _nmod_poly_sqrt_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod) +.. function:: void _nmod_poly_sqrt_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod) Set the first `n` terms of `g` to the series expansion of `\sqrt{h}`. It is assumed that `n > 0`, that `h` has constant term 1. Aliasing is not permitted. @@ -2056,7 +2046,7 @@ It is assumed that `h` has constant term `1` and that the coefficients Set `g` to the series expansion of `\sqrt{h}` to order `O(x^n)`. It is assumed that `h` has constant term 1. -.. function:: int _nmod_poly_sqrt(mp_ptr s, mp_srcptr p, slong n, nmod_t mod) +.. function:: int _nmod_poly_sqrt(nn_ptr s, nn_srcptr p, slong n, nmod_t mod) If ``(p, n)`` is a perfect square, sets ``(s, n / 2 + 1)`` to a square root of `p` and returns 1. Otherwise returns 0. @@ -2071,7 +2061,7 @@ Power sums -------------------------------------------------------------------------------- -.. function:: void _nmod_poly_power_sums_naive(mp_ptr res, mp_srcptr poly, slong len, slong n, nmod_t mod) +.. function:: void _nmod_poly_power_sums_naive(nn_ptr res, nn_srcptr poly, slong len, slong n, nmod_t mod) Compute the (truncated) power sums series of the polynomial ``(poly,len)`` up to length `n` using Newton identities. @@ -2081,7 +2071,7 @@ Power sums Compute the (truncated) power sum series of the polynomial ``poly`` up to length `n` using Newton identities. -.. function:: void _nmod_poly_power_sums_schoenhage(mp_ptr res, mp_srcptr poly, slong len, slong n, nmod_t mod) +.. 
function:: void _nmod_poly_power_sums_schoenhage(nn_ptr res, nn_srcptr poly, slong len, slong n, nmod_t mod) Compute the (truncated) power sums series of the polynomial ``(poly,len)`` up to length `n` using a series expansion @@ -2093,7 +2083,7 @@ Power sums ``poly`` up to length `n` using a series expansion (a formula due to Schoenhage). -.. function:: void _nmod_poly_power_sums(mp_ptr res, mp_srcptr poly, slong len, slong n, nmod_t mod) +.. function:: void _nmod_poly_power_sums(nn_ptr res, nn_srcptr poly, slong len, slong n, nmod_t mod) Compute the (truncated) power sums series of the polynomial ``(poly,len)`` up to length `n`. @@ -2103,7 +2093,7 @@ Power sums Compute the (truncated) power sums series of the polynomial ``poly`` up to length `n`. -.. function:: void _nmod_poly_power_sums_to_poly_naive(mp_ptr res, mp_srcptr poly, slong len, nmod_t mod) +.. function:: void _nmod_poly_power_sums_to_poly_naive(nn_ptr res, nn_srcptr poly, slong len, nmod_t mod) Compute the (monic) polynomial given by its power sums series ``(poly,len)`` using Newton identities. @@ -2113,7 +2103,7 @@ Power sums Compute the (monic) polynomial given by its power sums series ``Q`` using Newton identities. -.. function:: void _nmod_poly_power_sums_to_poly_schoenhage(mp_ptr res, mp_srcptr poly, slong len, nmod_t mod) +.. function:: void _nmod_poly_power_sums_to_poly_schoenhage(nn_ptr res, nn_srcptr poly, slong len, nmod_t mod) Compute the (monic) polynomial given by its power sums series ``(poly,len)`` using series expansion (a formula due to Schoenhage). @@ -2123,7 +2113,7 @@ Power sums Compute the (monic) polynomial given by its power sums series ``Q`` using series expansion (a formula due to Schoenhage). -.. function:: void _nmod_poly_power_sums_to_poly(mp_ptr res, mp_srcptr poly, slong len, nmod_t mod) +.. function:: void _nmod_poly_power_sums_to_poly(nn_ptr res, nn_srcptr poly, slong len, nmod_t mod) Compute the (monic) polynomial given by its power sums series ``(poly,len)``. @@ -2165,7 +2155,7 @@ Except where otherwise noted, functions are implemented with optimal (up to constants) complexity `O(M(n))`, where `M(n)` is the cost of polynomial multiplication. -.. function:: void _nmod_poly_log_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod) +.. function:: void _nmod_poly_log_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod) Set `g = \log(h) + O(x^n)`. Assumes `n > 0` and ``hlen > 0``. Aliasing of `g` and `h` is allowed. @@ -2175,7 +2165,7 @@ of polynomial multiplication. Set `g = \log(h) + O(x^n)`. The case `h = 1+cx^r` is automatically detected and handled efficiently. -.. function:: void _nmod_poly_exp_series(mp_ptr f, mp_srcptr h, slong hlen, slong n, nmod_t mod) +.. function:: void _nmod_poly_exp_series(nn_ptr f, nn_srcptr h, slong hlen, slong n, nmod_t mod) Set `f = \exp(h) + O(x^n)` where ``h`` is a polynomial. Assume `n > 0`. Aliasing of `g` and `h` is not allowed. @@ -2184,7 +2174,7 @@ of polynomial multiplication. algorithm in [HanZim2004]_). For small `n`, falls back to the basecase algorithm. -.. function:: void _nmod_poly_exp_expinv_series(mp_ptr f, mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod) +.. function:: void _nmod_poly_exp_expinv_series(nn_ptr f, nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod) Set `f = \exp(h) + O(x^n)` and `g = \exp(-h) + O(x^n)`, more efficiently for large `n` than performing a separate inversion to obtain `g`. @@ -2200,7 +2190,7 @@ of polynomial multiplication. detected and handled efficiently. 
Otherwise this function automatically uses the basecase algorithm for small `n` and Newton iteration otherwise. -.. function:: void _nmod_poly_atan_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod) +.. function:: void _nmod_poly_atan_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod) Set `g = \operatorname{atan}(h) + O(x^n)`. Assumes `n > 0`. Aliasing of `g` and `h` is allowed. @@ -2209,7 +2199,7 @@ of polynomial multiplication. Set `g = \operatorname{atan}(h) + O(x^n)`. -.. function:: void _nmod_poly_atanh_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod) +.. function:: void _nmod_poly_atanh_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod) Set `g = \operatorname{atanh}(h) + O(x^n)`. Assumes `n > 0`. Aliasing of `g` and `h` is allowed. @@ -2218,7 +2208,7 @@ of polynomial multiplication. Set `g = \operatorname{atanh}(h) + O(x^n)`. -.. function:: void _nmod_poly_asin_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod) +.. function:: void _nmod_poly_asin_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod) Set `g = \operatorname{asin}(h) + O(x^n)`. Assumes `n > 0`. Aliasing of `g` and `h` is allowed. @@ -2227,7 +2217,7 @@ of polynomial multiplication. Set `g = \operatorname{asin}(h) + O(x^n)`. -.. function:: void _nmod_poly_asinh_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod) +.. function:: void _nmod_poly_asinh_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod) Set `g = \operatorname{asinh}(h) + O(x^n)`. Assumes `n > 0`. Aliasing of `g` and `h` is allowed. @@ -2236,7 +2226,7 @@ of polynomial multiplication. Set `g = \operatorname{asinh}(h) + O(x^n)`. -.. function:: void _nmod_poly_sin_series(mp_ptr g, mp_srcptr h, slong n, nmod_t mod) +.. function:: void _nmod_poly_sin_series(nn_ptr g, nn_srcptr h, slong n, nmod_t mod) Set `g = \operatorname{sin}(h) + O(x^n)`. Assumes `n > 0` and that `h` is zero-padded as necessary to length `n`. Aliasing of `g` and `h` is @@ -2247,7 +2237,7 @@ of polynomial multiplication. Set `g = \operatorname{sin}(h) + O(x^n)`. -.. function:: void _nmod_poly_cos_series(mp_ptr g, mp_srcptr h, slong n, nmod_t mod) +.. function:: void _nmod_poly_cos_series(nn_ptr g, nn_srcptr h, slong n, nmod_t mod) Set `g = \operatorname{cos}(h) + O(x^n)`. Assumes `n > 0` and that `h` is zero-padded as necessary to length `n`. Aliasing of `g` and `h` is @@ -2258,7 +2248,7 @@ of polynomial multiplication. Set `g = \operatorname{cos}(h) + O(x^n)`. -.. function:: void _nmod_poly_tan_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod) +.. function:: void _nmod_poly_tan_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod) Set `g = \operatorname{tan}(h) + O(x^n)`. Assumes `n > 0` and that `h` is zero-padded as necessary to length `n`. Aliasing of `g` and `h` is @@ -2268,7 +2258,7 @@ of polynomial multiplication. Set `g = \operatorname{tan}(h) + O(x^n)`. -.. function:: void _nmod_poly_sinh_series(mp_ptr g, mp_srcptr h, slong n, nmod_t mod) +.. function:: void _nmod_poly_sinh_series(nn_ptr g, nn_srcptr h, slong n, nmod_t mod) Set `g = \operatorname{sinh}(h) + O(x^n)`. Assumes `n > 0` and that `h` is zero-padded as necessary to length `n`. Aliasing of `g` and `h` is @@ -2278,7 +2268,7 @@ of polynomial multiplication. Set `g = \operatorname{sinh}(h) + O(x^n)`. -.. function:: void _nmod_poly_cosh_series(mp_ptr g, mp_srcptr h, slong n, nmod_t mod) +.. function:: void _nmod_poly_cosh_series(nn_ptr g, nn_srcptr h, slong n, nmod_t mod) Set `g = \operatorname{cos}(h) + O(x^n)`. 
Assumes `n > 0` and that `h` is zero-padded as necessary to length `n`. Aliasing of `g` and `h` is @@ -2289,7 +2279,7 @@ of polynomial multiplication. Set `g = \operatorname{cosh}(h) + O(x^n)`. -.. function:: void _nmod_poly_tanh_series(mp_ptr g, mp_srcptr h, slong n, nmod_t mod) +.. function:: void _nmod_poly_tanh_series(nn_ptr g, nn_srcptr h, slong n, nmod_t mod) Set `g = \operatorname{tanh}(h) + O(x^n)`. Assumes `n > 0` and that `h` is zero-padded as necessary to length `n`. Uses the identity @@ -2303,7 +2293,7 @@ of polynomial multiplication. Special polynomials -------------------------------------------------------------------------------- -.. function:: int _nmod_poly_conway(mp_ptr op, ulong prime, slong deg) +.. function:: int _nmod_poly_conway(nn_ptr op, ulong prime, slong deg) Sets ``op`` to the coefficients to the Conway polynomial `C_{p, d}`, where `p` is ``prime`` and `d` is ``deg``. This is done by checking against Frank @@ -2331,7 +2321,7 @@ Products -------------------------------------------------------------------------------- -.. function:: void _nmod_poly_product_roots_nmod_vec(mp_ptr poly, mp_srcptr xs, slong n, nmod_t mod) +.. function:: void _nmod_poly_product_roots_nmod_vec(nn_ptr poly, nn_srcptr xs, slong n, nmod_t mod) Sets ``(poly, n + 1)`` to the monic polynomial which is the product of `(x - x_0)(x - x_1) \cdots (x - x_{n-1})`, the roots `x_i` being @@ -2339,13 +2329,13 @@ Products Aliasing of the input and output is not allowed. -.. function:: void nmod_poly_product_roots_nmod_vec(nmod_poly_t poly, mp_srcptr xs, slong n) +.. function:: void nmod_poly_product_roots_nmod_vec(nmod_poly_t poly, nn_srcptr xs, slong n) Sets ``poly`` to the monic polynomial which is the product of `(x - x_0)(x - x_1) \cdots (x - x_{n-1})`, the roots `x_i` being given by ``xs``. -.. function:: int nmod_poly_find_distinct_nonzero_roots(mp_limb_t * roots, const nmod_poly_t A) +.. function:: int nmod_poly_find_distinct_nonzero_roots(ulong * roots, const nmod_poly_t A) If ``A`` has `\deg(A)` distinct nonzero roots in `\mathbb{F}_p`, write these roots out to ``roots[0]`` to ``roots[deg(A) - 1]`` and return ``1``. Otherwise, return ``0``. It is assumed that ``A`` is nonzero and that the modulus of ``A`` is prime. @@ -2356,7 +2346,7 @@ Subproduct trees -------------------------------------------------------------------------------- -.. function:: mp_ptr * _nmod_poly_tree_alloc(slong len) +.. function:: nn_ptr * _nmod_poly_tree_alloc(slong len) Allocates space for a subproduct tree of the given length, having linear factors at the lowest level. @@ -2375,11 +2365,11 @@ Subproduct trees XXXX1 XX1 X1 XXXXXXX1 -.. function:: void _nmod_poly_tree_free(mp_ptr * tree, slong len) +.. function:: void _nmod_poly_tree_free(nn_ptr * tree, slong len) Free the allocated space for the subproduct. -.. function:: void _nmod_poly_tree_build(mp_ptr * tree, mp_srcptr roots, slong len, nmod_t mod) +.. function:: void _nmod_poly_tree_build(nn_ptr * tree, nn_srcptr roots, slong len, nmod_t mod) Builds a subproduct tree in the preallocated space from the ``len`` monic linear factors `(x-r_i)`. The top level @@ -2479,7 +2469,7 @@ Berlekamp-Massey Algorithm and it can be seen that `\sum_{i} v_i a_{j + i}` is zero for `1 \le j < n - \deg(R)`. Thus whether or not `V` has annihilated the whole sequence may be checked by comparing the degrees of `V` and `R`. -.. function:: void nmod_berlekamp_massey_init(nmod_berlekamp_massey_t B, mp_limb_t p) +.. 
function:: void nmod_berlekamp_massey_init(nmod_berlekamp_massey_t B, ulong p) Initialize ``B`` in characteristic ``p`` with an empty stream. @@ -2491,13 +2481,13 @@ Berlekamp-Massey Algorithm Empty the stream of points in ``B``. -.. function:: void nmod_berlekamp_massey_set_prime(nmod_berlekamp_massey_t B, mp_limb_t p) +.. function:: void nmod_berlekamp_massey_set_prime(nmod_berlekamp_massey_t B, ulong p) Set the characteristic of the field and empty the stream of points in ``B``. -.. function:: void nmod_berlekamp_massey_add_points(nmod_berlekamp_massey_t B, const mp_limb_t * a, slong count) +.. function:: void nmod_berlekamp_massey_add_points(nmod_berlekamp_massey_t B, const ulong * a, slong count) void nmod_berlekamp_massey_add_zeros(nmod_berlekamp_massey_t B, slong count) - void nmod_berlekamp_massey_add_point(nmod_berlekamp_massey_t B, mp_limb_t a) + void nmod_berlekamp_massey_add_point(nmod_berlekamp_massey_t B, ulong a) Add point(s) to the stream processed by ``B``. The addition of any number of points will not update the `V` and `R` polynomial. @@ -2511,7 +2501,7 @@ Berlekamp-Massey Algorithm Return the number of points stored in ``B``. -.. function:: const mp_limb_t * nmod_berlekamp_massey_points(const nmod_berlekamp_massey_t B) +.. function:: const ulong * nmod_berlekamp_massey_points(const nmod_berlekamp_massey_t B) Return a pointer to the array of points stored in ``B``. This may be ``NULL`` if :func:`nmod_berlekamp_massey_point_count` returns ``0``. diff --git a/doc/source/nmod_poly_factor.rst b/doc/source/nmod_poly_factor.rst index e38a87488d..a9b9a6bf4e 100644 --- a/doc/source/nmod_poly_factor.rst +++ b/doc/source/nmod_poly_factor.rst @@ -79,7 +79,7 @@ Factorisation Returns 1 if the polynomial ``f`` is irreducible, otherwise returns 0. Uses Rabin irreducibility test. -.. function:: int _nmod_poly_is_squarefree(mp_srcptr f, slong len, nmod_t mod) +.. function:: int _nmod_poly_is_squarefree(nn_srcptr f, slong len, nmod_t mod) Returns 1 if ``(f, len)`` is squarefree, and 0 otherwise. As a special case, the zero polynomial is not considered squarefree. @@ -143,7 +143,7 @@ Factorisation step. If :func:`flint_get_num_threads` is greater than one :func:`nmod_poly_factor_distinct_deg_threaded` is used. -.. function:: mp_limb_t nmod_poly_factor_with_berlekamp(nmod_poly_factor_t res, const nmod_poly_t f) +.. function:: ulong nmod_poly_factor_with_berlekamp(nmod_poly_factor_t res, const nmod_poly_t f) Factorises a general polynomial ``f`` into monic irreducible factors and returns the leading coefficient of ``f``, or 0 if ``f`` @@ -154,7 +154,7 @@ Factorisation square-free factorisation, and finally runs Berlekamp on all the individual square-free factors. -.. function:: mp_limb_t nmod_poly_factor_with_cantor_zassenhaus(nmod_poly_factor_t res, const nmod_poly_t f) +.. function:: ulong nmod_poly_factor_with_cantor_zassenhaus(nmod_poly_factor_t res, const nmod_poly_t f) Factorises a general polynomial ``f`` into monic irreducible factors and returns the leading coefficient of ``f``, or 0 if ``f`` @@ -165,7 +165,7 @@ Factorisation square-free factorisation, and finally runs Cantor-Zassenhaus on all the individual square-free factors. -.. function:: mp_limb_t nmod_poly_factor_with_kaltofen_shoup(nmod_poly_factor_t res, const nmod_poly_t f) +.. 
function:: ulong nmod_poly_factor_with_kaltofen_shoup(nmod_poly_factor_t res, const nmod_poly_t f) Factorises a general polynomial ``f`` into monic irreducible factors and returns the leading coefficient of ``f``, or 0 if ``f`` @@ -176,7 +176,7 @@ Factorisation square-free factorisation, and finally runs Kaltofen-Shoup on all the individual square-free factors. -.. function:: mp_limb_t nmod_poly_factor(nmod_poly_factor_t res, const nmod_poly_t f) +.. function:: ulong nmod_poly_factor(nmod_poly_factor_t res, const nmod_poly_t f) Factorises a general polynomial ``f`` into monic irreducible factors and returns the leading coefficient of ``f``, or 0 if ``f`` diff --git a/doc/source/nmod_poly_mat.rst b/doc/source/nmod_poly_mat.rst index c6497311c2..35e48c4d69 100644 --- a/doc/source/nmod_poly_mat.rst +++ b/doc/source/nmod_poly_mat.rst @@ -36,7 +36,7 @@ Memory management -------------------------------------------------------------------------------- -.. function:: void nmod_poly_mat_init(nmod_poly_mat_t mat, slong rows, slong cols, mp_limb_t n) +.. function:: void nmod_poly_mat_init(nmod_poly_mat_t mat, slong rows, slong cols, ulong n) Initialises a matrix with the given number of rows and columns for use. The modulus is set to `n`. @@ -88,7 +88,7 @@ Basic properties Returns the number of columns in ``mat``. -.. function:: mp_limb_t nmod_poly_mat_modulus(const nmod_poly_mat_t mat) +.. function:: ulong nmod_poly_mat_modulus(const nmod_poly_mat_t mat) Returns the modulus of ``mat``. @@ -237,7 +237,7 @@ Evaluation -------------------------------------------------------------------------------- -.. function:: void nmod_poly_mat_evaluate_nmod(nmod_mat_t B, const nmod_poly_mat_t A, mp_limb_t x) +.. function:: void nmod_poly_mat_evaluate_nmod(nmod_mat_t B, const nmod_poly_mat_t A, ulong x) Sets the ``nmod_mat_t`` ``B`` to ``A`` evaluated entrywise at the point ``x``. @@ -252,7 +252,7 @@ Arithmetic Sets ``B`` to ``A`` multiplied entrywise by the polynomial ``c``. -.. function:: void nmod_poly_mat_scalar_mul_nmod(nmod_poly_mat_t B, const nmod_poly_mat_t A, mp_limb_t c) +.. function:: void nmod_poly_mat_scalar_mul_nmod(nmod_poly_mat_t B, const nmod_poly_mat_t A, ulong c) Sets ``B`` to ``A`` multiplied entrywise by the coefficient ``c``, which is assumed to be reduced modulo the modulus. diff --git a/doc/source/nmod_vec.rst b/doc/source/nmod_vec.rst index 4617030f09..1aa24aa4d3 100644 --- a/doc/source/nmod_vec.rst +++ b/doc/source/nmod_vec.rst @@ -7,12 +7,12 @@ Memory management -------------------------------------------------------------------------------- -.. function:: mp_ptr _nmod_vec_init(slong len) +.. function:: nn_ptr _nmod_vec_init(slong len) Returns a vector of the given length. The entries are not necessarily zero. -.. function:: void _nmod_vec_clear(mp_ptr vec) +.. function:: void _nmod_vec_clear(nn_ptr vec) Frees the memory used by the given vector. @@ -21,7 +21,7 @@ Random functions -------------------------------------------------------------------------------- -.. function:: void _nmod_vec_randtest(mp_ptr vec, flint_rand_t state, slong len, nmod_t mod) +.. function:: void _nmod_vec_randtest(nn_ptr vec, flint_rand_t state, slong len, nmod_t mod) Sets ``vec`` to a random vector of the given length with entries reduced modulo ``mod.n``. @@ -31,29 +31,29 @@ Basic manipulation and comparison -------------------------------------------------------------------------------- -.. function:: void _nmod_vec_set(mp_ptr res, mp_srcptr vec, slong len) +.. 
function:: void _nmod_vec_set(nn_ptr res, nn_srcptr vec, slong len) Copies ``len`` entries from the vector ``vec`` to ``res``. -.. function:: void _nmod_vec_zero(mp_ptr vec, slong len) +.. function:: void _nmod_vec_zero(nn_ptr vec, slong len) Zeros the given vector of the given length. -.. function:: void _nmod_vec_swap(mp_ptr a, mp_ptr b, slong length) +.. function:: void _nmod_vec_swap(nn_ptr a, nn_ptr b, slong length) Swaps the vectors ``a`` and ``b`` of length `n` by actually swapping the entries. -.. function:: void _nmod_vec_reduce(mp_ptr res, mp_srcptr vec, slong len, nmod_t mod) +.. function:: void _nmod_vec_reduce(nn_ptr res, nn_srcptr vec, slong len, nmod_t mod) Reduces the entries of ``(vec, len)`` modulo ``mod.n`` and set ``res`` to the result. -.. function:: flint_bitcnt_t _nmod_vec_max_bits(mp_srcptr vec, slong len) +.. function:: flint_bitcnt_t _nmod_vec_max_bits(nn_srcptr vec, slong len) Returns the maximum number of bits of any entry in the vector. -.. function:: int _nmod_vec_equal(mp_srcptr vec, mp_srcptr vec2, slong len) +.. function:: int _nmod_vec_equal(nn_srcptr vec, nn_srcptr vec2, slong len) Returns~`1` if ``(vec, len)`` is equal to ``(vec2, len)``, otherwise returns~`0`. @@ -63,7 +63,7 @@ Printing -------------------------------------------------------------------------------- -.. function:: void _nmod_vec_print_pretty(mp_srcptr vec, slong len, nmod_t mod) +.. function:: void _nmod_vec_print_pretty(nn_srcptr vec, slong len, nmod_t mod) Pretty-prints ``vec`` to ``stdout``. A header is printed followed by the vector enclosed in brackets. Each entry is right-aligned to the width of @@ -73,15 +73,15 @@ Printing [ 33 181 107 61 32 11 80 138 34 171 86 156] -.. function:: int _nmod_vec_fprint_pretty(FILE * file, mp_srcptr vec, slong len, nmod_t mod) +.. function:: int _nmod_vec_fprint_pretty(FILE * file, nn_srcptr vec, slong len, nmod_t mod) Same as ``_nmod_vec_print_pretty`` but printing to ``file``. -.. function:: int _nmod_vec_print(mp_srcptr vec, slong len, nmod_t mod) +.. function:: int _nmod_vec_print(nn_srcptr vec, slong len, nmod_t mod) Currently, same as ``_nmod_vec_print_pretty``. -.. function:: int _nmod_vec_fprint(FILE * f, mp_srcptr vec, slong len, nmod_t mod) +.. function:: int _nmod_vec_fprint(FILE * f, nn_srcptr vec, slong len, nmod_t mod) Currently, same as ``_nmod_vec_fprint_pretty``. @@ -90,32 +90,32 @@ Arithmetic operations -------------------------------------------------------------------------------- -.. function:: void _nmod_vec_add(mp_ptr res, mp_srcptr vec1, mp_srcptr vec2, slong len, nmod_t mod) +.. function:: void _nmod_vec_add(nn_ptr res, nn_srcptr vec1, nn_srcptr vec2, slong len, nmod_t mod) Sets ``(res, len)`` to the sum of ``(vec1, len)`` and ``(vec2, len)``. -.. function:: void _nmod_vec_sub(mp_ptr res, mp_srcptr vec1, mp_srcptr vec2, slong len, nmod_t mod) +.. function:: void _nmod_vec_sub(nn_ptr res, nn_srcptr vec1, nn_srcptr vec2, slong len, nmod_t mod) Sets ``(res, len)`` to the difference of ``(vec1, len)`` and ``(vec2, len)``. -.. function:: void _nmod_vec_neg(mp_ptr res, mp_srcptr vec, slong len, nmod_t mod) +.. function:: void _nmod_vec_neg(nn_ptr res, nn_srcptr vec, slong len, nmod_t mod) Sets ``(res, len)`` to the negation of ``(vec, len)``. -.. function:: void _nmod_vec_scalar_mul_nmod(mp_ptr res, mp_srcptr vec, slong len, mp_limb_t c, nmod_t mod) +.. function:: void _nmod_vec_scalar_mul_nmod(nn_ptr res, nn_srcptr vec, slong len, ulong c, nmod_t mod) Sets ``(res, len)`` to ``(vec, len)`` multiplied by `c`. 
The element `c` and all elements of `vec` are assumed to be less than `mod.n`. -.. function:: void _nmod_vec_scalar_mul_nmod_shoup(mp_ptr res, mp_srcptr vec, slong len, mp_limb_t c, nmod_t mod) +.. function:: void _nmod_vec_scalar_mul_nmod_shoup(nn_ptr res, nn_srcptr vec, slong len, ulong c, nmod_t mod) Sets ``(res, len)`` to ``(vec, len)`` multiplied by `c` using :func:`n_mulmod_shoup`. `mod.n` should be less than `2^{\mathtt{FLINT\_BITS} - 1}`. `c` and all elements of `vec` should be less than `mod.n`. -.. function:: void _nmod_vec_scalar_addmul_nmod(mp_ptr res, mp_srcptr vec, slong len, mp_limb_t c, nmod_t mod) +.. function:: void _nmod_vec_scalar_addmul_nmod(nn_ptr res, nn_srcptr vec, slong len, ulong c, nmod_t mod) Adds ``(vec, len)`` times `c` to the vector ``(res, len)``. The element `c` and all elements of `vec` are assumed to be less than `mod.n`. @@ -149,18 +149,18 @@ Dot products ``nmod.h`` has to be included in order for this macro to work (order of inclusions does not matter). -.. function:: mp_limb_t _nmod_vec_dot(mp_srcptr vec1, mp_srcptr vec2, slong len, nmod_t mod, int nlimbs) +.. function:: ulong _nmod_vec_dot(nn_srcptr vec1, nn_srcptr vec2, slong len, nmod_t mod, int nlimbs) Returns the dot product of (``vec1``, ``len``) and (``vec2``, ``len``). The ``nlimbs`` parameter should be 0, 1, 2 or 3, specifying the number of limbs needed to represent the unreduced result. -.. function:: mp_limb_t _nmod_vec_dot_rev(mp_srcptr vec1, mp_srcptr vec2, slong len, nmod_t mod, int nlimbs) +.. function:: ulong _nmod_vec_dot_rev(nn_srcptr vec1, nn_srcptr vec2, slong len, nmod_t mod, int nlimbs) The same as ``_nmod_vec_dot``, but reverses ``vec2``. -.. function:: mp_limb_t _nmod_vec_dot_ptr(mp_srcptr vec1, const mp_ptr * vec2, slong offset, slong len, nmod_t mod, int nlimbs) +.. function:: ulong _nmod_vec_dot_ptr(nn_srcptr vec1, const nn_ptr * vec2, slong offset, slong len, nmod_t mod, int nlimbs) Returns the dot product of (``vec1``, ``len``) and the values at ``vec2[i][offset]``. The ``nlimbs`` parameter should be diff --git a/doc/source/qsieve.rst b/doc/source/qsieve.rst index 0543a0ce61..15e655f343 100644 --- a/doc/source/qsieve.rst +++ b/doc/source/qsieve.rst @@ -3,18 +3,18 @@ **qsieve.h** -- Quadratic sieve ================================================================================ -.. function:: mp_limb_t qsieve_knuth_schroeppel(qs_t qs_inf) +.. function:: ulong qsieve_knuth_schroeppel(qs_t qs_inf) Return the Knuth-Schroeppel multiplier for `n`, the integer to be factored, based upon the Knuth-Schroeppel function. -.. function:: mp_limb_t qsieve_primes_init(qs_t qs_inf) +.. function:: ulong qsieve_primes_init(qs_t qs_inf) Compute the factor base primes along with their inverses for `kn`, where `k` is the Knuth-Schroeppel multiplier and `n` is the integer to be factored. It also computes the square roots of `kn` modulo the factor base primes. -.. function:: mp_limb_t qsieve_primes_increment(qs_t qs_inf, mp_limb_t delta) +.. function:: ulong qsieve_primes_increment(qs_t qs_inf, ulong delta) It increases the number of factor base primes by the amount 'delta' and calculates the inverses of those primes along with the square root of `kn` modulo @@ -91,10 +91,10 @@ Call for initialization of the polynomial, sieving, and scanning of the sieve for all the possible polynomials for a particular hypercube, i.e. `A`. -.. function:: void qsieve_write_to_file(qs_t qs_inf, mp_limb_t prime, const fmpz_t Y, const qs_poly_t poly) +..
function:: void qsieve_write_to_file(qs_t qs_inf, ulong prime, const fmpz_t Y, const qs_poly_t poly) Write a relation to the file in a binary format as follows. First, write - large prime of size ``sizeof(mp_limb_t)``, in case of full relation it is 1. + the large prime of size ``sizeof(ulong)``; in the case of a full relation it is 1. After this, write the number of small primes with size ``sizeof(slong)``. Then, write the small primes, with a total size of ``number_of_small_primes * sizeof(slong)``. Then, write the number of @@ -102,14 +102,14 @@ their exponents in the format ``factor_1, exponent_1, factor_2, ...``, all with a total size of ``2 * number_of_factors * sizeof(slong)``. Then write ``Y`` with the size of ``Y`` first (size ``sizeof(slong)``, that may be - negative), and then its limbs (size ``Y_size * sizeof(mp_limb_t)``). + negative), and then its limbs (size ``Y_size * sizeof(ulong)``). -.. function:: hash_t * qsieve_get_table_entry(qs_t qs_inf, mp_limb_t prime) +.. function:: hash_t * qsieve_get_table_entry(qs_t qs_inf, ulong prime) Return a pointer to the location of 'prime' in the hash table if it exists, else create an entry for it in the hash table and return a pointer to that. -.. function:: void qsieve_add_to_hashtable(qs_t qs_inf, mp_limb_t prime) +.. function:: void qsieve_add_to_hashtable(qs_t qs_inf, ulong prime) Add 'prime' to the hash table. diff --git a/doc/source/ulong_extras.rst b/doc/source/ulong_extras.rst index 82a25a7b8b..d9a3df2a08 100644 --- a/doc/source/ulong_extras.rst +++ b/doc/source/ulong_extras.rst @@ -11,9 +11,9 @@ The module includes functions for square roots, factorisation and primality testing. Almost all the functions in this module are highly developed and extremely well optimised. -The basic type is the ``mp_limb_t`` as defined by GMP. Functions +The basic type is the ``ulong``, corresponding to GMP's limb type. Functions which take a precomputed inverse either have the suffix ``preinv`` and -take an ``mp_limb_t`` precomputed inverse as computed by +take a ``ulong`` precomputed inverse as computed by ``n_preinvert_limb`` or have the suffix ``_precomp`` and accept a ``double`` precomputed inverse as computed by ``n_precompute_inverse``. @@ -42,7 +42,7 @@ inverse, where `a = 12345678, b = 87654321` and `n = 111111111`. #include "ulong_extras.h" int main() { - mp_limb_t r, a, b, n, ninv; + ulong r, a, b, n, ninv; a = UWORD(12345678); b = UWORD(87654321); @@ -452,7 +452,7 @@ Jacobi and Kronecker symbols -------------------------------------------------------------------------------- -.. function:: int n_jacobi(mp_limb_signed_t x, ulong y) +.. function:: int n_jacobi(slong x, ulong y) Computes the Jacobi symbol `\left(\frac{x}{y}\right)` for any `x` and odd `y`. @@ -481,7 +481,7 @@ Modular Arithmetic This is merely an adaptation of the extended Euclidean algorithm with appropriate normalisation. -.. function:: ulong n_powmod_precomp(ulong a, mp_limb_signed_t exp, ulong n, double npre) +.. function:: ulong n_powmod_precomp(ulong a, slong exp, ulong n, double npre) Returns ``a^exp`` modulo `n` given a precomputed inverse of `n` computed by :func:`n_precompute_inverse`. We require `n < 2^{53}` @@ -501,7 +501,7 @@ Modular Arithmetic This is implemented as a standard binary powering algorithm using repeated squaring and reducing modulo `n` at each step. -.. function:: ulong n_powmod(ulong a, mp_limb_signed_t exp, ulong n) +.. function:: ulong n_powmod(ulong a, slong exp, ulong n) Returns ``a^exp`` modulo `n`. We require ``n < 2^FLINT_D_BITS`` and `0 \leq a < n`.
There are no restrictions on ``exp``, i.e. @@ -510,7 +510,7 @@ Modular Arithmetic This is implemented by precomputing an inverse and calling the ``precomp`` version of this function. -.. function:: ulong n_powmod2_preinv(ulong a, mp_limb_signed_t exp, ulong n, ulong ninv) +.. function:: ulong n_powmod2_preinv(ulong a, slong exp, ulong n, ulong ninv) Returns ``(a^exp) % n`` given a precomputed inverse of `n` computed by :func:`n_preinvert_limb`. We require `0 \leq a < n`, but there are no @@ -522,7 +522,7 @@ Modular Arithmetic If ``exp`` is negative but `a` is not invertible modulo `n`, an exception is raised. -.. function:: ulong n_powmod2(ulong a, mp_limb_signed_t exp, ulong n) +.. function:: ulong n_powmod2(ulong a, slong exp, ulong n) Returns ``(a^exp) % n``. We require `0 \leq a < n`, but there are no restrictions on `n` or on ``exp``, i.e. it can be negative. @@ -596,7 +596,7 @@ Modular Arithmetic ``m`` then 0 is returned by the function and the location ``sqrt`` points to is set to NULL. -.. function:: mp_limb_t n_mulmod_shoup(mp_limb_t w, mp_limb_t t, mp_limb_t w_precomp, mp_limb_t p) +.. function:: ulong n_mulmod_shoup(ulong w, ulong t, ulong w_precomp, ulong p) Returns `w t \bmod{p}` given a precomputed scaled approximation of `w / p` computed by :func:`n_mulmod_precomp_shoup`. The value of `p` should be @@ -604,7 +604,7 @@ Modular Arithmetic Works faster than :func:`n_mulmod2_preinv` if `w` is fixed and `t` comes from an array (for example, scalar multiplication of a vector). -.. function:: mp_limb_t n_mulmod_precomp_shoup(mp_limb_t w, mp_limb_t p) +.. function:: ulong n_mulmod_precomp_shoup(ulong w, ulong p) Returns `w'`, a scaled approximation of `w / p`. `w'` is equal to the integer part of `w \cdot 2^{\mathtt{FLINT\_BITS}} / p`. @@ -613,7 +613,7 @@ Divisibility testing -------------------------------------------------------------------------------- -.. function:: int n_divides(mp_limb_t * q, mp_limb_t n, mp_limb_t p) +.. function:: int n_divides(ulong * q, ulong n, ulong p) Returns ``1`` if ``p`` divides ``n`` and sets ``q`` to the quotient, otherwise returns ``0`` and sets ``q`` to ``0``. @@ -1302,7 +1302,7 @@ Factorisation the time for ``n_factor`` on numbers that reach the ``n_factor_pp1`` stage, i.e. after trial factoring and one line factoring. -.. function:: int n_factor_pollard_brent_single(mp_limb_t * factor, mp_limb_t n, mp_limb_t ninv, mp_limb_t ai, mp_limb_t xi, mp_limb_t normbits, mp_limb_t max_iters) +.. function:: int n_factor_pollard_brent_single(ulong * factor, ulong n, ulong ninv, ulong ai, ulong xi, ulong normbits, ulong max_iters) Pollard Rho algorithm (with Brent modification) for integer factorization. Assumes that `n` is not prime. `factor` is set to the factor if found. @@ -1321,7 +1321,7 @@ Factorisation suggested by Richard Brent in the paper, available at https://maths-people.anu.edu.au/~brent/pd/rpb051i.pdf -.. function:: int n_factor_pollard_brent(mp_limb_t * factor, flint_rand_t state, mp_limb_t n_in, mp_limb_t max_tries, mp_limb_t max_iters) +.. function:: int n_factor_pollard_brent(ulong * factor, flint_rand_t state, ulong n_in, ulong max_tries, ulong max_iters) Pollard Rho algorithm, modified as suggested by Richard Brent. Makes a call to :func:`n_factor_pollard_brent_single`.
The input parameters ai and xi for @@ -1423,11 +1423,11 @@ Primitive Roots and Discrete Logarithms -Elliptic curve method for factorization of ``mp_limb_t`` +Elliptic curve method for factorization of ``ulong`` -------------------------------------------------------------------------------- -.. function:: void n_factor_ecm_double(mp_limb_t * x, mp_limb_t * z, mp_limb_t x0, mp_limb_t z0, mp_limb_t n, n_ecm_t n_ecm_inf) +.. function:: void n_factor_ecm_double(ulong * x, ulong * z, ulong x0, ulong z0, ulong n, n_ecm_t n_ecm_inf) Sets the point `(x : z)` to two times `(x_0 : z_0)` modulo `n` according to the formula @@ -1439,7 +1439,7 @@ Elliptic curve method for factorization of ``mp_limb_t`` This group doubling is valid only for points expressed in Montgomery projective coordinates. -.. function:: void n_factor_ecm_add(mp_limb_t * x, mp_limb_t * z, mp_limb_t x1, mp_limb_t z1, mp_limb_t x2, mp_limb_t z2, mp_limb_t x0, mp_limb_t z0, mp_limb_t n, n_ecm_t n_ecm_inf) +.. function:: void n_factor_ecm_add(ulong * x, ulong * z, ulong x1, ulong z1, ulong x2, ulong z2, ulong x0, ulong z0, ulong n, n_ecm_t n_ecm_inf) Sets the point `(x : z)` to the sum of `(x_1 : z_1)` and `(x_2 : z_2)` modulo `n`, given the difference `(x_0 : z_0)` according to the formula @@ -1447,7 +1447,7 @@ Elliptic curve method for factorization of ``mp_limb_t`` This group addition is valid only for points expressed in Montgomery projective coordinates. -.. function:: void n_factor_ecm_mul_montgomery_ladder(mp_limb_t * x, mp_limb_t * z, mp_limb_t x0, mp_limb_t z0, mp_limb_t k, mp_limb_t n, n_ecm_t n_ecm_inf) +.. function:: void n_factor_ecm_mul_montgomery_ladder(ulong * x, ulong * z, ulong x0, ulong z0, ulong k, ulong n, n_ecm_t n_ecm_inf) Montgomery ladder algorithm for scalar multiplication of elliptic points. @@ -1455,7 +1455,7 @@ Elliptic curve method for factorization of ``mp_limb_t`` Valid only for points expressed in Montgomery projective coordinates. -.. function:: int n_factor_ecm_select_curve(mp_limb_t * f, mp_limb_t sigma, mp_limb_t n, n_ecm_t n_ecm_inf) +.. function:: int n_factor_ecm_select_curve(ulong * f, ulong sigma, ulong n, n_ecm_t n_ecm_inf) Selects a random elliptic curve given a random integer ``sigma``, according to Suyama's parameterization. If the factor is found while @@ -1470,7 +1470,7 @@ Elliptic curve method for factorization of ``mp_limb_t`` The curve selected is of Montgomery form, the points selected satisfy the curve and are in projective coordinates. -.. function:: int n_factor_ecm_stage_I(mp_limb_t * f, const mp_limb_t * prime_array, mp_limb_t num, mp_limb_t B1, mp_limb_t n, n_ecm_t n_ecm_inf) +.. function:: int n_factor_ecm_stage_I(ulong * f, const ulong * prime_array, ulong num, ulong B1, ulong n, n_ecm_t n_ecm_inf) Stage I implementation of the ECM algorithm. @@ -1480,7 +1480,7 @@ Elliptic curve method for factorization of ``mp_limb_t`` If the factor is found, `1` is returned, otherwise `0`. -.. function:: int n_factor_ecm_stage_II(mp_limb_t * f, mp_limb_t B1, mp_limb_t B2, mp_limb_t P, mp_limb_t n, n_ecm_t n_ecm_inf) +.. function:: int n_factor_ecm_stage_II(ulong * f, ulong B1, ulong B2, ulong P, ulong n, n_ecm_t n_ecm_inf) Stage II implementation of the ECM algorithm. @@ -1490,10 +1490,10 @@ Elliptic curve method for factorization of ``mp_limb_t`` If the factor is found, `1` is returned, otherwise `0`. -.. function:: int n_factor_ecm(mp_limb_t * f, mp_limb_t curves, mp_limb_t B1, mp_limb_t B2, flint_rand_t state, mp_limb_t n) +..
function:: int n_factor_ecm(ulong * f, ulong curves, ulong B1, ulong B2, flint_rand_t state, ulong n) Outer wrapper function for the ECM algorithm. It factors `n` which - must fit into a ``mp_limb_t``. + must fit into a ``ulong``. The function calls stage I and II, and the precomputations (builds ``prime_array`` for stage I, diff --git a/examples/crt.c b/examples/crt.c index 3e7e009c8b..ea618edd71 100644 --- a/examples/crt.c +++ b/examples/crt.c @@ -23,7 +23,7 @@ int main(int argc, char* argv[]) { slong bit_bound; - mp_limb_t prime, res; + ulong prime, res; fmpz_t x, y, prod; if (argc != 2) diff --git a/examples/multi_crt.c b/examples/multi_crt.c index f32d671bea..f8a1342e20 100644 --- a/examples/multi_crt.c +++ b/examples/multi_crt.c @@ -29,8 +29,8 @@ int main(int argc, char* argv[]) fmpz_comb_t comb; fmpz_comb_temp_t comb_temp; - mp_limb_t * primes; - mp_limb_t * residues; + ulong * primes; + ulong * residues; slong num_primes; @@ -53,8 +53,8 @@ int main(int argc, char* argv[]) fmpz_set_str(x, argv[1], 10); - primes = flint_malloc(num_primes * sizeof(mp_limb_t)); - residues = flint_malloc(num_primes * sizeof(mp_limb_t)); + primes = flint_malloc(num_primes * sizeof(ulong)); + residues = flint_malloc(num_primes * sizeof(ulong)); primes[0] = 2; for (i = 1; i < num_primes; i++) diff --git a/examples/primegen.c b/examples/primegen.c index fa38ab0be7..d016b892e4 100644 --- a/examples/primegen.c +++ b/examples/primegen.c @@ -18,7 +18,7 @@ int main(int argc, char* argv[]) { n_primes_t iter; - mp_limb_t p, N; + ulong p, N; if (argc < 2) { diff --git a/src/NTL-interface.h b/src/NTL-interface.h index 2d5de83cd0..90a21f32fb 100644 --- a/src/NTL-interface.h +++ b/src/NTL-interface.h @@ -41,8 +41,8 @@ inline void fmpz_set_ZZ(fmpz_t rop, const ZZ& op) fmpz_zero(rop); else { - const mp_size_t lw = op.size(); - const mp_limb_t *xp = ((mp_limb_t *) (((slong *) (x)) + 2)); + const slong lw = op.size(); + const ulong *xp = ((ulong *) (((slong *) (x)) + 2)); if (lw == 0) fmpz_zero(rop); @@ -52,7 +52,7 @@ inline void fmpz_set_ZZ(fmpz_t rop, const ZZ& op) { mpz_ptr mf = _fmpz_promote(rop); - mpz_import(mf, lw, -1, sizeof(mp_limb_t), 0, 0, xp); + mpz_import(mf, lw, -1, sizeof(ulong), 0, 0, xp); } if (op < WORD(0)) @@ -65,7 +65,7 @@ inline void fmpz_set_ZZ(fmpz_t rop, const ZZ& op) */ inline void fmpz_get_ZZ(NTL_NNS ZZ& rop, const fmpz_t op) { - mp_limb_t *xp; + ulong *xp; _ntl_gbigint *x = &rop.rep; slong lw = fmpz_size(op); fmpz c = *op; @@ -77,7 +77,7 @@ inline void fmpz_get_ZZ(NTL_NNS ZZ& rop, const fmpz_t op) } _ntl_gsetlength(x, lw); - xp = ((mp_limb_t *) (((slong *) (*x)) + 2)); // data + xp = ((ulong *) (((slong *) (*x)) + 2)); // data if (COEFF_IS_MPZ(c)) { diff --git a/src/acb/approx_dot.c b/src/acb/approx_dot.c index 6dc7f8c1e3..11167b52e4 100644 --- a/src/acb/approx_dot.c +++ b/src/acb/approx_dot.c @@ -12,22 +12,22 @@ #include "acb.h" #include "mpn_extras.h" -/* We need uint64_t instead of mp_limb_t on 32-bit systems for +/* We need uint64_t instead of ulong on 32-bit systems for safe summation of 30-bit error bounds. 
*/ #include void -_arb_dot_addmul_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn, - mp_srcptr xptr, mp_size_t xn, mp_srcptr yptr, mp_size_t yn, +_arb_dot_addmul_generic(nn_ptr sum, nn_ptr serr, nn_ptr tmp, slong sn, + nn_srcptr xptr, slong xn, nn_srcptr yptr, slong yn, int negative, flint_bitcnt_t shift); void -_arb_dot_add_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn, - mp_srcptr xptr, mp_size_t xn, +_arb_dot_add_generic(nn_ptr sum, nn_ptr serr, nn_ptr tmp, slong sn, + nn_srcptr xptr, slong xn, int negative, flint_bitcnt_t shift); static void -_arb_dot_output(arb_t res, mp_ptr sum, mp_size_t sn, int negative, +_arb_dot_output(arb_t res, nn_ptr sum, slong sn, int negative, slong sum_exp, slong prec) { slong exp_fix; @@ -43,7 +43,7 @@ _arb_dot_output(arb_t res, mp_ptr sum, mp_size_t sn, int negative, if (sum[sn - 1] == 0) { slong sum_exp2; - mp_size_t sn2; + slong sn2; sn2 = sn; sum_exp2 = sum_exp; @@ -79,7 +79,7 @@ _arb_dot_output(arb_t res, mp_ptr sum, mp_size_t sn, int negative, #define ARB_DOT_ADD(s_sum, s_serr, s_sn, s_sum_exp, s_subtract, xm) \ if (!arf_is_special(xm)) \ { \ - mp_srcptr xptr; \ + nn_srcptr xptr; \ xexp = ARF_EXP(xm); \ xn = ARF_SIZE(xm); \ xnegative = ARF_SGNBIT(xm); \ @@ -98,9 +98,9 @@ static void _arf_complex_mul_gauss(arf_t e, arf_t f, const arf_t a, const arf_t b, const arf_t c, const arf_t d) { - mp_srcptr ap, bp, cp, dp; + nn_srcptr ap, bp, cp, dp; int asgn, bsgn, csgn, dsgn; - mp_size_t an, bn, cn, dn; + slong an, bn, cn, dn; slong aexp, bexp, cexp, dexp; fmpz texp, uexp; @@ -266,12 +266,12 @@ acb_approx_dot(acb_t res, const acb_t initial, int subtract, acb_srcptr x, slong slong im_max_exp, im_min_exp, im_sum_exp; slong re_prec, im_prec; int xnegative, ynegative; - mp_size_t xn, yn, re_sn, im_sn, alloc; + slong xn, yn, re_sn, im_sn, alloc; flint_bitcnt_t shift; arb_srcptr xi, yi; arf_srcptr xm, ym; - mp_limb_t re_serr, im_serr; /* Sum over arithmetic errors */ - mp_ptr tmp, re_sum, im_sum; /* Workspace */ + ulong re_serr, im_serr; /* Sum over arithmetic errors */ + nn_ptr tmp, re_sum, im_sum; /* Workspace */ slong xoff, yoff; char * use_gauss; ARF_ADD_TMP_DECL; @@ -483,7 +483,7 @@ acb_approx_dot(acb_t res, const acb_t initial, int subtract, acb_srcptr x, slong for (i = 0; i < len; i++) { arb_srcptr ai, bi, ci, di; - mp_size_t an, bn, cn, dn; + slong an, bn, cn, dn; slong aexp, bexp, cexp, dexp; ai = ((arb_srcptr) x) + 2 * i * xstep; @@ -537,9 +537,9 @@ acb_approx_dot(acb_t res, const acb_t initial, int subtract, acb_srcptr x, slong for (yoff = 0; yoff < 2; yoff++) { slong sum_exp; - mp_ptr sum; - mp_size_t sn; - mp_limb_t serr; + nn_ptr sum; + slong sn; + ulong serr; int flipsign; if (xoff == yoff) @@ -589,8 +589,8 @@ acb_approx_dot(acb_t res, const acb_t initial, int subtract, acb_srcptr x, slong } else if (xn <= 2 && yn <= 2 && sn <= 3) { - mp_limb_t x1, x0, y1, y0; - mp_limb_t u3, u2, u1, u0; + ulong x1, x0, y1, y0; + ulong u3, u2, u1, u0; if (xn == 1 && yn == 1) { @@ -688,7 +688,7 @@ acb_approx_dot(acb_t res, const acb_t initial, int subtract, acb_srcptr x, slong } else { - mp_srcptr xptr, yptr; + nn_srcptr xptr, yptr; xptr = (xn <= ARF_NOPTR_LIMBS) ? ARF_NOPTR_D(xm) : ARF_PTR_D(xm); yptr = (yn <= ARF_NOPTR_LIMBS) ? 
ARF_NOPTR_D(ym) : ARF_PTR_D(ym); diff --git a/src/acb/dot.c b/src/acb/dot.c index 0e9ae90f61..d1a2e3c7cb 100644 --- a/src/acb/dot.c +++ b/src/acb/dot.c @@ -12,7 +12,7 @@ #include "acb.h" #include "mpn_extras.h" -/* We need uint64_t instead of mp_limb_t on 32-bit systems for +/* We need uint64_t instead of ulong on 32-bit systems for safe summation of 30-bit error bounds. */ #include @@ -99,17 +99,17 @@ add_errors(mag_t rad, uint64_t Aerr, slong Aexp, uint64_t Berr, slong Bexp, uint } void -_arb_dot_addmul_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn, - mp_srcptr xptr, mp_size_t xn, mp_srcptr yptr, mp_size_t yn, +_arb_dot_addmul_generic(nn_ptr sum, nn_ptr serr, nn_ptr tmp, slong sn, + nn_srcptr xptr, slong xn, nn_srcptr yptr, slong yn, int negative, flint_bitcnt_t shift); void -_arb_dot_add_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn, - mp_srcptr xptr, mp_size_t xn, +_arb_dot_add_generic(nn_ptr sum, nn_ptr serr, nn_ptr tmp, slong sn, + nn_srcptr xptr, slong xn, int negative, flint_bitcnt_t shift); static void -_arb_dot_output(arb_t res, mp_ptr sum, mp_size_t sn, int negative, +_arb_dot_output(arb_t res, nn_ptr sum, slong sn, int negative, uint64_t serr, slong sum_exp, uint64_t srad, slong srad_exp, slong prec) { slong exp_fix; @@ -126,7 +126,7 @@ _arb_dot_output(arb_t res, mp_ptr sum, mp_size_t sn, int negative, if (sum[sn - 1] == 0) { slong sum_exp2; - mp_size_t sn2; + slong sn2; sn2 = sn; sum_exp2 = sum_exp; @@ -171,7 +171,7 @@ _arb_dot_output(arb_t res, mp_ptr sum, mp_size_t sn, int negative, #define ARB_DOT_ADD(s_sum, s_serr, s_sn, s_sum_exp, s_subtract, xm) \ if (!arf_is_special(xm)) \ { \ - mp_srcptr xptr; \ + nn_srcptr xptr; \ xexp = ARF_EXP(xm); \ xn = ARF_SIZE(xm); \ xnegative = ARF_SGNBIT(xm); \ @@ -204,9 +204,9 @@ static void _arf_complex_mul_gauss(arf_t e, arf_t f, const arf_t a, const arf_t b, const arf_t c, const arf_t d) { - mp_srcptr ap, bp, cp, dp; + nn_srcptr ap, bp, cp, dp; int asgn, bsgn, csgn, dsgn; - mp_size_t an, bn, cn, dn; + slong an, bn, cn, dn; slong aexp, bexp, cexp, dexp; fmpz texp, uexp; @@ -294,16 +294,16 @@ acb_dot(acb_t res, const acb_t initial, int subtract, acb_srcptr x, slong xstep, slong re_prec, im_prec; slong xrexp, yrexp; int xnegative, ynegative; - mp_size_t xn, yn, re_sn, im_sn, alloc; + slong xn, yn, re_sn, im_sn, alloc; flint_bitcnt_t shift; arb_srcptr xi, yi; arf_srcptr xm, ym; mag_srcptr xr, yr; - mp_limb_t xtop, ytop; - mp_limb_t xrad, yrad; - mp_limb_t re_serr, im_serr; /* Sum over arithmetic errors */ + ulong xtop, ytop; + ulong xrad, yrad; + ulong re_serr, im_serr; /* Sum over arithmetic errors */ uint64_t re_srad, im_srad; /* Sum over propagated errors */ - mp_ptr tmp, re_sum, im_sum; /* Workspace */ + nn_ptr tmp, re_sum, im_sum; /* Workspace */ slong xoff, yoff; char * use_gauss; ARF_ADD_TMP_DECL; @@ -629,7 +629,7 @@ acb_dot(acb_t res, const acb_t initial, int subtract, acb_srcptr x, slong xstep, for (i = 0; i < len; i++) { arb_srcptr ai, bi, ci, di; - mp_size_t an, bn, cn, dn; + slong an, bn, cn, dn; slong aexp, bexp, cexp, dexp; ai = ((arb_srcptr) x) + 2 * i * xstep; @@ -683,9 +683,9 @@ acb_dot(acb_t res, const acb_t initial, int subtract, acb_srcptr x, slong xstep, for (yoff = 0; yoff < 2; yoff++) { slong sum_exp, srad_exp; - mp_ptr sum; - mp_size_t sn; - mp_limb_t serr; + nn_ptr sum; + slong sn; + ulong serr; uint64_t srad; int flipsign; @@ -745,8 +745,8 @@ acb_dot(acb_t res, const acb_t initial, int subtract, acb_srcptr x, slong xstep, } else if (xn <= 2 && yn <= 2 && sn <= 3) { - mp_limb_t x1, x0, y1, y0; - 
mp_limb_t u3, u2, u1, u0; + ulong x1, x0, y1, y0; + ulong u3, u2, u1, u0; if (xn == 1 && yn == 1) { @@ -858,7 +858,7 @@ acb_dot(acb_t res, const acb_t initial, int subtract, acb_srcptr x, slong xstep, } else { - mp_srcptr xptr, yptr; + nn_srcptr xptr, yptr; xptr = (xn <= ARF_NOPTR_LIMBS) ? ARF_NOPTR_D(xm) : ARF_PTR_D(xm); yptr = (yn <= ARF_NOPTR_LIMBS) ? ARF_NOPTR_D(ym) : ARF_PTR_D(ym); diff --git a/src/acb/dot_fmpz.c b/src/acb/dot_fmpz.c index 8d0035aa71..be8507bddb 100644 --- a/src/acb/dot_fmpz.c +++ b/src/acb/dot_fmpz.c @@ -16,7 +16,7 @@ acb_dot_fmpz(acb_t res, const acb_t initial, int subtract, acb_srcptr x, slong x { arb_ptr t; slong i, ssize, size, tmp_size; - mp_ptr ztmp; + nn_ptr ztmp; fmpz v; ulong av, al; unsigned int bc; @@ -119,7 +119,7 @@ acb_dot_fmpz(acb_t res, const acb_t initial, int subtract, acb_srcptr x, slong x if (tmp_size != 0) { - ztmp = TMP_ALLOC(sizeof(mp_limb_t) * tmp_size); + ztmp = TMP_ALLOC(sizeof(ulong) * tmp_size); for (i = 0; i < len; i++) { diff --git a/src/acb/test/main.c b/src/acb/test/main.c index deb61fe96d..fd847ddc8a 100644 --- a/src/acb/test/main.c +++ b/src/acb/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-acos.c" diff --git a/src/acb/test/t-log.c b/src/acb/test/t-log.c index af38114f01..8728ac2c8d 100644 --- a/src/acb/test/t-log.c +++ b/src/acb/test/t-log.c @@ -16,7 +16,7 @@ static int close_to_one(const acb_t z) { - mp_limb_t top; + ulong top; if (arf_abs_bound_lt_2exp_si(arb_midref(acb_imagref(z))) > -3) return 0; diff --git a/src/acb_calc/test/main.c b/src/acb_calc/test/main.c index b53406fe65..e70522fd14 100644 --- a/src/acb_calc/test/main.c +++ b/src/acb_calc/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-cauchy_bound.c" diff --git a/src/acb_dft/test/main.c b/src/acb_dft/test/main.c index 0d07ea9160..c8686820c4 100644 --- a/src/acb_dft/test/main.c +++ b/src/acb_dft/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-convol.c" diff --git a/src/acb_dirichlet/euler_product_real_ui.c b/src/acb_dirichlet/euler_product_real_ui.c index 0d640c6868..723e186539 100644 --- a/src/acb_dirichlet/euler_product_real_ui.c +++ b/src/acb_dirichlet/euler_product_real_ui.c @@ -27,7 +27,7 @@ typedef struct ulong s; int mod; const signed char * chi; - mp_ptr primes; + nn_ptr primes; double * powmags; slong num_primes; slong wp; @@ -146,7 +146,7 @@ void _acb_dirichlet_euler_product_real_ui(arb_t res, ulong s, { n_primes_t iter; slong i; - mp_ptr primes; + nn_ptr primes; double * powmags; slong num_primes = 0; slong alloc = 16; @@ -159,7 +159,7 @@ void _acb_dirichlet_euler_product_real_ui(arb_t res, ulong s, n_primes_init(iter); n_primes_jump_after(iter, 3); - primes = flint_malloc(alloc * sizeof(mp_limb_t)); + primes = flint_malloc(alloc * sizeof(ulong)); powmags = flint_malloc(alloc * sizeof(double)); for (p = 3; p < limit; p = n_primes_next(iter)) @@ -182,7 +182,7 @@ void _acb_dirichlet_euler_product_real_ui(arb_t res, ulong s, if (num_primes >= alloc) { alloc *= 2; - primes = flint_realloc(primes, alloc * sizeof(mp_limb_t)); + primes = flint_realloc(primes, alloc * sizeof(ulong)); powmags = flint_realloc(powmags, alloc * sizeof(double)); } diff --git a/src/acb_dirichlet/test/main.c b/src/acb_dirichlet/test/main.c index 1e357a3809..659fda459e 100644 --- a/src/acb_dirichlet/test/main.c +++ b/src/acb_dirichlet/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-backlund_s_bound.c" diff --git a/src/acb_elliptic/test/main.c b/src/acb_elliptic/test/main.c index 955aca4724..3f038ec84b 100644 --- a/src/acb_elliptic/test/main.c +++ b/src/acb_elliptic/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-e_inc.c" diff --git a/src/acb_hypgeom/airy_direct.c b/src/acb_hypgeom/airy_direct.c index f0c008b2ea..09821e9d62 100644 --- a/src/acb_hypgeom/airy_direct.c +++ b/src/acb_hypgeom/airy_direct.c @@ -70,7 +70,7 @@ static void acb_hypgeom_airy_0f1_sum_inner(acb_t s, acb_srcptr t, slong m, slong n, slong alpha, int real, slong prec) { slong j, k; - mp_limb_t c, chi, clo; + ulong c, chi, clo; acb_zero(s); diff --git a/src/acb_hypgeom/rising_ui_jet_powsum.c b/src/acb_hypgeom/rising_ui_jet_powsum.c index 89b0121cc1..a38c5ea308 100644 --- a/src/acb_hypgeom/rising_ui_jet_powsum.c +++ b/src/acb_hypgeom/rising_ui_jet_powsum.c @@ -60,11 +60,11 @@ acb_hypgeom_rising_ui_jet_powsum(acb_ptr res, const acb_t x, ulong n, slong len, if (n <= 12 || (FLINT_BITS == 64 && n <= 20)) { - mp_ptr c; + nn_ptr c; TMP_START; wp = ARF_PREC_ADD(prec, FLINT_BIT_COUNT(n)); - c = TMP_ALLOC(sizeof(mp_limb_t) * (n + 1) * len); + c = TMP_ALLOC(sizeof(ulong) * (n + 1) * len); _nmod_vec_zero(c, (n + 1) * len); diff --git a/src/acb_hypgeom/rising_ui_jet_rs.c b/src/acb_hypgeom/rising_ui_jet_rs.c index b78d5d4029..d06e6176af 100644 --- a/src/acb_hypgeom/rising_ui_jet_rs.c +++ b/src/acb_hypgeom/rising_ui_jet_rs.c @@ -27,7 +27,7 @@ acb_hypgeom_rising_ui_jet_rs(acb_ptr res, const acb_t x, ulong n, ulong m, slong slong i, j, k, l, m0, xmlen, tlen, ulen, climbs, climbs_max, wp; acb_ptr tmp, xpow; acb_ptr t, u; - mp_ptr c; + nn_ptr c; TMP_INIT; if (len == 0) @@ -80,7 +80,7 @@ acb_hypgeom_rising_ui_jet_rs(acb_ptr res, const acb_t x, ulong n, ulong m, slong wp = ARF_PREC_ADD(prec, FLINT_BIT_COUNT(n)); climbs_max = FLINT_BIT_COUNT(n - 1) * m; - c = TMP_ALLOC(sizeof(mp_limb_t) * climbs_max * m); + c = TMP_ALLOC(sizeof(ulong) * climbs_max * m); /* length of (x+t)^m */ xmlen = FLINT_MIN(len, m + 1); diff --git a/src/acb_hypgeom/rising_ui_rs.c b/src/acb_hypgeom/rising_ui_rs.c index 83cf0517d5..51cfb48218 100644 --- a/src/acb_hypgeom/rising_ui_rs.c +++ b/src/acb_hypgeom/rising_ui_rs.c @@ -26,7 +26,7 @@ acb_hypgeom_rising_ui_rs(acb_t res, const acb_t x, ulong n, ulong m, slong prec) slong i, k, l, m0, climbs, climbs_max, wp; acb_ptr xpow; acb_t t, u; - mp_ptr c; + nn_ptr c; TMP_INIT; if (n <= 1) @@ -60,7 +60,7 @@ acb_hypgeom_rising_ui_rs(acb_t res, const acb_t x, ulong n, ulong m, slong prec) wp = ARF_PREC_ADD(prec, FLINT_BIT_COUNT(n)); climbs_max = FLINT_BIT_COUNT(n - 1) * m; - c = TMP_ALLOC(sizeof(mp_limb_t) * climbs_max * m); + c = TMP_ALLOC(sizeof(ulong) * climbs_max * m); xpow = _acb_vec_init(m + 1); _acb_vec_set_powers(xpow, x, m + 1, wp); diff --git a/src/acb_hypgeom/test/main.c b/src/acb_hypgeom/test/main.c index d4b8da9703..176790f8f2 100644 --- a/src/acb_hypgeom/test/main.c +++ b/src/acb_hypgeom/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-0f1.c" diff --git a/src/acb_mat/test/main.c b/src/acb_mat/test/main.c index f2c5077cb2..0d9c55673a 100644 --- a/src/acb_mat/test/main.c +++ b/src/acb_mat/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-approx_eig_qr.c" diff --git a/src/acb_modular/test/main.c b/src/acb_modular/test/main.c index 2df41d1235..a8252eb077 100644 --- a/src/acb_modular/test/main.c +++ b/src/acb_modular/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-delta.c" diff --git a/src/acb_modular/test/t-hilbert_class_poly.c b/src/acb_modular/test/t-hilbert_class_poly.c index 5c923f3b38..dcf4b07a71 100644 --- a/src/acb_modular/test/t-hilbert_class_poly.c +++ b/src/acb_modular/test/t-hilbert_class_poly.c @@ -119,7 +119,7 @@ TEST_FUNCTION_START(acb_modular_hilbert_class_poly, state) { { slong i; - mp_limb_t c; + ulong c; fmpz_poly_t hd; nmod_poly_t hdp; diff --git a/src/acb_poly/test/main.c b/src/acb_poly/test/main.c index 41379587ac..3e4ac2a255 100644 --- a/src/acb_poly/test/main.c +++ b/src/acb_poly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/acb_theta/test/main.c b/src/acb_theta/test/main.c index 39d65bd103..dae3eb11cc 100644 --- a/src/acb_theta/test/main.c +++ b/src/acb_theta/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-agm_hadamard.c" diff --git a/src/acf/approx_dot.c b/src/acf/approx_dot.c index 5239b3f481..0d5001d89a 100644 --- a/src/acf/approx_dot.c +++ b/src/acf/approx_dot.c @@ -12,22 +12,22 @@ #include "acf.h" #include "mpn_extras.h" -/* We need uint64_t instead of mp_limb_t on 32-bit systems for +/* We need uint64_t instead of ulong on 32-bit systems for safe summation of 30-bit error bounds. 
*/ #include void -_arb_dot_addmul_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn, - mp_srcptr xptr, mp_size_t xn, mp_srcptr yptr, mp_size_t yn, +_arb_dot_addmul_generic(nn_ptr sum, nn_ptr serr, nn_ptr tmp, slong sn, + nn_srcptr xptr, slong xn, nn_srcptr yptr, slong yn, int negative, flint_bitcnt_t shift); void -_arb_dot_add_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn, - mp_srcptr xptr, mp_size_t xn, +_arb_dot_add_generic(nn_ptr sum, nn_ptr serr, nn_ptr tmp, slong sn, + nn_srcptr xptr, slong xn, int negative, flint_bitcnt_t shift); static void -_arb_dot_output(arf_t res, mp_ptr sum, mp_size_t sn, int negative, +_arb_dot_output(arf_t res, nn_ptr sum, slong sn, int negative, slong sum_exp, slong prec, arf_rnd_t rnd) { slong exp_fix; @@ -43,7 +43,7 @@ _arb_dot_output(arf_t res, mp_ptr sum, mp_size_t sn, int negative, if (sum[sn - 1] == 0) { slong sum_exp2; - mp_size_t sn2; + slong sn2; sn2 = sn; sum_exp2 = sum_exp; @@ -79,7 +79,7 @@ _arb_dot_output(arf_t res, mp_ptr sum, mp_size_t sn, int negative, #define ARB_DOT_ADD(s_sum, s_serr, s_sn, s_sum_exp, s_subtract, xm) \ if (!arf_is_special(xm)) \ { \ - mp_srcptr xptr; \ + nn_srcptr xptr; \ xexp = ARF_EXP(xm); \ xn = ARF_SIZE(xm); \ xnegative = ARF_SGNBIT(xm); \ @@ -98,9 +98,9 @@ static void _arf_complex_mul_gauss(arf_t e, arf_t f, const arf_t a, const arf_t b, const arf_t c, const arf_t d) { - mp_srcptr ap, bp, cp, dp; + nn_srcptr ap, bp, cp, dp; int asgn, bsgn, csgn, dsgn; - mp_size_t an, bn, cn, dn; + slong an, bn, cn, dn; slong aexp, bexp, cexp, dexp; fmpz texp, uexp; @@ -266,12 +266,12 @@ acf_approx_dot(acf_t res, const acf_t initial, int subtract, acf_srcptr x, slong slong im_max_exp, im_min_exp, im_sum_exp; slong re_prec, im_prec; int xnegative, ynegative; - mp_size_t xn, yn, re_sn, im_sn, alloc; + slong xn, yn, re_sn, im_sn, alloc; flint_bitcnt_t shift; arf_srcptr xi, yi; arf_srcptr xm, ym; - mp_limb_t re_serr, im_serr; /* Sum over arithmetic errors */ - mp_ptr tmp, re_sum, im_sum; /* Workspace */ + ulong re_serr, im_serr; /* Sum over arithmetic errors */ + nn_ptr tmp, re_sum, im_sum; /* Workspace */ slong xoff, yoff; char * use_gauss; ARF_ADD_TMP_DECL; @@ -483,7 +483,7 @@ acf_approx_dot(acf_t res, const acf_t initial, int subtract, acf_srcptr x, slong for (i = 0; i < len; i++) { arf_srcptr ai, bi, ci, di; - mp_size_t an, bn, cn, dn; + slong an, bn, cn, dn; slong aexp, bexp, cexp, dexp; ai = ((arf_srcptr) x) + 2 * i * xstep; @@ -537,9 +537,9 @@ acf_approx_dot(acf_t res, const acf_t initial, int subtract, acf_srcptr x, slong for (yoff = 0; yoff < 2; yoff++) { slong sum_exp; - mp_ptr sum; - mp_size_t sn; - mp_limb_t serr; + nn_ptr sum; + slong sn; + ulong serr; int flipsign; if (xoff == yoff) @@ -589,8 +589,8 @@ acf_approx_dot(acf_t res, const acf_t initial, int subtract, acf_srcptr x, slong } else if (xn <= 2 && yn <= 2 && sn <= 3) { - mp_limb_t x1, x0, y1, y0; - mp_limb_t u3, u2, u1, u0; + ulong x1, x0, y1, y0; + ulong u3, u2, u1, u0; if (xn == 1 && yn == 1) { @@ -688,7 +688,7 @@ acf_approx_dot(acf_t res, const acf_t initial, int subtract, acf_srcptr x, slong } else { - mp_srcptr xptr, yptr; + nn_srcptr xptr, yptr; xptr = (xn <= ARF_NOPTR_LIMBS) ? ARF_NOPTR_D(xm) : ARF_PTR_D(xm); yptr = (yn <= ARF_NOPTR_LIMBS) ? ARF_NOPTR_D(ym) : ARF_PTR_D(ym); diff --git a/src/acf/test/main.c b/src/acf/test/main.c index c8a96b3bca..864fe1a22b 100644 --- a/src/acf/test/main.c +++ b/src/acf/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-approx_dot.c" diff --git a/src/aprcl.h b/src/aprcl.h index f87281988c..73086775b3 100644 --- a/src/aprcl.h +++ b/src/aprcl.h @@ -201,9 +201,9 @@ void unity_zp_aut(unity_zp f, const unity_zp g, ulong x); void unity_zp_aut_inv(unity_zp f, const unity_zp g, ulong x); /* Jacobi sum computation. */ -mp_ptr aprcl_f_table(const ulong q); +nn_ptr aprcl_f_table(const ulong q); -void _unity_zp_jacobi_sum_pq_general(unity_zp f, const mp_ptr table, ulong p, ulong q, ulong k, ulong a, ulong b); +void _unity_zp_jacobi_sum_pq_general(unity_zp f, const nn_ptr table, ulong p, ulong q, ulong k, ulong a, ulong b); void unity_zp_jacobi_sum_pq(unity_zp f, ulong q, ulong p); void unity_zp_jacobi_sum_2q_one(unity_zp f, ulong q); diff --git a/src/aprcl/f_table.c b/src/aprcl/f_table.c index ce2483bac8..587029892a 100644 --- a/src/aprcl/f_table.c +++ b/src/aprcl/f_table.c @@ -20,12 +20,12 @@ f_table[x - 1] = f(x). */ -mp_ptr +nn_ptr aprcl_f_table(const ulong q) { int i; ulong g, g_pow, qinv; - mp_ptr g_table, f_table; + nn_ptr g_table, f_table; g = n_primitive_root_prime(q); g_table = _nmod_vec_init(q); diff --git a/src/aprcl/test/main.c b/src/aprcl/test/main.c index 0d25014a43..04e1dba5c5 100644 --- a/src/aprcl/test/main.c +++ b/src/aprcl/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-config_gauss.c" diff --git a/src/aprcl/test/t-f_table.c b/src/aprcl/test/t-f_table.c index e74168e0f9..ae5a5c314f 100644 --- a/src/aprcl/test/t-f_table.c +++ b/src/aprcl/test/t-f_table.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(aprcl_f_table, state) for (i = 0; i < 10 * flint_test_multiplier(); i++) { ulong len, q, p, g; - mp_ptr table; + nn_ptr table; len = n_randint(state, 16); while (len < 2) diff --git a/src/aprcl/test/t-unity_zp_jacobi_sum.c b/src/aprcl/test/t-unity_zp_jacobi_sum.c index fee9cf9529..2c4ffcde26 100644 --- a/src/aprcl/test/t-unity_zp_jacobi_sum.c +++ b/src/aprcl/test/t-unity_zp_jacobi_sum.c @@ -28,7 +28,7 @@ TEST_FUNCTION_START(aprcl_unity_zp_jacobi_sum, state) fmpz_t n; unity_zp f, g; n_factor_t q_factors; - mp_ptr table; + nn_ptr table; n_factor_init(&q_factors); @@ -82,7 +82,7 @@ TEST_FUNCTION_START(aprcl_unity_zp_jacobi_sum, state) fmpz_t n; unity_zp f, g; n_factor_t q_factors; - mp_ptr table; + nn_ptr table; n_factor_init(&q_factors); @@ -135,7 +135,7 @@ TEST_FUNCTION_START(aprcl_unity_zp_jacobi_sum, state) fmpz_t n; unity_zp f, g; n_factor_t q_factors; - mp_ptr table; + nn_ptr table; n_factor_init(&q_factors); diff --git a/src/aprcl/unity_zp_jacobi_sum.c b/src/aprcl/unity_zp_jacobi_sum.c index 6fb166af86..c15cacfbaa 100644 --- a/src/aprcl/unity_zp_jacobi_sum.c +++ b/src/aprcl/unity_zp_jacobi_sum.c @@ -17,7 +17,7 @@ Computes sum \zeta_{p^k}^{a * x + b * f(x)} for x = 1, 2, ..., q - 2. 
*/ void -_unity_zp_jacobi_sum_pq_general(unity_zp f, const mp_ptr table, +_unity_zp_jacobi_sum_pq_general(unity_zp f, const nn_ptr table, ulong p, ulong q, ulong k, ulong a, ulong b) { int i, j; @@ -57,7 +57,7 @@ void unity_zp_jacobi_sum_pq(unity_zp f, ulong q, ulong p) { ulong k; - mp_ptr table; + nn_ptr table; table = aprcl_f_table(q); k = aprcl_p_power_in_q(q - 1, p); @@ -74,7 +74,7 @@ void unity_zp_jacobi_sum_2q_one(unity_zp f, ulong q) { ulong k; - mp_ptr table; + nn_ptr table; table = aprcl_f_table(q); k = aprcl_p_power_in_q(q - 1, 2); @@ -92,7 +92,7 @@ void unity_zp_jacobi_sum_2q_two(unity_zp f, ulong q) { ulong a, b, k; - mp_ptr table; + nn_ptr table; table = aprcl_f_table(q); k = aprcl_p_power_in_q(q - 1, 2); diff --git a/src/arb.h b/src/arb.h index 2e7a23ab7d..4842b0cba5 100644 --- a/src/arb.h +++ b/src/arb.h @@ -885,17 +885,17 @@ _arb_vec_get_unique_fmpz_vec(fmpz * res, arb_srcptr vec, slong len) #define ARB_ATAN_TAB2_PREC 4608 #define ARB_ATAN_TAB2_LIMBS (ARB_ATAN_TAB2_PREC / FLINT_BITS) -FLINT_DLL extern const mp_limb_t arb_atan_tab1[1 << ARB_ATAN_TAB1_BITS][ARB_ATAN_TAB1_LIMBS]; -FLINT_DLL extern const mp_limb_t arb_atan_tab21[1 << ARB_ATAN_TAB21_BITS][ARB_ATAN_TAB2_LIMBS]; -FLINT_DLL extern const mp_limb_t arb_atan_tab22[1 << ARB_ATAN_TAB22_BITS][ARB_ATAN_TAB2_LIMBS]; -FLINT_DLL extern const mp_limb_t arb_atan_pi2_minus_one[ARB_ATAN_TAB2_LIMBS]; +FLINT_DLL extern const ulong arb_atan_tab1[1 << ARB_ATAN_TAB1_BITS][ARB_ATAN_TAB1_LIMBS]; +FLINT_DLL extern const ulong arb_atan_tab21[1 << ARB_ATAN_TAB21_BITS][ARB_ATAN_TAB2_LIMBS]; +FLINT_DLL extern const ulong arb_atan_tab22[1 << ARB_ATAN_TAB22_BITS][ARB_ATAN_TAB2_LIMBS]; +FLINT_DLL extern const ulong arb_atan_pi2_minus_one[ARB_ATAN_TAB2_LIMBS]; void -_arb_atan_taylor_naive(mp_ptr y, mp_limb_t * error, - mp_srcptr x, mp_size_t xn, ulong N, int alternating); +_arb_atan_taylor_naive(nn_ptr y, ulong * error, + nn_srcptr x, slong xn, ulong N, int alternating); -void _arb_atan_taylor_rs(mp_ptr y, mp_limb_t * error, - mp_srcptr x, mp_size_t xn, ulong N, int alternating); +void _arb_atan_taylor_rs(nn_ptr y, ulong * error, + nn_srcptr x, slong xn, ulong N, int alternating); #define ARB_ATAN_NEWTON_PREC 3400 @@ -914,11 +914,11 @@ void arb_atan_arf_newton(arb_t res, const arf_t x, slong prec); #define ARB_LOG_TAB2_PREC 4608 #define ARB_LOG_TAB2_LIMBS (ARB_LOG_TAB2_PREC / FLINT_BITS) -FLINT_DLL extern const mp_limb_t arb_log_tab11[1 << ARB_LOG_TAB11_BITS][ARB_LOG_TAB1_LIMBS]; -FLINT_DLL extern const mp_limb_t arb_log_tab12[1 << ARB_LOG_TAB12_BITS][ARB_LOG_TAB1_LIMBS]; -FLINT_DLL extern const mp_limb_t arb_log_tab21[1 << ARB_LOG_TAB21_BITS][ARB_LOG_TAB2_LIMBS]; -FLINT_DLL extern const mp_limb_t arb_log_tab22[1 << ARB_LOG_TAB22_BITS][ARB_LOG_TAB2_LIMBS]; -FLINT_DLL extern const mp_srcptr arb_log_log2_tab; +FLINT_DLL extern const ulong arb_log_tab11[1 << ARB_LOG_TAB11_BITS][ARB_LOG_TAB1_LIMBS]; +FLINT_DLL extern const ulong arb_log_tab12[1 << ARB_LOG_TAB12_BITS][ARB_LOG_TAB1_LIMBS]; +FLINT_DLL extern const ulong arb_log_tab21[1 << ARB_LOG_TAB21_BITS][ARB_LOG_TAB2_LIMBS]; +FLINT_DLL extern const ulong arb_log_tab22[1 << ARB_LOG_TAB22_BITS][ARB_LOG_TAB2_LIMBS]; +FLINT_DLL extern const nn_srcptr arb_log_log2_tab; void arb_log_newton(arb_t res, const arb_t x, slong prec); void arb_log_arf_newton(arb_t res, const arf_t x, slong prec); @@ -927,7 +927,7 @@ void arb_log_arf_newton(arb_t res, const arf_t x, slong prec); #define ARB_LOG_PRIME_CACHE_NUM 13 -FLINT_DLL extern const mp_limb_t arb_log_p_tab[ARB_LOG_PRIME_CACHE_NUM][ARB_LOG_TAB2_LIMBS]; 
+FLINT_DLL extern const ulong arb_log_p_tab[ARB_LOG_PRIME_CACHE_NUM][ARB_LOG_TAB2_LIMBS]; void arb_log_primes_vec_bsplit(arb_ptr res, slong n, slong prec); void _arb_log_p_ensure_cached(slong prec); @@ -949,21 +949,21 @@ arb_srcptr _arb_log_p_cache_vec(void); #define ARB_EXP_TAB2_PREC 4608 #define ARB_EXP_TAB2_LIMBS (ARB_EXP_TAB2_PREC / FLINT_BITS) -FLINT_DLL extern const mp_limb_t arb_exp_tab1[ARB_EXP_TAB1_NUM][ARB_EXP_TAB1_LIMBS]; -FLINT_DLL extern const mp_limb_t arb_exp_tab21[ARB_EXP_TAB21_NUM][ARB_EXP_TAB2_LIMBS]; -FLINT_DLL extern const mp_limb_t arb_exp_tab22[ARB_EXP_TAB22_NUM][ARB_EXP_TAB2_LIMBS]; +FLINT_DLL extern const ulong arb_exp_tab1[ARB_EXP_TAB1_NUM][ARB_EXP_TAB1_LIMBS]; +FLINT_DLL extern const ulong arb_exp_tab21[ARB_EXP_TAB21_NUM][ARB_EXP_TAB2_LIMBS]; +FLINT_DLL extern const ulong arb_exp_tab22[ARB_EXP_TAB22_NUM][ARB_EXP_TAB2_LIMBS]; -void _arb_exp_taylor_naive(mp_ptr y, mp_limb_t * error, - mp_srcptr x, mp_size_t xn, ulong N); +void _arb_exp_taylor_naive(nn_ptr y, ulong * error, + nn_srcptr x, slong xn, ulong N); -void _arb_exp_taylor_rs(mp_ptr y, mp_limb_t * error, - mp_srcptr x, mp_size_t xn, ulong N); +void _arb_exp_taylor_rs(nn_ptr y, ulong * error, + nn_srcptr x, slong xn, ulong N); void arb_exp_arf_bb(arb_t z, const arf_t x, slong prec, int minus_one); void arb_exp_arf_rs_generic(arb_t res, const arf_t x, slong prec, int minus_one); -int _arb_get_mpn_fixed_mod_log2(mp_ptr w, fmpz_t q, mp_limb_t * error, - const arf_t x, mp_size_t wn); +int _arb_get_mpn_fixed_mod_log2(nn_ptr w, fmpz_t q, ulong * error, + const arf_t x, slong wn); slong _arb_exp_taylor_bound(slong mag, slong prec); @@ -997,22 +997,22 @@ void arb_exp_arf(arb_t z, const arf_t x, slong prec, int minus_one, slong maglim #define ARB_SIN_COS_TAB2_PREC 4608 #define ARB_SIN_COS_TAB2_LIMBS (ARB_SIN_COS_TAB2_PREC / FLINT_BITS) -FLINT_DLL extern const mp_limb_t arb_sin_cos_tab1[2 * ARB_SIN_COS_TAB1_NUM][ARB_SIN_COS_TAB1_LIMBS]; -FLINT_DLL extern const mp_limb_t arb_sin_cos_tab21[2 * ARB_SIN_COS_TAB21_NUM][ARB_SIN_COS_TAB2_LIMBS]; -FLINT_DLL extern const mp_limb_t arb_sin_cos_tab22[2 * ARB_SIN_COS_TAB22_NUM][ARB_SIN_COS_TAB2_LIMBS]; +FLINT_DLL extern const ulong arb_sin_cos_tab1[2 * ARB_SIN_COS_TAB1_NUM][ARB_SIN_COS_TAB1_LIMBS]; +FLINT_DLL extern const ulong arb_sin_cos_tab21[2 * ARB_SIN_COS_TAB21_NUM][ARB_SIN_COS_TAB2_LIMBS]; +FLINT_DLL extern const ulong arb_sin_cos_tab22[2 * ARB_SIN_COS_TAB22_NUM][ARB_SIN_COS_TAB2_LIMBS]; #define ARB_PI4_TAB_LIMBS (4608 / FLINT_BITS) -FLINT_DLL extern const mp_limb_t arb_pi4_tab[ARB_PI4_TAB_LIMBS]; +FLINT_DLL extern const ulong arb_pi4_tab[ARB_PI4_TAB_LIMBS]; -void _arb_sin_cos_taylor_naive(mp_ptr ysin, mp_ptr ycos, mp_limb_t * error, - mp_srcptr x, mp_size_t xn, ulong N); +void _arb_sin_cos_taylor_naive(nn_ptr ysin, nn_ptr ycos, ulong * error, + nn_srcptr x, slong xn, ulong N); -void _arb_sin_cos_taylor_rs(mp_ptr ysin, mp_ptr ycos, - mp_limb_t * error, mp_srcptr x, mp_size_t xn, ulong N, +void _arb_sin_cos_taylor_rs(nn_ptr ysin, nn_ptr ycos, + ulong * error, nn_srcptr x, slong xn, ulong N, int sinonly, int alternating); -int _arb_get_mpn_fixed_mod_pi4(mp_ptr w, fmpz_t q, int * octant, - mp_limb_t * error, const arf_t x, mp_size_t wn); +int _arb_get_mpn_fixed_mod_pi4(nn_ptr w, fmpz_t q, int * octant, + ulong * error, const arf_t x, slong wn); void arb_sin_cos_arf_bb(arb_t zsin, arb_t zcos, const arf_t x, slong prec); void arb_sin_cos_arf_rs_generic(arb_t res_sin, arb_t res_cos, const arf_t x, slong prec); @@ -1031,7 +1031,7 @@ void arb_atan_gauss_primes_vec_bsplit(arb_ptr res, 
slong n, slong prec); #define ARB_SIN_COS_ATAN_REDUCTION_DEFAULT_MAX_PREC 4000000 #define ARB_SIN_COS_ATAN_REDUCTION_PREC 2600 -FLINT_DLL extern const mp_limb_t arb_atan_gauss_tab[ARB_ATAN_GAUSS_PRIME_CACHE_NUM][ARB_ATAN_TAB2_LIMBS]; +FLINT_DLL extern const ulong arb_atan_gauss_tab[ARB_ATAN_GAUSS_PRIME_CACHE_NUM][ARB_ATAN_TAB2_LIMBS]; void _arb_atan_gauss_p_ensure_cached(slong prec); arb_srcptr _arb_atan_gauss_p_cache_vec(void); @@ -1040,10 +1040,10 @@ void arb_sin_cos_arf_atan_reduction(arb_t res1, arb_t res2, const arf_t x, slong ARB_INLINE flint_bitcnt_t -_arb_mpn_leading_zeros(mp_srcptr d, mp_size_t n) +_arb_mpn_leading_zeros(nn_srcptr d, slong n) { - mp_limb_t t; - mp_size_t zero_limbs; + ulong t; + slong zero_limbs; flint_bitcnt_t bits; zero_limbs = 0; @@ -1102,7 +1102,7 @@ _arb_vec_estimate_allocated_bytes(slong len, slong prec) size = len * (double) sizeof(arb_struct); if (prec > ARF_NOPTR_LIMBS * FLINT_BITS) - size += len * (double) ((prec + FLINT_BITS - 1) / FLINT_BITS) * sizeof(mp_limb_t); + size += len * (double) ((prec + FLINT_BITS - 1) / FLINT_BITS) * sizeof(ulong); return size; } diff --git a/src/arb/approx_dot.c b/src/arb/approx_dot.c index b65c288d7b..f9fd8e2aca 100644 --- a/src/arb/approx_dot.c +++ b/src/arb/approx_dot.c @@ -12,18 +12,18 @@ #include "arb.h" #include "mpn_extras.h" -/* We need uint64_t instead of mp_limb_t on 32-bit systems for +/* We need uint64_t instead of ulong on 32-bit systems for safe summation of 30-bit error bounds. */ #include void -_arb_dot_addmul_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn, - mp_srcptr xptr, mp_size_t xn, mp_srcptr yptr, mp_size_t yn, +_arb_dot_addmul_generic(nn_ptr sum, nn_ptr serr, nn_ptr tmp, slong sn, + nn_srcptr xptr, slong xn, nn_srcptr yptr, slong yn, int negative, flint_bitcnt_t shift); void -_arb_dot_add_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn, - mp_srcptr xptr, mp_size_t xn, +_arb_dot_add_generic(nn_ptr sum, nn_ptr serr, nn_ptr tmp, slong sn, + nn_srcptr xptr, slong xn, int negative, flint_bitcnt_t shift); void @@ -67,12 +67,12 @@ arb_approx_dot(arb_t res, const arb_t initial, int subtract, arb_srcptr x, slong slong i, j, nonzero, padding, extend; slong xexp, yexp, exp, max_exp, min_exp, sum_exp; int xnegative, ynegative; - mp_size_t xn, yn, sn, alloc; + slong xn, yn, sn, alloc; flint_bitcnt_t shift; arb_srcptr xi, yi; arf_srcptr xm, ym; - mp_limb_t serr; /* Sum over arithmetic errors - not used, but need dummy for calls */ - mp_ptr tmp, sum; /* Workspace */ + ulong serr; /* Sum over arithmetic errors - not used, but need dummy for calls */ + nn_ptr tmp, sum; /* Workspace */ ARF_ADD_TMP_DECL; /* todo: fast fma and fmma (len=2) code */ @@ -215,7 +215,7 @@ arb_approx_dot(arb_t res, const arb_t initial, int subtract, arb_srcptr x, slong if (!arf_is_special(xm)) { - mp_srcptr xptr; + nn_srcptr xptr; xexp = ARF_EXP(xm); xn = ARF_SIZE(xm); @@ -259,7 +259,7 @@ arb_approx_dot(arb_t res, const arb_t initial, int subtract, arb_srcptr x, slong #if 0 else if (xn == 1 && yn == 1 && sn == 2 && shift < FLINT_BITS) /* Fastest path. 
*/ { - mp_limb_t hi, lo, x0, y0; + ulong hi, lo, x0, y0; x0 = ARF_NOPTR_D(xm)[0]; y0 = ARF_NOPTR_D(ym)[0]; @@ -276,8 +276,8 @@ arb_approx_dot(arb_t res, const arb_t initial, int subtract, arb_srcptr x, slong } else if (xn == 2 && yn == 2 && shift < FLINT_BITS && sn <= 3) { - mp_limb_t x1, x0, y1, y0; - mp_limb_t u3, u2, u1, u0; + ulong x1, x0, y1, y0; + ulong u3, u2, u1, u0; x0 = ARF_NOPTR_D(xm)[0]; x1 = ARF_NOPTR_D(xm)[1]; @@ -308,8 +308,8 @@ arb_approx_dot(arb_t res, const arb_t initial, int subtract, arb_srcptr x, slong #endif else if (xn <= 2 && yn <= 2 && sn <= 3) { - mp_limb_t x1, x0, y1, y0; - mp_limb_t u3, u2, u1, u0; + ulong x1, x0, y1, y0; + ulong u3, u2, u1, u0; if (xn == 1 && yn == 1) { @@ -407,7 +407,7 @@ arb_approx_dot(arb_t res, const arb_t initial, int subtract, arb_srcptr x, slong } else { - mp_srcptr xptr, yptr; + nn_srcptr xptr, yptr; xptr = (xn <= ARF_NOPTR_LIMBS) ? ARF_NOPTR_D(xm) : ARF_PTR_D(xm); yptr = (yn <= ARF_NOPTR_LIMBS) ? ARF_NOPTR_D(ym) : ARF_PTR_D(ym); @@ -427,7 +427,7 @@ arb_approx_dot(arb_t res, const arb_t initial, int subtract, arb_srcptr x, slong if (sum[sn - 1] == 0) { slong sum_exp2; - mp_size_t sn2; + slong sn2; sn2 = sn; sum_exp2 = sum_exp; diff --git a/src/arb/atan_arf.c b/src/arb/atan_arf.c index 4b18b95074..756a08a9ef 100644 --- a/src/arb/atan_arf.c +++ b/src/arb/atan_arf.c @@ -12,7 +12,7 @@ #include "arb.h" #include "mpn_extras.h" -#define TMP_ALLOC_LIMBS(size) TMP_ALLOC((size) * sizeof(mp_limb_t)) +#define TMP_ALLOC_LIMBS(size) TMP_ALLOC((size) * sizeof(ulong)) /* atan(x) = x + eps, |eps| < x^3*/ void @@ -88,10 +88,10 @@ arb_atan_arf(arb_t z, const arf_t x, slong prec) else { slong exp, wp, wn, N, r; - mp_srcptr xp; - mp_size_t xn, tn; - mp_ptr tmp, w, t, u; - mp_limb_t p1, q1bits, p2, q2bits, error, error2; + nn_srcptr xp; + slong xn, tn; + nn_ptr tmp, w, t, u; + ulong p1, q1bits, p2, q2bits, error, error2; int negative, inexact, reciprocal; TMP_INIT; @@ -162,7 +162,7 @@ arb_atan_arf(arb_t z, const arf_t x, slong prec) else /* |x| > 1 */ { slong one_exp, one_limbs, one_bits; - mp_ptr one; + nn_ptr one; reciprocal = 1; diff --git a/src/arb/atan_tab.c b/src/arb/atan_tab.c index df986b4425..6bb0804a8c 100644 --- a/src/arb/atan_tab.c +++ b/src/arb/atan_tab.c @@ -21,7 +21,7 @@ /* gaussian primes */ /* todo: first entry duplicates the pi table */ -const mp_limb_t arb_atan_gauss_tab[ARB_ATAN_GAUSS_PRIME_CACHE_NUM][ARB_ATAN_TAB2_LIMBS] = +const ulong arb_atan_gauss_tab[ARB_ATAN_GAUSS_PRIME_CACHE_NUM][ARB_ATAN_TAB2_LIMBS] = {{ Z8(e6cc254b,db7f1447,ced4bb1b,44ce6cba,cf9b14ed,da3edbeb,865a8918,179727b0) Z8(9027d831,b06a53ed,413001ae,e5db382f,ad9e530e,f8ff9406,3dba37bd,c9751e76) @@ -271,7 +271,7 @@ const mp_limb_t arb_atan_gauss_tab[ARB_ATAN_GAUSS_PRIME_CACHE_NUM][ARB_ATAN_TAB2 Z8(25865183,7356c7b3,0de9bc82,58ccd7e1,b5572bd1,55f866a5,b0029c17,bc4de960) }}; -const mp_limb_t arb_atan_pi2_minus_one[ARB_ATAN_TAB2_LIMBS] = { +const ulong arb_atan_pi2_minus_one[ARB_ATAN_TAB2_LIMBS] = { Z8(cd984a96,b6fe288f,9da97637,899cd975,9f3629da,b47db7d7,0cb51231,2f2e4f61) Z8(204fb062,60d4a7db,8260035d,cbb6705e,5b3ca61d,f1ff280d,7b746f7b,92ea3cec) Z8(c04c8dbd,83a9b964,a4f8e04d,6d87f569,68050924,9be86b92,214d811e,0dff6fb9) @@ -292,7 +292,7 @@ const mp_limb_t arb_atan_pi2_minus_one[ARB_ATAN_TAB2_LIMBS] = { Z8(76273644,04177d4c,14cf98e8,52049c11,01b839a2,898cc517,42d18469,921fb544) }; -const mp_limb_t arb_atan_tab1[1 << ARB_ATAN_TAB1_BITS][ARB_ATAN_TAB1_LIMBS] = +const ulong arb_atan_tab1[1 << ARB_ATAN_TAB1_BITS][ARB_ATAN_TAB1_LIMBS] = {{ 
Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) @@ -1063,7 +1063,7 @@ const mp_limb_t arb_atan_tab1[1 << ARB_ATAN_TAB1_BITS][ARB_ATAN_TAB1_LIMBS] = Z8(7ea0180b,d9884aae,e7b9333e,167b4c95,a90d530c,745076df,cc19d89d,c88f9a8c) }}; -const mp_limb_t arb_atan_tab21[1 << ARB_ATAN_TAB21_BITS][ARB_ATAN_TAB2_LIMBS] = +const ulong arb_atan_tab21[1 << ARB_ATAN_TAB21_BITS][ARB_ATAN_TAB2_LIMBS] = {{ Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) @@ -1674,7 +1674,7 @@ const mp_limb_t arb_atan_tab21[1 << ARB_ATAN_TAB21_BITS][ARB_ATAN_TAB2_LIMBS] = Z8(4f97d1fa,fe669d99,0039834e,4da621b6,bc9e0221,8cb43d10,bf8fbd54,c4ffaffa) }}; -const mp_limb_t arb_atan_tab22[1 << ARB_ATAN_TAB22_BITS][ARB_ATAN_TAB2_LIMBS] = +const ulong arb_atan_tab22[1 << ARB_ATAN_TAB22_BITS][ARB_ATAN_TAB2_LIMBS] = {{ Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) diff --git a/src/arb/atan_taylor_naive.c b/src/arb/atan_taylor_naive.c index 5f094930de..181b835ef4 100644 --- a/src/arb/atan_taylor_naive.c +++ b/src/arb/atan_taylor_naive.c @@ -13,12 +13,12 @@ #include "mpn_extras.h" void -_arb_atan_taylor_naive(mp_ptr y, mp_limb_t * error, - mp_srcptr x, mp_size_t xn, ulong N, int alternating) +_arb_atan_taylor_naive(nn_ptr y, ulong * error, + nn_srcptr x, slong xn, ulong N, int alternating) { ulong k; - mp_ptr s, t, x1, x2, u; - mp_size_t nn = xn + 1; + nn_ptr s, t, x1, x2, u; + slong nn = xn + 1; if (N == 0) { @@ -33,11 +33,11 @@ _arb_atan_taylor_naive(mp_ptr y, mp_limb_t * error, error[0] = 0; } - s = flint_malloc(sizeof(mp_limb_t) * nn); - t = flint_malloc(sizeof(mp_limb_t) * nn); - u = flint_malloc(sizeof(mp_limb_t) * 2 * nn); - x1 = flint_malloc(sizeof(mp_limb_t) * nn); - x2 = flint_malloc(sizeof(mp_limb_t) * nn); + s = flint_malloc(sizeof(ulong) * nn); + t = flint_malloc(sizeof(ulong) * nn); + u = flint_malloc(sizeof(ulong) * 2 * nn); + x1 = flint_malloc(sizeof(ulong) * nn); + x2 = flint_malloc(sizeof(ulong) * nn); flint_mpn_zero(s, nn); flint_mpn_zero(t, nn); diff --git a/src/arb/atan_taylor_rs.c b/src/arb/atan_taylor_rs.c index 17add2eabd..9e589a770e 100644 --- a/src/arb/atan_taylor_rs.c +++ b/src/arb/atan_taylor_rs.c @@ -15,13 +15,13 @@ /* See verify_taylor.py for code to generate tables and proof of correctness */ -#define TMP_ALLOC_LIMBS(size) TMP_ALLOC((size) * sizeof(mp_limb_t)) +#define TMP_ALLOC_LIMBS(size) TMP_ALLOC((size) * sizeof(ulong)) #define ODD_RECIPROCAL_TAB_SIZE 256 #if FLINT_BITS == 64 -const mp_limb_t odd_reciprocal_tab_numer[ODD_RECIPROCAL_TAB_SIZE] = { +const ulong odd_reciprocal_tab_numer[ODD_RECIPROCAL_TAB_SIZE] = { UWORD(13835020108241056725), UWORD(4611673369413685575), UWORD(2767004021648211345), UWORD(1976431444034436675), UWORD(1537224456471228525), UWORD(1257729100749186975), @@ -152,7 +152,7 @@ const mp_limb_t odd_reciprocal_tab_numer[ODD_RECIPROCAL_TAB_SIZE] = { UWORD(6291002587483845), UWORD(6266380268159055), }; -const mp_limb_t odd_reciprocal_tab_denom[ODD_RECIPROCAL_TAB_SIZE] = { +const ulong odd_reciprocal_tab_denom[ODD_RECIPROCAL_TAB_SIZE] = { UWORD(13835020108241056725), UWORD(13835020108241056725), UWORD(13835020108241056725), UWORD(13835020108241056725), UWORD(13835020108241056725), UWORD(13835020108241056725), @@ -285,7 +285,7 @@ const mp_limb_t 
odd_reciprocal_tab_denom[ODD_RECIPROCAL_TAB_SIZE] = { #else -const mp_limb_t odd_reciprocal_tab_numer[ODD_RECIPROCAL_TAB_SIZE] = { +const ulong odd_reciprocal_tab_numer[ODD_RECIPROCAL_TAB_SIZE] = { UWORD(1673196525), UWORD(557732175), UWORD(334639305), UWORD(239028075), UWORD(185910725), UWORD(152108775), @@ -416,7 +416,7 @@ const mp_limb_t odd_reciprocal_tab_numer[ODD_RECIPROCAL_TAB_SIZE] = { UWORD(262143), UWORD(261117), }; -const mp_limb_t odd_reciprocal_tab_denom[ODD_RECIPROCAL_TAB_SIZE] = { +const ulong odd_reciprocal_tab_denom[ODD_RECIPROCAL_TAB_SIZE] = { UWORD(1673196525), UWORD(1673196525), UWORD(1673196525), UWORD(1673196525), UWORD(1673196525), UWORD(1673196525), @@ -549,11 +549,11 @@ const mp_limb_t odd_reciprocal_tab_denom[ODD_RECIPROCAL_TAB_SIZE] = { #endif -void _arb_atan_taylor_rs(mp_ptr y, mp_limb_t * error, - mp_srcptr x, mp_size_t xn, ulong N, int alternating) +void _arb_atan_taylor_rs(nn_ptr y, ulong * error, + nn_srcptr x, slong xn, ulong N, int alternating) { - mp_ptr s, t, xpow; - mp_limb_t new_denom, old_denom, c; + nn_ptr s, t, xpow; + ulong new_denom, old_denom, c; slong power, k, m; TMP_INIT; diff --git a/src/arb/can_round_mpfr.c b/src/arb/can_round_mpfr.c index c09cc2ddb4..f960e22c43 100644 --- a/src/arb/can_round_mpfr.c +++ b/src/arb/can_round_mpfr.c @@ -12,7 +12,7 @@ #include #include "arb.h" -int mpfr_round_p(mp_srcptr, mp_size_t, mpfr_exp_t, mpfr_prec_t); +int mpfr_round_p(nn_srcptr, slong, mpfr_exp_t, mpfr_prec_t); int arb_can_round_arf(const arb_t x, slong prec, arf_rnd_t rnd) @@ -38,8 +38,8 @@ arb_can_round_mpfr(const arb_t x, slong prec, mpfr_rnd_t rnd) else { slong e, bits; - mp_size_t n; - mp_srcptr d; + slong n; + nn_srcptr d; e = _fmpz_sub_small(ARF_EXPREF(arb_midref(x)), MAG_EXPREF(arb_radref(x))); diff --git a/src/arb/const_euler.c b/src/arb/const_euler.c index 9f35a5af19..cc3c7f7540 100644 --- a/src/arb/const_euler.c +++ b/src/arb/const_euler.c @@ -463,7 +463,7 @@ arb_const_euler_eval(arb_t res, slong prec) ARB_DEF_CACHED_CONSTANT(arb_const_euler_brent_mcmillan, arb_const_euler_eval) -FLINT_DLL extern const mp_limb_t arb_hypgeom_gamma_tab_limbs[]; +FLINT_DLL extern const ulong arb_hypgeom_gamma_tab_limbs[]; void arb_const_euler(arb_t res, slong prec) @@ -471,7 +471,7 @@ arb_const_euler(arb_t res, slong prec) if (prec < ARB_HYPGEOM_GAMMA_TAB_PREC - 16) { slong exp; - mp_size_t n; + slong n; n = ARB_HYPGEOM_GAMMA_TAB_PREC / FLINT_BITS; diff --git a/src/arb/dot.c b/src/arb/dot.c index 81a646c47e..760ba15368 100644 --- a/src/arb/dot.c +++ b/src/arb/dot.c @@ -12,12 +12,12 @@ #include "arb.h" #include "mpn_extras.h" -/* We need uint64_t instead of mp_limb_t on 32-bit systems for +/* We need uint64_t instead of ulong on 32-bit systems for safe summation of 30-bit error bounds. */ #include -void mpfr_mulhigh_n(mp_ptr rp, mp_srcptr np, mp_srcptr mp, mp_size_t n); -void mpfr_sqrhigh_n(mp_ptr rp, mp_srcptr np, mp_size_t n); +void mpfr_mulhigh_n(nn_ptr rp, nn_srcptr np, nn_srcptr mp, slong n); +void mpfr_sqrhigh_n(nn_ptr rp, nn_srcptr np, slong n); /* Add ((a * b) / 2^MAG_BITS) * 2^exp into srad*2^srad_exp. Assumes that srad_exp >= exp and that overflow cannot occur. 
*/ @@ -49,7 +49,7 @@ mag_set_ui_2exp_small(mag_t z, ulong x, slong e) else { slong bits; - mp_limb_t overflow; + ulong overflow; bits = flint_clz(x); bits = FLINT_BITS - bits; @@ -139,9 +139,9 @@ add_errors(mag_t rad, uint64_t Aerr, slong Aexp, uint64_t Berr, slong Bexp, uint } static void -mulhigh(mp_ptr res, mp_srcptr xptr, mp_size_t xn, mp_srcptr yptr, mp_size_t yn, mp_size_t nn) +mulhigh(nn_ptr res, nn_srcptr xptr, slong xn, nn_srcptr yptr, slong yn, slong nn) { - mp_ptr tmp, xxx, yyy; + nn_ptr tmp, xxx, yyy; slong k; ARF_MUL_TMP_DECL; @@ -166,14 +166,14 @@ mulhigh(mp_ptr res, mp_srcptr xptr, mp_size_t xn, mp_srcptr yptr, mp_size_t yn, } void -_arb_dot_addmul_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn, - mp_srcptr xptr, mp_size_t xn, mp_srcptr yptr, mp_size_t yn, +_arb_dot_addmul_generic(nn_ptr sum, nn_ptr serr, nn_ptr tmp, slong sn, + nn_srcptr xptr, slong xn, nn_srcptr yptr, slong yn, int negative, flint_bitcnt_t shift) { slong shift_bits, shift_limbs, term_prec; - mp_limb_t cy; - mp_ptr sstart, tstart; - mp_size_t tn, nn; + ulong cy; + nn_ptr sstart, tstart; + slong tn, nn; shift_bits = shift % FLINT_BITS; shift_limbs = shift / FLINT_BITS; @@ -291,14 +291,14 @@ _arb_dot_addmul_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn, } void -_arb_dot_add_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn, - mp_srcptr xptr, mp_size_t xn, +_arb_dot_add_generic(nn_ptr sum, nn_ptr serr, nn_ptr tmp, slong sn, + nn_srcptr xptr, slong xn, int negative, flint_bitcnt_t shift) { slong shift_bits, shift_limbs, term_prec; - mp_limb_t cy, err; - mp_ptr sstart, tstart; - mp_size_t tn, nn; + ulong cy, err; + nn_ptr sstart, tstart; + slong tn, nn; shift_bits = shift % FLINT_BITS; shift_limbs = shift / FLINT_BITS; @@ -384,16 +384,16 @@ arb_dot(arb_t res, const arb_t initial, int subtract, arb_srcptr x, slong xstep, slong xexp, yexp, exp, max_exp, min_exp, sum_exp; slong xrexp, yrexp, srad_exp, max_rad_exp; int xnegative, ynegative, inexact; - mp_size_t xn, yn, sn, alloc; + slong xn, yn, sn, alloc; flint_bitcnt_t shift; arb_srcptr xi, yi; arf_srcptr xm, ym; mag_srcptr xr, yr; - mp_limb_t xtop, ytop; - mp_limb_t xrad, yrad; - mp_limb_t serr; /* Sum over arithmetic errors */ + ulong xtop, ytop; + ulong xrad, yrad; + ulong serr; /* Sum over arithmetic errors */ uint64_t srad; /* Sum over propagated errors */ - mp_ptr tmp, sum; /* Workspace */ + nn_ptr tmp, sum; /* Workspace */ ARF_ADD_TMP_DECL; /* todo: fast fma and fmma (len=2) code */ @@ -614,7 +614,7 @@ arb_dot(arb_t res, const arb_t initial, int subtract, arb_srcptr x, slong xstep, if (!arf_is_special(xm)) { - mp_srcptr xptr; + nn_srcptr xptr; xexp = ARF_EXP(xm); xn = ARF_SIZE(xm); @@ -679,7 +679,7 @@ arb_dot(arb_t res, const arb_t initial, int subtract, arb_srcptr x, slong xstep, #if 0 else if (xn == 1 && yn == 1 && sn == 2 && shift < FLINT_BITS) /* Fastest path. 
*/ { - mp_limb_t hi, lo, out; + ulong hi, lo, out; xtop = ARF_NOPTR_D(xm)[0]; ytop = ARF_NOPTR_D(ym)[0]; @@ -698,8 +698,8 @@ arb_dot(arb_t res, const arb_t initial, int subtract, arb_srcptr x, slong xstep, } else if (xn == 2 && yn == 2 && shift < FLINT_BITS && sn <= 3) { - mp_limb_t x1, x0, y1, y0; - mp_limb_t u3, u2, u1, u0; + ulong x1, x0, y1, y0; + ulong u3, u2, u1, u0; x0 = ARF_NOPTR_D(xm)[0]; x1 = ARF_NOPTR_D(xm)[1]; @@ -736,8 +736,8 @@ arb_dot(arb_t res, const arb_t initial, int subtract, arb_srcptr x, slong xstep, #endif else if (xn <= 2 && yn <= 2 && sn <= 3) { - mp_limb_t x1, x0, y1, y0; - mp_limb_t u3, u2, u1, u0; + ulong x1, x0, y1, y0; + ulong u3, u2, u1, u0; if (xn == 1 && yn == 1) { @@ -849,7 +849,7 @@ arb_dot(arb_t res, const arb_t initial, int subtract, arb_srcptr x, slong xstep, } else { - mp_srcptr xptr, yptr; + nn_srcptr xptr, yptr; xptr = (xn <= ARF_NOPTR_LIMBS) ? ARF_NOPTR_D(xm) : ARF_PTR_D(xm); yptr = (yn <= ARF_NOPTR_LIMBS) ? ARF_NOPTR_D(ym) : ARF_PTR_D(ym); @@ -924,7 +924,7 @@ arb_dot(arb_t res, const arb_t initial, int subtract, arb_srcptr x, slong xstep, if (sum[sn - 1] == 0) { slong sum_exp2; - mp_size_t sn2; + slong sn2; sn2 = sn; sum_exp2 = sum_exp; diff --git a/src/arb/dot_fmpz.c b/src/arb/dot_fmpz.c index e099a0423a..3813c95fe0 100644 --- a/src/arb/dot_fmpz.c +++ b/src/arb/dot_fmpz.c @@ -16,7 +16,7 @@ arb_dot_fmpz(arb_t res, const arb_t initial, int subtract, arb_srcptr x, slong x { arb_ptr t; slong i, ssize, size, tmp_size; - mp_ptr ztmp; + nn_ptr ztmp; fmpz v; ulong av, al; unsigned int bc; @@ -119,7 +119,7 @@ arb_dot_fmpz(arb_t res, const arb_t initial, int subtract, arb_srcptr x, slong x if (tmp_size != 0) { - ztmp = TMP_ALLOC(sizeof(mp_limb_t) * tmp_size); + ztmp = TMP_ALLOC(sizeof(ulong) * tmp_size); for (i = 0; i < len; i++) { diff --git a/src/arb/euler_number_ui.c b/src/arb/euler_number_ui.c index 2c36f89249..a98779355a 100644 --- a/src/arb/euler_number_ui.c +++ b/src/arb/euler_number_ui.c @@ -428,8 +428,8 @@ typedef struct { ulong n; const unsigned int * divtab; - mp_ptr primes; - mp_ptr residues; + nn_ptr primes; + nn_ptr residues; } mod_p_param_t; @@ -446,7 +446,7 @@ mod_p_worker(slong i, void * param) /* todo: optimize basecase and move to flint */ void -_arb_tree_crt(fmpz_t r, fmpz_t m, mp_srcptr residues, mp_srcptr primes, slong len); +_arb_tree_crt(fmpz_t r, fmpz_t m, nn_srcptr residues, nn_srcptr primes, slong len); void arb_fmpz_euler_number_ui_multi_mod(fmpz_t num, ulong n, double alpha) @@ -454,7 +454,7 @@ arb_fmpz_euler_number_ui_multi_mod(fmpz_t num, ulong n, double alpha) n_primes_t prime_iter; slong i, bits, mod_bits, zeta_bits, num_primes; ulong p; - mp_ptr primes, residues; + nn_ptr primes, residues; mag_t primes_product; unsigned int * divtab_odd; fmpz_t M; @@ -519,8 +519,8 @@ arb_fmpz_euler_number_ui_multi_mod(fmpz_t num, ulong n, double alpha) printf("\nn = %lu, bits = %lu, num_primes = %ld\n", n, bits, num_primes); #endif - primes = flint_malloc(sizeof(mp_limb_t) * num_primes); - residues = flint_malloc(sizeof(mp_limb_t) * num_primes); + primes = flint_malloc(sizeof(ulong) * num_primes); + residues = flint_malloc(sizeof(ulong) * num_primes); p = 5; n_primes_jump_after(prime_iter, 5); diff --git a/src/arb/exp_arf.c b/src/arb/exp_arf.c index c8387ca593..dd28309c11 100644 --- a/src/arb/exp_arf.c +++ b/src/arb/exp_arf.c @@ -13,7 +13,7 @@ #include "thread_support.h" #include "mpn_extras.h" -#define TMP_ALLOC_LIMBS(__n) TMP_ALLOC((__n) * sizeof(mp_limb_t)) +#define TMP_ALLOC_LIMBS(__n) TMP_ALLOC((__n) * sizeof(ulong)) void 
arb_exp_arf_huge(arb_t z, const arf_t x, slong mag, slong prec, int minus_one) @@ -180,8 +180,8 @@ arb_exp_arf(arb_t z, const arf_t x, slong prec, int minus_one, slong maglim) { slong exp, wp, wn, N, r, wprounded, finaln; fmpz_t n; - mp_ptr tmp, w, t, u, finalvalue; - mp_limb_t p1, q1bits, p2, q2bits, error, error2; + nn_ptr tmp, w, t, u, finalvalue; + ulong p1, q1bits, p2, q2bits, error, error2; int negative, inexact; TMP_INIT; diff --git a/src/arb/exp_arf_rs_generic.c b/src/arb/exp_arf_rs_generic.c index 9e1a41a15e..44556f6fe7 100644 --- a/src/arb/exp_arf_rs_generic.c +++ b/src/arb/exp_arf_rs_generic.c @@ -44,7 +44,7 @@ arb_exp_taylor_sum_rs_generic(arb_t res, const arb_t x, slong N, slong prec) { arb_ptr tpow; slong j, k, m, M, tp, xmag; - mp_limb_t c, d, chi, clo; + ulong c, d, chi, clo; xmag = arf_abs_bound_lt_2exp_si(arb_midref(x)); diff --git a/src/arb/exp_tab.c b/src/arb/exp_tab.c index 66ffc53d68..15cb01506e 100644 --- a/src/arb/exp_tab.c +++ b/src/arb/exp_tab.c @@ -19,7 +19,7 @@ #define Z8(a,b,c,d,e,f,g,h) Z2(a,b), Z2(c,d), Z2(e,f), Z2(g,h), -const mp_limb_t arb_exp_tab1[ARB_EXP_TAB1_NUM][ARB_EXP_TAB1_LIMBS] = +const ulong arb_exp_tab1[ARB_EXP_TAB1_NUM][ARB_EXP_TAB1_LIMBS] = {{ Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,80000000) @@ -556,7 +556,7 @@ const mp_limb_t arb_exp_tab1[ARB_EXP_TAB1_NUM][ARB_EXP_TAB1_LIMBS] = Z8(408864d5,d5780f49,ebee8587,e45235ee,ce1767a0,47c599e3,152a029f,ff8e0171) }}; -const mp_limb_t arb_exp_tab21[ARB_EXP_TAB21_NUM][ARB_EXP_TAB2_LIMBS] = +const ulong arb_exp_tab21[ARB_EXP_TAB21_NUM][ARB_EXP_TAB2_LIMBS] = {{ Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) @@ -996,7 +996,7 @@ const mp_limb_t arb_exp_tab21[ARB_EXP_TAB21_NUM][ARB_EXP_TAB2_LIMBS] = Z8(4749b4fd,d2a0a1dd,ad8263b1,12708a7a,32cb9121,61db6846,17c644e9,fe8ef30c) }}; -const mp_limb_t arb_exp_tab22[ARB_EXP_TAB22_NUM][ARB_EXP_TAB2_LIMBS] = +const ulong arb_exp_tab22[ARB_EXP_TAB22_NUM][ARB_EXP_TAB2_LIMBS] = {{ Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) diff --git a/src/arb/exp_taylor_naive.c b/src/arb/exp_taylor_naive.c index 96f7149dc9..a6de99d733 100644 --- a/src/arb/exp_taylor_naive.c +++ b/src/arb/exp_taylor_naive.c @@ -13,12 +13,12 @@ #include "mpn_extras.h" void -_arb_exp_taylor_naive(mp_ptr y, mp_limb_t * error, - mp_srcptr x, mp_size_t xn, ulong N) +_arb_exp_taylor_naive(nn_ptr y, ulong * error, + nn_srcptr x, slong xn, ulong N) { ulong k; - mp_ptr s, t, u, v; - mp_size_t nn = xn + 1; + nn_ptr s, t, u, v; + slong nn = xn + 1; if (N == 0) { @@ -27,10 +27,10 @@ _arb_exp_taylor_naive(mp_ptr y, mp_limb_t * error, return; } - s = flint_malloc(sizeof(mp_limb_t) * (nn + 1)); - t = flint_malloc(sizeof(mp_limb_t) * nn); - v = flint_malloc(sizeof(mp_limb_t) * nn); - u = flint_malloc(sizeof(mp_limb_t) * 2 * nn); + s = flint_malloc(sizeof(ulong) * (nn + 1)); + t = flint_malloc(sizeof(ulong) * nn); + v = flint_malloc(sizeof(ulong) * nn); + u = flint_malloc(sizeof(ulong) * 2 * nn); /* s = 1 */ flint_mpn_zero(s, nn); diff --git a/src/arb/exp_taylor_rs.c b/src/arb/exp_taylor_rs.c index 7928ee85aa..2dbc7f58f8 100644 --- a/src/arb/exp_taylor_rs.c +++ b/src/arb/exp_taylor_rs.c @@ -15,13 +15,13 @@ /* See verify_taylor.py for code to generate tables and proof of correctness */ -#define 
TMP_ALLOC_LIMBS(size) TMP_ALLOC((size) * sizeof(mp_limb_t)) +#define TMP_ALLOC_LIMBS(size) TMP_ALLOC((size) * sizeof(ulong)) #define FACTORIAL_TAB_SIZE 288 #if FLINT_BITS == 64 -const mp_limb_t factorial_tab_numer[FACTORIAL_TAB_SIZE] = { +const ulong factorial_tab_numer[FACTORIAL_TAB_SIZE] = { UWORD(2432902008176640000), UWORD(2432902008176640000), UWORD(1216451004088320000), @@ -312,7 +312,7 @@ const mp_limb_t factorial_tab_numer[FACTORIAL_TAB_SIZE] = { UWORD(1), }; -const mp_limb_t factorial_tab_denom[FACTORIAL_TAB_SIZE] = { +const ulong factorial_tab_denom[FACTORIAL_TAB_SIZE] = { UWORD(2432902008176640000), UWORD(2432902008176640000), UWORD(2432902008176640000), @@ -605,7 +605,7 @@ const mp_limb_t factorial_tab_denom[FACTORIAL_TAB_SIZE] = { #else -const mp_limb_t factorial_tab_numer[FACTORIAL_TAB_SIZE] = { +const ulong factorial_tab_numer[FACTORIAL_TAB_SIZE] = { UWORD(479001600), UWORD(479001600), UWORD(239500800), @@ -896,7 +896,7 @@ const mp_limb_t factorial_tab_numer[FACTORIAL_TAB_SIZE] = { UWORD(288), }; -const mp_limb_t factorial_tab_denom[FACTORIAL_TAB_SIZE] = { +const ulong factorial_tab_denom[FACTORIAL_TAB_SIZE] = { UWORD(479001600), UWORD(479001600), UWORD(479001600), @@ -1189,11 +1189,11 @@ const mp_limb_t factorial_tab_denom[FACTORIAL_TAB_SIZE] = { #endif -void _arb_exp_taylor_rs(mp_ptr y, mp_limb_t * error, - mp_srcptr x, mp_size_t xn, ulong N) +void _arb_exp_taylor_rs(nn_ptr y, ulong * error, + nn_srcptr x, slong xn, ulong N) { - mp_ptr s, t, xpow; - mp_limb_t new_denom, old_denom, c; + nn_ptr s, t, xpow; + ulong new_denom, old_denom, c; slong power, k, m; TMP_INIT; diff --git a/src/arb/get_mag_lower.c b/src/arb/get_mag_lower.c index e840e86282..0fc8d9b49d 100644 --- a/src/arb/get_mag_lower.c +++ b/src/arb/get_mag_lower.c @@ -42,7 +42,7 @@ _arb_get_mag_lower(mag_t z, const arf_t mid, const mag_t rad) } else { - mp_limb_t m, xm, rm; + ulong m, xm, rm; ARF_GET_TOP_LIMB(xm, mid); xm = xm >> (FLINT_BITS - MAG_BITS); diff --git a/src/arb/get_mag_lower_nonnegative.c b/src/arb/get_mag_lower_nonnegative.c index 5a91097b47..446eafb82b 100644 --- a/src/arb/get_mag_lower_nonnegative.c +++ b/src/arb/get_mag_lower_nonnegative.c @@ -46,7 +46,7 @@ _arb_get_mag_lower_nonnegative(mag_t z, const arf_t mid, const mag_t rad) } else { - mp_limb_t m, xm, rm; + ulong m, xm, rm; ARF_GET_TOP_LIMB(xm, mid); xm = xm >> (FLINT_BITS - MAG_BITS); diff --git a/src/arb/get_mpn_fixed_mod_log2.c b/src/arb/get_mpn_fixed_mod_log2.c index 9320640662..4c37e69c6e 100644 --- a/src/arb/get_mpn_fixed_mod_log2.c +++ b/src/arb/get_mpn_fixed_mod_log2.c @@ -12,7 +12,7 @@ #include "mpn_extras.h" #include "arb.h" -#define TMP_ALLOC_LIMBS(__n) TMP_ALLOC((__n) * sizeof(mp_limb_t)) +#define TMP_ALLOC_LIMBS(__n) TMP_ALLOC((__n) * sizeof(ulong)) /* Compute wn-limb fixed-point number w, a number of ulps error, and @@ -65,11 +65,11 @@ for a total of 3 ulp. 
int -_arb_get_mpn_fixed_mod_log2(mp_ptr w, fmpz_t q, mp_limb_t * error, - const arf_t x, mp_size_t wn) +_arb_get_mpn_fixed_mod_log2(nn_ptr w, fmpz_t q, ulong * error, + const arf_t x, slong wn) { - mp_srcptr xp; - mp_size_t xn; + nn_srcptr xp; + slong xn; int negative; slong exp; @@ -102,9 +102,9 @@ _arb_get_mpn_fixed_mod_log2(mp_ptr w, fmpz_t q, mp_limb_t * error, } else { - mp_ptr qp, rp, np; - mp_srcptr dp; - mp_size_t qn, rn, nn, dn, tn, alloc; + nn_ptr qp, rp, np; + nn_srcptr dp; + slong qn, rn, nn, dn, tn, alloc; TMP_INIT; tn = ((exp + 2) + FLINT_BITS - 1) / FLINT_BITS; diff --git a/src/arb/get_mpn_fixed_mod_pi4.c b/src/arb/get_mpn_fixed_mod_pi4.c index b5efbb61fa..6fb9ff8679 100644 --- a/src/arb/get_mpn_fixed_mod_pi4.c +++ b/src/arb/get_mpn_fixed_mod_pi4.c @@ -12,14 +12,14 @@ #include "mpn_extras.h" #include "arb.h" -#define TMP_ALLOC_LIMBS(__n) TMP_ALLOC((__n) * sizeof(mp_limb_t)) +#define TMP_ALLOC_LIMBS(__n) TMP_ALLOC((__n) * sizeof(ulong)) int -_arb_get_mpn_fixed_mod_pi4(mp_ptr w, fmpz_t q, int * octant, - mp_limb_t * error, const arf_t x, mp_size_t wn) +_arb_get_mpn_fixed_mod_pi4(nn_ptr w, fmpz_t q, int * octant, + ulong * error, const arf_t x, slong wn) { - mp_srcptr xp; - mp_size_t xn; + nn_srcptr xp; + slong xn; slong exp; ARF_GET_MPN_READONLY(xp, xn, x); @@ -36,7 +36,7 @@ _arb_get_mpn_fixed_mod_pi4(mp_ptr w, fmpz_t q, int * octant, } else if (exp == 0) { - mp_srcptr dp; + nn_srcptr dp; if (wn > ARB_PI4_TAB_LIMBS) return 0; @@ -67,9 +67,9 @@ _arb_get_mpn_fixed_mod_pi4(mp_ptr w, fmpz_t q, int * octant, } else { - mp_ptr qp, rp, np; - mp_srcptr dp; - mp_size_t qn, rn, nn, dn, tn, alloc; + nn_ptr qp, rp, np; + nn_srcptr dp; + slong qn, rn, nn, dn, tn, alloc; TMP_INIT; tn = ((exp + 2) + FLINT_BITS - 1) / FLINT_BITS; diff --git a/src/arb/log_arf.c b/src/arb/log_arf.c index e163a5aeb3..e329e8d7f2 100644 --- a/src/arb/log_arf.c +++ b/src/arb/log_arf.c @@ -14,7 +14,7 @@ int _arb_log_ui_smooth(arb_t res, ulong n, slong prec); -#define TMP_ALLOC_LIMBS(size) TMP_ALLOC((size) * sizeof(mp_limb_t)) +#define TMP_ALLOC_LIMBS(size) TMP_ALLOC((size) * sizeof(ulong)) #if 0 /* requires x != 1 */ @@ -22,18 +22,18 @@ static void arf_log_via_mpfr(arf_t z, const arf_t x, slong prec, arf_rnd_t rnd) { mpfr_t xf, zf; - mp_ptr zptr, tmp; - mp_srcptr xptr; - mp_size_t xn, zn, val; + nn_ptr zptr, tmp; + nn_srcptr xptr; + slong xn, zn, val; TMP_INIT; TMP_START; zn = (prec + FLINT_BITS - 1) / FLINT_BITS; - tmp = TMP_ALLOC(zn * sizeof(mp_limb_t)); + tmp = TMP_ALLOC(zn * sizeof(ulong)); ARF_GET_MPN_READONLY(xptr, xn, x); - xf->_mpfr_d = (mp_ptr) xptr; + xf->_mpfr_d = (nn_ptr) xptr; xf->_mpfr_prec = xn * FLINT_BITS; xf->_mpfr_sign = ARF_SGNBIT(x) ? 
-1 : 1; xf->_mpfr_exp = ARF_EXP(x); @@ -127,10 +127,10 @@ arb_log_arf(arb_t z, const arf_t x, slong prec) else { slong exp, wp, wn, N, r, closeness_to_one; - mp_srcptr xp; - mp_size_t xn, tn; - mp_ptr tmp, w, t, u; - mp_limb_t p1, q1bits, p2, q2bits, error, error2, cy; + nn_srcptr xp; + slong xn, tn; + nn_ptr tmp, w, t, u; + ulong p1, q1bits, p2, q2bits, error, error2, cy; int negative, inexact, used_taylor_series; TMP_INIT; diff --git a/src/arb/log_base_ui.c b/src/arb/log_base_ui.c index d2607b07f9..5392fe2401 100644 --- a/src/arb/log_base_ui.c +++ b/src/arb/log_base_ui.c @@ -23,8 +23,8 @@ static double _arf_get_mantissa_d(const arf_t x) { - mp_srcptr xp; - mp_size_t xn; + nn_srcptr xp; + slong xn; ARF_GET_MPN_READONLY(xp, xn, x); if (xn == 1) diff --git a/src/arb/log_hypot.c b/src/arb/log_hypot.c index 7caaa53980..e8398dd7ba 100644 --- a/src/arb/log_hypot.c +++ b/src/arb/log_hypot.c @@ -32,7 +32,7 @@ arb_log_abs(arb_t res, const arb_t a, slong prec) static int arf_close_to_one(const arf_t z) { - mp_limb_t top; + ulong top; if (ARF_EXP(z) == 0) { diff --git a/src/arb/log_primes.c b/src/arb/log_primes.c index 6aa96d6191..341212cd01 100644 --- a/src/arb/log_primes.c +++ b/src/arb/log_primes.c @@ -508,7 +508,7 @@ void _arb_log_p_ensure_cached(slong prec) for (i = 0; i < ARB_LOG_PRIME_CACHE_NUM; i++) { slong exp, exp_fix; - mp_size_t n; + slong n; arb_ptr res = _arb_log_p_cache + i; n = ARB_LOG_TAB2_PREC / FLINT_BITS; @@ -729,7 +729,7 @@ void _arb_atan_gauss_p_ensure_cached(slong prec) for (i = 0; i < ARB_ATAN_GAUSS_PRIME_CACHE_NUM; i++) { slong exp, exp_fix; - mp_size_t n; + slong n; static const char exponents[24] = {0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1}; arb_ptr res = _arb_atan_gauss_p_cache + i; diff --git a/src/arb/log_tab.c b/src/arb/log_tab.c index 74f7a21117..2022624ceb 100644 --- a/src/arb/log_tab.c +++ b/src/arb/log_tab.c @@ -20,7 +20,7 @@ #define Z8(a,b,c,d,e,f,g,h) Z2(a,b), Z2(c,d), Z2(e,f), Z2(g,h), /* logarithms of primes */ -const mp_limb_t arb_log_p_tab[ARB_LOG_PRIME_CACHE_NUM][ARB_LOG_TAB2_LIMBS] = +const ulong arb_log_p_tab[ARB_LOG_PRIME_CACHE_NUM][ARB_LOG_TAB2_LIMBS] = {{ Z8(78b63c9f,897a39ce,1e238438,52ab3316,a6c4c60c,062b1a63,e8f70edd,3ea8449f) Z8(26fac51c,6425a415,f95884e0,c5e5767d,8a0e23fa,c0b1b31d,3a49bd0d,85db6ab0) @@ -270,9 +270,9 @@ const mp_limb_t arb_log_p_tab[ARB_LOG_PRIME_CACHE_NUM][ARB_LOG_TAB2_LIMBS] = Z8(c8db7021,b2283a67,7bca0417,a1462505,cf59195f,66ffd699,75626d5d,edab2a2c) }}; -const mp_srcptr arb_log_log2_tab = arb_log_p_tab[0]; +const nn_srcptr arb_log_log2_tab = arb_log_p_tab[0]; -const mp_limb_t arb_log_tab11[1 << ARB_LOG_TAB11_BITS][ARB_LOG_TAB1_LIMBS] = +const ulong arb_log_tab11[1 << ARB_LOG_TAB11_BITS][ARB_LOG_TAB1_LIMBS] = {{ Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) @@ -659,7 +659,7 @@ const mp_limb_t arb_log_tab11[1 << ARB_LOG_TAB11_BITS][ARB_LOG_TAB1_LIMBS] = Z8(c1549af0,96012a8e,ccaabf0a,17ce375a,76554335,e44ad05a,3c46c653,b07197a2) }}; -const mp_limb_t arb_log_tab12[1 << ARB_LOG_TAB12_BITS][ARB_LOG_TAB1_LIMBS] = +const ulong arb_log_tab12[1 << ARB_LOG_TAB12_BITS][ARB_LOG_TAB1_LIMBS] = {{ Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) @@ -1046,7 +1046,7 @@ const mp_limb_t arb_log_tab12[1 << ARB_LOG_TAB12_BITS][ARB_LOG_TAB1_LIMBS] = 
Z8(fa5d1c6e,62b187fd,6bac0a8b,70ef0e95,ca0d270c,e88d71b2,f0531d8a,01fa0a8e) }}; -const mp_limb_t arb_log_tab21[1 << ARB_LOG_TAB21_BITS][ARB_LOG_TAB2_LIMBS] = +const ulong arb_log_tab21[1 << ARB_LOG_TAB21_BITS][ARB_LOG_TAB2_LIMBS] = {{ Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) @@ -1657,7 +1657,7 @@ const mp_limb_t arb_log_tab21[1 << ARB_LOG_TAB21_BITS][ARB_LOG_TAB2_LIMBS] = Z8(5b324a78,7753b3c8,8161ccf7,839e0457,1cd40845,4d552f81,acf967d9,ad6a0261) }}; -const mp_limb_t arb_log_tab22[1 << ARB_LOG_TAB22_BITS][ARB_LOG_TAB2_LIMBS] = +const ulong arb_log_tab22[1 << ARB_LOG_TAB22_BITS][ARB_LOG_TAB2_LIMBS] = {{ Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) diff --git a/src/arb/primorial.c b/src/arb/primorial.c index b6f87d5850..7034f8e62f 100644 --- a/src/arb/primorial.c +++ b/src/arb/primorial.c @@ -18,10 +18,10 @@ static int basecase(arb_t res, n_primes_t primes, ulong a, ulong b, ulong nmax, slong prec) { ulong n, p, pp; - mp_limb_t prod[NUM_BASECASE]; - mp_limb_t top; - mp_size_t nlimbs; - mp_limb_t hi, lo; + ulong prod[NUM_BASECASE]; + ulong top; + slong nlimbs; + ulong hi, lo; int inexact, more; slong shift; diff --git a/src/arb/set.c b/src/arb/set.c index 18b466117a..d1333d6c76 100644 --- a/src/arb/set.c +++ b/src/arb/set.c @@ -29,9 +29,9 @@ _arf_set_inline(arf_t y, const arf_t x) } else { - mp_ptr yptr; - mp_srcptr xptr; - mp_size_t n; + nn_ptr yptr; + nn_srcptr xptr; + slong n; ARF_GET_MPN_READONLY(xptr, n, x); ARF_GET_MPN_WRITE(yptr, n, y); diff --git a/src/arb/set_interval.c b/src/arb/set_interval.c index eace0e2cf5..f7fffb5476 100644 --- a/src/arb/set_interval.c +++ b/src/arb/set_interval.c @@ -67,7 +67,7 @@ arb_set_interval_mag(arb_t res, const mag_t a, const mag_t b, slong prec) if (MAG_IS_LAGOM(a) && MAG_IS_LAGOM(b)) { slong aexp, bexp; - mp_limb_t aman, bman, mman, rman, tmp; + ulong aman, bman, mman, rman, tmp; aman = MAG_MAN(a); bman = MAG_MAN(b); @@ -170,7 +170,7 @@ arb_set_interval_neg_pos_mag(arb_t res, const mag_t a, const mag_t b, slong prec if (MAG_IS_LAGOM(a) && MAG_IS_LAGOM(b)) { slong aexp, bexp, mexp, shift; - mp_limb_t aman, bman, mman, rman, tmp; + ulong aman, bman, mman, rman, tmp; int negative; aman = MAG_MAN(a); diff --git a/src/arb/sin_cos.c b/src/arb/sin_cos.c index 44b5f3729b..0d5c1e0f82 100644 --- a/src/arb/sin_cos.c +++ b/src/arb/sin_cos.c @@ -12,7 +12,7 @@ #include "mpn_extras.h" #include "arb.h" -#define TMP_ALLOC_LIMBS(__n) TMP_ALLOC((__n) * sizeof(mp_limb_t)) +#define TMP_ALLOC_LIMBS(__n) TMP_ALLOC((__n) * sizeof(ulong)) #define MAGLIM(prec) FLINT_MAX(65536, (4*prec)) static void @@ -78,9 +78,9 @@ _arb_sin_cos(arb_t zsin, arb_t zcos, const arf_t x, const mag_t xrad, slong prec { int want_sin, want_cos; slong radexp, exp, wp, wn, N, r, wprounded, maglim, orig_prec; - mp_ptr tmp, w, sina, cosa, sinb, cosb, ta, tb; - mp_ptr sinptr, cosptr; - mp_limb_t p1, q1bits, p2, q2bits, error, error2, p1_tab1, radman; + nn_ptr tmp, w, sina, cosa, sinb, cosb, ta, tb; + nn_ptr sinptr, cosptr; + ulong p1, q1bits, p2, q2bits, error, error2, p1_tab1, radman; int negative, inexact, octant; int sinnegative, cosnegative, swapsincos; TMP_INIT; @@ -300,7 +300,7 @@ _arb_sin_cos(arb_t zsin, arb_t zcos, const arf_t x, const mag_t xrad, slong prec } else if (p1 == 0 || p2 == 0) /* only one table lookup */ { - mp_srcptr sinc, cosc; + nn_srcptr sinc, cosc; if (wp <= 
ARB_SIN_COS_TAB1_PREC) /* must be in table 1 */ { @@ -339,7 +339,7 @@ _arb_sin_cos(arb_t zsin, arb_t zcos, const arf_t x, const mag_t xrad, slong prec } else /* two table lookups, must be in table 2 */ { - mp_srcptr sinc, cosc, sind, cosd; + nn_srcptr sinc, cosc, sind, cosd; sinc = arb_sin_cos_tab21[2 * p1] + ARB_SIN_COS_TAB2_LIMBS - wn; cosc = arb_sin_cos_tab21[2 * p1 + 1] + ARB_SIN_COS_TAB2_LIMBS - wn; @@ -380,7 +380,7 @@ _arb_sin_cos(arb_t zsin, arb_t zcos, const arf_t x, const mag_t xrad, slong prec if (swapsincos) { - mp_ptr tmptr = sinptr; + nn_ptr tmptr = sinptr; sinptr = cosptr; cosptr = tmptr; } @@ -414,7 +414,7 @@ _arb_sin_cos(arb_t zsin, arb_t zcos, const arf_t x, const mag_t xrad, slong prec else { mag_t sin_err, cos_err, quadratic, comp_err, xrad_copy; - mp_limb_t A_sin, A_cos, A_exp; + ulong A_sin, A_cos, A_exp; /* Copy xrad to support aliasing (note: the exponent has also been clamped earlier). */ @@ -451,7 +451,7 @@ _arb_sin_cos(arb_t zsin, arb_t zcos, const arf_t x, const mag_t xrad, slong prec A_exp = -ARB_SIN_COS_TAB1_BITS; if (swapsincos) { - mp_limb_t tt = A_sin; + ulong tt = A_sin; A_sin = A_cos; A_cos = tt; } diff --git a/src/arb/sin_cos_arf_generic.c b/src/arb/sin_cos_arf_generic.c index ff0514ee99..6dc68c5bae 100644 --- a/src/arb/sin_cos_arf_generic.c +++ b/src/arb/sin_cos_arf_generic.c @@ -60,7 +60,7 @@ arb_sin_cos_taylor_sum_rs(arb_t s, const arb_t x, slong N, int cosine, slong pre { arb_ptr tpow; slong j, k, m, M, tp, xmag; - mp_limb_t c, d, chi, clo; + ulong c, d, chi, clo; xmag = arf_abs_bound_lt_2exp_si(arb_midref(x)); diff --git a/src/arb/sin_cos_tab.c b/src/arb/sin_cos_tab.c index f7139cb0be..ce1f32bd19 100644 --- a/src/arb/sin_cos_tab.c +++ b/src/arb/sin_cos_tab.c @@ -19,7 +19,7 @@ #define Z8(a,b,c,d,e,f,g,h) Z2(a,b), Z2(c,d), Z2(e,f), Z2(g,h), -const mp_limb_t arb_pi4_tab[ARB_PI4_TAB_LIMBS] = { +const ulong arb_pi4_tab[ARB_PI4_TAB_LIMBS] = { Z8(e6cc254b,db7f1447,ced4bb1b,44ce6cba,cf9b14ed,da3edbeb,865a8918,179727b0) Z8(9027d831,b06a53ed,413001ae,e5db382f,ad9e530e,f8ff9406,3dba37bd,c9751e76) Z8(602646de,c1d4dcb2,d27c7026,36c3fab4,34028492,4df435c9,90a6c08f,86ffb7dc) @@ -40,7 +40,7 @@ const mp_limb_t arb_pi4_tab[ARB_PI4_TAB_LIMBS] = { Z8(3b139b22,020bbea6,8a67cc74,29024e08,80dc1cd1,c4c6628b,2168c234,c90fdaa2) }; -const mp_limb_t arb_sin_cos_tab1[2 * ARB_SIN_COS_TAB1_NUM][ARB_SIN_COS_TAB1_LIMBS] = +const ulong arb_sin_cos_tab1[2 * ARB_SIN_COS_TAB1_NUM][ARB_SIN_COS_TAB1_LIMBS] = {{ Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) @@ -1261,7 +1261,7 @@ const mp_limb_t arb_sin_cos_tab1[2 * ARB_SIN_COS_TAB1_NUM][ARB_SIN_COS_TAB1_LIMB Z8(85e8e950,37e63acb,5835239a,8cd14b17,ec8f22a6,dca4cf40,5b1294ca,b45ad497) }}; -const mp_limb_t arb_sin_cos_tab21[2 * ARB_SIN_COS_TAB21_NUM][ARB_SIN_COS_TAB2_LIMBS] = +const ulong arb_sin_cos_tab21[2 * ARB_SIN_COS_TAB21_NUM][ARB_SIN_COS_TAB2_LIMBS] = {{ Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) @@ -2252,7 +2252,7 @@ const mp_limb_t arb_sin_cos_tab21[2 * ARB_SIN_COS_TAB21_NUM][ARB_SIN_COS_TAB2_LI Z8(6d6b90e8,9d9c53ee,e74fe751,3e73b6e5,4b1a498d,ac786ccf,f7dae915,b5c4c7d4) }}; -const mp_limb_t arb_sin_cos_tab22[2 * ARB_SIN_COS_TAB22_NUM][ARB_SIN_COS_TAB2_LIMBS] = +const ulong arb_sin_cos_tab22[2 * ARB_SIN_COS_TAB22_NUM][ARB_SIN_COS_TAB2_LIMBS] = {{ Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) 
Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) diff --git a/src/arb/sin_cos_taylor_naive.c b/src/arb/sin_cos_taylor_naive.c index 2a1e88707a..71261a2d87 100644 --- a/src/arb/sin_cos_taylor_naive.c +++ b/src/arb/sin_cos_taylor_naive.c @@ -13,12 +13,12 @@ #include "mpn_extras.h" void -_arb_sin_cos_taylor_naive(mp_ptr ysin, mp_ptr ycos, mp_limb_t * error, - mp_srcptr x, mp_size_t xn, ulong N) +_arb_sin_cos_taylor_naive(nn_ptr ysin, nn_ptr ycos, ulong * error, + nn_srcptr x, slong xn, ulong N) { ulong k; - mp_ptr s, s2, t, u, v; - mp_size_t nn = xn + 1; + nn_ptr s, s2, t, u, v; + slong nn = xn + 1; if (N == 0) { @@ -28,11 +28,11 @@ _arb_sin_cos_taylor_naive(mp_ptr ysin, mp_ptr ycos, mp_limb_t * error, return; } - s = flint_malloc(sizeof(mp_limb_t) * (nn + 1)); - s2 = flint_malloc(sizeof(mp_limb_t) * (nn + 1)); - t = flint_malloc(sizeof(mp_limb_t) * nn); - v = flint_malloc(sizeof(mp_limb_t) * nn); - u = flint_malloc(sizeof(mp_limb_t) * 2 * nn); + s = flint_malloc(sizeof(ulong) * (nn + 1)); + s2 = flint_malloc(sizeof(ulong) * (nn + 1)); + t = flint_malloc(sizeof(ulong) * nn); + v = flint_malloc(sizeof(ulong) * nn); + u = flint_malloc(sizeof(ulong) * 2 * nn); /* s = 1 */ flint_mpn_zero(s, nn); diff --git a/src/arb/sin_cos_taylor_rs.c b/src/arb/sin_cos_taylor_rs.c index a040df2109..43e7afda51 100644 --- a/src/arb/sin_cos_taylor_rs.c +++ b/src/arb/sin_cos_taylor_rs.c @@ -15,19 +15,19 @@ /* See verify_taylor.py for code to generate tables and proof of correctness */ -#define TMP_ALLOC_LIMBS(size) TMP_ALLOC((size) * sizeof(mp_limb_t)) +#define TMP_ALLOC_LIMBS(size) TMP_ALLOC((size) * sizeof(ulong)) #define FACTORIAL_TAB_SIZE 288 -FLINT_DLL extern const mp_limb_t factorial_tab_numer[FACTORIAL_TAB_SIZE]; -FLINT_DLL extern const mp_limb_t factorial_tab_denom[FACTORIAL_TAB_SIZE]; +FLINT_DLL extern const ulong factorial_tab_numer[FACTORIAL_TAB_SIZE]; +FLINT_DLL extern const ulong factorial_tab_denom[FACTORIAL_TAB_SIZE]; -void _arb_sin_cos_taylor_rs(mp_ptr ysin, mp_ptr ycos, - mp_limb_t * error, mp_srcptr x, mp_size_t xn, ulong N, +void _arb_sin_cos_taylor_rs(nn_ptr ysin, nn_ptr ycos, + ulong * error, nn_srcptr x, slong xn, ulong N, int sinonly, int alternating) { - mp_ptr s, t, xpow; - mp_limb_t new_denom, old_denom, c; + nn_ptr s, t, xpow; + ulong new_denom, old_denom, c; slong power, k, m; int cosorsin; diff --git a/src/arb/test/main.c b/src/arb/test/main.c index a025a3b641..02acf826d1 100644 --- a/src/arb/test/main.c +++ b/src/arb/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-acos.c" diff --git a/src/arb/test/t-atan_tab.c b/src/arb/test/t-atan_tab.c index 0ef787caa2..81ca6110a0 100644 --- a/src/arb/test/t-atan_tab.c +++ b/src/arb/test/t-atan_tab.c @@ -34,7 +34,7 @@ TEST_FUNCTION_START(arb_atan_tab, state) for (i = 0; i < num; i++) { - tt->_mp_d = (mp_ptr) arb_atan_tab1[i]; + tt->_mp_d = (nn_ptr) arb_atan_tab1[i]; tt->_mp_size = prec / FLINT_BITS; tt->_mp_alloc = tt->_mp_size; @@ -85,7 +85,7 @@ TEST_FUNCTION_START(arb_atan_tab, state) for (i = 0; i < num; i++) { - tt->_mp_d = (mp_ptr) arb_atan_tab21[i]; + tt->_mp_d = (nn_ptr) arb_atan_tab21[i]; tt->_mp_size = prec / FLINT_BITS; tt->_mp_alloc = tt->_mp_size; @@ -136,7 +136,7 @@ TEST_FUNCTION_START(arb_atan_tab, state) for (i = 0; i < num; i++) { - tt->_mp_d = (mp_ptr) arb_atan_tab22[i]; + tt->_mp_d = (nn_ptr) arb_atan_tab22[i]; tt->_mp_size = prec / FLINT_BITS; tt->_mp_alloc = tt->_mp_size; diff --git a/src/arb/test/t-atan_taylor_rs.c b/src/arb/test/t-atan_taylor_rs.c index 44f35b9510..efb544f0f9 100644 --- a/src/arb/test/t-atan_taylor_rs.c +++ b/src/arb/test/t-atan_taylor_rs.c @@ -19,20 +19,20 @@ TEST_FUNCTION_START(arb_atan_taylor_rs, state) for (iter = 0; iter < 100000 * 0.1 * flint_test_multiplier(); iter++) { - mp_ptr x, y1, y2, t; - mp_limb_t err1, err2; + nn_ptr x, y1, y2, t; + ulong err1, err2; ulong N; - mp_size_t xn; + slong xn; int alternating, cmp, result; N = n_randint(state, 256); alternating = n_randint(state, 2); xn = 1 + n_randint(state, 20); - x = flint_malloc(sizeof(mp_limb_t) * xn); - y1 = flint_malloc(sizeof(mp_limb_t) * xn); - y2 = flint_malloc(sizeof(mp_limb_t) * xn); - t = flint_malloc(sizeof(mp_limb_t) * xn); + x = flint_malloc(sizeof(ulong) * xn); + y1 = flint_malloc(sizeof(ulong) * xn); + y2 = flint_malloc(sizeof(ulong) * xn); + t = flint_malloc(sizeof(ulong) * xn); flint_mpn_rrandom(x, state, xn); x[xn - 1] &= (LIMB_ONES >> 4); diff --git a/src/arb/test/t-exp_tab.c b/src/arb/test/t-exp_tab.c index f5d1fa4f1f..184085070b 100644 --- a/src/arb/test/t-exp_tab.c +++ b/src/arb/test/t-exp_tab.c @@ -34,7 +34,7 @@ TEST_FUNCTION_START(arb_exp_tab, state) for (i = 0; i < num; i++) { - tt->_mp_d = (mp_ptr) arb_exp_tab1[i]; + tt->_mp_d = (nn_ptr) arb_exp_tab1[i]; tt->_mp_size = prec / FLINT_BITS; tt->_mp_alloc = tt->_mp_size; @@ -85,7 +85,7 @@ TEST_FUNCTION_START(arb_exp_tab, state) for (i = 0; i < num; i++) { - tt->_mp_d = (mp_ptr) arb_exp_tab21[i]; + tt->_mp_d = (nn_ptr) arb_exp_tab21[i]; tt->_mp_size = prec / FLINT_BITS; tt->_mp_alloc = tt->_mp_size; @@ -136,7 +136,7 @@ TEST_FUNCTION_START(arb_exp_tab, state) for (i = 0; i < num; i++) { - tt->_mp_d = (mp_ptr) arb_exp_tab22[i]; + tt->_mp_d = (nn_ptr) arb_exp_tab22[i]; tt->_mp_size = prec / FLINT_BITS; tt->_mp_alloc = tt->_mp_size; diff --git a/src/arb/test/t-exp_taylor_rs.c b/src/arb/test/t-exp_taylor_rs.c index aa8ca44487..d0b13785a2 100644 --- a/src/arb/test/t-exp_taylor_rs.c +++ b/src/arb/test/t-exp_taylor_rs.c @@ -19,19 +19,19 @@ TEST_FUNCTION_START(arb_exp_taylor_rs, state) for (iter = 0; iter < 100000 * 0.1 * flint_test_multiplier(); iter++) { - mp_ptr x, y1, y2, t; - mp_limb_t err1, err2; + nn_ptr x, y1, y2, t; + ulong err1, err2; ulong N; - mp_size_t xn; + slong xn; int cmp, result; N = n_randint(state, 288 - 1); xn = 1 + n_randint(state, 20); - x = flint_malloc(sizeof(mp_limb_t) * xn); - y1 = flint_malloc(sizeof(mp_limb_t) * (xn + 1)); - y2 = flint_malloc(sizeof(mp_limb_t) * (xn + 1)); - t = flint_malloc(sizeof(mp_limb_t) 
* (xn + 1)); + x = flint_malloc(sizeof(ulong) * xn); + y1 = flint_malloc(sizeof(ulong) * (xn + 1)); + y2 = flint_malloc(sizeof(ulong) * (xn + 1)); + t = flint_malloc(sizeof(ulong) * (xn + 1)); flint_mpn_rrandom(x, state, xn); flint_mpn_rrandom(y1, state, xn + 1); diff --git a/src/arb/test/t-get_mpn_fixed_mod_log2.c b/src/arb/test/t-get_mpn_fixed_mod_log2.c index 2035bd5750..535ad29b1a 100644 --- a/src/arb/test/t-get_mpn_fixed_mod_log2.c +++ b/src/arb/test/t-get_mpn_fixed_mod_log2.c @@ -20,12 +20,12 @@ TEST_FUNCTION_START(arb_get_mpn_fixed_mod_log2, state) { arf_t x; fmpz_t q; - mp_ptr w; + nn_ptr w; arb_t wb, t; - mp_size_t wn; + slong wn; slong prec, prec2; int success; - mp_limb_t error; + ulong error; prec = 2 + n_randint(state, 10000); wn = 1 + n_randint(state, 200); @@ -35,7 +35,7 @@ TEST_FUNCTION_START(arb_get_mpn_fixed_mod_log2, state) arb_init(wb); arb_init(t); fmpz_init(q); - w = flint_malloc(sizeof(mp_limb_t) * wn); + w = flint_malloc(sizeof(ulong) * wn); arf_randtest(x, state, prec, 14); diff --git a/src/arb/test/t-get_mpn_fixed_mod_pi4.c b/src/arb/test/t-get_mpn_fixed_mod_pi4.c index e44c4bac1c..6a75f871f1 100644 --- a/src/arb/test/t-get_mpn_fixed_mod_pi4.c +++ b/src/arb/test/t-get_mpn_fixed_mod_pi4.c @@ -21,12 +21,12 @@ TEST_FUNCTION_START(arb_get_mpn_fixed_mod_pi4, state) arf_t x; int octant; fmpz_t q; - mp_ptr w; + nn_ptr w; arb_t wb, t, u; - mp_size_t wn; + slong wn; slong prec, prec2; int success; - mp_limb_t error; + ulong error; prec = 2 + n_randint(state, 10000); wn = 1 + n_randint(state, 200); @@ -37,7 +37,7 @@ TEST_FUNCTION_START(arb_get_mpn_fixed_mod_pi4, state) arb_init(t); arb_init(u); fmpz_init(q); - w = flint_malloc(sizeof(mp_limb_t) * wn); + w = flint_malloc(sizeof(ulong) * wn); arf_randtest(x, state, prec, 14); diff --git a/src/arb/test/t-log_tab.c b/src/arb/test/t-log_tab.c index 207efb28c6..416b246f48 100644 --- a/src/arb/test/t-log_tab.c +++ b/src/arb/test/t-log_tab.c @@ -34,7 +34,7 @@ TEST_FUNCTION_START(arb_log_tab, state) for (i = 0; i < num; i++) { - tt->_mp_d = (mp_ptr) arb_log_tab11[i]; + tt->_mp_d = (nn_ptr) arb_log_tab11[i]; tt->_mp_size = prec / FLINT_BITS; tt->_mp_alloc = tt->_mp_size; @@ -86,7 +86,7 @@ TEST_FUNCTION_START(arb_log_tab, state) for (i = 0; i < num; i++) { - tt->_mp_d = (mp_ptr) arb_log_tab12[i]; + tt->_mp_d = (nn_ptr) arb_log_tab12[i]; tt->_mp_size = prec / FLINT_BITS; tt->_mp_alloc = tt->_mp_size; @@ -138,7 +138,7 @@ TEST_FUNCTION_START(arb_log_tab, state) for (i = 0; i < num; i++) { - tt->_mp_d = (mp_ptr) arb_log_tab21[i]; + tt->_mp_d = (nn_ptr) arb_log_tab21[i]; tt->_mp_size = prec / FLINT_BITS; tt->_mp_alloc = tt->_mp_size; @@ -190,7 +190,7 @@ TEST_FUNCTION_START(arb_log_tab, state) for (i = 0; i < num; i++) { - tt->_mp_d = (mp_ptr) arb_log_tab22[i]; + tt->_mp_d = (nn_ptr) arb_log_tab22[i]; tt->_mp_size = prec / FLINT_BITS; tt->_mp_alloc = tt->_mp_size; diff --git a/src/arb/test/t-sin_cos_tab.c b/src/arb/test/t-sin_cos_tab.c index 2b7cef7db1..535ae8469d 100644 --- a/src/arb/test/t-sin_cos_tab.c +++ b/src/arb/test/t-sin_cos_tab.c @@ -36,7 +36,7 @@ TEST_FUNCTION_START(arb_sin_cos_tab, state) { for (i = 0; i < num; i++) { - tt->_mp_d = (mp_ptr) arb_sin_cos_tab1[2 * i + which]; + tt->_mp_d = (nn_ptr) arb_sin_cos_tab1[2 * i + which]; tt->_mp_size = prec / FLINT_BITS; tt->_mp_alloc = tt->_mp_size; @@ -101,7 +101,7 @@ TEST_FUNCTION_START(arb_sin_cos_tab, state) { for (i = 0; i < num; i++) { - tt->_mp_d = (mp_ptr) arb_sin_cos_tab21[2 * i + which]; + tt->_mp_d = (nn_ptr) arb_sin_cos_tab21[2 * i + which]; tt->_mp_size = prec / FLINT_BITS; 
tt->_mp_alloc = tt->_mp_size; @@ -166,7 +166,7 @@ TEST_FUNCTION_START(arb_sin_cos_tab, state) { for (i = 0; i < num; i++) { - tt->_mp_d = (mp_ptr) arb_sin_cos_tab22[2 * i + which]; + tt->_mp_d = (nn_ptr) arb_sin_cos_tab22[2 * i + which]; tt->_mp_size = prec / FLINT_BITS; tt->_mp_alloc = tt->_mp_size; diff --git a/src/arb/test/t-sin_cos_taylor_rs.c b/src/arb/test/t-sin_cos_taylor_rs.c index 897abb15af..e5607e047a 100644 --- a/src/arb/test/t-sin_cos_taylor_rs.c +++ b/src/arb/test/t-sin_cos_taylor_rs.c @@ -19,21 +19,21 @@ TEST_FUNCTION_START(arb_sin_cos_taylor_rs, state) for (iter = 0; iter < 100000 * 0.1 * flint_test_multiplier(); iter++) { - mp_ptr x, y1s, y1c, y2s, y2c, t; - mp_limb_t err1, err2; + nn_ptr x, y1s, y1c, y2s, y2c, t; + ulong err1, err2; ulong N; - mp_size_t xn; + slong xn; int cmp, result; N = n_randint(state, 144 - 1); xn = 1 + n_randint(state, 20); - x = flint_malloc(sizeof(mp_limb_t) * xn); - y1s = flint_malloc(sizeof(mp_limb_t) * xn); - y1c = flint_malloc(sizeof(mp_limb_t) * xn); - y2s = flint_malloc(sizeof(mp_limb_t) * xn); - y2c = flint_malloc(sizeof(mp_limb_t) * xn); - t = flint_malloc(sizeof(mp_limb_t) * xn); + x = flint_malloc(sizeof(ulong) * xn); + y1s = flint_malloc(sizeof(ulong) * xn); + y1c = flint_malloc(sizeof(ulong) * xn); + y2s = flint_malloc(sizeof(ulong) * xn); + y2c = flint_malloc(sizeof(ulong) * xn); + t = flint_malloc(sizeof(ulong) * xn); flint_mpn_rrandom(x, state, xn); flint_mpn_rrandom(y1s, state, xn); diff --git a/src/arb/ui_pow_ui.c b/src/arb/ui_pow_ui.c index ebaf51defb..f80b9869ad 100644 --- a/src/arb/ui_pow_ui.c +++ b/src/arb/ui_pow_ui.c @@ -72,9 +72,9 @@ arb_ui_pow_ui(arb_t res, ulong a, ulong exp, slong prec) slong wp, aexp, awidth, trailing, wp_limbs; slong exp_fix, alloc, leading; int inexact, i, ebits; - mp_ptr yman, tmp; - mp_size_t yn; - mp_limb_t yexp_hi, yexp_lo, aman, aodd, hi, lo; + nn_ptr yman, tmp; + slong yn; + ulong yexp_hi, yexp_lo, aman, aodd, hi, lo; ARF_MUL_TMP_DECL if (exp <= 2) @@ -89,7 +89,7 @@ arb_ui_pow_ui(arb_t res, ulong a, ulong exp, slong prec) } else { - mp_limb_t hi, lo; + ulong hi, lo; umul_ppmm(hi, lo, a, a); arb_set_round_uiui(res, hi, lo, prec); } @@ -184,7 +184,7 @@ arb_ui_pow_ui(arb_t res, ulong a, ulong exp, slong prec) } else { - mp_limb_t y0, y1; + ulong y0, y1; y0 = yman[0]; y1 = yman[1]; FLINT_MPN_MUL_2X1(yman[2], yman[1], yman[0], y1, y0, aodd); @@ -241,7 +241,7 @@ arb_ui_pow_ui(arb_t res, ulong a, ulong exp, slong prec) /* note: we must have yn == 1 here if wp_limbs == 1 */ if (wp_limbs == 1) { - mp_limb_t hi, lo; + ulong hi, lo; /* y = y^2: mantissa */ umul_ppmm(hi, lo, yman[0], yman[0]); diff --git a/src/arb_calc/test/main.c b/src/arb_calc/test/main.c index 40620c7588..00dc56b43e 100644 --- a/src/arb_calc/test/main.c +++ b/src/arb_calc/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-isolate_roots.c" diff --git a/src/arb_fmpz_poly/test/main.c b/src/arb_fmpz_poly/test/main.c index 4281929c98..ba57786277 100644 --- a/src/arb_fmpz_poly/test/main.c +++ b/src/arb_fmpz_poly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-complex_roots.c" diff --git a/src/arb_fpwrap/test/main.c b/src/arb_fpwrap/test/main.c index 3459c211f3..a3b39e5e5e 100644 --- a/src/arb_fpwrap/test/main.c +++ b/src/arb_fpwrap/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-fpwrap.c" diff --git a/src/arb_hypgeom/gamma_lower_sum_rs.c b/src/arb_hypgeom/gamma_lower_sum_rs.c index 83e6ea512b..6c6a32fbbc 100644 --- a/src/arb_hypgeom/gamma_lower_sum_rs.c +++ b/src/arb_hypgeom/gamma_lower_sum_rs.c @@ -44,10 +44,10 @@ _arb_hypgeom_gamma_lower_sum_rs_1(arb_t res, ulong p, ulong q, const arb_t z, sl { slong m, j, k, jlen, jbot, wp; double dz, logdz; - mp_limb_t c, chi, clo; + ulong c, chi, clo; arb_t s; arb_ptr zpow; - mp_ptr cs; + nn_ptr cs; m = n_sqrt(N); m = FLINT_MAX(m, 2); @@ -80,7 +80,7 @@ _arb_hypgeom_gamma_lower_sum_rs_1(arb_t res, ulong p, ulong q, const arb_t z, sl arb_init(s); zpow = _arb_vec_init(m + 1); - cs = flint_malloc(sizeof(mp_limb_t) * (m + 1)); + cs = flint_malloc(sizeof(ulong) * (m + 1)); arb_mul_ui(zpow + m, z, q, prec); _arb_vec_set_powers(zpow, zpow + m, m + 1, prec); diff --git a/src/arb_hypgeom/gamma_tab.c b/src/arb_hypgeom/gamma_tab.c index 2997e55df9..bb214feaa6 100644 --- a/src/arb_hypgeom/gamma_tab.c +++ b/src/arb_hypgeom/gamma_tab.c @@ -156,7 +156,7 @@ arb_hypgeom_gamma_coeff_t arb_hypgeom_gamma_coeffs[ARB_HYPGEOM_GAMMA_TAB_NUM] = {-2904, 16095, 1, 1}, {-2911, 16096, 1, 0}, {-2919, 16097, 1, 1}, {-2927, 16098, 1, 1}, }; -const mp_limb_t arb_hypgeom_gamma_tab_limbs[] = { +const ulong arb_hypgeom_gamma_tab_limbs[] = { Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) Z8(00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000) @@ -4189,8 +4189,8 @@ _arb_hypgeom_gamma_coeff_shallow(arf_t c, mag_t err, slong i, slong prec) { slong term_limbs; slong exp, pos; - mp_size_t xn; - mp_ptr xp; + slong xn; + nn_ptr xp; int negative; term_limbs = (prec + FLINT_BITS - 1) / FLINT_BITS; @@ -4211,7 +4211,7 @@ _arb_hypgeom_gamma_coeff_shallow(arf_t c, mag_t err, slong i, slong prec) if (term_limbs > xn) return 0; - xp = (mp_ptr) arb_hypgeom_gamma_tab_limbs + pos; + xp = (nn_ptr) arb_hypgeom_gamma_tab_limbs + pos; ARF_EXP(c) = exp; ARF_XSIZE(c) = ARF_MAKE_XSIZE(term_limbs, negative); diff --git a/src/arb_hypgeom/gamma_upper_sum_rs.c b/src/arb_hypgeom/gamma_upper_sum_rs.c index 45dc7be87b..ea96cd0f5b 100644 --- a/src/arb_hypgeom/gamma_upper_sum_rs.c +++ b/src/arb_hypgeom/gamma_upper_sum_rs.c @@ -43,10 +43,10 @@ _arb_hypgeom_gamma_upper_sum_rs_1(arb_t res, ulong p, ulong q, const arb_t z, sl { slong m, i, j, k, jlen, jbot, jtop, wp; double dz, logdz; - mp_limb_t c, chi, clo; + ulong c, chi, clo; arb_t s, t; arb_ptr zpow; - mp_ptr cs; + nn_ptr cs; m = n_sqrt(N); m = FLINT_MAX(m, 2); @@ -77,7 +77,7 @@ _arb_hypgeom_gamma_upper_sum_rs_1(arb_t res, ulong p, ulong q, const arb_t z, sl arb_init(s); arb_init(t); zpow = _arb_vec_init(m + 1); - cs = flint_malloc(sizeof(mp_limb_t) * (m + 1)); + cs = flint_malloc(sizeof(ulong) * (m + 1)); arb_mul_ui(zpow + m, z, q, prec); arb_inv(zpow + m, zpow + m, prec); _arb_vec_set_powers(zpow, zpow + m, m + 1, prec); diff --git a/src/arb_hypgeom/legendre_p_ui_rec.c b/src/arb_hypgeom/legendre_p_ui_rec.c index 138cffdfb3..b1bebb4e58 100644 --- 
a/src/arb_hypgeom/legendre_p_ui_rec.c +++ b/src/arb_hypgeom/legendre_p_ui_rec.c @@ -17,7 +17,7 @@ arb_hypgeom_legendre_p_ui_rec(arb_t res, arb_t res_prime, ulong n, const arb_t x { slong wp; ulong k, den; - mp_limb_t denlo, denhi; + ulong denlo, denhi; mpz_t p0, p1, xx, tt; fmpz_t fxx; int error; diff --git a/src/arb_hypgeom/rising_ui_forward.c b/src/arb_hypgeom/rising_ui_forward.c index 7b4dcee2a7..38cb8c16b8 100644 --- a/src/arb_hypgeom/rising_ui_forward.c +++ b/src/arb_hypgeom/rising_ui_forward.c @@ -16,9 +16,9 @@ _arf_increment_fast(arf_t x, slong prec) { if (arf_sgn(x) > 0) { - mp_limb_t hi, v, cy; - mp_ptr xptr; - mp_size_t xn; + ulong hi, v, cy; + nn_ptr xptr; + slong xn; slong xexp; xexp = ARF_EXP(x); diff --git a/src/arb_hypgeom/rising_ui_jet_powsum.c b/src/arb_hypgeom/rising_ui_jet_powsum.c index dd0486a4c4..04ebcdb247 100644 --- a/src/arb_hypgeom/rising_ui_jet_powsum.c +++ b/src/arb_hypgeom/rising_ui_jet_powsum.c @@ -59,11 +59,11 @@ arb_hypgeom_rising_ui_jet_powsum(arb_ptr res, const arb_t x, ulong n, slong len, if (n <= 12 || (FLINT_BITS == 64 && n <= 20)) { - mp_ptr c; + nn_ptr c; TMP_START; wp = ARF_PREC_ADD(prec, FLINT_BIT_COUNT(n)); - c = TMP_ALLOC(sizeof(mp_limb_t) * (n + 1) * len); + c = TMP_ALLOC(sizeof(ulong) * (n + 1) * len); _nmod_vec_zero(c, (n + 1) * len); diff --git a/src/arb_hypgeom/rising_ui_jet_rs.c b/src/arb_hypgeom/rising_ui_jet_rs.c index 0de1d74405..24190792a5 100644 --- a/src/arb_hypgeom/rising_ui_jet_rs.c +++ b/src/arb_hypgeom/rising_ui_jet_rs.c @@ -26,7 +26,7 @@ arb_hypgeom_rising_ui_jet_rs(arb_ptr res, const arb_t x, ulong n, ulong m, slong slong i, j, k, l, m0, xmlen, tlen, ulen, climbs, climbs_max, wp; arb_ptr tmp, xpow; arb_ptr t, u; - mp_ptr c; + nn_ptr c; TMP_INIT; if (len == 0) @@ -79,7 +79,7 @@ arb_hypgeom_rising_ui_jet_rs(arb_ptr res, const arb_t x, ulong n, ulong m, slong wp = ARF_PREC_ADD(prec, FLINT_BIT_COUNT(n)); climbs_max = FLINT_BIT_COUNT(n - 1) * m; - c = TMP_ALLOC(sizeof(mp_limb_t) * climbs_max * m); + c = TMP_ALLOC(sizeof(ulong) * climbs_max * m); /* length of (x+t)^m */ xmlen = FLINT_MIN(len, m + 1); diff --git a/src/arb_hypgeom/rising_ui_rs.c b/src/arb_hypgeom/rising_ui_rs.c index d81fe02fde..7cba9899d0 100644 --- a/src/arb_hypgeom/rising_ui_rs.c +++ b/src/arb_hypgeom/rising_ui_rs.c @@ -48,7 +48,7 @@ _arb_hypgeom_rising_coeffs_2(ulong * c, ulong k, slong l) { slong i, j; ulong d; - mp_limb_t hi, lo; + ulong hi, lo; if (l < 2) flint_throw(FLINT_ERROR, "(%s): l < 2\n", __func__); @@ -112,7 +112,7 @@ arb_hypgeom_rising_ui_rs(arb_t res, const arb_t x, ulong n, ulong m, slong prec) slong i, k, l, m0, climbs, climbs_max, wp; arb_ptr xpow; arb_t t, u; - mp_ptr c; + nn_ptr c; TMP_INIT; if (n <= 1) @@ -146,7 +146,7 @@ arb_hypgeom_rising_ui_rs(arb_t res, const arb_t x, ulong n, ulong m, slong prec) wp = ARF_PREC_ADD(prec, FLINT_BIT_COUNT(n)); climbs_max = FLINT_BIT_COUNT(n - 1) * m; - c = TMP_ALLOC(sizeof(mp_limb_t) * climbs_max * m); + c = TMP_ALLOC(sizeof(ulong) * climbs_max * m); xpow = _arb_vec_init(m + 1); _arb_vec_set_powers(xpow, x, m + 1, wp); diff --git a/src/arb_hypgeom/sum_fmpq_arb_rs.c b/src/arb_hypgeom/sum_fmpq_arb_rs.c index 7c375ee87f..bce35984c1 100644 --- a/src/arb_hypgeom/sum_fmpq_arb_rs.c +++ b/src/arb_hypgeom/sum_fmpq_arb_rs.c @@ -69,8 +69,8 @@ arf_get_d_log2_abs_approx_clamped(const arf_t x) } else { - mp_srcptr tp; - mp_size_t tn; + nn_srcptr tp; + slong tn; double v; slong e = ARF_EXP(x); diff --git a/src/arb_hypgeom/test/main.c b/src/arb_hypgeom/test/main.c index 899c6b4436..23145d24e5 100644 --- 
a/src/arb_hypgeom/test/main.c +++ b/src/arb_hypgeom/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-1f1_integration.c" diff --git a/src/arb_mat/test/main.c b/src/arb_mat/test/main.c index 3a29277442..bdcdf4678d 100644 --- a/src/arb_mat/test/main.c +++ b/src/arb_mat/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-addmul_rad_mag_fast.c" diff --git a/src/arb_poly.h b/src/arb_poly.h index 066c551df2..2cb7d94872 100644 --- a/src/arb_poly.h +++ b/src/arb_poly.h @@ -648,7 +648,7 @@ FLINT_FORCE_INLINE int n_zerobits(ulong e) { #if defined(__GNUC__) -# if defined(_LONG_LONG_LIMB) +# if FLINT_LONG_LONG return FLINT_BIT_COUNT(e) - __builtin_popcountll(e); # else return FLINT_BIT_COUNT(e) - __builtin_popcountl(e); @@ -670,10 +670,10 @@ n_zerobits(ulong e) FLINT_FORCE_INLINE slong poly_pow_length(slong poly_len, ulong exp, slong trunc) { - mp_limb_t hi, lo; + ulong hi, lo; umul_ppmm(hi, lo, poly_len - 1, exp); add_ssaaaa(hi, lo, hi, lo, 0, 1); - if (hi != 0 || lo > (mp_limb_t) WORD_MAX) + if (hi != 0 || lo > (ulong) WORD_MAX) return trunc; return FLINT_MIN((slong) lo, trunc); } diff --git a/src/arb_poly/mullow_block.c b/src/arb_poly/mullow_block.c index ededabe218..675bd0caf6 100644 --- a/src/arb_poly/mullow_block.c +++ b/src/arb_poly/mullow_block.c @@ -184,7 +184,7 @@ _mag_vec_get_fmpz_2exp_blocks(fmpz * coeffs, } else { - mp_limb_t man; + ulong man; double c; man = MAG_MAN(cur); diff --git a/src/arb_poly/test/main.c b/src/arb_poly/test/main.c index 944d766774..89ea290744 100644 --- a/src/arb_poly/test/main.c +++ b/src/arb_poly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-acos_series.c" diff --git a/src/arb_types.h b/src/arb_types.h index eae88a25b3..c971bb9761 100644 --- a/src/arb_types.h +++ b/src/arb_types.h @@ -21,7 +21,7 @@ extern "C" { typedef struct { fmpz exp; - mp_limb_t man; + ulong man; } mag_struct; diff --git a/src/arf.h b/src/arf.h index 201d21d02b..e4f4c9403b 100644 --- a/src/arf.h +++ b/src/arf.h @@ -98,7 +98,7 @@ arf_rnd_to_mpfr(arf_rnd_t rnd) #define ARF_XSIZE(x) ((x)->size) /* Construct size field value from size in limbs and sign bit. */ -#define ARF_MAKE_XSIZE(size, sgnbit) ((((mp_size_t) size) << 1) | (sgnbit)) +#define ARF_MAKE_XSIZE(size, sgnbit) ((((slong) size) << 1) | (sgnbit)) /* The limb size, and the sign bit. */ #define ARF_SIZE(x) (ARF_XSIZE(x) >> 1) @@ -127,7 +127,7 @@ arf_rnd_to_mpfr(arf_rnd_t rnd) } while (0) -void _arf_promote(arf_t x, mp_size_t n); +void _arf_promote(arf_t x, slong n); void _arf_demote(arf_t x); @@ -152,8 +152,8 @@ void _arf_demote(arf_t x); /* Assumes non-special! */ #define ARF_GET_TOP_LIMB(lmb, x) \ do { \ - mp_srcptr __xptr; \ - mp_size_t __xn; \ + nn_srcptr __xptr; \ + slong __xn; \ ARF_GET_MPN_READONLY(__xptr, __xn, (x)); \ (lmb) = __xptr[__xn - 1]; \ } while (0) @@ -161,7 +161,7 @@ void _arf_demote(arf_t x); /* Get mpn pointer xptr for writing *exactly* xn limbs to x. 
*/ #define ARF_GET_MPN_WRITE(xptr, xn, x) \ do { \ - mp_size_t __xn = (xn); \ + slong __xn = (xn); \ if ((__xn) <= ARF_NOPTR_LIMBS) \ { \ ARF_DEMOTE(x); \ @@ -175,9 +175,9 @@ void _arf_demote(arf_t x); } \ else if (ARF_PTR_ALLOC(x) < (__xn)) \ { \ - ARF_PTR_D(x) = (mp_ptr) \ + ARF_PTR_D(x) = (nn_ptr) \ flint_realloc(ARF_PTR_D(x), \ - (xn) * sizeof(mp_limb_t)); \ + (xn) * sizeof(ulong)); \ ARF_PTR_ALLOC(x) = (__xn); \ } \ xptr = ARF_PTR_D(x); \ @@ -420,7 +420,7 @@ arf_init_neg_shallow(arf_t z, const arf_t x) ARF_INLINE void arf_init_set_mag_shallow(arf_t y, const mag_t x) { - mp_limb_t t = MAG_MAN(x); + ulong t = MAG_MAN(x); ARF_XSIZE(y) = ARF_MAKE_XSIZE(t != 0, 0); ARF_EXP(y) = MAG_EXP(x); ARF_NOPTR_D(y)[0] = t << (FLINT_BITS - MAG_BITS); @@ -451,7 +451,7 @@ arf_mag_cmpabs(const mag_t x, const arf_t y) /* Assumes xn > 0, x[xn-1] != 0. */ /* TBD: 1, 2 limb versions */ -void arf_set_mpn(arf_t y, mp_srcptr x, mp_size_t xn, int sgnbit); +void arf_set_mpn(arf_t y, nn_srcptr x, slong xn, int sgnbit); ARF_INLINE void arf_set_mpz(arf_t y, const mpz_t x) @@ -475,10 +475,10 @@ arf_set_fmpz(arf_t y, const fmpz_t x) int _arf_set_round_ui(arf_t x, ulong v, int sgnbit, slong prec, arf_rnd_t rnd); -int _arf_set_round_uiui(arf_t z, slong * fix, mp_limb_t hi, mp_limb_t lo, int sgnbit, slong prec, arf_rnd_t rnd); +int _arf_set_round_uiui(arf_t z, slong * fix, ulong hi, ulong lo, int sgnbit, slong prec, arf_rnd_t rnd); int -_arf_set_round_mpn(arf_t y, slong * exp_shift, mp_srcptr x, mp_size_t xn, +_arf_set_round_mpn(arf_t y, slong * exp_shift, nn_srcptr x, slong xn, int sgnbit, slong prec, arf_rnd_t rnd); ARF_INLINE int @@ -571,8 +571,8 @@ arf_bits(const arf_t x) return 0; else { - mp_srcptr xp; - mp_size_t xn; + nn_srcptr xp; + slong xn; slong c; ARF_GET_MPN_READONLY(xp, xn, x); @@ -672,10 +672,10 @@ void arf_frexp(arf_t man, fmpz_t exp, const arf_t x); void arf_get_fmpz_2exp(fmpz_t man, fmpz_t exp, const arf_t x); -int _arf_get_integer_mpn(mp_ptr y, mp_srcptr x, mp_size_t xn, slong exp); +int _arf_get_integer_mpn(nn_ptr y, nn_srcptr x, slong xn, slong exp); -int _arf_set_mpn_fixed(arf_t z, mp_srcptr xp, mp_size_t xn, - mp_size_t fixn, int negative, slong prec, arf_rnd_t rnd); +int _arf_set_mpn_fixed(arf_t z, nn_srcptr xp, slong xn, + slong fixn, int negative, slong prec, arf_rnd_t rnd); int arf_get_fmpz(fmpz_t z, const arf_t x, arf_rnd_t rnd); @@ -728,13 +728,13 @@ void arf_urandom(arf_t x, flint_rand_t state, slong bits, arf_rnd_t rnd); #define ARF_MUL_STACK_ALLOC 40 #define ARF_MUL_TLS_ALLOC 1000 -extern FLINT_TLS_PREFIX mp_ptr __arf_mul_tmp; +extern FLINT_TLS_PREFIX nn_ptr __arf_mul_tmp; extern FLINT_TLS_PREFIX slong __arf_mul_alloc; extern void _arf_mul_tmp_cleanup(void); #define ARF_MUL_TMP_DECL \ - mp_limb_t tmp_stack[ARF_MUL_STACK_ALLOC]; \ + ulong tmp_stack[ARF_MUL_STACK_ALLOC]; \ #define ARF_MUL_TMP_ALLOC(tmp, alloc) \ if (alloc <= ARF_MUL_STACK_ALLOC) \ @@ -749,14 +749,14 @@ extern void _arf_mul_tmp_cleanup(void); { \ flint_register_cleanup_function(_arf_mul_tmp_cleanup); \ } \ - __arf_mul_tmp = flint_realloc(__arf_mul_tmp, sizeof(mp_limb_t) * alloc); \ + __arf_mul_tmp = flint_realloc(__arf_mul_tmp, sizeof(ulong) * alloc); \ __arf_mul_alloc = alloc; \ } \ tmp = __arf_mul_tmp; \ } \ else \ { \ - tmp = flint_malloc(sizeof(mp_limb_t) * alloc); \ + tmp = flint_malloc(sizeof(ulong) * alloc); \ } #define ARF_MUL_TMP_FREE(tmp, alloc) \ @@ -824,13 +824,13 @@ arf_mul_fmpz(arf_ptr z, arf_srcptr x, const fmpz_t y, slong prec, arf_rnd_t rnd) #define ARF_ADD_STACK_ALLOC 40 #define ARF_ADD_TLS_ALLOC 1000 
-extern FLINT_TLS_PREFIX mp_ptr __arf_add_tmp; +extern FLINT_TLS_PREFIX nn_ptr __arf_add_tmp; extern FLINT_TLS_PREFIX slong __arf_add_alloc; extern void _arf_add_tmp_cleanup(void); #define ARF_ADD_TMP_DECL \ - mp_limb_t tmp_stack[ARF_ADD_STACK_ALLOC]; \ + ulong tmp_stack[ARF_ADD_STACK_ALLOC]; \ #define ARF_ADD_TMP_ALLOC(tmp, alloc) \ if (alloc <= ARF_ADD_STACK_ALLOC) \ @@ -845,22 +845,22 @@ extern void _arf_add_tmp_cleanup(void); { \ flint_register_cleanup_function(_arf_add_tmp_cleanup); \ } \ - __arf_add_tmp = flint_realloc(__arf_add_tmp, sizeof(mp_limb_t) * alloc); \ + __arf_add_tmp = flint_realloc(__arf_add_tmp, sizeof(ulong) * alloc); \ __arf_add_alloc = alloc; \ } \ tmp = __arf_add_tmp; \ } \ else \ { \ - tmp = flint_malloc(sizeof(mp_limb_t) * alloc); \ + tmp = flint_malloc(sizeof(ulong) * alloc); \ } #define ARF_ADD_TMP_FREE(tmp, alloc) \ if (alloc > ARF_ADD_TLS_ALLOC) \ flint_free(tmp); -int _arf_add_mpn(arf_t z, mp_srcptr xp, mp_size_t xn, int xsgnbit, - const fmpz_t xexp, mp_srcptr yp, mp_size_t yn, int ysgnbit, +int _arf_add_mpn(arf_t z, nn_srcptr xp, slong xn, int xsgnbit, + const fmpz_t xexp, nn_srcptr yp, slong yn, int ysgnbit, flint_bitcnt_t shift, slong prec, arf_rnd_t rnd); int arf_add(arf_ptr z, arf_srcptr x, arf_srcptr y, slong prec, arf_rnd_t rnd); @@ -1062,8 +1062,8 @@ mag_fast_init_set_arf(mag_t y, const arf_t x) } else { - mp_srcptr xp; - mp_size_t xn; + nn_srcptr xp; + slong xn; ARF_GET_MPN_READONLY(xp, xn, x); @@ -1146,7 +1146,7 @@ arf_allocated_bytes(const arf_t x) slong size = fmpz_allocated_bytes(ARF_EXPREF(x)); if (ARF_HAS_PTR(x)) - size += ARF_PTR_ALLOC(x) * sizeof(mp_limb_t); + size += ARF_PTR_ALLOC(x) * sizeof(ulong); return size; } diff --git a/src/arf/add.c b/src/arf/add.c index 83ac0810c4..4c4439ca75 100644 --- a/src/arf/add.c +++ b/src/arf/add.c @@ -50,8 +50,8 @@ arf_add_special(arf_t z, const arf_t x, const arf_t y, slong prec, arf_rnd_t rnd int arf_add(arf_ptr z, arf_srcptr x, arf_srcptr y, slong prec, arf_rnd_t rnd) { - mp_size_t xn, yn; - mp_srcptr xptr, yptr; + slong xn, yn; + nn_srcptr xptr, yptr; slong shift; if (arf_is_special(x) || arf_is_special(y)) @@ -77,9 +77,9 @@ arf_add(arf_ptr z, arf_srcptr x, arf_srcptr y, slong prec, arf_rnd_t rnd) int arf_add_si(arf_ptr z, arf_srcptr x, slong y, slong prec, arf_rnd_t rnd) { - mp_size_t xn, yn; - mp_srcptr xptr, yptr; - mp_limb_t ytmp; + slong xn, yn; + nn_srcptr xptr, yptr; + ulong ytmp; int xsgnbit, ysgnbit; fmpz yexp; slong shift; @@ -125,9 +125,9 @@ arf_add_si(arf_ptr z, arf_srcptr x, slong y, slong prec, arf_rnd_t rnd) int arf_add_ui(arf_ptr z, arf_srcptr x, ulong y, slong prec, arf_rnd_t rnd) { - mp_size_t xn, yn; - mp_srcptr xptr, yptr; - mp_limb_t ytmp; + slong xn, yn; + nn_srcptr xptr, yptr; + ulong ytmp; int xsgnbit, ysgnbit; fmpz yexp; slong shift; @@ -169,9 +169,9 @@ arf_add_ui(arf_ptr z, arf_srcptr x, ulong y, slong prec, arf_rnd_t rnd) int arf_add_fmpz(arf_ptr z, arf_srcptr x, const fmpz_t y, slong prec, arf_rnd_t rnd) { - mp_size_t xn, yn; - mp_srcptr xptr, yptr; - mp_limb_t ytmp; + slong xn, yn; + nn_srcptr xptr, yptr; + ulong ytmp; int xsgnbit, ysgnbit; fmpz yexp; slong shift; @@ -209,9 +209,9 @@ arf_add_fmpz(arf_ptr z, arf_srcptr x, const fmpz_t y, slong prec, arf_rnd_t rnd) int arf_add_fmpz_2exp(arf_ptr z, arf_srcptr x, const fmpz_t y, const fmpz_t exp, slong prec, arf_rnd_t rnd) { - mp_size_t xn, yn; - mp_srcptr xptr, yptr; - mp_limb_t ytmp; + slong xn, yn; + nn_srcptr xptr, yptr; + ulong ytmp; int xsgnbit, ysgnbit, inexact; fmpz_t yexp; slong shift; diff --git a/src/arf/add_mpn.c 
b/src/arf/add_mpn.c index ccecf0d0f2..0169ad23e3 100644 --- a/src/arf/add_mpn.c +++ b/src/arf/add_mpn.c @@ -12,7 +12,7 @@ #include "mpn_extras.h" #include "arf.h" -FLINT_TLS_PREFIX mp_ptr __arf_add_tmp = NULL; +FLINT_TLS_PREFIX nn_ptr __arf_add_tmp = NULL; FLINT_TLS_PREFIX slong __arf_add_alloc = 0; void _arf_add_tmp_cleanup(void) @@ -24,23 +24,23 @@ void _arf_add_tmp_cleanup(void) /* Assumptions: top limbs of x and y nonzero. */ int -_arf_add_mpn(arf_t z, mp_srcptr xp, mp_size_t xn, int xsgnbit, const fmpz_t xexp, - mp_srcptr yp, mp_size_t yn, int ysgnbit, flint_bitcnt_t shift, +_arf_add_mpn(arf_t z, nn_srcptr xp, slong xn, int xsgnbit, const fmpz_t xexp, + nn_srcptr yp, slong yn, int ysgnbit, flint_bitcnt_t shift, slong prec, arf_rnd_t rnd) { - mp_size_t wn, zn, zn_original, alloc, xbase, wbase; - mp_size_t shift_limbs; + slong wn, zn, zn_original, alloc, xbase, wbase; + slong shift_limbs; flint_bitcnt_t shift_bits; int inexact; slong fix; - mp_limb_t cy; - mp_ptr tmp; + ulong cy; + nn_ptr tmp; ARF_ADD_TMP_DECL /* very fast case */ if (xn == 1 && yn == 1 && shift < FLINT_BITS - 1) { - mp_limb_t hi, lo, xhi, xlo, yhi, ylo; + ulong hi, lo, xhi, xlo, yhi, ylo; xhi = xp[0]; yhi = yp[0]; @@ -93,7 +93,7 @@ _arf_add_mpn(arf_t z, mp_srcptr xp, mp_size_t xn, int xsgnbit, const fmpz_t xexp /* somewhat fast case */ if (xn <= 2 && yn <= 2 && shift <= 2 * FLINT_BITS) { - mp_limb_t t[5], xtmp[4], ytmp[4], yhi, ylo; + ulong t[5], xtmp[4], ytmp[4], yhi, ylo; slong fix2; xtmp[0] = 0; diff --git a/src/arf/addmul.c b/src/arf/addmul.c index 83a638948a..bb024b6747 100644 --- a/src/arf/addmul.c +++ b/src/arf/addmul.c @@ -15,9 +15,9 @@ int arf_addmul(arf_ptr z, arf_srcptr x, arf_srcptr y, slong prec, arf_rnd_t rnd) { - mp_size_t xn, yn, zn, tn, alloc; - mp_srcptr xptr, yptr, zptr; - mp_ptr tptr, tptr2; + slong xn, yn, zn, tn, alloc; + nn_srcptr xptr, yptr, zptr; + nn_ptr tptr, tptr2; fmpz_t texp; slong shift; int tsgnbit, inexact; @@ -80,9 +80,9 @@ arf_addmul(arf_ptr z, arf_srcptr x, arf_srcptr y, slong prec, arf_rnd_t rnd) int arf_addmul_mpz(arf_ptr z, arf_srcptr x, const mpz_t y, slong prec, arf_rnd_t rnd) { - mp_size_t xn, yn, zn, tn, alloc; - mp_srcptr xptr, yptr, zptr; - mp_ptr tptr, tptr2; + slong xn, yn, zn, tn, alloc; + nn_srcptr xptr, yptr, zptr; + nn_ptr tptr, tptr2; fmpz_t texp, yexp; slong shift; int tsgnbit, ysgnbit, inexact; diff --git a/src/arf/approx_dot.c b/src/arf/approx_dot.c index 81e0c8d890..0c086345e8 100644 --- a/src/arf/approx_dot.c +++ b/src/arf/approx_dot.c @@ -13,13 +13,13 @@ #include "mpn_extras.h" void -_arb_dot_addmul_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn, - mp_srcptr xptr, mp_size_t xn, mp_srcptr yptr, mp_size_t yn, +_arb_dot_addmul_generic(nn_ptr sum, nn_ptr serr, nn_ptr tmp, slong sn, + nn_srcptr xptr, slong xn, nn_srcptr yptr, slong yn, int negative, flint_bitcnt_t shift); void -_arb_dot_add_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn, - mp_srcptr xptr, mp_size_t xn, +_arb_dot_add_generic(nn_ptr sum, nn_ptr serr, nn_ptr tmp, slong sn, + nn_srcptr xptr, slong xn, int negative, flint_bitcnt_t shift); void @@ -63,12 +63,12 @@ arf_approx_dot(arf_t res, const arf_t initial, int subtract, arf_srcptr x, slong slong i, j, nonzero, padding, extend; slong xexp, yexp, exp, max_exp, min_exp, sum_exp; int xnegative, ynegative; - mp_size_t xn, yn, sn, alloc; + slong xn, yn, sn, alloc; flint_bitcnt_t shift; arf_srcptr xi, yi; arf_srcptr xm, ym; - mp_limb_t serr; /* Sum over arithmetic errors - not used, but need dummy for calls */ - mp_ptr tmp, sum; /* 
Workspace */ + ulong serr; /* Sum over arithmetic errors - not used, but need dummy for calls */ + nn_ptr tmp, sum; /* Workspace */ ARF_ADD_TMP_DECL; /* todo: fast fma and fmma (len=2) code */ @@ -211,7 +211,7 @@ arf_approx_dot(arf_t res, const arf_t initial, int subtract, arf_srcptr x, slong if (!arf_is_special(xm)) { - mp_srcptr xptr; + nn_srcptr xptr; xexp = ARF_EXP(xm); xn = ARF_SIZE(xm); @@ -255,7 +255,7 @@ arf_approx_dot(arf_t res, const arf_t initial, int subtract, arf_srcptr x, slong #if 0 else if (xn == 1 && yn == 1 && sn == 2 && shift < FLINT_BITS) /* Fastest path. */ { - mp_limb_t hi, lo, x0, y0; + ulong hi, lo, x0, y0; x0 = ARF_NOPTR_D(xm)[0]; y0 = ARF_NOPTR_D(ym)[0]; @@ -272,8 +272,8 @@ arf_approx_dot(arf_t res, const arf_t initial, int subtract, arf_srcptr x, slong } else if (xn == 2 && yn == 2 && shift < FLINT_BITS && sn <= 3) { - mp_limb_t x1, x0, y1, y0; - mp_limb_t u3, u2, u1, u0; + ulong x1, x0, y1, y0; + ulong u3, u2, u1, u0; x0 = ARF_NOPTR_D(xm)[0]; x1 = ARF_NOPTR_D(xm)[1]; @@ -304,8 +304,8 @@ arf_approx_dot(arf_t res, const arf_t initial, int subtract, arf_srcptr x, slong #endif else if (xn <= 2 && yn <= 2 && sn <= 3) { - mp_limb_t x1, x0, y1, y0; - mp_limb_t u3, u2, u1, u0; + ulong x1, x0, y1, y0; + ulong u3, u2, u1, u0; if (xn == 1 && yn == 1) { @@ -403,7 +403,7 @@ arf_approx_dot(arf_t res, const arf_t initial, int subtract, arf_srcptr x, slong } else { - mp_srcptr xptr, yptr; + nn_srcptr xptr, yptr; xptr = (xn <= ARF_NOPTR_LIMBS) ? ARF_NOPTR_D(xm) : ARF_PTR_D(xm); yptr = (yn <= ARF_NOPTR_LIMBS) ? ARF_NOPTR_D(ym) : ARF_PTR_D(ym); @@ -423,7 +423,7 @@ arf_approx_dot(arf_t res, const arf_t initial, int subtract, arf_srcptr x, slong if (sum[sn - 1] == 0) { slong sum_exp2; - mp_size_t sn2; + slong sn2; sn2 = sn; sum_exp2 = sum_exp; diff --git a/src/arf/cmp.c b/src/arf/cmp.c index cf9e47ae8a..21322fffba 100644 --- a/src/arf/cmp.c +++ b/src/arf/cmp.c @@ -15,8 +15,8 @@ int arf_cmp(const arf_t x, const arf_t y) { int xs, ys, ec, mc; - mp_size_t xn, yn; - mp_srcptr xp, yp; + slong xn, yn; + nn_srcptr xp, yp; if (arf_is_special(x) || arf_is_special(y)) { @@ -85,8 +85,8 @@ int arf_cmpabs(const arf_t x, const arf_t y) { int ec, mc; - mp_size_t xn, yn; - mp_srcptr xp, yp; + slong xn, yn; + nn_srcptr xp, yp; if (arf_is_special(x) || arf_is_special(y)) { diff --git a/src/arf/complex_mul.c b/src/arf/complex_mul.c index 1ed2f736db..6e2f1c0d5d 100644 --- a/src/arf/complex_mul.c +++ b/src/arf/complex_mul.c @@ -79,10 +79,10 @@ int arf_complex_mul(arf_t e, arf_t f, const arf_t a, const arf_t b, const arf_t c, const arf_t d, slong prec, arf_rnd_t rnd) { - mp_srcptr ap, bp, cp, dp; + nn_srcptr ap, bp, cp, dp; int asgn, bsgn, csgn, dsgn, inex1, inex2; - mp_ptr tmp, acp, bdp, adp, bcp; - mp_size_t an, bn, cn, dn, acn, bdn, adn, bcn, alloc; + nn_ptr tmp, acp, bdp, adp, bcp; + slong an, bn, cn, dn, acn, bdn, adn, bcn, alloc; slong shift; slong aexp, bexp, cexp, dexp; fmpz texp, uexp; @@ -238,10 +238,10 @@ int arf_complex_sqr(arf_t e, arf_t f, } else { - mp_srcptr ap, bp; + nn_srcptr ap, bp; int inex1, inex2; - mp_ptr tmp, aap, bbp; - mp_size_t an, bn, aan, bbn, alloc; + nn_ptr tmp, aap, bbp; + slong an, bn, aan, bbn, alloc; slong shift; slong aexp, bexp; fmpz texp, uexp; @@ -291,7 +291,7 @@ int arf_complex_sqr(arf_t e, arf_t f, TMP_START; - tmp = TMP_ALLOC(alloc * sizeof(mp_limb_t)); + tmp = TMP_ALLOC(alloc * sizeof(ulong)); aap = tmp; bbp = tmp + aan; diff --git a/src/arf/debug.c b/src/arf/debug.c index 24abc5c28a..0a14cf1d22 100644 --- a/src/arf/debug.c +++ b/src/arf/debug.c @@ -14,8 +14,8 
@@ void arf_debug(const arf_t x) { - mp_srcptr d; - mp_size_t n; + nn_srcptr d; + slong n; slong i; flint_printf("{exp="); diff --git a/src/arf/div.c b/src/arf/div.c index 7da47d4ae1..e2ba0d82a7 100644 --- a/src/arf/div.c +++ b/src/arf/div.c @@ -12,7 +12,7 @@ #include "mpn_extras.h" #include "arf.h" -void __gmpn_div_q(mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr); +void __gmpn_div_q(nn_ptr, nn_srcptr, slong, nn_srcptr, slong, nn_ptr); void arf_div_special(arf_t z, const arf_t x, const arf_t y) @@ -36,10 +36,10 @@ arf_div_special(arf_t z, const arf_t x, const arf_t y) int arf_div(arf_ptr z, arf_srcptr x, arf_srcptr y, slong prec, arf_rnd_t rnd) { - mp_size_t xn, yn, zn, sn, tn, alloc; - mp_srcptr xptr, yptr; - mp_ptr tmp; - mp_ptr tptr, zptr; + slong xn, yn, zn, sn, tn, alloc; + nn_srcptr xptr, yptr; + nn_ptr tmp; + nn_ptr tptr, zptr; int inexact; slong fix, fix2; ARF_MUL_TMP_DECL diff --git a/src/arf/equal.c b/src/arf/equal.c index d90848a9fc..368190e43f 100644 --- a/src/arf/equal.c +++ b/src/arf/equal.c @@ -14,7 +14,7 @@ int arf_equal(const arf_t x, const arf_t y) { - mp_size_t n; + slong n; if (x == y) return 1; diff --git a/src/arf/fma.c b/src/arf/fma.c index 238af25ff4..a2bdd2be9e 100644 --- a/src/arf/fma.c +++ b/src/arf/fma.c @@ -15,9 +15,9 @@ int arf_fma(arf_ptr res, arf_srcptr x, arf_srcptr y, arf_srcptr z, slong prec, arf_rnd_t rnd) { - mp_size_t xn, yn, zn, tn, alloc; - mp_srcptr xptr, yptr, zptr; - mp_ptr tptr, tptr2; + slong xn, yn, zn, tn, alloc; + nn_srcptr xptr, yptr, zptr; + nn_ptr tptr, tptr2; fmpz_t texp; slong shift; int tsgnbit, inexact; diff --git a/src/arf/get.c b/src/arf/get.c index 84180b6006..9ad108f66a 100644 --- a/src/arf/get.c +++ b/src/arf/get.c @@ -62,8 +62,8 @@ arf_get_d(const arf_t x, arf_rnd_t rnd) else { arf_t t; - mp_srcptr tp; - mp_size_t tn; + nn_srcptr tp; + slong tn; double v; /* also catches bignum exponents */ @@ -81,7 +81,7 @@ arf_get_d(const arf_t x, arf_rnd_t rnd) mpfr_t xx; ARF_GET_MPN_READONLY(tp, tn, x); - xx->_mpfr_d = (mp_ptr) tp; + xx->_mpfr_d = (nn_ptr) tp; xx->_mpfr_prec = tn * FLINT_BITS; xx->_mpfr_sign = ARF_SGNBIT(x) ? 
-1 : 1; xx->_mpfr_exp = ARF_EXP(x); @@ -159,8 +159,8 @@ arf_get_fmpz_2exp(fmpz_t man, fmpz_t exp, const arf_t x) } else { - mp_srcptr xptr; - mp_size_t xn; + nn_srcptr xptr; + slong xn; int shift; ARF_GET_MPN_READONLY(xptr, xn, x); @@ -199,11 +199,11 @@ arf_get_fmpz(fmpz_t z, const arf_t x, arf_rnd_t rnd) { slong exp; int negative, inexact, value, roundup; - mp_size_t xn, zn; - mp_srcptr xp; + slong xn, zn; + nn_srcptr xp; mpz_ptr zz; - mp_ptr zp; - mp_limb_t v, v2, v3; + nn_ptr zp; + ulong v, v2, v3; if (arf_is_special(x)) { @@ -407,13 +407,13 @@ arf_get_fmpz_fixed_si(fmpz_t y, const arf_t x, slong e) } int -_arf_get_integer_mpn(mp_ptr y, mp_srcptr x, mp_size_t xn, slong exp) +_arf_get_integer_mpn(nn_ptr y, nn_srcptr x, slong xn, slong exp) { slong bot_exp = exp - xn * FLINT_BITS; if (bot_exp >= 0) { - mp_size_t bot_limbs; + slong bot_limbs; flint_bitcnt_t bot_bits; bot_limbs = bot_exp / FLINT_BITS; @@ -436,9 +436,9 @@ _arf_get_integer_mpn(mp_ptr y, mp_srcptr x, mp_size_t xn, slong exp) } else { - mp_size_t top_limbs; + slong top_limbs; flint_bitcnt_t top_bits; - mp_limb_t cy; + ulong cy; top_limbs = exp / FLINT_BITS; top_bits = exp % FLINT_BITS; @@ -473,7 +473,7 @@ arf_get_mag(mag_t y, const arf_t x) } else { - mp_limb_t t, u; + ulong t, u; ARF_GET_TOP_LIMB(t, x); t = (t >> (FLINT_BITS - MAG_BITS)) + LIMB_ONE; @@ -504,7 +504,7 @@ arf_get_mag_lower(mag_t y, const arf_t x) } else { - mp_limb_t t; + ulong t; ARF_GET_TOP_LIMB(t, x); MAG_MAN(y) = t >> (FLINT_BITS - MAG_BITS); _fmpz_set_fast(MAG_EXPREF(y), ARF_EXPREF(x)); @@ -576,12 +576,12 @@ arf_get_mpfr(mpfr_t x, const arf_t y, mpfr_rnd_t rnd) else { __mpfr_struct t; - mp_size_t n; - mp_srcptr d; + slong n; + nn_srcptr d; ARF_GET_MPN_READONLY(d, n, y); - t._mpfr_d = (mp_ptr) d; + t._mpfr_d = (nn_ptr) d; t._mpfr_exp = ARF_EXP(y); t._mpfr_prec = n * FLINT_BITS; t._mpfr_sign = ARF_SGNBIT(y) ? 
-1 : 1; diff --git a/src/arf/is_int.c b/src/arf/is_int.c index e80bdb45a2..1c43043b9c 100644 --- a/src/arf/is_int.c +++ b/src/arf/is_int.c @@ -14,8 +14,8 @@ int arf_is_int(const arf_t x) { - mp_size_t xn; - mp_srcptr xp; + slong xn; + nn_srcptr xp; slong exp, c; exp = ARF_EXP(x); diff --git a/src/arf/memory_manager.c b/src/arf/memory_manager.c index 383685ec74..6a77a1f0da 100644 --- a/src/arf/memory_manager.c +++ b/src/arf/memory_manager.c @@ -15,7 +15,7 @@ #define ARF_MAX_CACHE_LIMBS 64 -FLINT_TLS_PREFIX mp_ptr * arf_free_arr = NULL; +FLINT_TLS_PREFIX nn_ptr * arf_free_arr = NULL; FLINT_TLS_PREFIX ulong arf_free_num = 0; FLINT_TLS_PREFIX ulong arf_free_alloc = 0; FLINT_TLS_PREFIX int arf_have_registered_cleanup = 0; @@ -34,12 +34,12 @@ void _arf_cleanup(void) } void -_arf_promote(arf_t x, mp_size_t n) +_arf_promote(arf_t x, slong n) { if (ARF_USE_CACHE && n <= ARF_MAX_CACHE_LIMBS && arf_free_num != 0) { - mp_ptr ptr; - mp_size_t alloc; + nn_ptr ptr; + slong alloc; ptr = arf_free_arr[--arf_free_num]; alloc = ptr[0]; @@ -51,7 +51,7 @@ _arf_promote(arf_t x, mp_size_t n) } else { - ptr = flint_realloc(ptr, n * sizeof(mp_limb_t)); + ptr = flint_realloc(ptr, n * sizeof(ulong)); ARF_PTR_ALLOC(x) = n; ARF_PTR_D(x) = ptr; } @@ -59,15 +59,15 @@ _arf_promote(arf_t x, mp_size_t n) else { ARF_PTR_ALLOC(x) = n; - ARF_PTR_D(x) = flint_malloc(n * sizeof(mp_limb_t)); + ARF_PTR_D(x) = flint_malloc(n * sizeof(ulong)); } } void _arf_demote(arf_t x) { - mp_ptr ptr; - mp_size_t alloc; + nn_ptr ptr; + slong alloc; alloc = ARF_PTR_ALLOC(x); ptr = ARF_PTR_D(x); @@ -84,7 +84,7 @@ _arf_demote(arf_t x) arf_free_alloc = FLINT_MAX(64, arf_free_alloc * 2); arf_free_arr = flint_realloc(arf_free_arr, - arf_free_alloc * sizeof(mp_ptr)); + arf_free_alloc * sizeof(nn_ptr)); } ptr[0] = alloc; diff --git a/src/arf/mul_rnd_any.c b/src/arf/mul_rnd_any.c index a1d5782279..e392a43749 100644 --- a/src/arf/mul_rnd_any.c +++ b/src/arf/mul_rnd_any.c @@ -16,7 +16,7 @@ int arf_mul_rnd_any(arf_ptr z, arf_srcptr x, arf_srcptr y, slong prec, arf_rnd_t rnd) { - mp_size_t xn, yn; + slong xn, yn; slong fix; int sgnbit, inexact; @@ -29,7 +29,7 @@ arf_mul_rnd_any(arf_ptr z, arf_srcptr x, arf_srcptr y, if (yn > xn) { FLINT_SWAP(arf_srcptr, x, y); - FLINT_SWAP(mp_size_t, xn, yn); + FLINT_SWAP(slong, xn, yn); } /* Either operand is a special value. */ @@ -40,9 +40,9 @@ arf_mul_rnd_any(arf_ptr z, arf_srcptr x, arf_srcptr y, } else { - mp_size_t zn, alloc; - mp_srcptr xptr, yptr; - mp_ptr tmp; + slong zn, alloc; + nn_srcptr xptr, yptr; + nn_ptr tmp; ARF_MUL_TMP_DECL ARF_GET_MPN_READONLY(xptr, xn, x); diff --git a/src/arf/mul_rnd_down.c b/src/arf/mul_rnd_down.c index b7be17d637..485d99fb37 100644 --- a/src/arf/mul_rnd_down.c +++ b/src/arf/mul_rnd_down.c @@ -15,11 +15,11 @@ int arf_mul_rnd_down(arf_ptr z, arf_srcptr x, arf_srcptr y, slong prec) { - mp_size_t xn, yn, zn; - mp_limb_t hi, lo; + slong xn, yn, zn; + ulong hi, lo; slong expfix; int sgnbit, ret, fix; - mp_ptr zptr; + nn_ptr zptr; xn = ARF_XSIZE(x); yn = ARF_XSIZE(y); @@ -30,7 +30,7 @@ arf_mul_rnd_down(arf_ptr z, arf_srcptr x, arf_srcptr y, slong prec) if (yn > xn) { FLINT_SWAP(arf_srcptr, x, y); - FLINT_SWAP(mp_size_t, xn, yn); + FLINT_SWAP(slong, xn, yn); } /* Either operand is a special value. 
*/ @@ -107,8 +107,8 @@ arf_mul_rnd_down(arf_ptr z, arf_srcptr x, arf_srcptr y, slong prec) } else if (xn == 2) { - mp_limb_t zz[4]; - mp_limb_t x1, x0, y1, y0; + ulong zz[4]; + ulong x1, x0, y1, y0; x0 = ARF_NOPTR_D(x)[0]; x1 = ARF_NOPTR_D(x)[1]; @@ -183,9 +183,9 @@ arf_mul_rnd_down(arf_ptr z, arf_srcptr x, arf_srcptr y, slong prec) } else { - mp_size_t zn, alloc; - mp_srcptr xptr, yptr; - mp_ptr tmp; + slong zn, alloc; + nn_srcptr xptr, yptr; + nn_ptr tmp; ARF_MUL_TMP_DECL ARF_GET_MPN_READONLY(xptr, xn, x); @@ -207,7 +207,7 @@ arf_mul_rnd_down(arf_ptr z, arf_srcptr x, arf_srcptr y, slong prec) int arf_mul_mpz(arf_ptr z, arf_srcptr x, const mpz_t y, slong prec, arf_rnd_t rnd) { - mp_size_t xn, yn; + slong xn, yn; slong fix, shift; int sgnbit, inexact; @@ -234,9 +234,9 @@ arf_mul_mpz(arf_ptr z, arf_srcptr x, const mpz_t y, slong prec, arf_rnd_t rnd) } else { - mp_size_t zn, alloc; - mp_srcptr xptr, yptr; - mp_ptr tmp; + slong zn, alloc; + nn_srcptr xptr, yptr; + nn_ptr tmp; ARF_MUL_TMP_DECL ARF_GET_MPN_READONLY(xptr, xn, x); diff --git a/src/arf/mul_tmp_cleanup.c b/src/arf/mul_tmp_cleanup.c index e91124f1ce..905818abc4 100644 --- a/src/arf/mul_tmp_cleanup.c +++ b/src/arf/mul_tmp_cleanup.c @@ -11,7 +11,7 @@ #include "arf.h" -FLINT_TLS_PREFIX mp_ptr __arf_mul_tmp = NULL; +FLINT_TLS_PREFIX nn_ptr __arf_mul_tmp = NULL; FLINT_TLS_PREFIX slong __arf_mul_alloc = 0; void _arf_mul_tmp_cleanup(void) diff --git a/src/arf/mul_via_mpfr.c b/src/arf/mul_via_mpfr.c index 0b19b95e06..0c463617ac 100644 --- a/src/arf/mul_via_mpfr.c +++ b/src/arf/mul_via_mpfr.c @@ -17,9 +17,9 @@ int arf_mul_via_mpfr(arf_t z, const arf_t x, const arf_t y, slong prec, arf_rnd_t rnd) { - mp_size_t xn, yn, zn, val; - mp_srcptr xptr, yptr; - mp_ptr tmp, zptr; + slong xn, yn, zn, val; + nn_srcptr xptr, yptr; + nn_ptr tmp, zptr; mpfr_t xf, yf, zf; int ret; ARF_MUL_TMP_DECL @@ -43,7 +43,7 @@ arf_mul_via_mpfr(arf_t z, const arf_t x, const arf_t y, zf->_mpfr_sign = 1; zf->_mpfr_exp = 0; - xf->_mpfr_d = (mp_ptr) xptr; + xf->_mpfr_d = (nn_ptr) xptr; xf->_mpfr_prec = xn * FLINT_BITS; xf->_mpfr_sign = ARF_SGNBIT(x) ? -1 : 1; xf->_mpfr_exp = 0; @@ -54,7 +54,7 @@ arf_mul_via_mpfr(arf_t z, const arf_t x, const arf_t y, } else { - yf->_mpfr_d = (mp_ptr) yptr; + yf->_mpfr_d = (nn_ptr) yptr; yf->_mpfr_prec = yn * FLINT_BITS; yf->_mpfr_sign = ARF_SGNBIT(y) ? 
-1 : 1; yf->_mpfr_exp = 0; diff --git a/src/arf/neg_round.c b/src/arf/neg_round.c index 43b1e56591..9d4cf8b889 100644 --- a/src/arf/neg_round.c +++ b/src/arf/neg_round.c @@ -23,8 +23,8 @@ arf_neg_round(arf_t y, const arf_t x, slong prec, arf_rnd_t rnd) { int inexact; slong fix; - mp_size_t xn; - mp_srcptr xptr; + slong xn; + nn_srcptr xptr; if (y == x) { diff --git a/src/arf/root.c b/src/arf/root.c index 69707dabb0..50128c1ef3 100644 --- a/src/arf/root.c +++ b/src/arf/root.c @@ -16,9 +16,9 @@ int arf_root(arf_ptr z, arf_srcptr x, ulong k, slong prec, arf_rnd_t rnd) { - mp_size_t xn, zn, val; - mp_srcptr xptr; - mp_ptr tmp, zptr; + slong xn, zn, val; + nn_srcptr xptr; + nn_ptr tmp, zptr; mpfr_t xf, zf; fmpz_t q, r; int inexact; @@ -62,12 +62,12 @@ arf_root(arf_ptr z, arf_srcptr x, ulong k, slong prec, arf_rnd_t rnd) ARF_GET_MPN_READONLY(xptr, xn, x); zn = (prec + FLINT_BITS - 1) / FLINT_BITS; - zf->_mpfr_d = tmp = flint_malloc(zn * sizeof(mp_limb_t)); + zf->_mpfr_d = tmp = flint_malloc(zn * sizeof(ulong)); zf->_mpfr_prec = prec; zf->_mpfr_sign = 1; zf->_mpfr_exp = 0; - xf->_mpfr_d = (mp_ptr) xptr; + xf->_mpfr_d = (nn_ptr) xptr; xf->_mpfr_prec = xn * FLINT_BITS; xf->_mpfr_sign = 1; xf->_mpfr_exp = fmpz_get_ui(r); diff --git a/src/arf/rsqrt.c b/src/arf/rsqrt.c index 92767628b1..f43ab75ba7 100644 --- a/src/arf/rsqrt.c +++ b/src/arf/rsqrt.c @@ -16,9 +16,9 @@ int arf_rsqrt(arf_ptr z, arf_srcptr x, slong prec, arf_rnd_t rnd) { - mp_size_t xn, zn, val; - mp_srcptr xptr; - mp_ptr tmp, zptr; + slong xn, zn, val; + nn_srcptr xptr; + nn_ptr tmp, zptr; mpfr_t xf, zf; int inexact, odd_exp; ARF_MUL_TMP_DECL @@ -61,7 +61,7 @@ arf_rsqrt(arf_ptr z, arf_srcptr x, slong prec, arf_rnd_t rnd) zf->_mpfr_sign = 1; zf->_mpfr_exp = 0; - xf->_mpfr_d = (mp_ptr) xptr; + xf->_mpfr_d = (nn_ptr) xptr; xf->_mpfr_prec = xn * FLINT_BITS; xf->_mpfr_sign = 1; xf->_mpfr_exp = odd_exp; diff --git a/src/arf/set.c b/src/arf/set.c index 83196c5c8e..1b1929e286 100644 --- a/src/arf/set.c +++ b/src/arf/set.c @@ -33,9 +33,9 @@ arf_set(arf_t y, const arf_t x) } else { - mp_ptr yptr; - mp_srcptr xptr; - mp_size_t n; + nn_ptr yptr; + nn_srcptr xptr; + slong n; ARF_GET_MPN_READONLY(xptr, n, x); ARF_GET_MPN_WRITE(yptr, n, y); @@ -51,9 +51,9 @@ void arf_set_d(arf_t x, double v) { #if FLINT_BITS == 64 - mp_limb_t h, sign, exp, frac; + ulong h, sign, exp, frac; slong real_exp, real_man; - union { double uf; mp_limb_t ul; } u; + union { double uf; ulong ul; } u; u.uf = v; h = u.ul; @@ -103,7 +103,7 @@ arf_set_d(arf_t x, double v) arf_set_si_2exp_si(x, real_man, real_exp); #else mpfr_t t; - mp_limb_t tmp[2]; + ulong tmp[2]; t->_mpfr_prec = 53; t->_mpfr_sign = 1; @@ -132,19 +132,19 @@ arf_set_mpfr(arf_t x, const mpfr_t y) } else { - mp_size_t n = (y->_mpfr_prec + FLINT_BITS - 1) / FLINT_BITS; + slong n = (y->_mpfr_prec + FLINT_BITS - 1) / FLINT_BITS; arf_set_mpn(x, y->_mpfr_d, n, y->_mpfr_sign < 0); fmpz_set_si(ARF_EXPREF(x), y->_mpfr_exp); } } void -arf_set_mpn(arf_t y, mp_srcptr x, mp_size_t xn, int sgnbit) +arf_set_mpn(arf_t y, nn_srcptr x, slong xn, int sgnbit) { unsigned int leading; - mp_size_t yn, xn1; - mp_ptr yptr; - mp_limb_t bot; + slong yn, xn1; + nn_ptr yptr; + ulong bot; xn1 = xn; @@ -182,8 +182,8 @@ arf_set_mpn(arf_t y, mp_srcptr x, mp_size_t xn, int sgnbit) } int -_arf_set_mpn_fixed(arf_t z, mp_srcptr xp, mp_size_t xn, - mp_size_t fixn, int negative, slong prec, arf_rnd_t rnd) +_arf_set_mpn_fixed(arf_t z, nn_srcptr xp, slong xn, + slong fixn, int negative, slong prec, arf_rnd_t rnd) { slong exp, exp_shift; int inexact; diff --git 
a/src/arf/set_round.c b/src/arf/set_round.c index 6a5aa7d42d..f9334d9e67 100644 --- a/src/arf/set_round.c +++ b/src/arf/set_round.c @@ -24,12 +24,12 @@ arf_set_round(arf_t y, const arf_t x, slong prec, arf_rnd_t rnd) { int inexact; slong fix; - mp_size_t xn; - mp_srcptr xptr; + slong xn; + nn_srcptr xptr; if (y == x) { - mp_ptr xtmp; + nn_ptr xtmp; TMP_INIT; ARF_GET_MPN_READONLY(xptr, xn, x); @@ -47,7 +47,7 @@ arf_set_round(arf_t y, const arf_t x, slong prec, arf_rnd_t rnd) /* inexact */ TMP_START; - xtmp = TMP_ALLOC(xn * sizeof(mp_limb_t)); + xtmp = TMP_ALLOC(xn * sizeof(ulong)); flint_mpn_copyi(xtmp, xptr, xn); inexact = _arf_set_round_mpn(y, &fix, xtmp, xn, ARF_SGNBIT(x), prec, rnd); _fmpz_add_fast(ARF_EXPREF(y), ARF_EXPREF(x), fix); @@ -67,14 +67,14 @@ arf_set_round(arf_t y, const arf_t x, slong prec, arf_rnd_t rnd) } int -_arf_set_round_mpn(arf_t y, slong * exp_shift, mp_srcptr x, mp_size_t xn, +_arf_set_round_mpn(arf_t y, slong * exp_shift, nn_srcptr x, slong xn, int sgnbit, slong prec, arf_rnd_t rnd) { unsigned int leading; flint_bitcnt_t exp, bc, val, val_bits; - mp_size_t yn, val_limbs; - mp_ptr yptr; - mp_limb_t t; + slong yn, val_limbs; + nn_ptr yptr; + ulong t; int increment, inexact; /* Compute the total bit length of x. */ @@ -121,7 +121,7 @@ _arf_set_round_mpn(arf_t y, slong * exp_shift, mp_srcptr x, mp_size_t xn, { /* The bit to the right of the truncation point determines the rounding direction. */ - mp_size_t exc_limbs = (exp - prec - 1) / FLINT_BITS; + slong exc_limbs = (exp - prec - 1) / FLINT_BITS; flint_bitcnt_t exc_bits = (exp - prec - 1) % FLINT_BITS; increment = (x[exc_limbs] >> exc_bits) & 1; @@ -234,7 +234,7 @@ _arf_set_round_mpn(arf_t y, slong * exp_shift, mp_srcptr x, mp_size_t xn, } \ else \ { \ - mp_limb_t hi_mask, lo_mask, rndn_mask, __t, __u; \ + ulong hi_mask, lo_mask, rndn_mask, __t, __u; \ hi_mask = LIMB_ONES << (FLINT_BITS - prec); \ __t = v & hi_mask; \ inexact = (__t != v); \ @@ -287,7 +287,7 @@ _arf_set_round_ui(arf_t x, ulong v, int sgnbit, slong prec, arf_rnd_t rnd) } int -_arf_set_round_uiui(arf_t z, slong * fix, mp_limb_t hi, mp_limb_t lo, int sgnbit, slong prec, arf_rnd_t rnd) +_arf_set_round_uiui(arf_t z, slong * fix, ulong hi, ulong lo, int sgnbit, slong prec, arf_rnd_t rnd) { int leading, trailing, bc, inexact, zn, up, exp; @@ -378,7 +378,7 @@ _arf_set_round_uiui(arf_t z, slong * fix, mp_limb_t hi, mp_limb_t lo, int sgnbit if (up) { - mp_limb_t t, ovf; + ulong t, ovf; t = lo + (LIMB_ONE << (FLINT_BITS - prec)); ovf = (t == 0); leading -= ovf; diff --git a/src/arf/sosq.c b/src/arf/sosq.c index 1f58c33358..129afafbdd 100644 --- a/src/arf/sosq.c +++ b/src/arf/sosq.c @@ -32,10 +32,10 @@ arf_sosq(arf_t res, const arf_t a, const arf_t b, slong prec, arf_rnd_t rnd) } else { - mp_srcptr ap, bp; + nn_srcptr ap, bp; int inexact; - mp_ptr tmp, aap, bbp; - mp_size_t an, bn, aan, bbn, alloc; + nn_ptr tmp, aap, bbp; + slong an, bn, aan, bbn, alloc; slong shift; fmpz_t texp, uexp; ARF_MUL_TMP_DECL diff --git a/src/arf/sqrt.c b/src/arf/sqrt.c index 0c4f7779cb..2183f51553 100644 --- a/src/arf/sqrt.c +++ b/src/arf/sqrt.c @@ -16,9 +16,9 @@ int arf_sqrt(arf_ptr z, arf_srcptr x, slong prec, arf_rnd_t rnd) { - mp_size_t xn, zn, val; - mp_srcptr xptr; - mp_ptr tmp, zptr; + slong xn, zn, val; + nn_srcptr xptr; + nn_ptr tmp, zptr; mpfr_t xf, zf; int inexact, odd_exp; ARF_MUL_TMP_DECL @@ -57,7 +57,7 @@ arf_sqrt(arf_ptr z, arf_srcptr x, slong prec, arf_rnd_t rnd) zf->_mpfr_sign = 1; zf->_mpfr_exp = 0; - xf->_mpfr_d = (mp_ptr) xptr; + xf->_mpfr_d = (nn_ptr) xptr; 
xf->_mpfr_prec = xn * FLINT_BITS; xf->_mpfr_sign = 1; xf->_mpfr_exp = odd_exp; diff --git a/src/arf/sub.c b/src/arf/sub.c index 8d07d1255b..4e498291be 100644 --- a/src/arf/sub.c +++ b/src/arf/sub.c @@ -50,8 +50,8 @@ arf_sub_special(arf_t z, const arf_t x, const arf_t y, slong prec, arf_rnd_t rnd int arf_sub(arf_ptr z, arf_srcptr x, arf_srcptr y, slong prec, arf_rnd_t rnd) { - mp_size_t xn, yn; - mp_srcptr xptr, yptr; + slong xn, yn; + nn_srcptr xptr, yptr; slong shift; if (arf_is_special(x) || arf_is_special(y)) @@ -75,9 +75,9 @@ arf_sub(arf_ptr z, arf_srcptr x, arf_srcptr y, slong prec, arf_rnd_t rnd) int arf_sub_si(arf_ptr z, arf_srcptr x, slong y, slong prec, arf_rnd_t rnd) { - mp_size_t xn, yn; - mp_srcptr xptr, yptr; - mp_limb_t ytmp; + slong xn, yn; + nn_srcptr xptr, yptr; + ulong ytmp; int xsgnbit, ysgnbit; fmpz yexp; slong shift; @@ -126,9 +126,9 @@ arf_sub_si(arf_ptr z, arf_srcptr x, slong y, slong prec, arf_rnd_t rnd) int arf_sub_ui(arf_ptr z, arf_srcptr x, ulong y, slong prec, arf_rnd_t rnd) { - mp_size_t xn, yn; - mp_srcptr xptr, yptr; - mp_limb_t ytmp; + slong xn, yn; + nn_srcptr xptr, yptr; + ulong ytmp; int xsgnbit, ysgnbit; fmpz yexp; slong shift; @@ -173,9 +173,9 @@ arf_sub_ui(arf_ptr z, arf_srcptr x, ulong y, slong prec, arf_rnd_t rnd) int arf_sub_fmpz(arf_ptr z, arf_srcptr x, const fmpz_t y, slong prec, arf_rnd_t rnd) { - mp_size_t xn, yn; - mp_srcptr xptr, yptr; - mp_limb_t ytmp; + slong xn, yn; + nn_srcptr xptr, yptr; + ulong ytmp; int xsgnbit, ysgnbit; fmpz yexp; slong shift; diff --git a/src/arf/submul.c b/src/arf/submul.c index 03cd0e46ac..5b917a9960 100644 --- a/src/arf/submul.c +++ b/src/arf/submul.c @@ -15,9 +15,9 @@ int arf_submul(arf_ptr z, arf_srcptr x, arf_srcptr y, slong prec, arf_rnd_t rnd) { - mp_size_t xn, yn, zn, tn, alloc; - mp_srcptr xptr, yptr, zptr; - mp_ptr tptr, tptr2; + slong xn, yn, zn, tn, alloc; + nn_srcptr xptr, yptr, zptr; + nn_ptr tptr, tptr2; fmpz_t texp; slong shift; int tsgnbit, inexact; @@ -80,9 +80,9 @@ arf_submul(arf_ptr z, arf_srcptr x, arf_srcptr y, slong prec, arf_rnd_t rnd) int arf_submul_mpz(arf_ptr z, arf_srcptr x, const mpz_t y, slong prec, arf_rnd_t rnd) { - mp_size_t xn, yn, zn, tn, alloc; - mp_srcptr xptr, yptr, zptr; - mp_ptr tptr, tptr2; + slong xn, yn, zn, tn, alloc; + nn_srcptr xptr, yptr, zptr; + nn_ptr tptr, tptr2; fmpz_t texp, yexp; slong shift; int tsgnbit, ysgnbit, inexact; diff --git a/src/arf/test/main.c b/src/arf/test/main.c index d5eb9762b6..22d05bd82b 100644 --- a/src/arf/test/main.c +++ b/src/arf/test/main.c @@ -9,8 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include #include /* Include functions *********************************************************/ diff --git a/src/arf/test/t-set_round_ui.c b/src/arf/test/t-set_round_ui.c index e55a40c186..b0cb0fd653 100644 --- a/src/arf/test/t-set_round_ui.c +++ b/src/arf/test/t-set_round_ui.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(arf_set_round_ui, state) arf_t x, y; slong prec, fix1; int ret1, ret2; - mp_limb_t t; + ulong t; arf_rnd_t rnd; prec = 2 + n_randint(state, 1000); diff --git a/src/arf/test/t-set_round_uiui.c b/src/arf/test/t-set_round_uiui.c index daa48e01a6..ee21f5ab74 100644 --- a/src/arf/test/t-set_round_uiui.c +++ b/src/arf/test/t-set_round_uiui.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(arf_set_round_uiui, state) arf_t x, y; slong prec, fix1, fix2; int ret1, ret2, sgnbit; - mp_limb_t t[2]; + ulong t[2]; arf_rnd_t rnd; prec = 2 + n_randint(state, 1000); diff --git a/src/arf_types.h b/src/arf_types.h index f7c214da2b..bb15f6d272 100644 --- a/src/arf_types.h +++ b/src/arf_types.h @@ -23,14 +23,14 @@ extern "C" { typedef struct { - mp_limb_t d[ARF_NOPTR_LIMBS]; + ulong d[ARF_NOPTR_LIMBS]; } mantissa_noptr_struct; typedef struct { - mp_size_t alloc; - mp_ptr d; + slong alloc; + nn_ptr d; } mantissa_ptr_struct; @@ -44,7 +44,7 @@ mantissa_struct; typedef struct { fmpz exp; - mp_size_t size; + slong size; mantissa_struct d; } arf_struct; diff --git a/src/arith.h b/src/arith.h index 0e125ec1fd..ce3c9d054d 100644 --- a/src/arith.h +++ b/src/arith.h @@ -63,7 +63,7 @@ void arith_stirling_matrix_2(fmpz_mat_t mat); #define BELL_NUMBER_TAB_SIZE 16 #endif -FLINT_DLL extern const mp_limb_t bell_number_tab[]; +FLINT_DLL extern const ulong bell_number_tab[]; double arith_bell_number_size(ulong n); @@ -75,12 +75,12 @@ void arith_bell_number_vec(fmpz * b, slong n); void arith_bell_number_vec_recursive(fmpz * b, slong n); void arith_bell_number_vec_multi_mod(fmpz * b, slong n); -mp_limb_t arith_bell_number_nmod(ulong n, nmod_t mod); +ulong arith_bell_number_nmod(ulong n, nmod_t mod); -void arith_bell_number_nmod_vec(mp_ptr b, slong n, nmod_t mod); -void arith_bell_number_nmod_vec_recursive(mp_ptr b, slong n, nmod_t mod); -int arith_bell_number_nmod_vec_series(mp_ptr b, slong n, nmod_t mod); -void arith_bell_number_nmod_vec_ogf(mp_ptr res, slong len, nmod_t mod); +void arith_bell_number_nmod_vec(nn_ptr b, slong n, nmod_t mod); +void arith_bell_number_nmod_vec_recursive(nn_ptr b, slong n, nmod_t mod); +int arith_bell_number_nmod_vec_series(nn_ptr b, slong n, nmod_t mod); +void arith_bell_number_nmod_vec_ogf(nn_ptr res, slong len, nmod_t mod); /* Euler numbers *************************************************************/ @@ -91,7 +91,7 @@ void arith_bell_number_nmod_vec_ogf(mp_ptr res, slong len, nmod_t mod); #define SMALL_EULER_LIMIT 15 #endif -static const mp_limb_t euler_number_small[] = { +static const ulong euler_number_small[] = { UWORD(1), UWORD(1), UWORD(5), UWORD(61), UWORD(1385), UWORD(50521), UWORD(2702765), UWORD(199360981), #if FLINT64 @@ -170,10 +170,10 @@ typedef struct { int n; int prefactor; - mp_limb_t sqrt_p; - mp_limb_t sqrt_q; - mp_limb_signed_t cos_p[FLINT_BITS]; - mp_limb_t cos_q[FLINT_BITS]; + ulong sqrt_p; + ulong sqrt_q; + slong cos_p[FLINT_BITS]; + ulong cos_q[FLINT_BITS]; } trig_prod_struct; typedef trig_prod_struct trig_prod_t[1]; @@ -187,13 +187,13 @@ void trig_prod_init(trig_prod_t sum) sum->sqrt_q = 1; } -void arith_hrr_expsum_factored(trig_prod_t prod, mp_limb_t k, mp_limb_t n); +void arith_hrr_expsum_factored(trig_prod_t prod, ulong k, ulong n); /* Number of 
partitions ******************************************************/ FLINT_DLL extern const unsigned int partitions_lookup[128]; -void arith_number_of_partitions_nmod_vec(mp_ptr res, slong len, nmod_t mod); +void arith_number_of_partitions_nmod_vec(nn_ptr res, slong len, nmod_t mod); void arith_number_of_partitions_vec(fmpz * res, slong len); void arith_number_of_partitions(fmpz_t x, ulong n); diff --git a/src/arith/bell_number.c b/src/arith/bell_number.c index 80b96ba115..bfc0269d09 100644 --- a/src/arith/bell_number.c +++ b/src/arith/bell_number.c @@ -25,7 +25,7 @@ static void arith_bell_number_recursive(fmpz_t res, ulong n) { - mp_limb_t t[3 * MAX_N_3LIMBS]; + ulong t[3 * MAX_N_3LIMBS]; slong i, k; t[0] = 1; diff --git a/src/arith/bell_number_multi_mod.c b/src/arith/bell_number_multi_mod.c index 15867d35eb..8704fc14f7 100644 --- a/src/arith/bell_number_multi_mod.c +++ b/src/arith/bell_number_multi_mod.c @@ -13,11 +13,11 @@ #include "fmpz.h" #include "arith.h" -static mp_limb_t -arith_bell_number_nmod2(unsigned int * divtab, mp_ptr facs, mp_ptr pows, ulong n, nmod_t mod) +static ulong +arith_bell_number_nmod2(unsigned int * divtab, nn_ptr facs, nn_ptr pows, ulong n, nmod_t mod) { - mp_limb_t s, t, u, v, s2, s1, s0, t1, t0; - mp_limb_t qq[3]; + ulong s, t, u, v, s2, s1, s0, t1, t0; + ulong qq[3]; slong i; /* Compute inverse factorials */ @@ -94,7 +94,7 @@ arith_bell_number_multi_mod(fmpz_t res, ulong n) fmpz_comb_temp_t temp; fmpz_comb_t comb; nmod_t mod; - mp_ptr primes, residues, t, u; + nn_ptr primes, residues, t, u; slong k, num_primes; flint_bitcnt_t size, prime_bits; unsigned int * divtab; @@ -109,11 +109,11 @@ arith_bell_number_multi_mod(fmpz_t res, ulong n) prime_bits = FLINT_BITS - 1; num_primes = (size + prime_bits - 1) / prime_bits; - primes = flint_malloc(num_primes * sizeof(mp_limb_t)); - residues = flint_malloc(num_primes * sizeof(mp_limb_t)); + primes = flint_malloc(num_primes * sizeof(ulong)); + residues = flint_malloc(num_primes * sizeof(ulong)); divtab = flint_malloc(2 * sizeof(unsigned int) * (n + 1)); - t = flint_malloc((n + 1) * sizeof(mp_limb_t)); - u = flint_malloc((n + 1) * sizeof(mp_limb_t)); + t = flint_malloc((n + 1) * sizeof(ulong)); + u = flint_malloc((n + 1) * sizeof(ulong)); divisor_table(divtab, n + 1); diff --git a/src/arith/bell_number_nmod.c b/src/arith/bell_number_nmod.c index 02c446a555..347346dd5a 100644 --- a/src/arith/bell_number_nmod.c +++ b/src/arith/bell_number_nmod.c @@ -9,10 +9,11 @@ (at your option) any later version. See . 
*/ +#include #include "nmod.h" #include "arith.h" -const mp_limb_t bell_number_tab[] = +const ulong bell_number_tab[] = { UWORD(1), UWORD(1), UWORD(2), UWORD(5), UWORD(15), UWORD(52), UWORD(203), UWORD(877), UWORD(4140), UWORD(21147), UWORD(115975), UWORD(678570), UWORD(4213597), UWORD(27644437), UWORD(190899322), UWORD(1382958545), @@ -27,18 +28,18 @@ const mp_limb_t bell_number_tab[] = static const char bell_mod_2[3] = {1, 1, 0}; static const char bell_mod_3[13] = {1, 1, 2, 2, 0, 1, 2, 1, 0, 0, 1, 0, 1}; -mp_limb_t +ulong arith_bell_number_nmod_fallback(ulong n, nmod_t mod) { - mp_ptr bvec; - mp_limb_t s; + nn_ptr bvec; + ulong s; if (n > WORD_MAX / 4) { flint_throw(FLINT_ERROR, "arith_bell_number_nmod: too large n\n"); } - bvec = flint_malloc(sizeof(mp_limb_t) * (n + 1)); + bvec = flint_malloc(sizeof(ulong) * (n + 1)); arith_bell_number_nmod_vec(bvec, n + 1, mod); s = bvec[n]; flint_free(bvec); @@ -46,13 +47,13 @@ arith_bell_number_nmod_fallback(ulong n, nmod_t mod) } -mp_limb_t nmod_inv_check(mp_limb_t x, nmod_t mod); +ulong nmod_inv_check(ulong x, nmod_t mod); -mp_limb_t +ulong arith_bell_number_nmod(ulong n, nmod_t mod) { - mp_limb_t s, t, u, inv_fac; - mp_ptr facs, pows; + ulong s, t, u, inv_fac; + nn_ptr facs, pows; slong i, j; int success; @@ -67,7 +68,7 @@ arith_bell_number_nmod(ulong n, nmod_t mod) /* Compute inverse factorials */ /* We actually compute (n! / i!) and divide out (n!)^2 at the end */ - facs = flint_malloc(sizeof(mp_limb_t) * (n + 1)); + facs = flint_malloc(sizeof(ulong) * (n + 1)); facs[n] = 1; for (i = n - 1; i >= 0; i--) facs[i] = nmod_mul(facs[i + 1], i + 1, mod); @@ -82,10 +83,10 @@ arith_bell_number_nmod(ulong n, nmod_t mod) } else { - mp_limb_t v, s2, s1, s0, t1, t0, qq[3]; + ulong v, s2, s1, s0, t1, t0, qq[3]; /* Compute powers */ - pows = flint_calloc(n + 1, sizeof(mp_limb_t)); + pows = flint_calloc(n + 1, sizeof(ulong)); pows[0] = nmod_pow_ui(0, n, mod); pows[1] = nmod_pow_ui(1, n, mod); diff --git a/src/arith/bell_number_nmod_vec.c b/src/arith/bell_number_nmod_vec.c index 618bf09861..96345619ee 100644 --- a/src/arith/bell_number_nmod_vec.c +++ b/src/arith/bell_number_nmod_vec.c @@ -13,7 +13,7 @@ #include "arith.h" void -arith_bell_number_nmod_vec(mp_ptr b, slong len, nmod_t mod) +arith_bell_number_nmod_vec(nn_ptr b, slong len, nmod_t mod) { if (len < 300) { diff --git a/src/arith/bell_number_nmod_vec_ogf.c b/src/arith/bell_number_nmod_vec_ogf.c index f0acc65e35..73c3697729 100644 --- a/src/arith/bell_number_nmod_vec_ogf.c +++ b/src/arith/bell_number_nmod_vec_ogf.c @@ -15,7 +15,7 @@ #include "arith.h" static void -bsplit_nmod(mp_ptr R, mp_ptr Q, slong a, slong b, nmod_t mod) +bsplit_nmod(nn_ptr R, nn_ptr Q, slong a, slong b, nmod_t mod) { if (b - a == 1) { @@ -27,7 +27,7 @@ bsplit_nmod(mp_ptr R, mp_ptr Q, slong a, slong b, nmod_t mod) else { slong m, len1, len2; - mp_ptr R1, R2, Q1, Q2; + nn_ptr R1, R2, Q1, Q2; m = a + (b - a) / 2; len1 = (m - a) + 1; @@ -50,9 +50,9 @@ bsplit_nmod(mp_ptr R, mp_ptr Q, slong a, slong b, nmod_t mod) } void -arith_bell_number_nmod_vec_ogf(mp_ptr res, slong len, nmod_t mod) +arith_bell_number_nmod_vec_ogf(nn_ptr res, slong len, nmod_t mod) { - mp_ptr R, Q; + nn_ptr R, Q; if (len <= 2 || mod.n == 1) { diff --git a/src/arith/bell_number_nmod_vec_recursive.c b/src/arith/bell_number_nmod_vec_recursive.c index 5e828c5ad9..5f62a41850 100644 --- a/src/arith/bell_number_nmod_vec_recursive.c +++ b/src/arith/bell_number_nmod_vec_recursive.c @@ -14,7 +14,7 @@ #include "arith.h" void -arith_bell_number_nmod_vec_recursive(mp_ptr b, slong 
n, nmod_t mod) +arith_bell_number_nmod_vec_recursive(nn_ptr b, slong n, nmod_t mod) { if (mod.n == 1 || n == 0) { @@ -29,12 +29,12 @@ arith_bell_number_nmod_vec_recursive(mp_ptr b, slong n, nmod_t mod) if (n >= 3) { slong i, k; - mp_ptr t; + nn_ptr t; TMP_INIT; TMP_START; n -= 1; - t = TMP_ALLOC(n * sizeof(mp_limb_t)); + t = TMP_ALLOC(n * sizeof(ulong)); t[0] = 1; for (i = 1; i < n; i++) diff --git a/src/arith/bell_number_nmod_vec_series.c b/src/arith/bell_number_nmod_vec_series.c index 8252d1437f..436744440f 100644 --- a/src/arith/bell_number_nmod_vec_series.c +++ b/src/arith/bell_number_nmod_vec_series.c @@ -14,9 +14,9 @@ #include "nmod_poly.h" #include "arith.h" -mp_limb_t nmod_inv_check(mp_limb_t x, nmod_t mod) +ulong nmod_inv_check(ulong x, nmod_t mod) { - mp_limb_t r, g; + ulong r, g; g = n_gcdinv(&r, x, mod.n); if (g != 1) @@ -26,10 +26,10 @@ mp_limb_t nmod_inv_check(mp_limb_t x, nmod_t mod) } int -arith_bell_number_nmod_vec_series(mp_ptr res, slong n, nmod_t mod) +arith_bell_number_nmod_vec_series(nn_ptr res, slong n, nmod_t mod) { - mp_limb_t c; - mp_ptr tmp; + ulong c; + nn_ptr tmp; slong k; int success; @@ -39,7 +39,7 @@ arith_bell_number_nmod_vec_series(mp_ptr res, slong n, nmod_t mod) if (mod.n == 1) return 0; - tmp = flint_malloc(sizeof(mp_limb_t) * n); + tmp = flint_malloc(sizeof(ulong) * n); /* Compute inverse factorials */ c = 1; diff --git a/src/arith/bell_number_vec_multi_mod.c b/src/arith/bell_number_vec_multi_mod.c index 91c0a8d9d9..e0917aa3e0 100644 --- a/src/arith/bell_number_vec_multi_mod.c +++ b/src/arith/bell_number_vec_multi_mod.c @@ -22,8 +22,8 @@ arith_bell_number_vec_multi_mod(fmpz * res, slong n) { fmpz_comb_t comb[CRT_MAX_RESOLUTION]; fmpz_comb_temp_t temp[CRT_MAX_RESOLUTION]; - mp_ptr primes, residues; - mp_ptr * polys; + nn_ptr primes, residues; + nn_ptr * polys; nmod_t mod; slong i, j, k, num_primes, num_primes_k, resolution; flint_bitcnt_t size, prime_bits; @@ -37,9 +37,9 @@ arith_bell_number_vec_multi_mod(fmpz * res, slong n) prime_bits = FLINT_BITS - 1; num_primes = (size + prime_bits - 1) / prime_bits; - primes = flint_malloc(num_primes * sizeof(mp_limb_t)); - residues = flint_malloc(num_primes * sizeof(mp_limb_t)); - polys = flint_malloc(num_primes * sizeof(mp_ptr)); + primes = flint_malloc(num_primes * sizeof(ulong)); + residues = flint_malloc(num_primes * sizeof(ulong)); + polys = flint_malloc(num_primes * sizeof(nn_ptr)); /* Compute Bell numbers mod p */ primes[0] = n_nextprime(UWORD(1)<. 
*/ +#include #include "ulong_extras.h" #include "arith.h" @@ -19,10 +20,10 @@ static const int gcd24_tab[24] = { 12, 1, 2, 3, 8, 1, 6, 1, 4, 3, 2, 1 }; -static mp_limb_t -n_sqrtmod_2exp(mp_limb_t a, int k) +static ulong +n_sqrtmod_2exp(ulong a, int k) { - mp_limb_t x; + ulong x; int i; if (a == 0 || k == 0) @@ -48,10 +49,10 @@ n_sqrtmod_2exp(mp_limb_t a, int k) return x; } -static mp_limb_t -n_sqrtmod_ppow(mp_limb_t a, mp_limb_t p, int k, mp_limb_t pk, mp_limb_t pkinv) +static ulong +n_sqrtmod_ppow(ulong a, ulong p, int k, ulong pk, ulong pkinv) { - mp_limb_t r, t; + ulong r, t; int i; /* n_sqrtmod assumes that a is reduced */ @@ -73,10 +74,10 @@ n_sqrtmod_ppow(mp_limb_t a, mp_limb_t p, int k, mp_limb_t pk, mp_limb_t pkinv) } void -trigprod_mul_prime_power(trig_prod_t prod, mp_limb_t k, mp_limb_t n, - mp_limb_t p, int exp) +trigprod_mul_prime_power(trig_prod_t prod, ulong k, ulong n, + ulong p, int exp) { - mp_limb_t m, mod, inv; + ulong m, mod, inv; if (k <= 3) { @@ -127,7 +128,7 @@ trigprod_mul_prime_power(trig_prod_t prod, mp_limb_t k, mp_limb_t n, if (exp % 2 == 1) prod->prefactor *= -1; prod->sqrt_p *= k; - prod->cos_p[prod->n] = (mp_limb_signed_t)(k - m); + prod->cos_p[prod->n] = (slong)(k - m); prod->cos_q[prod->n] = 2 * k; prod->n++; return; @@ -148,7 +149,7 @@ trigprod_mul_prime_power(trig_prod_t prod, mp_limb_t k, mp_limb_t n, prod->prefactor *= -1; prod->sqrt_p *= k; prod->sqrt_q *= 3; - prod->cos_p[prod->n] = (mp_limb_signed_t)(3 * k - 8 * m); + prod->cos_p[prod->n] = (slong)(3 * k - 8 * m); prod->cos_q[prod->n] = 6 * k; prod->n++; return; @@ -191,11 +192,11 @@ Solve (k2^2 * d2 * e) * n1 = (d2 * e * n + (k2^2 - 1) / d1) mod k2 TODO: test this on 32 bit */ -static mp_limb_t -solve_n1(mp_limb_t n, mp_limb_t k1, mp_limb_t k2, - mp_limb_t d1, mp_limb_t d2, mp_limb_t e) +static ulong +solve_n1(ulong n, ulong k1, ulong k2, + ulong d1, ulong d2, ulong e) { - mp_limb_t inv, n1, u, t[2]; + ulong inv, n1, u, t[2]; inv = n_preinvert_limb(k1); @@ -215,7 +216,7 @@ solve_n1(mp_limb_t n, mp_limb_t k1, mp_limb_t k2, void -arith_hrr_expsum_factored(trig_prod_t prod, mp_limb_t k, mp_limb_t n) +arith_hrr_expsum_factored(trig_prod_t prod, ulong k, ulong n) { n_factor_t fac; int i; @@ -232,7 +233,7 @@ arith_hrr_expsum_factored(trig_prod_t prod, mp_limb_t k, mp_limb_t n) /* Repeatedly factor A_k(n) into A_k1(n1)*A_k2(n2) with k1, k2 coprime */ for (i = 0; i + 1 < fac.num && prod->prefactor != 0; i++) { - mp_limb_t p, k1, k2, inv, n1, n2; + ulong p, k1, k2, inv, n1, n2; p = fac.p[i]; @@ -270,7 +271,7 @@ arith_hrr_expsum_factored(trig_prod_t prod, mp_limb_t k, mp_limb_t n) /* k = k1 * k2 with k1 odd or divisible by 8 */ else { - mp_limb_t d1, d2, e; + ulong d1, d2, e; k1 = n_pow(fac.p[i], fac.exp[i]); k2 = k / k1; diff --git a/src/arith/euler_number_vec.c b/src/arith/euler_number_vec.c index 7a85d33ecc..8ba1f32976 100644 --- a/src/arith/euler_number_vec.c +++ b/src/arith/euler_number_vec.c @@ -17,9 +17,9 @@ /* Computes length-m vector containing |E_{2k}| */ static void -__euler_number_vec_mod_p(mp_ptr res, mp_ptr tmp, slong m, nmod_t mod) +__euler_number_vec_mod_p(nn_ptr res, nn_ptr tmp, slong m, nmod_t mod) { - mp_limb_t fac, c; + ulong fac, c; slong k; /* Divide by factorials */ @@ -49,10 +49,10 @@ void __euler_number_vec_multi_mod(fmpz * res, slong n) { fmpz_comb_t comb[CRT_MAX_RESOLUTION]; fmpz_comb_temp_t temp[CRT_MAX_RESOLUTION]; - mp_limb_t * primes; - mp_limb_t * residues; - mp_ptr * polys; - mp_ptr temppoly; + ulong * primes; + ulong * residues; + nn_ptr * polys; + nn_ptr temppoly; nmod_t 
mod; slong i, j, k, m, num_primes, num_primes_k, resolution; flint_bitcnt_t size, prime_bits; @@ -69,9 +69,9 @@ void __euler_number_vec_multi_mod(fmpz * res, slong n) prime_bits = FLINT_BITS - 1; num_primes = (size + prime_bits - 1) / prime_bits; - primes = flint_malloc(num_primes * sizeof(mp_limb_t)); - residues = flint_malloc(num_primes * sizeof(mp_limb_t)); - polys = flint_malloc(num_primes * sizeof(mp_ptr)); + primes = flint_malloc(num_primes * sizeof(ulong)); + residues = flint_malloc(num_primes * sizeof(ulong)); + polys = flint_malloc(num_primes * sizeof(nn_ptr)); /* Compute Euler numbers mod p */ primes[0] = n_nextprime(UWORD(1)<= 2 */ -static mp_limb_t +static ulong stirling_2_nmod(const unsigned int * divtab, ulong n, ulong k, nmod_t mod) { - mp_ptr t, u; + nn_ptr t, u; slong i, bin_len, pow_len; - mp_limb_t s1, s2, bden, bd; + ulong s1, s2, bden, bd; int bound_limbs; TMP_INIT; TMP_START; @@ -544,8 +544,8 @@ stirling_2_nmod(const unsigned int * divtab, ulong n, ulong k, nmod_t mod) pow_len = k + 1; bin_len = FLINT_MIN(pow_len, k / 2 + 1); - t = TMP_ALLOC(bin_len * sizeof(mp_limb_t)); - u = TMP_ALLOC(pow_len * sizeof(mp_limb_t)); + t = TMP_ALLOC(bin_len * sizeof(ulong)); + u = TMP_ALLOC(pow_len * sizeof(ulong)); /* compute binomial coefficients + denominator */ t[0] = 1; @@ -612,7 +612,7 @@ _fmpz_crt_combine(fmpz_t r1r2, fmpz_t m1m2, const fmpz_t r1, const fmpz_t m1, co } static void -tree_crt(fmpz_t r, fmpz_t m, mp_srcptr residues, mp_srcptr primes, slong len) +tree_crt(fmpz_t r, fmpz_t m, nn_srcptr residues, nn_srcptr primes, slong len) { if (len == 0) { @@ -649,7 +649,7 @@ stirling_2_multi_mod(fmpz_t res, ulong n, ulong k) { fmpz_t tmp; nmod_t mod; - mp_ptr primes, residues; + nn_ptr primes, residues; slong i, num_primes; flint_bitcnt_t size, prime_bits; unsigned int * divtab; @@ -659,8 +659,8 @@ stirling_2_multi_mod(fmpz_t res, ulong n, ulong k) num_primes = (size + prime_bits - 1) / prime_bits; fmpz_init(tmp); - primes = flint_malloc(num_primes * sizeof(mp_limb_t)); - residues = flint_malloc(num_primes * sizeof(mp_limb_t)); + primes = flint_malloc(num_primes * sizeof(ulong)); + residues = flint_malloc(num_primes * sizeof(ulong)); divtab = flint_malloc(2 * sizeof(unsigned int) * (n + 1)); divisor_table(divtab, n + 1); @@ -708,13 +708,13 @@ arith_stirling_number_2(fmpz_t res, ulong n, ulong k) } else if (n <= MAX_N_1LIMB) { - mp_limb_t c[MAX_N_2LIMB + 1]; + ulong c[MAX_N_2LIMB + 1]; triangular_1(c, n, k + 1); fmpz_set_ui(res, c[k]); } else if (n <= MAX_N_2LIMB) { - mp_limb_t c[2 * MAX_N_2LIMB + 2]; + ulong c[2 * MAX_N_2LIMB + 2]; triangular_2(c, n, k + 1); fmpz_set_uiui(res, c[2 * k + 1], c[2 * k]); } diff --git a/src/arith/test/main.c b/src/arith/test/main.c index c0a2c9329e..1795283af3 100644 --- a/src/arith/test/main.c +++ b/src/arith/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-bell_number.c" diff --git a/src/arith/test/t-bell_number.c b/src/arith/test/t-bell_number.c index e03e78332d..531544a7f1 100644 --- a/src/arith/test/t-bell_number.c +++ b/src/arith/test/t-bell_number.c @@ -32,7 +32,7 @@ TEST_FUNCTION_START(arith_bell_number, state) slong len, prev_len; fmpz * vb1, * vb2; fmpz_t b; - mp_ptr vnb, vnr; + nn_ptr vnb, vnr; slong n, iter; ulong nb; nmod_t mod; diff --git a/src/arith/test/t-bell_number_nmod.c b/src/arith/test/t-bell_number_nmod.c index 6937cd1193..ea193caed3 100644 --- a/src/arith/test/t-bell_number_nmod.c +++ b/src/arith/test/t-bell_number_nmod.c @@ -20,10 +20,10 @@ TEST_FUNCTION_START(arith_bell_number_nmod, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { - mp_ptr b; + nn_ptr b; slong n; nmod_t mod; - mp_limb_t p, u; + ulong p, u; n = n_randint(state, 800); if (n_randint(state, 2)) diff --git a/src/arith/test/t-bell_number_nmod_vec.c b/src/arith/test/t-bell_number_nmod_vec.c index 2d085ebfda..af690c3657 100644 --- a/src/arith/test/t-bell_number_nmod_vec.c +++ b/src/arith/test/t-bell_number_nmod_vec.c @@ -16,7 +16,7 @@ TEST_FUNCTION_START(arith_bell_number_nmod_vec, state) { - mp_ptr b1, b2, b3; + nn_ptr b1, b2, b3; slong n; const slong maxn = 3000; @@ -29,7 +29,7 @@ TEST_FUNCTION_START(arith_bell_number_nmod_vec, state) for (n = 0; n < maxn; n += (n < 50) ? + 1 : n/4) { nmod_t mod; - mp_limb_t p; + ulong p; p = n_randtest_not_zero(state); nmod_init(&mod, p); diff --git a/src/arith/test/t-landau_function_vec.c b/src/arith/test/t-landau_function_vec.c index ff0225d56c..aa10b98d30 100644 --- a/src/arith/test/t-landau_function_vec.c +++ b/src/arith/test/t-landau_function_vec.c @@ -16,7 +16,7 @@ /* Defined in t-landau_function_vec.c and t-sum_of_squares.c */ #define known known_landau_function_vec -static const mp_limb_t known[] = { +static const ulong known[] = { 1, 1, 2, 3, 4, 6, 6, 12, 15, 20, 30, 30, 60, 60, 84, 105, 140, 210, 210, 420, 420, 420, 420, 840, 840, 1260, 1260, 1540, 2310, 2520, 4620, 4620, 5460, 5460, 9240, 9240, 13860, 13860, 16380, 16380, diff --git a/src/arith/test/t-number_of_partitions_vec.c b/src/arith/test/t-number_of_partitions_vec.c index 4fcbc233d9..dcd1280802 100644 --- a/src/arith/test/t-number_of_partitions_vec.c +++ b/src/arith/test/t-number_of_partitions_vec.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(arith_number_of_partitions_vec, state) { fmpz * p; - mp_ptr pmod; + nn_ptr pmod; slong k, n; const slong maxn = 1000; diff --git a/src/arith/test/t-swinnerton_dyer_polynomial.c b/src/arith/test/t-swinnerton_dyer_polynomial.c index 3ea2b6837e..d0c384fa98 100644 --- a/src/arith/test/t-swinnerton_dyer_polynomial.c +++ b/src/arith/test/t-swinnerton_dyer_polynomial.c @@ -13,7 +13,7 @@ #include "fmpz_poly.h" #include "arith.h" -static const mp_limb_t known_values[] = +static const ulong known_values[] = { UWORD(2147483629), UWORD(1073742093), @@ -31,7 +31,7 @@ static const mp_limb_t known_values[] = TEST_FUNCTION_START(arith_swinnerton_dyer_polynomial, state) { fmpz_poly_t S; - mp_limb_t r; + ulong r; slong n; diff --git a/src/bernoulli/bound_2exp_si.c b/src/bernoulli/bound_2exp_si.c index 5784fa4e91..af0c25af0f 100644 --- a/src/bernoulli/bound_2exp_si.c +++ b/src/bernoulli/bound_2exp_si.c @@ -55,7 +55,7 @@ bernoulli_bound_2exp_si(ulong n) if (n == 1) return -WORD(1); else - return LONG_MIN; + return WORD_MIN; } else if (n < 512) { @@ -64,7 +64,7 @@ bernoulli_bound_2exp_si(ulong n) else { /* |B_n| < 4 * 
n! / (2*pi)^n < 4 * (n+1)^(n+1) e^(-n) / (2*pi)^n */ - mp_limb_t l, u, hi, lo; + ulong l, u, hi, lo; int b, shift; b = FLINT_BIT_COUNT(n + 1); diff --git a/src/bernoulli/fmpq_ui_multi_mod.c b/src/bernoulli/fmpq_ui_multi_mod.c index 10034756b1..43288cc6ed 100644 --- a/src/bernoulli/fmpq_ui_multi_mod.c +++ b/src/bernoulli/fmpq_ui_multi_mod.c @@ -26,8 +26,8 @@ crt_res_t; typedef struct { - mp_srcptr residues; - mp_srcptr primes; + nn_srcptr residues; + nn_srcptr primes; } crt_args_t; @@ -95,7 +95,7 @@ crt_basecase(crt_res_t * res, slong a, slong b, crt_args_t * args) /* todo: optimize basecase and move to flint */ void -_arb_tree_crt(fmpz_t r, fmpz_t m, mp_srcptr residues, mp_srcptr primes, slong len) +_arb_tree_crt(fmpz_t r, fmpz_t m, nn_srcptr residues, nn_srcptr primes, slong len) { crt_res_t res; crt_args_t args; @@ -123,8 +123,8 @@ _arb_tree_crt(fmpz_t r, fmpz_t m, mp_srcptr residues, mp_srcptr primes, slong le typedef struct { ulong n; - mp_ptr primes; - mp_ptr residues; + nn_ptr primes; + nn_ptr residues; } mod_p_param_t; @@ -142,7 +142,7 @@ _bernoulli_fmpq_ui_multi_mod(fmpz_t num, fmpz_t den, ulong n, double alpha) n_primes_t prime_iter; slong i, bits, mod_bits, zeta_bits, num_primes; ulong p; - mp_ptr primes, residues; + nn_ptr primes, residues; mag_t primes_product; fmpz_t M; #if TIMING @@ -197,8 +197,8 @@ _bernoulli_fmpq_ui_multi_mod(fmpz_t num, fmpz_t den, ulong n, double alpha) flint_printf("\nn = %lu, bits = %lu, num_primes = %ld\n", n, bits, num_primes); #endif - primes = flint_malloc(sizeof(mp_limb_t) * num_primes); - residues = flint_malloc(sizeof(mp_limb_t) * num_primes); + primes = flint_malloc(sizeof(ulong) * num_primes); + residues = flint_malloc(sizeof(ulong) * num_primes); p = 5; n_primes_jump_after(prime_iter, 5); diff --git a/src/bernoulli/mod_p_harvey.c b/src/bernoulli/mod_p_harvey.c index 26b9f984b3..3b115dff1c 100644 --- a/src/bernoulli/mod_p_harvey.c +++ b/src/bernoulli/mod_p_harvey.c @@ -56,15 +56,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================== */ +#include +#include #include "ulong_extras.h" #include "bernoulli.h" -#ifdef __GNUC__ -# define memset __builtin_memset -#else -# include -#endif - #define DEBUG 0 #define TIMING 1 @@ -147,7 +143,7 @@ bernoulli_sum_powg(ulong p, ulong pinv, ulong k, ulong g) The constructor takes p and max_words as input. Must have 1 <= max_words <= MAX_INV. It computes an approximation to 1/p. - The function expand(mp_ptr res, long s, long n) computes n limbs of s/p. + The function expand(nn_ptr res, long s, long n) computes n limbs of s/p. Must have 0 < s < p and 1 <= n <= max_words. The output is written to res. The first word of output is junk. The next n words are the digits of s/p, from least to most significant. The buffer must be at least n+2 words long @@ -159,8 +155,8 @@ bernoulli_sum_powg(ulong p, ulong pinv, ulong k, ulong g) typedef struct { /* Approximation to 1/p. We store (max_words + 1) limbs. 
*/ - mp_limb_t pinv[MAX_INV + 2]; - mp_limb_t p; + ulong pinv[MAX_INV + 2]; + ulong p; int max_words; } expander_t; @@ -168,7 +164,7 @@ expander_t; static void expander_init(expander_t * this, ulong p, int max_words) { - mp_limb_t one; + ulong one; FLINT_ASSERT(max_words >= 1); FLINT_ASSERT(max_words <= MAX_INV); @@ -180,7 +176,7 @@ expander_init(expander_t * this, ulong p, int max_words) } static void -expander_expand(mp_ptr res, expander_t * this, ulong s, ulong n) +expander_expand(nn_ptr res, expander_t * this, ulong s, ulong n) { slong i; @@ -196,15 +192,15 @@ expander_expand(mp_ptr res, expander_t * this, ulong s, ulong n) } else { - mpn_mul_1(res, this->pinv + this->max_words - n, n + 1, (mp_limb_t) s); + mpn_mul_1(res, this->pinv + this->max_words - n, n + 1, (ulong) s); /* If the first output limb is really close to 0xFFFF..., then there's a possibility of overflow, so fall back on doing division directly. This should happen extremely rarely --- essentially never on a 64-bit system, and very occasionally on a 32-bit system. */ - if (res[0] > -((mp_limb_t) s)) + if (res[0] > -((ulong) s)) { - mp_limb_t ss = s; + ulong ss = s; mpn_divrem_1(res, n + 1, &ss, 1, this->p); } } @@ -312,9 +308,9 @@ ulong bernsum_pow2(ulong p, ulong pinv, ulong k, ulong g, ulong n) /* memory locality. */ for (nn = n; nn > 0; nn -= MAX_INV * FLINT_BITS) { - mp_limb_t s_over_p[MAX_INV + 2]; + ulong s_over_p[MAX_INV + 2]; slong bits, words; - mp_ptr next; + nn_ptr next; if (nn >= MAX_INV * FLINT_BITS) { @@ -337,11 +333,11 @@ ulong bernsum_pow2(ulong p, ulong pinv, ulong k, ulong g, ulong n) /* loop over whole words */ for (; bits >= FLINT_BITS; bits -= FLINT_BITS, next--) { - mp_limb_t y; + ulong y; #if NUM_TABLES != 8 && NUM_TABLES != 4 - mp_ptr target; + nn_ptr target; #else - mp_ptr target0, target1, target2, target3, target4, target5, target6, target7; + nn_ptr target0, target1, target2, target3, target4, target5, target6, target7; #endif y = *next; @@ -588,7 +584,7 @@ ulong bernsum_pow2_redc(ulong p, ulong pinv, ulong k, ulong g, ulong n) { ulong s, x, y; slong nn, bits, words; - mp_ptr next; + nn_ptr next; s = g_to_i; /* always in [0, p) */ if (s >= p) diff --git a/src/bernoulli/test/main.c b/src/bernoulli/test/main.c index 10dfe47000..3c781669ee 100644 --- a/src/bernoulli/test/main.c +++ b/src/bernoulli/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-bound_2exp_si.c" diff --git a/src/bernoulli/test/t-fmpq_vec.c b/src/bernoulli/test/t-fmpq_vec.c index e52813e2b5..5a62dc0341 100644 --- a/src/bernoulli/test/t-fmpq_vec.c +++ b/src/bernoulli/test/t-fmpq_vec.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(bernoulli_fmpq_vec, state) { slong iter; slong n, bound; - mp_limb_t p, pinv, m1, m2; + ulong p, pinv, m1, m2; nmod_poly_t A; bound = 1000 * FLINT_MIN(1.0, 0.1 * flint_test_multiplier()); diff --git a/src/bernoulli/test/t-mod_p_harvey.c b/src/bernoulli/test/t-mod_p_harvey.c index a474e4cce8..38ae1d2fcb 100644 --- a/src/bernoulli/test/t-mod_p_harvey.c +++ b/src/bernoulli/test/t-mod_p_harvey.c @@ -51,7 +51,7 @@ TEST_FUNCTION_START(bernoulli_mod_p_harvey, state) for (iter = 0; iter < 100000 * 0.1 * flint_test_multiplier(); iter++) { ulong a, b, n, q1, r1, r2; - mp_limb_t q2[2]; + ulong q2[2]; double bnpre; a = n_randtest_bits(state, FLINT_D_BITS); diff --git a/src/bernoulli/test/t-rev.c b/src/bernoulli/test/t-rev.c index 1ba28dea35..850d173e68 100644 --- a/src/bernoulli/test/t-rev.c +++ b/src/bernoulli/test/t-rev.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(bernoulli_rev, state) { slong nmax, n, bound, count; - mp_limb_t p, pinv, m1, m2; + ulong p, pinv, m1, m2; nmod_poly_t A; bound = 100000 * FLINT_MIN(1.0, 0.1 * flint_test_multiplier()); diff --git a/src/bool_mat/randtest.c b/src/bool_mat/randtest.c index f1834e649f..edec4d390c 100644 --- a/src/bool_mat/randtest.c +++ b/src/bool_mat/randtest.c @@ -16,7 +16,7 @@ void bool_mat_randtest(bool_mat_t mat, flint_rand_t state) { slong i, j; - mp_limb_t density; + ulong density; density = n_randint(state, 101); for (i = 0; i < bool_mat_nrows(mat); i++) diff --git a/src/bool_mat/test/main.c b/src/bool_mat/test/main.c index b3079c5c20..14734ad97f 100644 --- a/src/bool_mat/test/main.c +++ b/src/bool_mat/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-all_pairs_longest_walk.c" diff --git a/src/ca/test/main.c b/src/ca/test/main.c index 84a5a0aff0..13dfc46875 100644 --- a/src/ca/test/main.c +++ b/src/ca/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-acos.c" diff --git a/src/ca_ext/test/main.c b/src/ca_ext/test/main.c index 57d8a6eb39..0b9fadfb61 100644 --- a/src/ca_ext/test/main.c +++ b/src/ca_ext/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-cache_insert.c" diff --git a/src/ca_field/cmp.c b/src/ca_field/cmp.c index c0123372d6..0d63588631 100644 --- a/src/ca_field/cmp.c +++ b/src/ca_field/cmp.c @@ -70,7 +70,7 @@ static int _fmpz_mpoly_cmp2(const fmpz_mpoly_t x, const fmpz_mpoly_t y, fmpz_mpoly_ctx_t ctx) { slong lenx, leny, nvars; - mp_limb_t expx, expy; + ulong expx, expy; slong i, j; int c; diff --git a/src/ca_field/test/main.c b/src/ca_field/test/main.c index 416a61ab87..2d0afd2408 100644 --- a/src/ca_field/test/main.c +++ b/src/ca_field/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-cache_insert.c" diff --git a/src/ca_mat/test/main.c b/src/ca_mat/test/main.c index 12961ff43f..1a15e67123 100644 --- a/src/ca_mat/test/main.c +++ b/src/ca_mat/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-adjugate.c" diff --git a/src/ca_poly/test/main.c b/src/ca_poly/test/main.c index bd9844243c..143e74eeb1 100644 --- a/src/ca_poly/test/main.c +++ b/src/ca_poly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-compose.c" diff --git a/src/crt_helpers.h b/src/crt_helpers.h index 1ec4e89557..de729d0dd9 100644 --- a/src/crt_helpers.h +++ b/src/crt_helpers.h @@ -89,232 +89,232 @@ FLINT_FORCE_INLINE unsigned char _subborrow_ulong(unsigned char cf, ulong x, ulo #define add_sssssaaaaaaaaaa(s4,s3,s2,s1,s0, a4,a3,a2,a1,a0, b4,b3,b2,b1,b0) \ __asm__ ("addq %14,%q4\n\tadcq %12,%q3\n\tadcq %10,%q2\n\tadcq %8,%q1\n\tadcq %6,%q0" \ : "=r" (s4), "=&r" (s3), "=&r" (s2), "=&r" (s1), "=&r" (s0) \ - : "0" ((mp_limb_t)(a4)), "rme" ((mp_limb_t)(b4)), \ - "1" ((mp_limb_t)(a3)), "rme" ((mp_limb_t)(b3)), \ - "2" ((mp_limb_t)(a2)), "rme" ((mp_limb_t)(b2)), \ - "3" ((mp_limb_t)(a1)), "rme" ((mp_limb_t)(b1)), \ - "4" ((mp_limb_t)(a0)), "rme" ((mp_limb_t)(b0))) + : "0" ((ulong)(a4)), "rme" ((ulong)(b4)), \ + "1" ((ulong)(a3)), "rme" ((ulong)(b3)), \ + "2" ((ulong)(a2)), "rme" ((ulong)(b2)), \ + "3" ((ulong)(a1)), "rme" ((ulong)(b1)), \ + "4" ((ulong)(a0)), "rme" ((ulong)(b0))) #define add_ssssssaaaaaaaaaaaa(s5,s4,s3,s2,s1,s0, a5,a4,a3,a2,a1,a0, b5,b4,b3,b2,b1,b0) \ __asm__ ("addq %17,%q5\nadcq %15,%q4\n\tadcq %13,%q3\n\tadcq %11,%q2\n\tadcq %9,%q1\n\tadcq %7,%q0" \ : "=r" (s5), "=&r" (s4), "=&r" (s3), "=&r" (s2), "=&r" (s1), "=&r" (s0) \ - : "0" ((mp_limb_t)(a5)), "rme" ((mp_limb_t)(b5)), \ - "1" ((mp_limb_t)(a4)), "rme" ((mp_limb_t)(b4)), \ - "2" ((mp_limb_t)(a3)), "rme" ((mp_limb_t)(b3)), \ - "3" ((mp_limb_t)(a2)), "rme" ((mp_limb_t)(b2)), \ - "4" ((mp_limb_t)(a1)), "rme" ((mp_limb_t)(b1)), \ - "5" ((mp_limb_t)(a0)), "rme" ((mp_limb_t)(b0))) + : "0" ((ulong)(a5)), "rme" ((ulong)(b5)), \ + "1" ((ulong)(a4)), "rme" ((ulong)(b4)), \ + "2" ((ulong)(a3)), "rme" ((ulong)(b3)), \ + "3" ((ulong)(a2)), "rme" ((ulong)(b2)), \ + "4" ((ulong)(a1)), "rme" ((ulong)(b1)), \ + "5" ((ulong)(a0)), "rme" ((ulong)(b0))) #define add_sssssssaaaaaaaaaaaaaa(s6,s5,s4,s3,s2,s1,s0, a6,a5,a4,a3,a2,a1,a0, b6,b5,b4,b3,b2,b1,b0) \ __asm__ ("addq %20,%q6\nadcq %18,%q5\nadcq %16,%q4\n\tadcq %14,%q3\n\tadcq %12,%q2\n\tadcq %10,%q1\n\tadcq %8,%q0" \ : "=r" (s6), "=&r" (s5), "=&r" (s4), "=&r" (s3), "=&r" (s2), "=&r" (s1), "=&r" (s0) \ - : "0" ((mp_limb_t)(a6)), "rme" ((mp_limb_t)(b6)), \ - "1" ((mp_limb_t)(a5)), "rme" ((mp_limb_t)(b5)), \ - "2" ((mp_limb_t)(a4)), "rme" ((mp_limb_t)(b4)), \ - "3" ((mp_limb_t)(a3)), "rme" ((mp_limb_t)(b3)), \ - "4" ((mp_limb_t)(a2)), "rme" ((mp_limb_t)(b2)), \ - "5" ((mp_limb_t)(a1)), "rme" ((mp_limb_t)(b1)), \ - "6" ((mp_limb_t)(a0)), "rme" ((mp_limb_t)(b0))) + : "0" ((ulong)(a6)), "rme" ((ulong)(b6)), \ + "1" ((ulong)(a5)), "rme" ((ulong)(b5)), \ + "2" ((ulong)(a4)), "rme" ((ulong)(b4)), \ + "3" ((ulong)(a3)), "rme" ((ulong)(b3)), \ + "4" ((ulong)(a2)), "rme" ((ulong)(b2)), \ + "5" ((ulong)(a1)), "rme" ((ulong)(b1)), \ + "6" 
((ulong)(a0)), "rme" ((ulong)(b0))) #define add_ssssssssaaaaaaaaaaaaaaaa(s7,s6,s5,s4,s3,s2,s1,s0, a7,a6,a5,a4,a3,a2,a1,a0, b7,b6,b5,b4,b3,b2,b1,b0) \ __asm__ ("addq %23,%q7\nadcq %21,%q6\nadcq %19,%q5\n\tadcq %17,%q4\n\tadcq %15,%q3\n\tadcq %13,%q2\n\tadcq %11,%q1\n\tadcq %9,%q0" \ : "=r" (s7), "=&r" (s6), "=&r" (s5), "=&r" (s4), "=&r" (s3), "=&r" (s2), "=&r" (s1), "=&r" (s0) \ - : "0" ((mp_limb_t)(a7)), "rme" ((mp_limb_t)(b7)), \ - "1" ((mp_limb_t)(a6)), "rme" ((mp_limb_t)(b6)), \ - "2" ((mp_limb_t)(a5)), "rme" ((mp_limb_t)(b5)), \ - "3" ((mp_limb_t)(a4)), "rme" ((mp_limb_t)(b4)), \ - "4" ((mp_limb_t)(a3)), "rme" ((mp_limb_t)(b3)), \ - "5" ((mp_limb_t)(a2)), "rme" ((mp_limb_t)(b2)), \ - "6" ((mp_limb_t)(a1)), "rme" ((mp_limb_t)(b1)), \ - "7" ((mp_limb_t)(a0)), "rme" ((mp_limb_t)(b0))) + : "0" ((ulong)(a7)), "rme" ((ulong)(b7)), \ + "1" ((ulong)(a6)), "rme" ((ulong)(b6)), \ + "2" ((ulong)(a5)), "rme" ((ulong)(b5)), \ + "3" ((ulong)(a4)), "rme" ((ulong)(b4)), \ + "4" ((ulong)(a3)), "rme" ((ulong)(b3)), \ + "5" ((ulong)(a2)), "rme" ((ulong)(b2)), \ + "6" ((ulong)(a1)), "rme" ((ulong)(b1)), \ + "7" ((ulong)(a0)), "rme" ((ulong)(b0))) #define sub_ddddmmmmssss(s3, s2, s1, s0, a3, a2, a1, a0, b3, b2, b1, b0) \ __asm__ ("subq %11,%q3\n\tsbbq %9,%q2\n\tsbbq %7,%q1\n\tsbbq %5,%q0" \ : "=r" (s3), "=&r" (s2), "=&r" (s1), "=&r" (s0) \ - : "0" ((mp_limb_t)(a3)), "rme" ((mp_limb_t)(b3)), \ - "1" ((mp_limb_t)(a2)), "rme" ((mp_limb_t)(b2)), \ - "2" ((mp_limb_t)(a1)), "rme" ((mp_limb_t)(b1)), \ - "3" ((mp_limb_t)(a0)), "rme" ((mp_limb_t)(b0))) + : "0" ((ulong)(a3)), "rme" ((ulong)(b3)), \ + "1" ((ulong)(a2)), "rme" ((ulong)(b2)), \ + "2" ((ulong)(a1)), "rme" ((ulong)(b1)), \ + "3" ((ulong)(a0)), "rme" ((ulong)(b0))) #define sub_dddddmmmmmsssss(s4,s3,s2,s1,s0, a4,a3,a2,a1,a0, b4,b3,b2,b1,b0) \ __asm__ ("subq %14,%q4\n\tsbbq %12,%q3\n\tsbbq %10,%q2\n\tsbbq %8,%q1\n\tsbbq %6,%q0" \ : "=r" (s4), "=&r" (s3), "=&r" (s2), "=&r" (s1), "=&r" (s0) \ - : "0" ((mp_limb_t)(a4)), "rme" ((mp_limb_t)(b4)), \ - "1" ((mp_limb_t)(a3)), "rme" ((mp_limb_t)(b3)), \ - "2" ((mp_limb_t)(a2)), "rme" ((mp_limb_t)(b2)), \ - "3" ((mp_limb_t)(a1)), "rme" ((mp_limb_t)(b1)), \ - "4" ((mp_limb_t)(a0)), "rme" ((mp_limb_t)(b0))) + : "0" ((ulong)(a4)), "rme" ((ulong)(b4)), \ + "1" ((ulong)(a3)), "rme" ((ulong)(b3)), \ + "2" ((ulong)(a2)), "rme" ((ulong)(b2)), \ + "3" ((ulong)(a1)), "rme" ((ulong)(b1)), \ + "4" ((ulong)(a0)), "rme" ((ulong)(b0))) #define sub_ddddddmmmmmmssssss(s5,s4,s3,s2,s1,s0, a5,a4,a3,a2,a1,a0, b5,b4,b3,b2,b1,b0) \ __asm__ ("subq %17,%q5\nsbbq %15,%q4\n\tsbbq %13,%q3\n\tsbbq %11,%q2\n\tsbbq %9,%q1\n\tsbbq %7,%q0" \ : "=r" (s5), "=&r" (s4), "=&r" (s3), "=&r" (s2), "=&r" (s1), "=&r" (s0) \ - : "0" ((mp_limb_t)(a5)), "rme" ((mp_limb_t)(b5)), \ - "1" ((mp_limb_t)(a4)), "rme" ((mp_limb_t)(b4)), \ - "2" ((mp_limb_t)(a3)), "rme" ((mp_limb_t)(b3)), \ - "3" ((mp_limb_t)(a2)), "rme" ((mp_limb_t)(b2)), \ - "4" ((mp_limb_t)(a1)), "rme" ((mp_limb_t)(b1)), \ - "5" ((mp_limb_t)(a0)), "rme" ((mp_limb_t)(b0))) + : "0" ((ulong)(a5)), "rme" ((ulong)(b5)), \ + "1" ((ulong)(a4)), "rme" ((ulong)(b4)), \ + "2" ((ulong)(a3)), "rme" ((ulong)(b3)), \ + "3" ((ulong)(a2)), "rme" ((ulong)(b2)), \ + "4" ((ulong)(a1)), "rme" ((ulong)(b1)), \ + "5" ((ulong)(a0)), "rme" ((ulong)(b0))) #define sub_dddddddmmmmmmmsssssss(s6,s5,s4,s3,s2,s1,s0, a6,a5,a4,a3,a2,a1,a0, b6,b5,b4,b3,b2,b1,b0) \ __asm__ ("subq %20,%q6\nsbbq %18,%q5\nsbbq %16,%q4\n\tsbbq %14,%q3\n\tsbbq %12,%q2\n\tsbbq %10,%q1\n\tsbbq %8,%q0" \ : "=r" (s6), "=&r" (s5), "=&r" (s4), "=&r" (s3), "=&r" 
(s2), "=&r" (s1), "=&r" (s0) \ - : "0" ((mp_limb_t)(a6)), "rme" ((mp_limb_t)(b6)), \ - "1" ((mp_limb_t)(a5)), "rme" ((mp_limb_t)(b5)), \ - "2" ((mp_limb_t)(a4)), "rme" ((mp_limb_t)(b4)), \ - "3" ((mp_limb_t)(a3)), "rme" ((mp_limb_t)(b3)), \ - "4" ((mp_limb_t)(a2)), "rme" ((mp_limb_t)(b2)), \ - "5" ((mp_limb_t)(a1)), "rme" ((mp_limb_t)(b1)), \ - "6" ((mp_limb_t)(a0)), "rme" ((mp_limb_t)(b0))) + : "0" ((ulong)(a6)), "rme" ((ulong)(b6)), \ + "1" ((ulong)(a5)), "rme" ((ulong)(b5)), \ + "2" ((ulong)(a4)), "rme" ((ulong)(b4)), \ + "3" ((ulong)(a3)), "rme" ((ulong)(b3)), \ + "4" ((ulong)(a2)), "rme" ((ulong)(b2)), \ + "5" ((ulong)(a1)), "rme" ((ulong)(b1)), \ + "6" ((ulong)(a0)), "rme" ((ulong)(b0))) #define sub_ddddddddmmmmmmmmssssssss(s7,s6,s5,s4,s3,s2,s1,s0, a7,a6,a5,a4,a3,a2,a1,a0, b7,b6,b5,b4,b3,b2,b1,b0) \ __asm__ ("subq %23,%q7\nsbbq %21,%q6\nsbbq %19,%q5\n\tsbbq %17,%q4\n\tsbbq %15,%q3\n\tsbbq %13,%q2\n\tsbbq %11,%q1\n\tsbbq %9,%q0" \ : "=r" (s7), "=&r" (s6), "=&r" (s5), "=&r" (s4), "=&r" (s3), "=&r" (s2), "=&r" (s1), "=&r" (s0) \ - : "0" ((mp_limb_t)(a7)), "rme" ((mp_limb_t)(b7)), \ - "1" ((mp_limb_t)(a6)), "rme" ((mp_limb_t)(b6)), \ - "2" ((mp_limb_t)(a5)), "rme" ((mp_limb_t)(b5)), \ - "3" ((mp_limb_t)(a4)), "rme" ((mp_limb_t)(b4)), \ - "4" ((mp_limb_t)(a3)), "rme" ((mp_limb_t)(b3)), \ - "5" ((mp_limb_t)(a2)), "rme" ((mp_limb_t)(b2)), \ - "6" ((mp_limb_t)(a1)), "rme" ((mp_limb_t)(b1)), \ - "7" ((mp_limb_t)(a0)), "rme" ((mp_limb_t)(b0))) + : "0" ((ulong)(a7)), "rme" ((ulong)(b7)), \ + "1" ((ulong)(a6)), "rme" ((ulong)(b6)), \ + "2" ((ulong)(a5)), "rme" ((ulong)(b5)), \ + "3" ((ulong)(a4)), "rme" ((ulong)(b4)), \ + "4" ((ulong)(a3)), "rme" ((ulong)(b3)), \ + "5" ((ulong)(a2)), "rme" ((ulong)(b2)), \ + "6" ((ulong)(a1)), "rme" ((ulong)(b1)), \ + "7" ((ulong)(a0)), "rme" ((ulong)(b0))) #elif defined(__GNUC__) && defined(__ARM_NEON) #define add_sssssaaaaaaaaaa(s4, s3, s2, s1, s0, a4, a3, a2, a1, a0, b4, b3, b2, b1, b0) \ __asm__ ("adds %4,%9,%14\n\tadcs %3,%8,%13\n\tadcs %2,%7,%12\n\tadcs %1,%6,%11\n\tadc %0,%5,%10"\ : "=r" (s4), "=&r" (s3), "=&r" (s2), "=&r" (s1), "=&r" (s0) \ - : "r" ((mp_limb_t)(a4)), "r" ((mp_limb_t)(a3)), "r" ((mp_limb_t)(a2)), "r" ((mp_limb_t)(a1)), "r" ((mp_limb_t)(a0)), \ - "r" ((mp_limb_t)(b4)), "r" ((mp_limb_t)(b3)), "r" ((mp_limb_t)(b2)), "r" ((mp_limb_t)(b1)), "rI" ((mp_limb_t)(b0)) \ + : "r" ((ulong)(a4)), "r" ((ulong)(a3)), "r" ((ulong)(a2)), "r" ((ulong)(a1)), "r" ((ulong)(a0)), \ + "r" ((ulong)(b4)), "r" ((ulong)(b3)), "r" ((ulong)(b2)), "r" ((ulong)(b1)), "rI" ((ulong)(b0)) \ : "cc") #define add_ssssssaaaaaaaaaaaa(s5, s4, s3, s2, s1, s0, a5, a4, a3, a2, a1, a0, b5, b4, b3, b2, b1, b0) \ __asm__ ("adds %5,%11,%17\n\tadcs %4,%10,%16\n\tadcs %3,%9,%15\n\tadcs %2,%8,%14\n\tadcs %1,%7,%13\n\tadc %0,%6,%12"\ : "=r" (s5), "=&r" (s4), "=&r" (s3), "=&r" (s2), "=&r" (s1), "=&r" (s0) \ - : "r" ((mp_limb_t)(a5)), "r" ((mp_limb_t)(a4)), "r" ((mp_limb_t)(a3)), "r" ((mp_limb_t)(a2)), "r" ((mp_limb_t)(a1)), "r" ((mp_limb_t)(a0)), \ - "r" ((mp_limb_t)(b5)), "r" ((mp_limb_t)(b4)), "r" ((mp_limb_t)(b3)), "r" ((mp_limb_t)(b2)), "r" ((mp_limb_t)(b1)), "rI" ((mp_limb_t)(b0)) \ + : "r" ((ulong)(a5)), "r" ((ulong)(a4)), "r" ((ulong)(a3)), "r" ((ulong)(a2)), "r" ((ulong)(a1)), "r" ((ulong)(a0)), \ + "r" ((ulong)(b5)), "r" ((ulong)(b4)), "r" ((ulong)(b3)), "r" ((ulong)(b2)), "r" ((ulong)(b1)), "rI" ((ulong)(b0)) \ : "cc") #define add_sssssssaaaaaaaaaaaaaa(s6, s5, s4, s3, s2, s1, s0, a6, a5, a4, a3, a2, a1, a0, b6, b5, b4, b3, b2, b1, b0) \ __asm__ ("adds %6,%13,%20\n\tadcs 
%5,%12,%19\n\tadcs %4,%11,%18\n\tadcs %3,%10,%17\n\tadcs %2,%9,%16\n\tadcs %1,%8,%15\n\tadc %0,%7,%14"\ : "=r" (s6), "=&r" (s5), "=&r" (s4), "=&r" (s3), "=&r" (s2), "=&r" (s1), "=&r" (s0) \ - : "r" ((mp_limb_t)(a6)), "r" ((mp_limb_t)(a5)), "r" ((mp_limb_t)(a4)), "r" ((mp_limb_t)(a3)), "r" ((mp_limb_t)(a2)), "r" ((mp_limb_t)(a1)), "r" ((mp_limb_t)(a0)), \ - "r" ((mp_limb_t)(b6)), "r" ((mp_limb_t)(b5)), "r" ((mp_limb_t)(b4)), "r" ((mp_limb_t)(b3)), "r" ((mp_limb_t)(b2)), "r" ((mp_limb_t)(b1)), "rI" ((mp_limb_t)(b0)) \ + : "r" ((ulong)(a6)), "r" ((ulong)(a5)), "r" ((ulong)(a4)), "r" ((ulong)(a3)), "r" ((ulong)(a2)), "r" ((ulong)(a1)), "r" ((ulong)(a0)), \ + "r" ((ulong)(b6)), "r" ((ulong)(b5)), "r" ((ulong)(b4)), "r" ((ulong)(b3)), "r" ((ulong)(b2)), "r" ((ulong)(b1)), "rI" ((ulong)(b0)) \ : "cc") #define add_ssssssssaaaaaaaaaaaaaaaa(s7, s6, s5, s4, s3, s2, s1, s0, a7, a6, a5, a4, a3, a2, a1, a0, b7, b6, b5, b4, b3, b2, b1, b0) \ __asm__ ("adds %7,%15,%23\n\tadcs %6,%14,%22\n\tadcs %5,%13,%21\n\tadcs %4,%12,%20\n\tadcs %3,%11,%19\n\tadcs %2,%10,%18\n\tadcs %1,%9,%17\n\tadc %0,%8,%16"\ : "=r" (s7), "=&r" (s6), "=&r" (s5), "=&r" (s4), "=&r" (s3), "=&r" (s2), "=&r" (s1), "=&r" (s0) \ - : "r" ((mp_limb_t)(a7)), "r" ((mp_limb_t)(a6)), "r" ((mp_limb_t)(a5)), "r" ((mp_limb_t)(a4)), "r" ((mp_limb_t)(a3)), "r" ((mp_limb_t)(a2)), "r" ((mp_limb_t)(a1)), "r" ((mp_limb_t)(a0)), \ - "r" ((mp_limb_t)(b7)), "r" ((mp_limb_t)(b6)), "r" ((mp_limb_t)(b5)), "r" ((mp_limb_t)(b4)), "r" ((mp_limb_t)(b3)), "r" ((mp_limb_t)(b2)), "r" ((mp_limb_t)(b1)), "rI" ((mp_limb_t)(b0)) \ + : "r" ((ulong)(a7)), "r" ((ulong)(a6)), "r" ((ulong)(a5)), "r" ((ulong)(a4)), "r" ((ulong)(a3)), "r" ((ulong)(a2)), "r" ((ulong)(a1)), "r" ((ulong)(a0)), \ + "r" ((ulong)(b7)), "r" ((ulong)(b6)), "r" ((ulong)(b5)), "r" ((ulong)(b4)), "r" ((ulong)(b3)), "r" ((ulong)(b2)), "r" ((ulong)(b1)), "rI" ((ulong)(b0)) \ : "cc") #define sub_ddddmmmmssss(s3, s2, s1, s0, a3, a2, a1, a0, b3, b2, b1, b0) \ __asm__ ("subs %3,%7,%11\n\tsbcs %2,%6,%10\n\tsbcs %1,%5,%9\n\tsbc %0,%4,%8"\ : "=r" (s3), "=&r" (s2), "=&r" (s1), "=&r" (s0) \ - : "r" ((mp_limb_t)(a3)), "r" ((mp_limb_t)(a2)), "r" ((mp_limb_t)(a1)), "r" ((mp_limb_t)(a0)), \ - "r" ((mp_limb_t)(b3)), "r" ((mp_limb_t)(b2)), "r" ((mp_limb_t)(b1)), "rI" ((mp_limb_t)(b0)) \ + : "r" ((ulong)(a3)), "r" ((ulong)(a2)), "r" ((ulong)(a1)), "r" ((ulong)(a0)), \ + "r" ((ulong)(b3)), "r" ((ulong)(b2)), "r" ((ulong)(b1)), "rI" ((ulong)(b0)) \ : "cc") #define sub_dddddmmmmmsssss(s4, s3, s2, s1, s0, a4, a3, a2, a1, a0, b4, b3, b2, b1, b0) \ __asm__ ("subs %4,%9,%14\n\tsbcs %3,%8,%13\n\tsbcs %2,%7,%12\n\tsbcs %1,%6,%11\n\tsbc %0,%5,%10"\ : "=r" (s4), "=&r" (s3), "=&r" (s2), "=&r" (s1), "=&r" (s0) \ - : "r" ((mp_limb_t)(a4)), "r" ((mp_limb_t)(a3)), "r" ((mp_limb_t)(a2)), "r" ((mp_limb_t)(a1)), "r" ((mp_limb_t)(a0)), \ - "r" ((mp_limb_t)(b4)), "r" ((mp_limb_t)(b3)), "r" ((mp_limb_t)(b2)), "r" ((mp_limb_t)(b1)), "rI" ((mp_limb_t)(b0)) \ + : "r" ((ulong)(a4)), "r" ((ulong)(a3)), "r" ((ulong)(a2)), "r" ((ulong)(a1)), "r" ((ulong)(a0)), \ + "r" ((ulong)(b4)), "r" ((ulong)(b3)), "r" ((ulong)(b2)), "r" ((ulong)(b1)), "rI" ((ulong)(b0)) \ : "cc") #define sub_ddddddmmmmmmssssss(s5, s4, s3, s2, s1, s0, a5, a4, a3, a2, a1, a0, b5, b4, b3, b2, b1, b0) \ __asm__ ("subs %5,%11,%17\n\tsbcs %4,%10,%16\n\tsbcs %3,%9,%15\n\tsbcs %2,%8,%14\n\tsbcs %1,%7,%13\n\tsbc %0,%6,%12"\ : "=r" (s5), "=&r" (s4), "=&r" (s3), "=&r" (s2), "=&r" (s1), "=&r" (s0) \ - : "r" ((mp_limb_t)(a5)), "r" ((mp_limb_t)(a4)), "r" ((mp_limb_t)(a3)), "r" ((mp_limb_t)(a2)), 
"r" ((mp_limb_t)(a1)), "r" ((mp_limb_t)(a0)), \ - "r" ((mp_limb_t)(b5)), "r" ((mp_limb_t)(b4)), "r" ((mp_limb_t)(b3)), "r" ((mp_limb_t)(b2)), "r" ((mp_limb_t)(b1)), "rI" ((mp_limb_t)(b0)) \ + : "r" ((ulong)(a5)), "r" ((ulong)(a4)), "r" ((ulong)(a3)), "r" ((ulong)(a2)), "r" ((ulong)(a1)), "r" ((ulong)(a0)), \ + "r" ((ulong)(b5)), "r" ((ulong)(b4)), "r" ((ulong)(b3)), "r" ((ulong)(b2)), "r" ((ulong)(b1)), "rI" ((ulong)(b0)) \ : "cc") #define sub_dddddddmmmmmmmsssssss(s6, s5, s4, s3, s2, s1, s0, a6, a5, a4, a3, a2, a1, a0, b6, b5, b4, b3, b2, b1, b0) \ __asm__ ("subs %6,%13,%20\n\tsbcs %5,%12,%19\n\tsbcs %4,%11,%18\n\tsbcs %3,%10,%17\n\tsbcs %2,%9,%16\n\tsbcs %1,%8,%15\n\tsbc %0,%7,%14"\ : "=r" (s6), "=&r" (s5), "=&r" (s4), "=&r" (s3), "=&r" (s2), "=&r" (s1), "=&r" (s0) \ - : "r" ((mp_limb_t)(a6)), "r" ((mp_limb_t)(a5)), "r" ((mp_limb_t)(a4)), "r" ((mp_limb_t)(a3)), "r" ((mp_limb_t)(a2)), "r" ((mp_limb_t)(a1)), "r" ((mp_limb_t)(a0)), \ - "r" ((mp_limb_t)(b6)), "r" ((mp_limb_t)(b5)), "r" ((mp_limb_t)(b4)), "r" ((mp_limb_t)(b3)), "r" ((mp_limb_t)(b2)), "r" ((mp_limb_t)(b1)), "rI" ((mp_limb_t)(b0)) \ + : "r" ((ulong)(a6)), "r" ((ulong)(a5)), "r" ((ulong)(a4)), "r" ((ulong)(a3)), "r" ((ulong)(a2)), "r" ((ulong)(a1)), "r" ((ulong)(a0)), \ + "r" ((ulong)(b6)), "r" ((ulong)(b5)), "r" ((ulong)(b4)), "r" ((ulong)(b3)), "r" ((ulong)(b2)), "r" ((ulong)(b1)), "rI" ((ulong)(b0)) \ : "cc") #define sub_ddddddddmmmmmmmmssssssss(s7, s6, s5, s4, s3, s2, s1, s0, a7, a6, a5, a4, a3, a2, a1, a0, b7, b6, b5, b4, b3, b2, b1, b0) \ __asm__ ("subs %7,%15,%23\n\tsbcs %6,%14,%22\n\tsbcs %5,%13,%21\n\tsbcs %4,%12,%20\n\tsbcs %3,%11,%19\n\tsbcs %2,%10,%18\n\tsbcs %1,%9,%17\n\tsbc %0,%8,%16"\ : "=r" (s7), "=&r" (s6), "=&r" (s5), "=&r" (s4), "=&r" (s3), "=&r" (s2), "=&r" (s1), "=&r" (s0) \ - : "r" ((mp_limb_t)(a7)), "r" ((mp_limb_t)(a6)), "r" ((mp_limb_t)(a5)), "r" ((mp_limb_t)(a4)), "r" ((mp_limb_t)(a3)), "r" ((mp_limb_t)(a2)), "r" ((mp_limb_t)(a1)), "r" ((mp_limb_t)(a0)), \ - "r" ((mp_limb_t)(b7)), "r" ((mp_limb_t)(b6)), "r" ((mp_limb_t)(b5)), "r" ((mp_limb_t)(b4)), "r" ((mp_limb_t)(b3)), "r" ((mp_limb_t)(b2)), "r" ((mp_limb_t)(b1)), "rI" ((mp_limb_t)(b0)) \ + : "r" ((ulong)(a7)), "r" ((ulong)(a6)), "r" ((ulong)(a5)), "r" ((ulong)(a4)), "r" ((ulong)(a3)), "r" ((ulong)(a2)), "r" ((ulong)(a1)), "r" ((ulong)(a0)), \ + "r" ((ulong)(b7)), "r" ((ulong)(b6)), "r" ((ulong)(b5)), "r" ((ulong)(b4)), "r" ((ulong)(b3)), "r" ((ulong)(b2)), "r" ((ulong)(b1)), "rI" ((ulong)(b0)) \ : "cc") #elif defined(_MSC_VER) && (defined(__AVX2__) || defined(_M_ARM64)) #define add_sssssaaaaaaaaaa(s4, s3, s2, s1, s0, a4, a3, a2, a1, a0, b4, b3, b2, b1, b0) \ do { \ - mp_limb_t __t0 = 0; \ - add_ssssaaaaaaaa(__t0, s2, s1, s0, (mp_limb_t) 0, a2, a1, a0, (mp_limb_t) 0, b2, b1, b0); \ + ulong __t0 = 0; \ + add_ssssaaaaaaaa(__t0, s2, s1, s0, (ulong) 0, a2, a1, a0, (ulong) 0, b2, b1, b0); \ add_ssaaaa(s4, s3, a4, a3, b4, b3); \ - add_ssaaaa(s4, s3, s4, s3, (mp_limb_t) 0, __t0); \ + add_ssaaaa(s4, s3, s4, s3, (ulong) 0, __t0); \ } while (0) #define add_ssssssaaaaaaaaaaaa(s5, s4, s3, s2, s1, s0, a5, a4, a3, a2, a1, a0, b5, b4, b3, b2, b1, b0) \ do { \ - mp_limb_t __t1 = 0; \ - add_sssssaaaaaaaaaa(__t1, s3, s2, s1, s0, (mp_limb_t) 0, a3, a2, a1, a0, (mp_limb_t) 0, b3, b2, b1, b0);\ + ulong __t1 = 0; \ + add_sssssaaaaaaaaaa(__t1, s3, s2, s1, s0, (ulong) 0, a3, a2, a1, a0, (ulong) 0, b3, b2, b1, b0);\ add_ssaaaa(s5, s4, a5, a4, b5, b4); \ - add_ssaaaa(s5, s4, s5, s4, (mp_limb_t) 0, __t1); \ + add_ssaaaa(s5, s4, s5, s4, (ulong) 0, __t1); \ } while (0) #define 
add_sssssssaaaaaaaaaaaaaa(s6, s5, s4, s3, s2, s1, s0, a6, a5, a4, a3, a2, a1, a0, b6, b5, b4, b3, b2, b1, b0) \ do { \ - mp_limb_t __t2 = 0; \ - add_ssssssaaaaaaaaaaaa(__t2, s4, s3, s2, s1, s0, (mp_limb_t) 0, a4, a3, a2, a1, a0, (mp_limb_t) 0, b4, b3, b2, b1, b0); \ + ulong __t2 = 0; \ + add_ssssssaaaaaaaaaaaa(__t2, s4, s3, s2, s1, s0, (ulong) 0, a4, a3, a2, a1, a0, (ulong) 0, b4, b3, b2, b1, b0); \ add_ssaaaa(s6, s5, a6, a5, b6, b5); \ - add_ssaaaa(s6, s5, s6, s5, (mp_limb_t) 0, __t2); \ + add_ssaaaa(s6, s5, s6, s5, (ulong) 0, __t2); \ } while (0) #define add_ssssssssaaaaaaaaaaaaaaaa(s7, s6, s5, s4, s3, s2, s1, s0, a7, a6, a5, a4, a3, a2, a1, a0, b7, b6, b5, b4, b3, b2, b1, b0) \ do { \ - mp_limb_t __t3 = 0; \ - add_sssssssaaaaaaaaaaaaaa(__t3, s5, s4, s3, s2, s1, s0, (mp_limb_t) 0, a5, a4, a3, a2, a1, a0, (mp_limb_t) 0, b5, b4, b3, b2, b1, b0); \ + ulong __t3 = 0; \ + add_sssssssaaaaaaaaaaaaaa(__t3, s5, s4, s3, s2, s1, s0, (ulong) 0, a5, a4, a3, a2, a1, a0, (ulong) 0, b5, b4, b3, b2, b1, b0); \ add_ssaaaa(s7, s6, a7, a6, b7, b6); \ - add_ssaaaa(s7, s6, s7, s6, (mp_limb_t) 0, __t3); \ + add_ssaaaa(s7, s6, s7, s6, (ulong) 0, __t3); \ } while (0) #define sub_ddddmmmmssss(s3, s2, s1, s0, a3, a2, a1, a0, b3, b2, b1, b0) \ do { \ - mp_limb_t __t1, __u1; \ - sub_dddmmmsss(__t1, s1, s0, (mp_limb_t) 0, a1, a0, (mp_limb_t) 0, b1, b0); \ - sub_ddmmss(__u1, s2, (mp_limb_t) 0, a2, (mp_limb_t) 0, b2); \ + ulong __t1, __u1; \ + sub_dddmmmsss(__t1, s1, s0, (ulong) 0, a1, a0, (ulong) 0, b1, b0); \ + sub_ddmmss(__u1, s2, (ulong) 0, a2, (ulong) 0, b2); \ sub_ddmmss(s3, s2, (a3) - (b3), s2, -__u1, -__t1); \ } while (0) #define sub_dddddmmmmmsssss(s4, s3, s2, s1, s0, a4, a3, a2, a1, a0, b4, b3, b2, b1, b0) \ do { \ - mp_limb_t __t2, __u2; \ - sub_ddddmmmmssss(__t2, s2, s1, s0, (mp_limb_t) 0, a2, a1, a0, (mp_limb_t) 0, b2, b1, b0); \ - sub_ddmmss(__u2, s3, (mp_limb_t) 0, a3, (mp_limb_t) 0, b3); \ + ulong __t2, __u2; \ + sub_ddddmmmmssss(__t2, s2, s1, s0, (ulong) 0, a2, a1, a0, (ulong) 0, b2, b1, b0); \ + sub_ddmmss(__u2, s3, (ulong) 0, a3, (ulong) 0, b3); \ sub_ddmmss(s4, s3, (a4) - (b4), s3, -__u2, -__t2); \ } while (0) #define sub_ddddddmmmmmmssssss(s5, s4, s3, s2, s1, s0, a5, a4, a3, a2, a1, a0, b5, b4, b3, b2, b1, b0) \ do { \ - mp_limb_t __t3, __u3; \ - sub_dddddmmmmmsssss(__t3, s3, s2, s1, s0, (mp_limb_t) 0, a3, a2, a1, a0, (mp_limb_t) 0, b3, b2, b1, b0);\ - sub_ddmmss(__u3, s4, (mp_limb_t) 0, a4, (mp_limb_t) 0, b4); \ + ulong __t3, __u3; \ + sub_dddddmmmmmsssss(__t3, s3, s2, s1, s0, (ulong) 0, a3, a2, a1, a0, (ulong) 0, b3, b2, b1, b0);\ + sub_ddmmss(__u3, s4, (ulong) 0, a4, (ulong) 0, b4); \ sub_ddmmss(s5, s4, (a5) - (b5), s4, -__u3, -__t3); \ } while (0) #define sub_dddddddmmmmmmmsssssss(s6, s5, s4, s3, s2, s1, s0, a6, a5, a4, a3, a2, a1, a0, b6, b5, b4, b3, b2, b1, b0) \ do { \ - mp_limb_t __t4, __u4; \ - sub_ddddddmmmmmmssssss(__t4, s4, s3, s2, s1, s0, (mp_limb_t) 0, a4, a3, a2, a1, a0, (mp_limb_t) 0, b4, b3, b2, b1, b0); \ - sub_ddmmss(__u4, s5, (mp_limb_t) 0, a5, (mp_limb_t) 0, b5); \ + ulong __t4, __u4; \ + sub_ddddddmmmmmmssssss(__t4, s4, s3, s2, s1, s0, (ulong) 0, a4, a3, a2, a1, a0, (ulong) 0, b4, b3, b2, b1, b0); \ + sub_ddmmss(__u4, s5, (ulong) 0, a5, (ulong) 0, b5); \ sub_ddmmss(s6, s5, (a6) - (b6), s5, -__u4, -__t4); \ } while (0) #define sub_ddddddddmmmmmmmmssssssss(s7, s6, s5, s4, s3, s2, s1, s0, a7, a6, a5, a4, a3, a2, a1, a0, b7, b6, b5, b4, b3, b2, b1, b0) \ do { \ - mp_limb_t __t5, __u5; \ - sub_dddddddmmmmmmmsssssss(__t5, s5, s4, s3, s2, s1, s0, (mp_limb_t) 0, a5, a4, a3, a2, a1, 
a0, (mp_limb_t) 0, b5, b4, b3, b2, b1, b0); \ - sub_ddmmss(__u5, s6, (mp_limb_t) 0, a6, (mp_limb_t) 0, b6); \ + ulong __t5, __u5; \ + sub_dddddddmmmmmmmsssssss(__t5, s5, s4, s3, s2, s1, s0, (ulong) 0, a5, a4, a3, a2, a1, a0, (ulong) 0, b5, b4, b3, b2, b1, b0); \ + sub_ddmmss(__u5, s6, (ulong) 0, a6, (ulong) 0, b6); \ sub_ddmmss(s7, s6, (a7) - (b7), s6, -__u5, -__t5); \ } while (0) diff --git a/src/d_mat/test/main.c b/src/d_mat/test/main.c index e18fe961cb..e74f6f66ae 100644 --- a/src/d_mat/test/main.c +++ b/src/d_mat/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-entry.c" diff --git a/src/d_vec/test/main.c b/src/d_vec/test/main.c index 6dbd674d02..7b4f211953 100644 --- a/src/d_vec/test/main.c +++ b/src/d_vec/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/dirichlet/test/main.c b/src/dirichlet/test/main.c index 38d14f9704..c549e80587 100644 --- a/src/dirichlet/test/main.c +++ b/src/dirichlet/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-char.c" diff --git a/src/dlog.h b/src/dlog.h index 4060505eeb..84a207a207 100644 --- a/src/dlog.h +++ b/src/dlog.h @@ -30,7 +30,7 @@ enum }; typedef struct dlog_precomp_struct dlog_precomp_struct; -typedef struct dlog_precomp_struct * dlog_precomp_ptr; +typedef struct dlog_precomp_struct * dlog_preconn_ptr; /* log in (1+pZ/p^eZ), e small: use recursion formulas * could use padic log instead but exponent is small @@ -107,7 +107,7 @@ typedef struct ulong num; ulong * expo; ulong * crt_coeffs; - dlog_precomp_ptr pre; + dlog_preconn_ptr pre; } dlog_crt_struct; diff --git a/src/dlog/test/main.c b/src/dlog/test/main.c index 197225908c..02b9fcc334 100644 --- a/src/dlog/test/main.c +++ b/src/dlog/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-dlog.c" diff --git a/src/double_extras/randtest.c b/src/double_extras/randtest.c index ded74abf59..03858c7152 100644 --- a/src/double_extras/randtest.c +++ b/src/double_extras/randtest.c @@ -18,7 +18,7 @@ double d_randtest(flint_rand_t state) { - mp_limb_t m1, m2; + ulong m1, m2; double t; if (FLINT_BITS == 64) diff --git a/src/double_extras/test/main.c b/src/double_extras/test/main.c index 6245edc01d..1832b0c042 100644 --- a/src/double_extras/test/main.c +++ b/src/double_extras/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-is_nan.c" diff --git a/src/double_interval/test/main.c b/src/double_interval/test/main.c index 6018d1b38b..6d901655b7 100644 --- a/src/double_interval/test/main.c +++ b/src/double_interval/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-fast_add.c" diff --git a/src/fexpr/call0.c b/src/fexpr/call0.c index 51bc9e2385..72fda4204c 100644 --- a/src/fexpr/call0.c +++ b/src/fexpr/call0.c @@ -16,7 +16,7 @@ void fexpr_call0(fexpr_t res, const fexpr_t f) { slong res_size, f_size; - mp_ptr out; + nn_ptr out; f_size = fexpr_size(f); diff --git a/src/fexpr/call1.c b/src/fexpr/call1.c index 27de7afaed..b103ea90f6 100644 --- a/src/fexpr/call1.c +++ b/src/fexpr/call1.c @@ -16,7 +16,7 @@ void fexpr_call1(fexpr_t res, const fexpr_t f, const fexpr_t x1) { slong res_size, f_size, x1_size; - mp_ptr out; + nn_ptr out; f_size = fexpr_size(f); x1_size = fexpr_size(x1); diff --git a/src/fexpr/call2.c b/src/fexpr/call2.c index f958e2d854..935da03f8f 100644 --- a/src/fexpr/call2.c +++ b/src/fexpr/call2.c @@ -16,7 +16,7 @@ void fexpr_call2(fexpr_t res, const fexpr_t f, const fexpr_t x1, const fexpr_t x2) { slong res_size, f_size, x1_size, x2_size; - mp_ptr out; + nn_ptr out; f_size = fexpr_size(f); x1_size = fexpr_size(x1); diff --git a/src/fexpr/call3.c b/src/fexpr/call3.c index 0a8229f7f4..f02463b4d5 100644 --- a/src/fexpr/call3.c +++ b/src/fexpr/call3.c @@ -16,7 +16,7 @@ void fexpr_call3(fexpr_t res, const fexpr_t f, const fexpr_t x1, const fexpr_t x2, const fexpr_t x3) { slong res_size, f_size, x1_size, x2_size, x3_size; - mp_ptr out; + nn_ptr out; f_size = fexpr_size(f); x1_size = fexpr_size(x1); diff --git a/src/fexpr/call4.c b/src/fexpr/call4.c index c6b830e38e..52f3b64922 100644 --- a/src/fexpr/call4.c +++ b/src/fexpr/call4.c @@ -16,7 +16,7 @@ void fexpr_call4(fexpr_t res, const fexpr_t f, const fexpr_t x1, const fexpr_t x2, const fexpr_t x3, const fexpr_t x4) { slong res_size, f_size, x1_size, x2_size, x3_size, x4_size; - mp_ptr out; + nn_ptr out; f_size = fexpr_size(f); x1_size = fexpr_size(x1); diff --git a/src/fexpr/call_vec.c b/src/fexpr/call_vec.c index ef4aa30bfe..d353d06a00 100644 --- a/src/fexpr/call_vec.c +++ b/src/fexpr/call_vec.c @@ -38,7 +38,7 @@ fexpr_call_vec(fexpr_t res, const fexpr_t f, fexpr_srcptr args, slong len) else { slong i, f_size, args_size, index_size, size, pos, arg_size; - mp_ptr out; + nn_ptr out; f_size = fexpr_size(f); diff --git a/src/fexpr/test/main.c b/src/fexpr/test/main.c index bcc3e9742d..46375f7e3b 100644 --- a/src/fexpr/test/main.c +++ b/src/fexpr/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-builtins.c" diff --git a/src/fft.h b/src/fft.h index 427007df47..09959b9f14 100644 --- a/src/fft.h +++ b/src/fft.h @@ -18,10 +18,11 @@ #define FFT_INLINE static inline #endif +#include #include "flint.h" #ifdef __cplusplus - extern "C" { +extern "C" { #endif /* defined in mpn_extras.h */ diff --git a/src/fft/test/main.c b/src/fft/test/main.c index 7308bfd913..30d36f769b 100644 --- a/src/fft/test/main.c +++ b/src/fft/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-adjust.c" diff --git a/src/fft_small.h b/src/fft_small.h index f90a0b2ec2..e5314e728d 100644 --- a/src/fft_small.h +++ b/src/fft_small.h @@ -528,8 +528,8 @@ int _nmod_poly_divrem_precomp( mpn_ctx_struct * get_default_mpn_ctx(void); -void mpn_mul_default_mpn_ctx(mp_ptr r1, mp_srcptr i1, mp_size_t n1, mp_srcptr i2, mp_size_t n2); -void _nmod_poly_mul_mid_default_mpn_ctx(mp_ptr res, slong zl, slong zh, mp_srcptr a, slong an, mp_srcptr b, slong bn, nmod_t mod); +void mpn_mul_default_mpn_ctx(nn_ptr r1, nn_srcptr i1, slong n1, nn_srcptr i2, slong n2); +void _nmod_poly_mul_mid_default_mpn_ctx(nn_ptr res, slong zl, slong zh, nn_srcptr a, slong an, nn_srcptr b, slong bn, nmod_t mod); int _fmpz_poly_mul_mid_mpn_ctx( diff --git a/src/fft_small/default_ctx.c b/src/fft_small/default_ctx.c index b6f5f7c698..34cdf6bbe4 100644 --- a/src/fft_small/default_ctx.c +++ b/src/fft_small/default_ctx.c @@ -39,13 +39,13 @@ mpn_ctx_struct * get_default_mpn_ctx(void) } void -mpn_mul_default_mpn_ctx(mp_ptr r1, mp_srcptr i1, mp_size_t n1, mp_srcptr i2, mp_size_t n2) +mpn_mul_default_mpn_ctx(nn_ptr r1, nn_srcptr i1, slong n1, nn_srcptr i2, slong n2) { mpn_ctx_mpn_mul(get_default_mpn_ctx(), r1, i1, n1, i2, n2); } void -_nmod_poly_mul_mid_default_mpn_ctx(mp_ptr res, slong zl, slong zh, mp_srcptr a, slong an, mp_srcptr b, slong bn, nmod_t mod) +_nmod_poly_mul_mid_default_mpn_ctx(nn_ptr res, slong zl, slong zh, nn_srcptr a, slong an, nn_srcptr b, slong bn, nmod_t mod) { _nmod_poly_mul_mid_mpn_ctx(res, zl, zh, a, an, b, bn, mod, get_default_mpn_ctx()); } diff --git a/src/fft_small/profile/p-fft_small_vs_gmp.c b/src/fft_small/profile/p-fft_small_vs_gmp.c index 9853c84583..0cf8b37cee 100644 --- a/src/fft_small/profile/p-fft_small_vs_gmp.c +++ b/src/fft_small/profile/p-fft_small_vs_gmp.c @@ -25,13 +25,16 @@ int main(void) { - mp_ptr x, y, r, s; + nn_ptr x, y, r, s; slong n; + flint_rand_t state; - x = flint_malloc(sizeof(mp_limb_t) * FLINT_MAX(N_MAX_MUL, N_MAX_SQR)); - y = flint_malloc(sizeof(mp_limb_t) * FLINT_MAX(N_MAX_MUL, N_MAX_SQR)); - r = flint_malloc(2 * sizeof(mp_limb_t) * FLINT_MAX(N_MAX_MUL, N_MAX_SQR)); - s = flint_malloc(2 * sizeof(mp_limb_t) * FLINT_MAX(N_MAX_MUL, N_MAX_SQR)); + flint_rand_init(state); + + x = flint_malloc(sizeof(ulong) * FLINT_MAX(N_MAX_MUL, N_MAX_SQR)); + y = flint_malloc(sizeof(ulong) * FLINT_MAX(N_MAX_MUL, N_MAX_SQR)); + r = flint_malloc(2 * sizeof(ulong) * FLINT_MAX(N_MAX_MUL, N_MAX_SQR)); + s = flint_malloc(2 * sizeof(ulong) * FLINT_MAX(N_MAX_MUL, N_MAX_SQR)); flint_mpn_rrandom(x, state, FLINT_MAX(N_MAX_MUL, N_MAX_SQR)); flint_mpn_rrandom(y, state, FLINT_MAX(N_MAX_MUL, N_MAX_SQR)); @@ -79,6 +82,8 @@ int main(void) flint_free(r); flint_free(s); + flint_rand_clear(state); + flint_cleanup_master(); return 0; diff --git a/src/fft_small/profile/p-mul.c b/src/fft_small/profile/p-mul.c index 703527d94f..83198c8d05 100644 --- a/src/fft_small/profile/p-mul.c +++ b/src/fft_small/profile/p-mul.c @@ -1,3 +1,4 @@ +#include #include "ulong_extras.h" #include "fft_small.h" #include "profiler.h" diff --git a/src/fft_small/test/main.c b/src/fft_small/test/main.c index b886e1a118..0a36f12735 100644 --- a/src/fft_small/test/main.c +++ b/src/fft_small/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-fmpz_poly_mul.c" diff --git a/src/fft_small/test/t-mpn_add_inplace_c.c b/src/fft_small/test/t-mpn_add_inplace_c.c index 455600f8b1..42a887d675 100644 --- a/src/fft_small/test/t-mpn_add_inplace_c.c +++ b/src/fft_small/test/t-mpn_add_inplace_c.c @@ -40,8 +40,8 @@ TEST_FUNCTION_START(flint_mpn_add_inplace_c, state) for (iter = 0; iter < 1000 * flint_test_multiplier(); iter++) { - mp_limb_t a[10], b[10], c[10]; - mp_size_t an, bn; + ulong a[10], b[10], c[10]; + slong an, bn; unsigned char cf, c1, c2; bn = 1 + n_randint(state, 4); @@ -66,7 +66,7 @@ TEST_FUNCTION_START(flint_mpn_add_inplace_c, state) for (iter = 0; iter < 1000 * flint_test_multiplier(); iter++) { - mp_limb_t a[8], b[8], c[8], d[8]; + ulong a[8], b[8], c[8], d[8]; flint_mpn_rrandom(a, state, 8); flint_mpn_rrandom(b, state, 8); diff --git a/src/flint-config.h.in b/src/flint-config.h.in index 415222da5e..d65d12c852 100644 --- a/src/flint-config.h.in +++ b/src/flint-config.h.in @@ -12,6 +12,9 @@ /* Define if system is big endian. */ #undef FLINT_BIG_ENDIAN +/* Define according to the ABI FLINT was compiled with */ +#undef FLINT_BITS + /* Define if Arm v8 assembly is available */ #undef FLINT_HAVE_ASSEMBLY_armv8 @@ -51,6 +54,9 @@ /* Define if system is strongly ordered */ #undef FLINT_KNOW_STRONG_ORDER +/* Define to use long long limbs */ +#undef FLINT_LONG_LONG + /* Define to enable reentrant. */ #undef FLINT_REENTRANT diff --git a/src/flint.h.in b/src/flint.h.in index 1cbd6fc8ae..726ccee444 100644 --- a/src/flint.h.in +++ b/src/flint.h.in @@ -20,8 +20,7 @@ # include #endif -#include -#include +#include #ifdef BUILDING_FLINT # include "config.h" #else @@ -88,9 +87,17 @@ FLINT_DLL extern char flint_version[]; struct __FLINT_FILE; typedef struct __FLINT_FILE FLINT_FILE; -typedef @ULONG@ ulong; -typedef @SLONG@ slong; -typedef @ULONG@ flint_bitcnt_t; +#if FLINT_LONG_LONG +typedef unsigned long long int ulong; +typedef long long int slong; +#else +typedef unsigned long int ulong; +typedef long int slong; +#endif + +typedef ulong flint_bitcnt_t; +typedef ulong * nn_ptr; +typedef const ulong * nn_srcptr; #if FLINT_WANT_ASSERT # define FLINT_ASSERT(param) assert(param) @@ -167,39 +174,37 @@ typedef @ULONG@ flint_bitcnt_t; # define FLINT_TLS_PREFIX #endif -#if defined(_LONG_LONG_LIMB) +#if FLINT_LONG_LONG # define _WORD_FMT "ll" # define WORD_FMT "%ll" # define WORD_WIDTH_FMT "%*ll" # define WORD(xx) (xx##LL) # define UWORD(xx) (xx##ULL) -# ifndef FLINT_NO_WORDMAC -# define UWORD_MAX ULLONG_MAX -# define UWORD_MIN ULLONG_MIN -# define WORD_MAX LLONG_MAX -# define WORD_MIN LLONG_MIN -# endif #else # define _WORD_FMT "l" # define WORD_FMT "%l" # define WORD_WIDTH_FMT "%*l" # define WORD(xx) (xx##L) # define UWORD(xx) (xx##UL) -# ifndef FLINT_NO_WORDMAC -# define UWORD_MAX ULONG_MAX -# define UWORD_MIN ULONG_MIN -# define WORD_MAX LONG_MAX -# define WORD_MIN LONG_MIN -# endif #endif -#if GMP_LIMB_BITS == 64 -# define FLINT_BITS 64 +#if FLINT_BITS == 64 # define FLINT_D_BITS 53 # define FLINT64 1 +# ifndef FLINT_NO_WORDMAC +# define UWORD_MAX UWORD(18446744073709551615) +# define UWORD_MIN UWORD(0) +# define WORD_MAX WORD(9223372036854775807) +# define WORD_MIN (-WORD_MAX - WORD(1)) +# endif #else -# define FLINT_BITS 32 # define FLINT_D_BITS 31 +# ifndef FLINT_NO_WORDMAC +# define UWORD_MAX UWORD(4294967295) +# define UWORD_MIN UWORD(0) +# define WORD_MAX WORD(2147483647) +# define WORD_MIN (-WORD_MAX - WORD(1)) +# endif #endif 
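As an aside for reviewers, a minimal compile-time sketch (editorial, not part of the patch) of what the flint.h.in hunk above amounts to: the word-size limits are now spelled with the UWORD()/WORD() literal macros and selected by FLINT_BITS, rather than derived from GMP_LIMB_BITS and the ULLONG_MAX/ULONG_MAX spellings that the hunk removes. The function name flint_demo_word_limits is hypothetical and only illustrates the new macros in use.

#include "flint.h"

#if FLINT_BITS == 64
/* here UWORD_MAX == UWORD(18446744073709551615), WORD_MIN == -WORD_MAX - WORD(1) */
#else
/* here UWORD_MAX == UWORD(4294967295) */
#endif

int flint_demo_word_limits(void)   /* hypothetical name, illustration only */
{
    ulong u = UWORD_MAX;   /* all FLINT_BITS bits set */
    slong s = WORD_MAX;    /* largest signed word */

    /* shifting the all-ones limb right by FLINT_BITS - 1 leaves exactly one bit */
    return (u >> (FLINT_BITS - 1)) == 1 && s > 0;
}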
#include "longlong.h" diff --git a/src/fmpq.h b/src/fmpq.h index 065241b4e7..c0ff7b5833 100644 --- a/src/fmpq.h +++ b/src/fmpq.h @@ -312,7 +312,7 @@ typedef struct { typedef _fmpz_mat22_struct _fmpz_mat22_t[1]; typedef struct { - mp_limb_t _11, _12, _21, _22; + ulong _11, _12, _21, _22; int det; /* ditto */ } _ui_mat22_struct; diff --git a/src/fmpq/add.c b/src/fmpq/add.c index de79ac4d80..f676d880e3 100644 --- a/src/fmpq/add.c +++ b/src/fmpq/add.c @@ -17,7 +17,7 @@ void _fmpq_add_small(fmpz_t rnum, fmpz_t rden, slong p1, ulong q1, slong p2, ulong q2) { ulong pp, qq, rr, ss; - mp_limb_t hi, lo; + ulong hi, lo; int s1, s2; if (q1 == q2) diff --git a/src/fmpq/dedekind_sum.c b/src/fmpq/dedekind_sum.c index 359fdf7cc4..b1fdacb2d7 100644 --- a/src/fmpq/dedekind_sum.c +++ b/src/fmpq/dedekind_sum.c @@ -66,8 +66,8 @@ fmpq_dedekind_sum_naive(fmpq_t s, const fmpz_t h, const fmpz_t k) #define _UI_MAT22_RMUL_ELEM(m11, m12, m21, m22, q) \ do { \ - mp_limb_t __t1 = m12 + q*m11; \ - mp_limb_t __t2 = m22 + q*m21; \ + ulong __t1 = m12 + q*m11; \ + ulong __t2 = m22 + q*m21; \ m12 = m11; \ m22 = m21; \ m11 = __t1; \ diff --git a/src/fmpq/fmpz_vector.c b/src/fmpq/fmpz_vector.c index 170eaac95b..e0d8d1dead 100644 --- a/src/fmpq/fmpz_vector.c +++ b/src/fmpq/fmpz_vector.c @@ -58,7 +58,7 @@ void _fmpq_cfrac_list_fit_length(_fmpq_cfrac_list_t v, slong len) v->array = (fmpz *) flint_realloc(v->array, len * sizeof(fmpz)); FLINT_ASSERT(len > v->alloc); - flint_mpn_zero((mp_ptr) (v->array + v->alloc), len - v->alloc); + flint_mpn_zero((nn_ptr) (v->array + v->alloc), len - v->alloc); } else { diff --git a/src/fmpq/get_cfrac_helpers.c b/src/fmpq/get_cfrac_helpers.c index 769c217586..6dfa552523 100644 --- a/src/fmpq/get_cfrac_helpers.c +++ b/src/fmpq/get_cfrac_helpers.c @@ -307,24 +307,24 @@ static flint_bitcnt_t _hgcd_split( s should have at least 2*FLINT_BITS entries allocated */ static slong _uiui_hgcd( - mp_limb_t * s, - mp_limb_t A1, mp_limb_t A0, - mp_limb_t B1, mp_limb_t B0, + ulong * s, + ulong A1, ulong A0, + ulong B1, ulong B0, _ui_mat22_t M) { slong written = 0; - mp_limb_t d0, d1; - mp_limb_t t0, t1, t2, r0, r1; + ulong d0, d1; + ulong t0, t1, t2, r0, r1; int det = 1; - mp_limb_t m11 = 1; - mp_limb_t m12 = 0; - mp_limb_t m21 = 0; - mp_limb_t m22 = 1; - mp_limb_t a1 = A1; - mp_limb_t a0 = A0; - mp_limb_t b1 = B1; - mp_limb_t b0 = B0; - mp_limb_t q; + ulong m11 = 1; + ulong m12 = 0; + ulong m21 = 0; + ulong m22 = 1; + ulong a1 = A1; + ulong a0 = A0; + ulong b1 = B1; + ulong b0 = B0; + ulong q; FLINT_ASSERT(a1 != 0); FLINT_ASSERT(b1 < a1 || (b1 == a1 && b0 <= a0)); @@ -460,15 +460,15 @@ static slong _uiui_hgcd( static void _lehmer_exact(_fmpq_cfrac_list_t s, _fmpz_mat22_t M, int flags, fmpz_t xa, fmpz_t xb, fmpz_t ya, fmpz_t yb) { - mp_limb_t s_temp[2*FLINT_BITS]; + ulong s_temp[2*FLINT_BITS]; slong written; unsigned int x_lzcnt; mpz_ptr xn, xd, yn, yd; - mp_size_t xn_len, xd_len, yn_len, yd_len; - mp_ptr xn_ptr, xd_ptr, yn_ptr, yd_ptr; + slong xn_len, xd_len, yn_len, yd_len; + nn_ptr xn_ptr, xd_ptr, yn_ptr, yd_ptr; _ui_mat22_t m; - mp_limb_t A0, A1, B0, B1; - mp_size_t n; + ulong A0, A1, B0, B1; + slong n; if (!COEFF_IS_MPZ(*xa) || !COEFF_IS_MPZ(*xb)) return; @@ -553,13 +553,13 @@ static void _lehmer_exact(_fmpq_cfrac_list_t s, _fmpz_mat22_t M, int flags, if (flags & CFRAC_NEED_HGCD) { /* over-strict but fast _hcgd_ok(M, yn, yd) */ - mp_size_t j; + slong j; FLINT_ASSERT(yn_len >= yd_len); _fmpz_mat22_rmul_ui(M, m); for (j = 2 + _fmpz_mat22_bits(M)/FLINT_BITS; j < yn_len; j++) { - mp_limb_t aa = yn_ptr[j]; - 
mp_limb_t bb = j < yd_len ? yd_ptr[j] : 0; + ulong aa = yn_ptr[j]; + ulong bb = j < yd_len ? yd_ptr[j] : 0; if (aa > bb && aa - bb > 1) goto its_ok; } @@ -605,18 +605,18 @@ static void _lehmer_exact(_fmpq_cfrac_list_t s, _fmpz_mat22_t M, int flags, static void _lehmer_inexact(_fmpq_cfrac_list_t s, _fmpz_mat22_t M, int needM, _fmpq_ball_t x, _fmpq_ball_t y) { - mp_limb_t s_temp[2*FLINT_BITS]; + ulong s_temp[2*FLINT_BITS]; slong written; unsigned int x_lzcnt; mpz_ptr xln, xld, xrn, xrd; mpz_ptr yln, yld, yrn, yrd; - mp_size_t xln_len, xld_len, xrn_len, xrd_len; - mp_size_t yln_len, yld_len, yrn_len, yrd_len; - mp_ptr xln_ptr, xld_ptr, xrn_ptr, xrd_ptr; - mp_ptr yln_ptr, yld_ptr, yrn_ptr, yrd_ptr; + slong xln_len, xld_len, xrn_len, xrd_len; + slong yln_len, yld_len, yrn_len, yrd_len; + nn_ptr xln_ptr, xld_ptr, xrn_ptr, xrd_ptr; + nn_ptr yln_ptr, yld_ptr, yrn_ptr, yrd_ptr; _ui_mat22_t m; - mp_limb_t A0, A1, B0, B1; - mp_size_t n, nl, nr; + ulong A0, A1, B0, B1; + slong n, nl, nr; if (!COEFF_IS_MPZ(*x->left_num) || !COEFF_IS_MPZ(*x->left_den) || !COEFF_IS_MPZ(*x->right_num) || !COEFF_IS_MPZ(*x->right_den)) diff --git a/src/fmpq/get_mpfr.c b/src/fmpq/get_mpfr.c index 33c4f8ee3a..84bbad9aa4 100644 --- a/src/fmpq/get_mpfr.c +++ b/src/fmpq/get_mpfr.c @@ -17,7 +17,7 @@ fmpq_get_mpfr(mpfr_t r, const fmpq_t x, mpfr_rnd_t rnd) { __mpq_struct mpq; fmpz p, q; - mp_limb_t pp, qq; + ulong pp, qq; p = *fmpq_numref(x); q = *fmpq_denref(x); diff --git a/src/fmpq/harmonic_ui.c b/src/fmpq/harmonic_ui.c index ea20e581bf..39fd3f4e51 100644 --- a/src/fmpq/harmonic_ui.c +++ b/src/fmpq/harmonic_ui.c @@ -17,7 +17,7 @@ # define FMPQ_HARMONIC_UI_TAB_SIZE 25 #endif -static const mp_limb_t fmpq_harmonic_ui_tab_num[] = +static const ulong fmpq_harmonic_ui_tab_num[] = { 0, 1, 3, 11, 25, 137, 49, 363, 761, 7129, 7381, 83711, 86021, 1145993, 1171733, 1195757, 2436559, 42142223, 14274301, 275295799, 55835135, @@ -34,7 +34,7 @@ static const mp_limb_t fmpq_harmonic_ui_tab_num[] = #endif }; -const mp_limb_t fmpq_harmonic_ui_tab_den[] = +const ulong fmpq_harmonic_ui_tab_den[] = { 1, 1, 2, 6, 12, 60, 20, 140, 280, 2520, 2520, 27720, 27720, 360360, 360360, 360360, 720720, 12252240, 4084080, 77597520, 15519504, 5173168, @@ -105,7 +105,7 @@ def harmonic(n): static void harmonic_odd_direct(fmpz_t P, fmpz_t Q, ulong a, ulong b, ulong n, int d) { - mp_limb_t p, q, r, s, t, u, v, w = 0; + ulong p, q, r, s, t, u, v, w = 0; slong k; fmpz_zero(P); @@ -122,7 +122,7 @@ harmonic_odd_direct(fmpz_t P, fmpz_t Q, ulong a, ulong b, ulong n, int d) d++; r = (UWORD(1) << d) - UWORD(1); - s = ((mp_limb_t) k) << (d-1); + s = ((ulong) k) << (d-1); umul_ppmm(t, u, p, s); umul_ppmm(v, w, q, r); diff --git a/src/fmpq/mul.c b/src/fmpq/mul.c index 058be6e784..4cb7fb1df7 100644 --- a/src/fmpq/mul.c +++ b/src/fmpq/mul.c @@ -25,7 +25,7 @@ static ulong _fmpz_gcd_ui(const fmpz_t g, ulong h) void _fmpq_mul_small(fmpz_t rnum, fmpz_t rden, slong op1num, ulong op1den, slong op2num, ulong op2den) { - mp_limb_t hi, lo, denhi, denlo; + ulong hi, lo, denhi, denlo; int neg; if (op1num == 0 || op2num == 0) diff --git a/src/fmpq/randtest.c b/src/fmpq/randtest.c index 4c4131eff0..52db810d81 100644 --- a/src/fmpq/randtest.c +++ b/src/fmpq/randtest.c @@ -15,7 +15,7 @@ void _fmpq_randtest(fmpz_t num, fmpz_t den, flint_rand_t state, flint_bitcnt_t bits) { - mp_limb_t x = n_randlimb(state); + ulong x = n_randlimb(state); fmpz_randtest(num, state, bits); diff --git a/src/fmpq/reconstruct_fmpz_2.c b/src/fmpq/reconstruct_fmpz_2.c index 3947610684..def61ccd95 100644 --- 
a/src/fmpq/reconstruct_fmpz_2.c +++ b/src/fmpq/reconstruct_fmpz_2.c @@ -20,23 +20,23 @@ hgcd for two-limb input, individual quotients not written */ static slong _hgcd_uiui_no_write( - mp_limb_t A1, mp_limb_t A0, - mp_limb_t B1, mp_limb_t B0, + ulong A1, ulong A0, + ulong B1, ulong B0, _ui_mat22_t M) { slong written = 0; /* number of quotients generated */ - mp_limb_t last_written = 0; - mp_limb_t d0, d1, t0, t1, t2, r0, r1; + ulong last_written = 0; + ulong d0, d1, t0, t1, t2, r0, r1; int det = 1; - mp_limb_t m11 = 1; - mp_limb_t m12 = 0; - mp_limb_t m21 = 0; - mp_limb_t m22 = 1; - mp_limb_t a1 = A1; - mp_limb_t a0 = A0; - mp_limb_t b1 = B1; - mp_limb_t b0 = B0; - mp_limb_t q; + ulong m11 = 1; + ulong m12 = 0; + ulong m21 = 0; + ulong m22 = 1; + ulong a1 = A1; + ulong a0 = A0; + ulong b1 = B1; + ulong b0 = B0; + ulong q; FLINT_ASSERT(a1 != 0); FLINT_ASSERT(b1 < a1 || (b1 == a1 && b0 <= a0)); @@ -171,7 +171,7 @@ static slong _hgcd_uiui_no_write( /* u is odd */ -static int coprime_ui(mp_limb_t u, mp_limb_t v) +static int coprime_ui(ulong u, ulong v) { FLINT_ASSERT(u > 0); FLINT_ASSERT(v > 0); @@ -206,7 +206,7 @@ static int coprime_ui(mp_limb_t u, mp_limb_t v) } /* u is odd */ -static int coprime_uiui(mp_limb_t u1, mp_limb_t u0, mp_limb_t v1, mp_limb_t v0) +static int coprime_uiui(ulong u1, ulong u0, ulong v1, ulong v0) { FLINT_ASSERT(u1 > 0 || u0 > 0); FLINT_ASSERT(v1 > 0 || v0 > 0); @@ -258,8 +258,8 @@ static int coprime_uiui(mp_limb_t u1, mp_limb_t u0, mp_limb_t v1, mp_limb_t v0) int _fmpq_reconstruct_fmpz_2_ui(fmpz_t n, fmpz_t d, const fmpz_t a, const fmpz_t m, const fmpz_t NN, const fmpz_t DD) { - mp_limb_t Q, R, A, B, N; - mp_limb_t m11 = 1, m12 = 0, t; + ulong Q, R, A, B, N; + ulong m11 = 1, m12 = 0, t; int mdet = 1; FLINT_ASSERT(fmpz_size(m) == 1); @@ -310,9 +310,9 @@ int _fmpq_reconstruct_fmpz_2_ui(fmpz_t n, fmpz_t d, int _fmpq_reconstruct_fmpz_2_uiui(fmpz_t n, fmpz_t d, const fmpz_t a, const fmpz_t m, const fmpz_t NN, const fmpz_t DD) { - mp_limb_t extra; - mp_limb_t Q1, Q0, R1, R0, A1, A0, B1, B0, N1, N0, D1, D0; - mp_limb_t m11[2] = {1, 0}, m12[2] = {0, 0}, t[2]; + ulong extra; + ulong Q1, Q0, R1, R0, A1, A0, B1, B0, N1, N0, D1, D0; + ulong m11[2] = {1, 0}, m12[2] = {0, 0}, t[2]; int mdet = 1; FLINT_ASSERT(fmpz_size(m) == 2); @@ -382,18 +382,18 @@ int _fmpq_reconstruct_fmpz_2_uiui(fmpz_t n, fmpz_t d, int _fmpq_reconstruct_fmpz_2_ui_array(fmpz_t n, fmpz_t d, const fmpz_t a, const fmpz_t m, const fmpz_t N, const fmpz_t D) { - mp_limb_t ex0, ex1, ex2, ex3, A1, A0, B1, B0; + ulong ex0, ex1, ex2, ex3, A1, A0, B1, B0; unsigned int n_lzcnt, a_lzcnt; _ui_mat22_t h; slong written; - const mp_limb_t * n_ptr, * d_ptr; + const ulong * n_ptr, * d_ptr; slong n_len, d_len; - mp_limb_t A[FMPQ_RECONSTRUCT_ARRAY_LIMIT + 1]; - mp_limb_t B[FMPQ_RECONSTRUCT_ARRAY_LIMIT + 1]; - mp_limb_t Q[FMPQ_RECONSTRUCT_ARRAY_LIMIT + 1]; - mp_limb_t R[FMPQ_RECONSTRUCT_ARRAY_LIMIT + 1]; - mp_limb_t m11[FMPQ_RECONSTRUCT_ARRAY_LIMIT + 2]; - mp_limb_t m12[FMPQ_RECONSTRUCT_ARRAY_LIMIT + 2]; + ulong A[FMPQ_RECONSTRUCT_ARRAY_LIMIT + 1]; + ulong B[FMPQ_RECONSTRUCT_ARRAY_LIMIT + 1]; + ulong Q[FMPQ_RECONSTRUCT_ARRAY_LIMIT + 1]; + ulong R[FMPQ_RECONSTRUCT_ARRAY_LIMIT + 1]; + ulong m11[FMPQ_RECONSTRUCT_ARRAY_LIMIT + 2]; + ulong m12[FMPQ_RECONSTRUCT_ARRAY_LIMIT + 2]; slong Alen, Blen, Qlen, Rlen, m_len; int mdet = 1; @@ -414,7 +414,7 @@ int _fmpq_reconstruct_fmpz_2_ui_array(fmpz_t n, fmpz_t d, } else { - n_ptr = (mp_srcptr) N; /* haha, dirty but works */ + n_ptr = (nn_srcptr) N; /* haha, dirty but works */ n_len = 1; } @@ 
-425,7 +425,7 @@ int _fmpq_reconstruct_fmpz_2_ui_array(fmpz_t n, fmpz_t d, } else { - d_ptr = (mp_srcptr) D; /* haha, dirty but works */ + d_ptr = (nn_srcptr) D; /* haha, dirty but works */ d_len = 1; } @@ -627,12 +627,12 @@ static int _lehmer(_fmpz_mat22_t M, fmpz_t A, fmpz_t B, const fmpz_t N, { int ret; slong written; - mp_srcptr n_ptr; + nn_srcptr n_ptr; mpz_ptr a, b, s, t; - mp_ptr a_ptr, b_ptr, s_ptr, t_ptr; - mp_size_t a_len, b_len, n_len, s_len, t_len; + nn_ptr a_ptr, b_ptr, s_ptr, t_ptr; + slong a_len, b_len, n_len, s_len, t_len; _ui_mat22_t h; - mp_limb_t A0, A1, B0, B1; + ulong A0, A1, B0, B1; unsigned int n_lzcnt, a_lzcnt; if (!COEFF_IS_MPZ(*A) || !COEFF_IS_MPZ(*B)) @@ -651,7 +651,7 @@ static int _lehmer(_fmpz_mat22_t M, fmpz_t A, fmpz_t B, const fmpz_t N, } else { - n_ptr = (mp_srcptr) N; /* haha, dirty but works */ + n_ptr = (nn_srcptr) N; /* haha, dirty but works */ n_len = 1; } @@ -923,7 +923,7 @@ int _fmpq_reconstruct_fmpz_2(fmpz_t n, fmpz_t d, const fmpz_t a, const fmpz_t m, const fmpz_t N, const fmpz_t D) { int ret, success; - mp_size_t Asize, Nsize; + slong Asize, Nsize; fmpz_t Q, R, A, B; _fmpz_mat22_t M; /* only need first row of matrix M */ #if FLINT_WANT_ASSERT diff --git a/src/fmpq/test/main.c b/src/fmpq/test/main.c index ac8b890e12..cf0cd5c559 100644 --- a/src/fmpq/test/main.c +++ b/src/fmpq/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* For t-get_mpfr.c */ #include #include diff --git a/src/fmpq_mat/can_solve_dixon.c b/src/fmpq_mat/can_solve_dixon.c index 6e68c61cee..d7b6a4c6cb 100644 --- a/src/fmpq_mat/can_solve_dixon.c +++ b/src/fmpq_mat/can_solve_dixon.c @@ -25,7 +25,7 @@ int fmpq_mat_can_solve_fmpz_mat_dixon(fmpq_mat_t X, const fmpz_mat_t A, const fmpz_mat_t B) { - mp_limb_t p; + ulong p; fmpz_t tested; nmod_mat_t Ap, LU; int result = 0, success = 0; diff --git a/src/fmpq_mat/can_solve_multi_mod.c b/src/fmpq_mat/can_solve_multi_mod.c index 62f8d5ea48..e55308a1cb 100644 --- a/src/fmpq_mat/can_solve_multi_mod.c +++ b/src/fmpq_mat/can_solve_multi_mod.c @@ -96,7 +96,7 @@ _fmpq_mat_can_solve_multi_mod(fmpq_mat_t X, slong * prm, * perm, * piv, * pivots; int stabilised; /* has CRT stabilised */ int res = 1, pcmp, firstp = 1; - mp_limb_t p = UWORD(1) << NMOD_MAT_OPTIMAL_MODULUS_BITS; + ulong p = UWORD(1) << NMOD_MAT_OPTIMAL_MODULUS_BITS; n = A->r; diff --git a/src/fmpq_mat/solve_dixon.c b/src/fmpq_mat/solve_dixon.c index 84d19890b8..cd57ff95c9 100644 --- a/src/fmpq_mat/solve_dixon.c +++ b/src/fmpq_mat/solve_dixon.c @@ -25,13 +25,13 @@ _fmpq_mat_check_solution_fmpz_mat(const fmpq_mat_t X, const fmpz_mat_t A, const void _fmpq_mat_solve_dixon(fmpq_mat_t X, const fmpz_mat_t A, const fmpz_mat_t B, - const nmod_mat_t Ainv, mp_limb_t p, + const nmod_mat_t Ainv, ulong p, const fmpz_t N, const fmpz_t D) { fmpz_t bound, ppow; fmpz_mat_t x, y, d, Ay; fmpz_t prod; - mp_limb_t * crt_primes; + ulong * crt_primes; nmod_mat_t * A_mod; nmod_mat_t Ay_mod, d_mod, y_mod; slong i, j, n, nexti, cols, num_primes; @@ -166,7 +166,7 @@ fmpq_mat_solve_fmpz_mat_dixon(fmpq_mat_t X, { nmod_mat_t Ainv; fmpz_t N, D; - mp_limb_t p; + ulong p; if (!fmpz_mat_is_square(A)) { diff --git a/src/fmpq_mat/solve_multi_mod.c b/src/fmpq_mat/solve_multi_mod.c index def7ec0886..cac1b0c53e 100644 --- a/src/fmpq_mat/solve_multi_mod.c +++ b/src/fmpq_mat/solve_multi_mod.c @@ -17,7 +17,7 @@ #include "fmpz_mat.h" #include "fmpq_mat.h" -mp_limb_t fmpz_mat_find_good_prime_and_solve(nmod_mat_t Xmod, +ulong fmpz_mat_find_good_prime_and_solve(nmod_mat_t Xmod, nmod_mat_t 
Amod, nmod_mat_t Bmod, const fmpz_mat_t A, const fmpz_mat_t B, const fmpz_t det_bound); @@ -64,7 +64,7 @@ void _fmpq_mat_solve_multi_mod(fmpq_mat_t X, const fmpz_mat_t A, const fmpz_mat_t B, nmod_mat_t Xmod, nmod_mat_t Amod, nmod_mat_t Bmod, - mp_limb_t p, const fmpz_t N, const fmpz_t D) + ulong p, const fmpz_t N, const fmpz_t D) { fmpz_t bound, pprod; fmpz_mat_t x; @@ -151,7 +151,7 @@ fmpq_mat_solve_fmpz_mat_multi_mod(fmpq_mat_t X, { nmod_mat_t Xmod, Amod, Bmod; fmpz_t N, D; - mp_limb_t p; + ulong p; if (!fmpz_mat_is_square(A)) { diff --git a/src/fmpq_mat/test/main.c b/src/fmpq_mat/test/main.c index 5c217b7d18..8c20b92e93 100644 --- a/src/fmpq_mat/test/main.c +++ b/src/fmpq_mat/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/fmpq_mpoly/test/main.c b/src/fmpq_mpoly/test/main.c index 9d249e0416..c1aa39275c 100644 --- a/src/fmpq_mpoly/test/main.c +++ b/src/fmpq_mpoly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add_sub.c" diff --git a/src/fmpq_mpoly/test/t-div.c b/src/fmpq_mpoly/test/t-div.c index 34efa5d137..5d1b87e835 100644 --- a/src/fmpq_mpoly/test/t-div.c +++ b/src/fmpq_mpoly/test/t-div.c @@ -113,7 +113,7 @@ TEST_FUNCTION_START(fmpq_mpoly_div, state) fmpq_mpoly_ctx_t ctx; fmpq_mpoly_t f, g, h, k, r; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; flint_bitcnt_t coeff_bits; fmpz * shifts, * strides; slong nvars; @@ -134,9 +134,9 @@ TEST_FUNCTION_START(fmpq_mpoly_div, state) coeff_bits = n_randint(state, 70); max_bound = 1 + 400/FLINT_MAX(WORD(1), nvars)/FLINT_MAX(WORD(1), nvars); - exp_bound = (mp_limb_t *) flint_malloc(nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(nvars*sizeof(ulong)); shifts = (fmpz *) flint_malloc(nvars*sizeof(fmpz)); strides = (fmpz *) flint_malloc(nvars*sizeof(fmpz)); for (j = 0; j < nvars; j++) @@ -205,7 +205,7 @@ TEST_FUNCTION_START(fmpq_mpoly_div, state) fmpq_mpoly_ctx_t ctx; fmpq_mpoly_t f, g, h, r; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; flint_bitcnt_t coeff_bits; fmpz * shifts, * strides; slong nvars; @@ -225,9 +225,9 @@ TEST_FUNCTION_START(fmpq_mpoly_div, state) coeff_bits = n_randint(state, 70); max_bound = 1 + 400/FLINT_MAX(WORD(1), nvars)/FLINT_MAX(WORD(1), nvars); - exp_bound = (mp_limb_t *) flint_malloc(nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(nvars*sizeof(ulong)); shifts = (fmpz *) flint_malloc(nvars*sizeof(fmpz)); strides = (fmpz *) flint_malloc(nvars*sizeof(fmpz)); for (j = 0; j < nvars; j++) @@ -294,7 +294,7 @@ TEST_FUNCTION_START(fmpq_mpoly_div, state) fmpq_mpoly_ctx_t ctx; fmpq_mpoly_t f, g, h, r; slong 
len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; flint_bitcnt_t coeff_bits; fmpz * shifts, * strides; slong nvars; @@ -314,9 +314,9 @@ TEST_FUNCTION_START(fmpq_mpoly_div, state) coeff_bits = n_randint(state, 70); max_bound = 1 + 400/FLINT_MAX(WORD(1), nvars)/FLINT_MAX(WORD(1), nvars); - exp_bound = (mp_limb_t *) flint_malloc(nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(nvars*sizeof(ulong)); shifts = (fmpz *) flint_malloc(nvars*sizeof(fmpz)); strides = (fmpz *) flint_malloc(nvars*sizeof(fmpz)); for (j = 0; j < nvars; j++) diff --git a/src/fmpq_mpoly/test/t-gcd.c b/src/fmpq_mpoly/test/t-gcd.c index 858240157b..a48c5489ba 100644 --- a/src/fmpq_mpoly/test/t-gcd.c +++ b/src/fmpq_mpoly/test/t-gcd.c @@ -291,7 +291,7 @@ TEST_FUNCTION_START(fmpq_mpoly_gcd, state) fmpq_mpoly_ctx_t ctx; fmpq_mpoly_t a, b, g, t1, t2; slong len, len1, len2; - mp_limb_t exp_bound, exp_bound1, exp_bound2; + ulong exp_bound, exp_bound1, exp_bound2; flint_bitcnt_t coeff_bits; fmpq_mpoly_ctx_init_rand(ctx, state, 10); @@ -394,7 +394,7 @@ TEST_FUNCTION_START(fmpq_mpoly_gcd, state) { fmpq_mpoly_ctx_t ctx; fmpq_mpoly_t a, b, g, t; - mp_limb_t rlimb; + ulong rlimb; flint_bitcnt_t coeff_bits, newbits; slong len, len1, len2; slong degbound; @@ -606,7 +606,7 @@ TEST_FUNCTION_START(fmpq_mpoly_gcd, state) { fmpq_mpoly_ctx_t ctx; fmpq_mpoly_t a, b, g, t; - mp_limb_t rlimb; + ulong rlimb; flint_bitcnt_t newbits; flint_bitcnt_t coeff_bits1, coeff_bits2, coeff_bits3, coeff_bits4; slong len1, len2, len3, len4; diff --git a/src/fmpq_mpoly/test/t-gcd_cofactors.c b/src/fmpq_mpoly/test/t-gcd_cofactors.c index d7829db479..767deda39b 100644 --- a/src/fmpq_mpoly/test/t-gcd_cofactors.c +++ b/src/fmpq_mpoly/test/t-gcd_cofactors.c @@ -455,7 +455,7 @@ TEST_FUNCTION_START(fmpq_mpoly_gcd_cofactors, state) fmpq_mpoly_ctx_t ctx; fmpq_mpoly_t a, b, g, abar, bbar, t1, t2; slong len, len1, len2; - mp_limb_t exp_bound, exp_bound1, exp_bound2; + ulong exp_bound, exp_bound1, exp_bound2; flint_bitcnt_t coeff_bits; fmpq_mpoly_ctx_init_rand(ctx, state, 10); @@ -566,7 +566,7 @@ TEST_FUNCTION_START(fmpq_mpoly_gcd_cofactors, state) { fmpq_mpoly_ctx_t ctx; fmpq_mpoly_t a, b, g, abar, bbar, t; - mp_limb_t rlimb; + ulong rlimb; flint_bitcnt_t coeff_bits, newbits; slong len, len1, len2; slong degbound; @@ -790,7 +790,7 @@ TEST_FUNCTION_START(fmpq_mpoly_gcd_cofactors, state) { fmpq_mpoly_ctx_t ctx; fmpq_mpoly_t a, b, g, abar, bbar, t; - mp_limb_t rlimb; + ulong rlimb; flint_bitcnt_t newbits; flint_bitcnt_t coeff_bits1, coeff_bits2, coeff_bits3, coeff_bits4; slong len1, len2, len3, len4; diff --git a/src/fmpq_mpoly_factor/test/main.c b/src/fmpq_mpoly_factor/test/main.c index 1d911c51b7..9081fc8311 100644 --- a/src/fmpq_mpoly_factor/test/main.c +++ b/src/fmpq_mpoly_factor/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-factor.c" diff --git a/src/fmpq_poly/exp_series.c b/src/fmpq_poly/exp_series.c index 31c99ce38c..7618040f20 100644 --- a/src/fmpq_poly/exp_series.c +++ b/src/fmpq_poly/exp_series.c @@ -103,7 +103,7 @@ void _fmpq_poly_integral_offset(fmpz * rpoly, fmpz_t rden, { slong k; ulong v, c, d; - mp_ptr divisors; + nn_ptr divisors; fmpz_t t, u; TMP_INIT; diff --git a/src/fmpq_poly/integral.c b/src/fmpq_poly/integral.c index c01a4b1e1e..1c12bf8257 100644 --- a/src/fmpq_poly/integral.c +++ b/src/fmpq_poly/integral.c @@ -33,7 +33,7 @@ void _fmpq_poly_integral(fmpz * rpoly, fmpz_t rden, { slong k; ulong v, c, d; - mp_ptr divisors; + nn_ptr divisors; fmpz_t t, u; TMP_INIT; diff --git a/src/fmpq_poly/pow_trunc.c b/src/fmpq_poly/pow_trunc.c index 7b69e6315a..74db2e9666 100644 --- a/src/fmpq_poly/pow_trunc.c +++ b/src/fmpq_poly/pow_trunc.c @@ -16,7 +16,7 @@ /* counts zero bits in the binary representation of e */ static int -n_zerobits(mp_limb_t e) +n_zerobits(ulong e) { int zeros = 0; @@ -32,10 +32,10 @@ n_zerobits(mp_limb_t e) static slong poly_pow_length(slong poly_len, ulong exp, slong trunc) { - mp_limb_t hi, lo; + ulong hi, lo; umul_ppmm(hi, lo, poly_len - 1, exp); add_ssaaaa(hi, lo, hi, lo, 0, 1); - if (hi != 0 || lo > (mp_limb_t) WORD_MAX) + if (hi != 0 || lo > (ulong) WORD_MAX) return trunc; return FLINT_MIN((slong) lo, trunc); } diff --git a/src/fmpq_poly/realloc.c b/src/fmpq_poly/realloc.c index 153fa6e97c..602bd4663a 100644 --- a/src/fmpq_poly/realloc.c +++ b/src/fmpq_poly/realloc.c @@ -36,7 +36,7 @@ void fmpq_poly_realloc(fmpq_poly_t poly, slong alloc) poly->coeffs = (fmpz *) flint_realloc(poly->coeffs, alloc * sizeof(fmpz)); if (poly->alloc < alloc) { - flint_mpn_zero((mp_ptr) (poly->coeffs + poly->alloc), alloc - poly->alloc); + flint_mpn_zero((nn_ptr) (poly->coeffs + poly->alloc), alloc - poly->alloc); } } else /* Nothing allocated, do it now */ diff --git a/src/fmpq_poly/resultant.c b/src/fmpq_poly/resultant.c index 52c32ae6bf..f0d3288e29 100644 --- a/src/fmpq_poly/resultant.c +++ b/src/fmpq_poly/resultant.c @@ -53,7 +53,7 @@ void _fmpq_poly_resultant(fmpz_t rnum, fmpz_t rden, slong lenG = len2; ulong p; nmod_t mod; - mp_ptr pp1, pp2, gp; + nn_ptr pp1, pp2, gp; fmpz_init(c1); fmpz_init(c2); diff --git a/src/fmpq_poly/set_coeff.c b/src/fmpq_poly/set_coeff.c index 08021043b2..02567be6b7 100644 --- a/src/fmpq_poly/set_coeff.c +++ b/src/fmpq_poly/set_coeff.c @@ -28,7 +28,7 @@ void fmpq_poly_set_coeff_fmpq(fmpq_poly_t poly, slong n, const fmpq_t x) { fmpq_poly_fit_length(poly, n + 1); _fmpq_poly_set_length(poly, n + 1); - flint_mpn_zero((mp_ptr) poly->coeffs + len, (n + 1) - len); + flint_mpn_zero((nn_ptr) poly->coeffs + len, (n + 1) - len); len = n + 1; } @@ -92,7 +92,7 @@ void fmpq_poly_set_coeff_fmpz(fmpq_poly_t poly, slong n, const fmpz_t x) { fmpq_poly_fit_length(poly, n + 1); _fmpq_poly_set_length(poly, n + 1); - flint_mpn_zero((mp_ptr) poly->coeffs + len, (n + 1) - len); + flint_mpn_zero((nn_ptr) poly->coeffs + len, (n + 1) - len); } if (*poly->den == WORD(1)) @@ -121,7 +121,7 @@ void fmpq_poly_set_coeff_si(fmpq_poly_t poly, slong n, slong x) { fmpq_poly_fit_length(poly, n + 1); _fmpq_poly_set_length(poly, n + 1); - flint_mpn_zero((mp_ptr) poly->coeffs + len, (n + 1) - len); + flint_mpn_zero((nn_ptr) poly->coeffs + len, (n + 1) - len); } if (*poly->den == WORD(1)) @@ -150,7 +150,7 @@ void fmpq_poly_set_coeff_ui(fmpq_poly_t poly, slong n, ulong x) { 
fmpq_poly_fit_length(poly, n + 1); _fmpq_poly_set_length(poly, n + 1); - flint_mpn_zero((mp_ptr) poly->coeffs + len, (n + 1) - len); + flint_mpn_zero((nn_ptr) poly->coeffs + len, (n + 1) - len); } if (*poly->den == WORD(1)) diff --git a/src/fmpq_poly/test/main.c b/src/fmpq_poly/test/main.c index 36b106cd3b..35e49dd938 100644 --- a/src/fmpq_poly/test/main.c +++ b/src/fmpq_poly/test/main.c @@ -20,9 +20,6 @@ # undef ulong #endif -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/fmpq_poly/test/t-get_nmod_poly.c b/src/fmpq_poly/test/t-get_nmod_poly.c index bacc6b0877..1d7028bc66 100644 --- a/src/fmpq_poly/test/t-get_nmod_poly.c +++ b/src/fmpq_poly/test/t-get_nmod_poly.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(fmpq_poly_get_nmod_poly, state) fmpq_poly_t A; nmod_poly_t M, M2; slong length; - mp_limb_t mod; + ulong mod; length = n_randint(state, 50); @@ -58,7 +58,7 @@ TEST_FUNCTION_START(fmpq_poly_get_nmod_poly, state) fmpq_poly_t A; nmod_poly_t M, M2; slong length; - mp_limb_t mod; + ulong mod; ulong lead; length = n_randint(state, 50); diff --git a/src/fmpq_vec/randtest_uniq_sorted.c b/src/fmpq_vec/randtest_uniq_sorted.c index 1c681f2595..0978947c4e 100644 --- a/src/fmpq_vec/randtest_uniq_sorted.c +++ b/src/fmpq_vec/randtest_uniq_sorted.c @@ -19,7 +19,7 @@ void _fmpq_vec_randtest_uniq_sorted(fmpq * vec, flint_rand_t state, slong len, f int do_again; /* if 2^bits < len we are too likely to have collision */ - if ((mp_limb_t) (4 * n_sizeinbase(len, 2)) > bits) + if ((ulong) (4 * n_sizeinbase(len, 2)) > bits) flint_throw(FLINT_ERROR, "bits too small in %s\n", __func__); _fmpq_vec_randtest(vec, state, len, bits); diff --git a/src/fmpq_vec/test/main.c b/src/fmpq_vec/test/main.c index a6e321f08c..3c8ccf5d1e 100644 --- a/src/fmpq_vec/test/main.c +++ b/src/fmpq_vec/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
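The set_coeff and realloc hunks above clear newly exposed coefficients limb-wise through an nn_ptr (i.e. ulong *) view of the coefficient array. A small standalone sketch of that zeroing idiom on a plain buffer, with made-up lengths; the cast in the real code additionally relies on fmpz being a single word:

#include "flint.h"

/* Illustrative only: zero the slots between an old and a new length. */
void zero_new_slots_example(void)
{
    ulong buf[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    slong old_len = 3, new_len = 8;

    /* clear positions old_len .. new_len - 1 */
    flint_mpn_zero((nn_ptr) buf + old_len, new_len - old_len);
}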
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-dot.c" diff --git a/src/fmpz.h b/src/fmpz.h index c53ece6738..a79a6f1c9e 100644 --- a/src/fmpz.h +++ b/src/fmpz.h @@ -18,6 +18,7 @@ #define FMPZ_INLINE static inline #endif +#include #include "fmpz_types.h" #ifdef __cplusplus @@ -178,7 +179,7 @@ fmpz_neg_ui(fmpz_t f, ulong val) } FMPZ_INLINE void -fmpz_get_uiui(mp_limb_t * hi, mp_limb_t * low, const fmpz_t f) +fmpz_get_uiui(ulong * hi, ulong * low, const fmpz_t f) { if (!COEFF_IS_MPZ(*f)) { @@ -194,7 +195,7 @@ fmpz_get_uiui(mp_limb_t * hi, mp_limb_t * low, const fmpz_t f) } FMPZ_INLINE void -fmpz_set_uiui(fmpz_t f, mp_limb_t hi, mp_limb_t lo) +fmpz_set_uiui(fmpz_t f, ulong hi, ulong lo) { if (hi == 0) { @@ -212,7 +213,7 @@ fmpz_set_uiui(fmpz_t f, mp_limb_t hi, mp_limb_t lo) } FMPZ_INLINE void -fmpz_neg_uiui(fmpz_t f, mp_limb_t hi, mp_limb_t lo) +fmpz_neg_uiui(fmpz_t f, ulong hi, ulong lo) { if (hi == 0) { @@ -257,7 +258,7 @@ void fmpz_set_signed_ui_array(fmpz_t out, const ulong * in, slong n); void fmpz_get_mpz(mpz_t x, const fmpz_t f); void fmpz_set_mpz(fmpz_t f, const mpz_t x); -mp_limb_t fmpz_get_nmod(const fmpz_t f, nmod_t mod); +ulong fmpz_get_nmod(const fmpz_t f, nmod_t mod); double fmpz_get_d(const fmpz_t f); void fmpz_set_d(fmpz_t f, double c); @@ -269,7 +270,7 @@ void fmpz_set_mpf(fmpz_t f, const mpf_t x); void fmpz_get_mpfr(mpfr_t x, const fmpz_t f, mpfr_rnd_t rnd); #endif -int fmpz_get_mpn(mp_ptr * n, fmpz_t n_in); +int fmpz_get_mpn(nn_ptr * n, fmpz_t n_in); /* Comparisons ***************************************************************/ @@ -314,15 +315,15 @@ int fmpz_abs_fits_ui(const fmpz_t f); int fmpz_fits_si(const fmpz_t f); size_t fmpz_sizeinbase(const fmpz_t f, int b); -mp_size_t fmpz_size(const fmpz_t f); +slong fmpz_size(const fmpz_t f); flint_bitcnt_t fmpz_bits(const fmpz_t f); flint_bitcnt_t fmpz_val2(const fmpz_t x); int fmpz_is_square(const fmpz_t f); int fmpz_is_perfect_power(fmpz_t root, const fmpz_t f); -mp_limb_t fmpz_abs_ubound_ui_2exp(slong * exp, const fmpz_t x, int bits); -mp_limb_t fmpz_abs_lbound_ui_2exp(slong * exp, const fmpz_t x, int bits); +ulong fmpz_abs_ubound_ui_2exp(slong * exp, const fmpz_t x, int bits); +ulong fmpz_abs_lbound_ui_2exp(slong * exp, const fmpz_t x, int bits); /* I/O ***********************************************************************/ @@ -374,7 +375,7 @@ void fmpz_mul(fmpz_t f, const fmpz_t g, const fmpz_t h); FMPZ_INLINE void fmpz_mul2_uiui(fmpz_t f, const fmpz_t g, ulong h1, ulong h2) { - mp_limb_t hi, lo; + ulong hi, lo; umul_ppmm(hi, lo, h1, h2); if (!hi) @@ -423,7 +424,7 @@ void fmpz_divexact_si(fmpz_t f, const fmpz_t g, slong h); FMPZ_INLINE void fmpz_divexact2_uiui(fmpz_t f, const fmpz_t g, ulong h1, ulong h2) { - mp_limb_t hi, lo; + ulong hi, lo; umul_ppmm(hi, lo, h1, h2); if (!hi) @@ -519,9 +520,9 @@ flint_bitcnt_t fmpz_popcnt(const fmpz_t c); /* Bit packing ***************************************************************/ -int fmpz_bit_pack(mp_ptr arr, flint_bitcnt_t shift, flint_bitcnt_t bits, const fmpz_t coeff, int negate, int borrow); -int fmpz_bit_unpack(fmpz_t coeff, mp_srcptr arr, flint_bitcnt_t shift, flint_bitcnt_t bits, int negate, int borrow); -void fmpz_bit_unpack_unsigned(fmpz_t coeff, mp_srcptr arr, flint_bitcnt_t shift, flint_bitcnt_t bits); +int fmpz_bit_pack(nn_ptr arr, flint_bitcnt_t shift, flint_bitcnt_t bits, const fmpz_t coeff, int negate, int borrow); +int fmpz_bit_unpack(fmpz_t coeff, nn_srcptr arr, flint_bitcnt_t shift, 
flint_bitcnt_t bits, int negate, int borrow); +void fmpz_bit_unpack_unsigned(fmpz_t coeff, nn_srcptr arr, flint_bitcnt_t shift, flint_bitcnt_t bits); /* Modular arithmetic ********************************************************/ @@ -533,7 +534,7 @@ fmpz_mod_ui(fmpz_t f, const fmpz_t g, ulong h) return h; } -FMPZ_INLINE void fmpz_set_ui_smod(fmpz_t f, mp_limb_t x, mp_limb_t m) +FMPZ_INLINE void fmpz_set_ui_smod(fmpz_t f, ulong x, ulong m) { if (x <= m / 2) fmpz_set_ui(f, x); @@ -604,7 +605,7 @@ void fmpz_fib_ui(fmpz_t f, ulong n); /* crt ***********************************************************************/ -void _fmpz_CRT_ui_precomp(fmpz_t out, const fmpz_t r1, const fmpz_t m1, ulong r2, ulong m2, mp_limb_t m2inv, const fmpz_t m1m2, mp_limb_t c, int sign); +void _fmpz_CRT_ui_precomp(fmpz_t out, const fmpz_t r1, const fmpz_t m1, ulong r2, ulong m2, ulong m2inv, const fmpz_t m1m2, ulong c, int sign); void fmpz_CRT_ui(fmpz_t out, const fmpz_t r1, const fmpz_t m1, ulong r2, ulong m2, int sign); void fmpz_CRT(fmpz_t out, const fmpz_t r1, const fmpz_t m1, const fmpz_t r2, const fmpz_t m2, int sign); @@ -680,7 +681,7 @@ void fmpz_multi_mod_precomp(fmpz * outputs, const fmpz_multi_mod_t P, const fmpz typedef struct { nmod_t mod; - mp_limb_t i0, i1, i2; + ulong i0, i1, i2; } crt_lut_entry; typedef struct { @@ -691,7 +692,7 @@ typedef struct { typedef struct { fmpz_multi_CRT_t crt_P; fmpz_multi_mod_t mod_P; - mp_limb_t * packed_multipliers; + ulong * packed_multipliers; slong * step; slong * crt_offsets; slong crt_offsets_alloc; @@ -718,11 +719,11 @@ typedef fmpz_comb_temp_struct fmpz_comb_temp_t[1]; void fmpz_comb_temp_init(fmpz_comb_temp_t CT, const fmpz_comb_t C); void fmpz_comb_temp_clear(fmpz_comb_temp_t CT); -void fmpz_comb_init(fmpz_comb_t C, mp_srcptr primes, slong num_primes); +void fmpz_comb_init(fmpz_comb_t C, nn_srcptr primes, slong num_primes); void fmpz_comb_clear(fmpz_comb_t C); -void fmpz_multi_mod_ui(mp_limb_t * out, const fmpz_t in, const fmpz_comb_t C, fmpz_comb_temp_t CT); -void fmpz_multi_CRT_ui(fmpz_t output, mp_srcptr residues, const fmpz_comb_t comb, fmpz_comb_temp_t temp, int sign); +void fmpz_multi_mod_ui(ulong * out, const fmpz_t in, const fmpz_comb_t C, fmpz_comb_temp_t CT); +void fmpz_multi_CRT_ui(fmpz_t output, nn_srcptr residues, const fmpz_comb_t comb, fmpz_comb_temp_t temp, int sign); /*****************************************************************************/ @@ -740,14 +741,14 @@ int fmpz_is_probabprime(const fmpz_t p); int fmpz_is_strong_probabprime(const fmpz_t n, const fmpz_t a); int fmpz_is_prime_pseudosquare(const fmpz_t n); -int fmpz_is_prime_pocklington(fmpz_t F, fmpz_t R, const fmpz_t n, mp_ptr pm1, slong num_pm1); -int fmpz_is_prime_morrison(fmpz_t F, fmpz_t R, const fmpz_t n, mp_ptr pm1, slong num_pm1); +int fmpz_is_prime_pocklington(fmpz_t F, fmpz_t R, const fmpz_t n, nn_ptr pm1, slong num_pm1); +int fmpz_is_prime_morrison(fmpz_t F, fmpz_t R, const fmpz_t n, nn_ptr pm1, slong num_pm1); int fmpz_is_prime(const fmpz_t p); void fmpz_nextprime(fmpz_t res, const fmpz_t n, int proved); -void _fmpz_nm1_trial_factors(const fmpz_t n, mp_ptr pm1, slong * num_pm1, ulong limit); -void _fmpz_np1_trial_factors(const fmpz_t n, mp_ptr pp1, slong * num_pp1, ulong limit); +void _fmpz_nm1_trial_factors(const fmpz_t n, nn_ptr pm1, slong * num_pm1, ulong limit); +void _fmpz_np1_trial_factors(const fmpz_t n, nn_ptr pp1, slong * num_pp1, ulong limit); int fmpz_divisor_in_residue_class_lenstra(fmpz_t fac, const fmpz_t n, const fmpz_t r, const fmpz_t s); diff --git 
a/src/fmpz/CRT.c b/src/fmpz/CRT.c index 994e53be55..2aa972bb98 100644 --- a/src/fmpz/CRT.c +++ b/src/fmpz/CRT.c @@ -87,9 +87,9 @@ void fmpz_CRT(fmpz_t out, const fmpz_t r1, const fmpz_t m1, void _fmpz_CRT_ui_precomp(fmpz_t out, const fmpz_t r1, const fmpz_t m1, ulong r2, - ulong m2, mp_limb_t m2inv, const fmpz_t m1m2, mp_limb_t c, int sign) + ulong m2, ulong m2inv, const fmpz_t m1m2, ulong c, int sign) { - mp_limb_t r1mod, s; + ulong r1mod, s; fmpz_t tmp; nmod_t mod; @@ -126,7 +126,7 @@ _fmpz_CRT_ui_precomp(fmpz_t out, const fmpz_t r1, const fmpz_t m1, ulong r2, void fmpz_CRT_ui(fmpz_t out, const fmpz_t r1, const fmpz_t m1, ulong r2, ulong m2, int sign) { - mp_limb_t c; + ulong c; fmpz_t m1m2; c = fmpz_fdiv_ui(m1, m2); diff --git a/src/fmpz/abs_lbound_ui_2exp.c b/src/fmpz/abs_lbound_ui_2exp.c index 8201d1ee5b..3a7cd10913 100644 --- a/src/fmpz/abs_lbound_ui_2exp.c +++ b/src/fmpz/abs_lbound_ui_2exp.c @@ -13,10 +13,10 @@ #include "ulong_extras.h" #include "fmpz.h" -mp_limb_t +ulong fmpz_abs_lbound_ui_2exp(slong * exp, const fmpz_t x, int bits) { - mp_limb_t m; + ulong m; slong shift, e, size; fmpz c = *x; @@ -52,7 +52,7 @@ fmpz_abs_lbound_ui_2exp(slong * exp, const fmpz_t x, int bits) else { /* read a second limb to get an accurate value */ - mp_limb_t m2 = z->_mp_d[size - 2]; + ulong m2 = z->_mp_d[size - 2]; m = (m << (-shift)) | (m2 >> (FLINT_BITS + shift)); } diff --git a/src/fmpz/abs_ubound_ui_2exp.c b/src/fmpz/abs_ubound_ui_2exp.c index 810006cf7e..174711f56c 100644 --- a/src/fmpz/abs_ubound_ui_2exp.c +++ b/src/fmpz/abs_ubound_ui_2exp.c @@ -13,10 +13,10 @@ #include "ulong_extras.h" #include "fmpz.h" -mp_limb_t +ulong fmpz_abs_ubound_ui_2exp(slong * exp, const fmpz_t x, int bits) { - mp_limb_t m; + ulong m; slong shift, e, size; fmpz c = *x; @@ -54,7 +54,7 @@ fmpz_abs_ubound_ui_2exp(slong * exp, const fmpz_t x, int bits) else { /* read a second limb to get an accurate value */ - mp_limb_t m2 = z->_mp_d[size - 2]; + ulong m2 = z->_mp_d[size - 2]; m = (m << (-shift)) | (m2 >> (FLINT_BITS + shift)); /* round up */ m++; diff --git a/src/fmpz/addmul.c b/src/fmpz/addmul.c index 0747778e1e..3cea76e55f 100644 --- a/src/fmpz/addmul.c +++ b/src/fmpz/addmul.c @@ -26,11 +26,11 @@ void _flint_mpz_addmul_large(mpz_ptr z, mpz_srcptr x, mpz_srcptr y, int negate) { - mp_size_t xn, yn, tn, zn, zn_signed, zn_new, x_sgn, y_sgn, sgn, alloc; - mp_srcptr xd, yd; - mp_ptr zd; - mp_ptr td; - mp_limb_t top; + slong xn, yn, tn, zn, zn_signed, zn_new, x_sgn, y_sgn, sgn, alloc; + nn_srcptr xd, yd; + nn_ptr zd; + nn_ptr td; + ulong top; TMP_INIT; xn = x->_mp_size; @@ -43,7 +43,7 @@ _flint_mpz_addmul_large(mpz_ptr z, mpz_srcptr x, mpz_srcptr y, int negate) if (xn < yn) { mpz_srcptr t; - mp_size_t tn; + slong tn; t = x; x = y; y = t; tn = xn; xn = yn; yn = tn; @@ -108,7 +108,7 @@ _flint_mpz_addmul_large(mpz_ptr z, mpz_srcptr x, mpz_srcptr y, int negate) #endif TMP_START; - td = TMP_ALLOC(tn * sizeof(mp_limb_t)); + td = TMP_ALLOC(tn * sizeof(ulong)); if (x == y) { diff --git a/src/fmpz/aors_ui.c b/src/fmpz/aors_ui.c index 18f412e1f4..8feee147f4 100644 --- a/src/fmpz/aors_ui.c +++ b/src/fmpz/aors_ui.c @@ -56,10 +56,10 @@ fmpz_add_ui(fmpz_t res, const fmpz_t x, ulong y) { mpz_ptr rp; mpz_srcptr xp; - mp_ptr rd; - mp_srcptr xd; - mp_size_t xn_signed, xn; - mp_limb_t cy; + nn_ptr rd; + nn_srcptr xd; + slong xn_signed, xn; + ulong cy; xp = COEFF_TO_PTR(*x); xn_signed = xp->_mp_size; @@ -169,10 +169,10 @@ fmpz_sub_ui(fmpz_t res, const fmpz_t x, ulong y) { mpz_ptr rp; mpz_srcptr xp; - mp_ptr rd; - mp_srcptr xd; - mp_size_t 
xn_signed, xn; - mp_limb_t cy; + nn_ptr rd; + nn_srcptr xd; + slong xn_signed, xn; + ulong cy; xp = COEFF_TO_PTR(*x); xn_signed = xp->_mp_size; diff --git a/src/fmpz/bit_pack.c b/src/fmpz/bit_pack.c index f94202fa03..ce3965d5bb 100644 --- a/src/fmpz/bit_pack.c +++ b/src/fmpz/bit_pack.c @@ -13,16 +13,16 @@ #include "fmpz.h" int -fmpz_bit_pack(mp_ptr arr, flint_bitcnt_t shift, flint_bitcnt_t bits, +fmpz_bit_pack(nn_ptr arr, flint_bitcnt_t shift, flint_bitcnt_t bits, const fmpz_t coeff, int negate, int borrow) { - mp_limb_t save = arr[0]; + ulong save = arr[0]; fmpz c = *coeff; int sign = fmpz_sgn(coeff); - mp_limb_t cy; + ulong cy; ulong limbs = (shift + bits) / FLINT_BITS; ulong rem_bits = (shift + bits) % FLINT_BITS; - mp_limb_t mask; + ulong mask; ulong size; if (sign == 0) /* special case, deal with zero (store -borrow) */ @@ -30,22 +30,22 @@ fmpz_bit_pack(mp_ptr arr, flint_bitcnt_t shift, flint_bitcnt_t bits, if (borrow) { /* store -1 shifted and add save back in */ - arr[0] = ((~(mp_limb_t) 0) << shift) + save; + arr[0] = ((~(ulong) 0) << shift) + save; /* com remaining limbs */ if (limbs > 1) - flint_mpn_store(arr + 1, limbs - 1, ~(mp_limb_t) 0); + flint_mpn_store(arr + 1, limbs - 1, ~(ulong) 0); /* com remaining bits */ if (limbs) { if (rem_bits) - arr[limbs] = (((mp_limb_t) 1) << rem_bits) - (mp_limb_t) 1; + arr[limbs] = (((ulong) 1) << rem_bits) - (ulong) 1; } else { /* mask off final limb */ - mask = (((mp_limb_t) 1) << rem_bits) - (mp_limb_t) 1; + mask = (((ulong) 1) << rem_bits) - (ulong) 1; arr[limbs] &= mask; } @@ -65,7 +65,7 @@ fmpz_bit_pack(mp_ptr arr, flint_bitcnt_t shift, flint_bitcnt_t bits, if (!COEFF_IS_MPZ(c)) { /* compute d = -b - borrow */ - mp_limb_t d = (c < WORD(0) ? c - borrow : -c - borrow); + ulong d = (c < WORD(0) ? c - borrow : -c - borrow); /* store d << shift and add save back into place */ arr[0] = (d << shift) + save; @@ -76,9 +76,9 @@ fmpz_bit_pack(mp_ptr arr, flint_bitcnt_t shift, flint_bitcnt_t bits, if (shift) arr[1] = (d >> (FLINT_BITS - shift)) + - ((~(mp_limb_t) 0) << shift); + ((~(ulong) 0) << shift); else - arr[1] = ~(mp_limb_t) 0; + arr[1] = ~(ulong) 0; } size = 2; @@ -100,7 +100,7 @@ fmpz_bit_pack(mp_ptr arr, flint_bitcnt_t shift, flint_bitcnt_t bits, { cy = mpn_lshift(arr, arr, size, shift); if (limbs + (rem_bits != 0) > size) - arr[size++] = ((~(mp_limb_t) 0) << shift) + cy; + arr[size++] = ((~(ulong) 0) << shift) + cy; } /* add back in saved bits from start of field */ @@ -111,16 +111,16 @@ fmpz_bit_pack(mp_ptr arr, flint_bitcnt_t shift, flint_bitcnt_t bits, { /* com any additional limbs */ if (limbs > size) - flint_mpn_store(arr + size, limbs - size, ~(mp_limb_t) 0); + flint_mpn_store(arr + size, limbs - size, ~(ulong) 0); /* com remaining bits */ if (rem_bits) - arr[limbs] = (((mp_limb_t) 1) << rem_bits) - (mp_limb_t) 1; + arr[limbs] = (((ulong) 1) << rem_bits) - (ulong) 1; } else { /* mask off final limb */ - mask = (((mp_limb_t) 1) << rem_bits) - (mp_limb_t) 1; + mask = (((ulong) 1) << rem_bits) - (ulong) 1; arr[limbs] &= mask; } return 1; @@ -130,7 +130,7 @@ fmpz_bit_pack(mp_ptr arr, flint_bitcnt_t shift, flint_bitcnt_t bits, if (!COEFF_IS_MPZ(c)) { /* compute d = b - borrow */ - mp_limb_t d = (c < WORD(0) ? -c - borrow : c - borrow); + ulong d = (c < WORD(0) ? 
-c - borrow : c - borrow); /* store d< 1) /* field crosses a limb boundary */ (*coeff) = @@ -41,10 +41,10 @@ fmpz_bit_unpack(fmpz_t coeff, mp_srcptr arr, flint_bitcnt_t shift, /* sign extend */ if (sign) - (*coeff) += ((~(mp_limb_t) 0) << bits); + (*coeff) += ((~(ulong) 0) << bits); /* determine whether we need to return a borrow */ - sign = (*coeff < (mp_limb_signed_t) 0 ? (mp_limb_t) 1 : (mp_limb_t) 0); + sign = (*coeff < (slong) 0 ? (ulong) 1 : (ulong) 0); /* deal with borrow */ if (borrow) @@ -62,12 +62,12 @@ fmpz_bit_unpack(fmpz_t coeff, mp_srcptr arr, flint_bitcnt_t shift, if (negate) fmpz_neg(coeff, coeff); - return (sign != (mp_limb_t) 0); + return (sign != (ulong) 0); } else /* large coefficient */ { mpz_ptr mcoeff; - mp_limb_t * p; + ulong * p; ulong l, b; mcoeff = _fmpz_promote(coeff); @@ -93,15 +93,15 @@ fmpz_bit_unpack(fmpz_t coeff, mp_srcptr arr, flint_bitcnt_t shift, /* mask off the last limb, if not full */ if (b) { - mask = (((mp_limb_t) 1) << b) - (mp_limb_t) 1; + mask = (((ulong) 1) << b) - (ulong) 1; p[l - 1] &= mask; } - if (sign != (mp_limb_t) 0) + if (sign != (ulong) 0) { /* sign extend */ if (b) - p[l - 1] += ((~(mp_limb_t) 0) << b); + p[l - 1] += ((~(ulong) 0) << b); /* negate */ mpn_com(p, p, l); @@ -109,7 +109,7 @@ fmpz_bit_unpack(fmpz_t coeff, mp_srcptr arr, flint_bitcnt_t shift, mpn_add_1(p, p, l, 1); /* normalise */ - while (l && (p[l - 1] == (mp_limb_t) 0)) + while (l && (p[l - 1] == (ulong) 0)) l--; mcoeff->_mp_size = -l; @@ -123,7 +123,7 @@ fmpz_bit_unpack(fmpz_t coeff, mp_srcptr arr, flint_bitcnt_t shift, mpn_add_1(p, p, l, 1); /* normalise */ - while (l && (p[l - 1] == (mp_limb_t) 0)) + while (l && (p[l - 1] == (ulong) 0)) l--; mcoeff->_mp_size = l; @@ -142,19 +142,19 @@ fmpz_bit_unpack(fmpz_t coeff, mp_srcptr arr, flint_bitcnt_t shift, } void -fmpz_bit_unpack_unsigned(fmpz_t coeff, mp_srcptr arr, +fmpz_bit_unpack_unsigned(fmpz_t coeff, nn_srcptr arr, flint_bitcnt_t shift, flint_bitcnt_t bits) { ulong limbs = (shift + bits) / FLINT_BITS; ulong rem_bits = (shift + bits) % FLINT_BITS; - mp_limb_t mask; + ulong mask; if (bits <= SMALL_FMPZ_BITCOUNT_MAX) /* fits into a small coeff */ { _fmpz_demote(coeff); /* mask for the given number of bits */ - mask = (((mp_limb_t) 1) << bits) - (mp_limb_t) 1; + mask = (((ulong) 1) << bits) - (ulong) 1; if (limbs + (rem_bits != 0) > 1) /* field crosses a limb boundary */ (*coeff) = @@ -165,7 +165,7 @@ fmpz_bit_unpack_unsigned(fmpz_t coeff, mp_srcptr arr, else /* large coefficient */ { mpz_ptr mcoeff; - mp_limb_t * p; + ulong * p; ulong l, b; mcoeff = _fmpz_promote(coeff); @@ -191,12 +191,12 @@ fmpz_bit_unpack_unsigned(fmpz_t coeff, mp_srcptr arr, /* mask off the last limb, if not full */ if (b) { - mask = (((mp_limb_t) 1) << b) - (mp_limb_t) 1; + mask = (((ulong) 1) << b) - (ulong) 1; p[l - 1] &= mask; } /* normalise */ - while (l && (p[l - 1] == (mp_limb_t) 0)) + while (l && (p[l - 1] == (ulong) 0)) l--; mcoeff->_mp_size = l; diff --git a/src/fmpz/cmp.c b/src/fmpz/cmp.c index 5278ca8536..18ad303f14 100644 --- a/src/fmpz/cmp.c +++ b/src/fmpz/cmp.c @@ -16,9 +16,9 @@ #include "mpn_extras.h" #include "fmpz.h" -static int flint_mpn_cmp2abs(mp_srcptr x, slong xn, mp_srcptr a, slong an) +static int flint_mpn_cmp2abs(nn_srcptr x, slong xn, nn_srcptr a, slong an) { - mp_limb_t xhi, ahi; + ulong xhi, ahi; FLINT_ASSERT(an >= 0); FLINT_ASSERT(xn >= 0); @@ -56,11 +56,11 @@ int fmpz_cmp2abs(const fmpz_t a, const fmpz_t b) { if (!COEFF_IS_MPZ(*b)) { - mp_limb_t ub = FLINT_ABS(*b); + ulong ub = FLINT_ABS(*b); if (!COEFF_IS_MPZ(*a)) { - 
mp_limb_t ua = FLINT_ABS(*a); + ulong ua = FLINT_ABS(*a); return ua < 2*ub ? -1 : ua > 2*ub ? 1 : 0; } else @@ -94,8 +94,8 @@ int fmpz_cmpabs(const fmpz_t f, const fmpz_t g) { if (!COEFF_IS_MPZ(*g)) { - mp_limb_t uf = FLINT_ABS(*f); - mp_limb_t ug = FLINT_ABS(*g); + ulong uf = FLINT_ABS(*f); + ulong ug = FLINT_ABS(*g); return (uf < ug ? -1 : (uf > ug)); } diff --git a/src/fmpz/comb_init.c b/src/fmpz/comb_init.c index 85deab58f4..dc5b536304 100644 --- a/src/fmpz/comb_init.c +++ b/src/fmpz/comb_init.c @@ -42,7 +42,7 @@ void fmpz_comb_temp_init(fmpz_comb_temp_t CT, const fmpz_comb_t C) } -void fmpz_comb_init(fmpz_comb_t C, mp_srcptr m, slong len) +void fmpz_comb_init(fmpz_comb_t C, nn_srcptr m, slong len) { int success; slong l, i, j, k, s; @@ -240,7 +240,7 @@ void fmpz_comb_init(fmpz_comb_t C, mp_srcptr m, slong len) } } - C->packed_multipliers = FLINT_ARRAY_ALLOC(l, mp_limb_t); + C->packed_multipliers = FLINT_ARRAY_ALLOC(l, ulong); l = 0; for (k = 0, i = 0; k < C->crt_klen; k++) diff --git a/src/fmpz/divides.c b/src/fmpz/divides.c index 77a2f55a77..b8ca0b09ff 100644 --- a/src/fmpz/divides.c +++ b/src/fmpz/divides.c @@ -32,7 +32,7 @@ fmpz_divides(fmpz_t q, const fmpz_t g, const fmpz_t h) { if (!COEFF_IS_MPZ(c2)) /* h is also small */ { - mp_limb_t qz; + ulong qz; if (c1 < 0) { @@ -67,7 +67,7 @@ fmpz_divides(fmpz_t q, const fmpz_t g, const fmpz_t h) if (!COEFF_IS_MPZ(c2)) /* h is small */ { - mp_limb_t r; + ulong r; mq = _fmpz_promote(q); diff --git a/src/fmpz/fac_ui.c b/src/fmpz/fac_ui.c index a27ab0d976..f39d1336a9 100644 --- a/src/fmpz/fac_ui.c +++ b/src/fmpz/fac_ui.c @@ -20,7 +20,7 @@ #define FLINT_NUM_TINY_FACTORIALS 13 #endif -const mp_limb_t flint_tiny_factorials[] = +const ulong flint_tiny_factorials[] = { UWORD(1), UWORD(1), UWORD(2), UWORD(6), UWORD(24), UWORD(120), UWORD(720), UWORD(5040), UWORD(40320), UWORD(362880), UWORD(3628800), UWORD(39916800), UWORD(479001600), diff --git a/src/fmpz/fdiv.c b/src/fmpz/fdiv.c index 51595b7e4d..a9e8324bf5 100644 --- a/src/fmpz/fdiv.c +++ b/src/fmpz/fdiv.c @@ -200,7 +200,7 @@ void _mpz_tdiv_qr_preinvn(mpz_ptr q, mpz_ptr r, int nm = (inv->norm != 0); TMP_INIT; - mp_ptr qp, rp, ap, dp, tp, sp; + nn_ptr qp, rp, ap, dp, tp, sp; if ((ulong) r->_mp_alloc < usize1 + nm) mpz_realloc2(r, (usize1 + nm)*FLINT_BITS); @@ -224,14 +224,14 @@ void _mpz_tdiv_qr_preinvn(mpz_ptr q, mpz_ptr r, TMP_START; if ((r == d || q == d) && !nm) /* we have alias with d */ { - tp = TMP_ALLOC(usize2*sizeof(mp_limb_t)); + tp = TMP_ALLOC(usize2*sizeof(ulong)); mpn_copyi(tp, dp, usize2); dp = tp; } if (r == a || q == a) /* we have alias with a */ { - tp = TMP_ALLOC(usize1*sizeof(mp_limb_t)); + tp = TMP_ALLOC(usize1*sizeof(ulong)); mpn_copyi(tp, ap, usize1); ap = tp; } @@ -245,7 +245,7 @@ void _mpz_tdiv_qr_preinvn(mpz_ptr q, mpz_ptr r, mpn_tdiv_qr(qp, rp, 0, ap, usize1, dp, usize2); else { if (nm) { - tp = TMP_ALLOC(usize2*sizeof(mp_limb_t)); + tp = TMP_ALLOC(usize2*sizeof(ulong)); mpn_lshift(tp, dp, usize2, inv->norm); dp = tp; @@ -282,7 +282,7 @@ void _mpz_fdiv_qr_preinvn(mpz_ptr q, mpz_ptr r, TMP_START; if (q == d || r == d) /* we need d later, so make sure it doesn't alias */ { - t->_mp_d = TMP_ALLOC(usize2*sizeof(mp_limb_t)); + t->_mp_d = TMP_ALLOC(usize2*sizeof(ulong)); t->_mp_size = d->_mp_size; t->_mp_alloc = d->_mp_alloc; mpn_copyi(t->_mp_d, d->_mp_d, usize2); diff --git a/src/fmpz/fib_ui.c b/src/fmpz/fib_ui.c index 01d6a51a82..c318628e6a 100644 --- a/src/fmpz/fib_ui.c +++ b/src/fmpz/fib_ui.c @@ -22,7 +22,7 @@ #define NUM_SMALL_FIB2 92 #endif -static const mp_limb_t 
small_fib[NUM_SMALL_FIB] = +static const ulong small_fib[NUM_SMALL_FIB] = { 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584, 4181, 6765, 10946, 17711, 28657, 46368, 75025, 121393, @@ -55,7 +55,7 @@ void fmpz_fib_ui(fmpz_t f, ulong n) } else if (n < NUM_SMALL_FIB2) { - mp_limb_t hi, lo, a, b; + ulong hi, lo, a, b; a = small_fib[n / 2]; b = small_fib[n / 2 - 1]; if (n & 1) diff --git a/src/fmpz/fits_si.c b/src/fmpz/fits_si.c index 773175027f..0bdf96e397 100644 --- a/src/fmpz/fits_si.c +++ b/src/fmpz/fits_si.c @@ -25,24 +25,24 @@ MA 02110-1301, USA. */ #if defined(_WIN64) || defined(__mips64) -#define FLINT_UI_MAX ((mp_limb_t)(~(mp_limb_t)0)) +#define FLINT_UI_MAX ((ulong)(~(ulong)0)) #define FLINT_UI_HIBIT (FLINT_UI_MAX ^ (FLINT_UI_MAX >> 1)) -#define FLINT_SI_MAX ((mp_limb_signed_t)(FLINT_UI_MAX ^ FLINT_UI_HIBIT)) -#define FLINT_SI_MIN ((mp_limb_signed_t)FLINT_UI_HIBIT) +#define FLINT_SI_MAX ((slong)(FLINT_UI_MAX ^ FLINT_UI_HIBIT)) +#define FLINT_SI_MIN ((slong)FLINT_UI_HIBIT) int flint_mpz_fits_si_p(mpz_srcptr z) { - mp_size_t n = z->_mp_size; - mp_ptr p = z->_mp_d; - mp_limb_t limb = p[0]; + slong n = z->_mp_size; + nn_ptr p = z->_mp_d; + ulong limb = p[0]; if (n == 0) return 1; if (n == 1) return limb <= FLINT_SI_MAX; if (n == -1) - return limb <= (mp_limb_t) FLINT_SI_MIN; + return limb <= (ulong) FLINT_SI_MIN; return 0; } diff --git a/src/fmpz/fmma.c b/src/fmpz/fmma.c index 1b2f4ea8a1..7aec7f75e7 100644 --- a/src/fmpz/fmma.c +++ b/src/fmpz/fmma.c @@ -24,7 +24,7 @@ void fmpz_fmma(fmpz_t f, const fmpz_t a, const fmpz_t b, if (!COEFF_IS_MPZ(s) && !COEFF_IS_MPZ(t) && !COEFF_IS_MPZ(u) && !COEFF_IS_MPZ(v)) { - mp_limb_t sh, sl, th, tl; + ulong sh, sl, th, tl; smul_ppmm(sh, sl, s, t); smul_ppmm(th, tl, u, v); diff --git a/src/fmpz/fmms.c b/src/fmpz/fmms.c index 8fbd59c2cc..e013d0f91e 100644 --- a/src/fmpz/fmms.c +++ b/src/fmpz/fmms.c @@ -24,7 +24,7 @@ void fmpz_fmms(fmpz_t f, const fmpz_t a, const fmpz_t b, if (!COEFF_IS_MPZ(s) && !COEFF_IS_MPZ(t) && !COEFF_IS_MPZ(u) && !COEFF_IS_MPZ(v)) { - mp_limb_t sh, sl, th, tl; + ulong sh, sl, th, tl; smul_ppmm(sh, sl, s, t); smul_ppmm(th, tl, u, v); diff --git a/src/fmpz/gcd.c b/src/fmpz/gcd.c index e854aa2186..48de65d753 100644 --- a/src/fmpz/gcd.c +++ b/src/fmpz/gcd.c @@ -42,12 +42,12 @@ fmpz_gcd(fmpz_t f, const fmpz_t g, const fmpz_t h) } u2 = FLINT_ABS(c2); - fmpz_set_ui(f, mpn_gcd_1((mp_srcptr) &u2, (mp_size_t) 1, u1)); + fmpz_set_ui(f, mpn_gcd_1((nn_srcptr) &u2, (slong) 1, u1)); } else /* but h is large */ { mpz_ptr mpzc2 = COEFF_TO_PTR(c2); - mp_size_t size = mpzc2->_mp_size; + slong size = mpzc2->_mp_size; /* The sign is stored in the size of an mpz, and gcd_1 only takes * positive integers. */ fmpz_set_ui(f, mpn_gcd_1(mpzc2->_mp_d, FLINT_ABS(size), u1)); @@ -59,7 +59,7 @@ fmpz_gcd(fmpz_t f, const fmpz_t g, const fmpz_t h) { ulong u2; mpz_ptr mpzc1; - mp_size_t size; + slong size; if (c2 == 0) { @@ -186,7 +186,7 @@ fmpz_gcd3(fmpz_t res, const fmpz_t a, const fmpz_t b, const fmpz_t c) { /* Three-way mpz_gcd. */ mpz_ptr rp, ap, bp, cp, tp; - mp_size_t an, bn, cn, mn; + slong an, bn, cn, mn; /* If res is small, it cannot be aliased with a, b, c, so promoting is fine. */ rp = _fmpz_promote(res); @@ -228,7 +228,7 @@ fmpz_gcd3(fmpz_t res, const fmpz_t a, const fmpz_t b, const fmpz_t c) /* It would be more efficient to allocate temporary space for gcd(a, b), but we can't be sure that mpz_gcd never attempts to reallocate the output. 
*/ - t->_mp_d = TMP_ALLOC(sizeof(mp_limb_t) * cn); + t->_mp_d = TMP_ALLOC(sizeof(ulong) * cn); t->_mp_size = t->_mp_alloc = cn; flint_mpn_copyi(t->_mp_d, cp->_mp_d, cn); diff --git a/src/fmpz/gcdinv.c b/src/fmpz/gcdinv.c index 9c8cec874a..83a983fc83 100644 --- a/src/fmpz/gcdinv.c +++ b/src/fmpz/gcdinv.c @@ -33,7 +33,7 @@ void fmpz_gcdinv(fmpz_t d, fmpz_t a, const fmpz_t f, const fmpz_t g) _fmpz_demote(d); _fmpz_demote(a); - *d = n_gcdinv((mp_limb_t *) a, ff, gg); + *d = n_gcdinv((ulong *) a, ff, gg); } else /* g is large */ { @@ -51,7 +51,7 @@ void fmpz_gcdinv(fmpz_t d, fmpz_t a, const fmpz_t f, const fmpz_t g) fptr->_mp_alloc = 1; fptr->_mp_size = 1; - fptr->_mp_d = (mp_limb_t *) f; + fptr->_mp_d = (ulong *) f; mpz_gcdext(dtemp, atemp, NULL, fptr, COEFF_TO_PTR(*g)); diff --git a/src/fmpz/get.c b/src/fmpz/get.c index 19f9f43f9e..55e3dcf0da 100644 --- a/src/fmpz/get.c +++ b/src/fmpz/get.c @@ -42,9 +42,9 @@ fmpz_get_d_2exp(slong * exp, const fmpz_t f) *exp = FLINT_BIT_COUNT(d_abs); if (d < WORD(0)) - return flint_mpn_get_d((mp_limb_t *) &d_abs, WORD(1), WORD(-1), -*exp); + return flint_mpn_get_d((ulong *) &d_abs, WORD(1), WORD(-1), -*exp); else - return flint_mpn_get_d((mp_limb_t *) &d, WORD(1), WORD(1), -*exp); + return flint_mpn_get_d((ulong *) &d, WORD(1), WORD(1), -*exp); } else { @@ -74,7 +74,7 @@ fmpz_get_d(const fmpz_t f) } else if (!COEFF_IS_MPZ(c)) { - mp_limb_t d; + ulong d; if (c > 0) { @@ -114,13 +114,13 @@ fmpz_get_mpfr(mpfr_t x, const fmpz_t f, mpfr_rnd_t rnd) } int -fmpz_get_mpn(mp_ptr *n, fmpz_t n_in) +fmpz_get_mpn(nn_ptr *n, fmpz_t n_in) { - mp_limb_t n_size; - mp_ptr temp; + ulong n_size; + nn_ptr temp; n_size = fmpz_size(n_in); - *n = flint_malloc(n_size * sizeof(mp_limb_t)); + *n = flint_malloc(n_size * sizeof(ulong)); if (n_size <= 1) { @@ -144,10 +144,10 @@ fmpz_get_mpz(mpz_t x, const fmpz_t f) mpz_set(x, COEFF_TO_PTR(*f)); /* set x to large value */ } -mp_limb_t fmpz_get_nmod(const fmpz_t aa, nmod_t mod) +ulong fmpz_get_nmod(const fmpz_t aa, nmod_t mod) { fmpz A = *aa; - mp_limb_t r, SA, UA; + ulong r, SA, UA; if (!COEFF_IS_MPZ(A)) { @@ -158,7 +158,7 @@ mp_limb_t fmpz_get_nmod(const fmpz_t aa, nmod_t mod) else { mpz_srcptr a = COEFF_TO_PTR(A); - mp_srcptr ad = a->_mp_d; + nn_srcptr ad = a->_mp_d; slong an = a->_mp_size; if (an < 0) @@ -195,7 +195,7 @@ fmpz_get_si(const fmpz_t f) return (!COEFF_IS_MPZ(*f) ? 
*f : flint_mpz_get_si(COEFF_TO_PTR(*f))); } -void fmpz_get_signed_ui_array(mp_limb_t * r, slong n, const fmpz_t x) +void fmpz_get_signed_ui_array(ulong * r, slong n, const fmpz_t x) { int neg; slong i, sz; @@ -225,7 +225,7 @@ void fmpz_get_signed_ui_array(mp_limb_t * r, slong n, const fmpz_t x) mpn_neg(r, r, n); } -void fmpz_get_signed_uiui(mp_limb_t * hi, mp_limb_t * lo, const fmpz_t x) +void fmpz_get_signed_uiui(ulong * hi, ulong * lo, const fmpz_t x) { ulong r0, r1, s; diff --git a/src/fmpz/get_str.c b/src/fmpz/get_str.c index 2bcef40653..ce6b7ba390 100644 --- a/src/fmpz/get_str.c +++ b/src/fmpz/get_str.c @@ -240,7 +240,7 @@ char * fmpz_get_str(char * str, int b, const fmpz_t f) if (!COEFF_IS_MPZ(*f)) { fmpz c; - mp_limb_t d; + ulong d; c = *f; d = FLINT_ABS(c); diff --git a/src/fmpz/invmod.c b/src/fmpz/invmod.c index 5e276b5e65..2ac1b9512a 100644 --- a/src/fmpz/invmod.c +++ b/src/fmpz/invmod.c @@ -67,14 +67,14 @@ fmpz_invmod(fmpz_t f, const fmpz_t g, const fmpz_t h) if (c1 < WORD(0)) { c1 = -c1; - temp._mp_d = (mp_limb_t *) & c1; + temp._mp_d = (ulong *) & c1; temp._mp_size = -1; } else if (c1 == WORD(0)) temp._mp_size = 0; else { - temp._mp_d = (mp_limb_t *) & c1; + temp._mp_d = (ulong *) & c1; temp._mp_size = 1; } diff --git a/src/fmpz/is_canonical.c b/src/fmpz/is_canonical.c index 77b817857f..f4036dc267 100644 --- a/src/fmpz/is_canonical.c +++ b/src/fmpz/is_canonical.c @@ -14,7 +14,7 @@ int _fmpz_is_canonical(const fmpz_t x) { mpz_ptr z; - mp_size_t n; + slong n; if (!COEFF_IS_MPZ(*x)) return 1; @@ -26,7 +26,7 @@ int _fmpz_is_canonical(const fmpz_t x) return 0; if (n == 1) - return z->_mp_d[0] > (mp_limb_t) COEFF_MAX; + return z->_mp_d[0] > (ulong) COEFF_MAX; return z->_mp_d[n - 1] != 0; } diff --git a/src/fmpz/is_prime.c b/src/fmpz/is_prime.c index 170002da66..fef98c1496 100644 --- a/src/fmpz/is_prime.c +++ b/src/fmpz/is_prime.c @@ -42,7 +42,7 @@ int _fmpz_is_prime(const fmpz_t n, int proved) else { mpz_ptr z; - mp_ptr d; + nn_ptr d; slong size, bits, trial_primes; z = COEFF_TO_PTR(*n); diff --git a/src/fmpz/is_prime_morrison.c b/src/fmpz/is_prime_morrison.c index a38dac3bad..48bd4b339a 100644 --- a/src/fmpz/is_prime_morrison.c +++ b/src/fmpz/is_prime_morrison.c @@ -15,7 +15,7 @@ #include "fmpz.h" #include "fmpz_factor.h" -void _fmpz_np1_trial_factors(const fmpz_t n, mp_ptr pp1, slong * num_pp1, ulong limit) +void _fmpz_np1_trial_factors(const fmpz_t n, nn_ptr pp1, slong * num_pp1, ulong limit) { slong i, num; ulong ppi, p; @@ -61,10 +61,10 @@ void _fmpz_np1_trial_factors(const fmpz_t n, mp_ptr pp1, slong * num_pp1, ulong } } -int fmpz_is_prime_morrison(fmpz_t F, fmpz_t R, const fmpz_t n, mp_ptr pp1, slong num_pp1) +int fmpz_is_prime_morrison(fmpz_t F, fmpz_t R, const fmpz_t n, nn_ptr pp1, slong num_pp1) { slong i, d, bits; - mp_limb_t a, b; + ulong a, b; fmpz_t g, q, r, ex, c, D, Dinv, A, B, Ukm, Ukm1, Um, Um1, Vm, Vm1, p; fmpz_factor_t fac; int res = 0, fac_found; diff --git a/src/fmpz/is_prime_pocklington.c b/src/fmpz/is_prime_pocklington.c index ff4366d097..26fd0f73a4 100644 --- a/src/fmpz/is_prime_pocklington.c +++ b/src/fmpz/is_prime_pocklington.c @@ -15,7 +15,7 @@ #include "fmpz.h" #include "fmpz_factor.h" -void _fmpz_nm1_trial_factors(const fmpz_t n, mp_ptr pm1, slong * num_pm1, ulong limit) +void _fmpz_nm1_trial_factors(const fmpz_t n, nn_ptr pm1, slong * num_pm1, ulong limit) { slong i, num; ulong ppi, p; @@ -61,7 +61,7 @@ void _fmpz_nm1_trial_factors(const fmpz_t n, mp_ptr pm1, slong * num_pm1, ulong } } -int fmpz_is_prime_pocklington(fmpz_t F, fmpz_t R, const fmpz_t n, 
mp_ptr pm1, slong num_pm1) +int fmpz_is_prime_pocklington(fmpz_t F, fmpz_t R, const fmpz_t n, nn_ptr pm1, slong num_pm1) { slong i, d, bits; ulong a; diff --git a/src/fmpz/is_prime_pseudosquare.c b/src/fmpz/is_prime_pseudosquare.c index 13f7a8db21..b46489fc90 100644 --- a/src/fmpz/is_prime_pseudosquare.c +++ b/src/fmpz/is_prime_pseudosquare.c @@ -14,7 +14,7 @@ #include "fmpz.h" #ifndef FLINT64 -mp_limb_t flint_fmpz_pseudosquares[][3] = +ulong flint_fmpz_pseudosquares[][3] = { { 17, 0, 0 }, { 73, 0, 0 }, @@ -92,7 +92,7 @@ mp_limb_t flint_fmpz_pseudosquares[][3] = { 1411295841u, 761797252u, 229581u } }; #else -mp_limb_t flint_fmpz_pseudosquares[][2] = +ulong flint_fmpz_pseudosquares[][2] = { { 17, 0 }, { 73, 0 }, @@ -210,10 +210,10 @@ void fmpz_set_pseudosquare(fmpz_t f, unsigned int i) int fmpz_is_prime_pseudosquare(const fmpz_t n) { unsigned int i, j, m1; - mp_limb_t p, B, mod8; + ulong p, B, mod8; fmpz_t NB, f, exp, mod, nm1; int ret; - const mp_limb_t * primes; + const ulong * primes; if (fmpz_sgn(n) <= 0) return 0; diff --git a/src/fmpz/logic.c b/src/fmpz/logic.c index b88e7018c5..4eb5ee43a1 100644 --- a/src/fmpz/logic.c +++ b/src/fmpz/logic.c @@ -205,7 +205,7 @@ void fmpz_combit(fmpz_t f, ulong i) flint_bitcnt_t fmpz_popcnt(const fmpz_t c) { - mp_limb_t d; + ulong d; fmpz c1 = *c; if (!COEFF_IS_MPZ(c1)) diff --git a/src/fmpz/mul.c b/src/fmpz/mul.c index 62551d619f..1a039ddd1b 100644 --- a/src/fmpz/mul.c +++ b/src/fmpz/mul.c @@ -20,10 +20,10 @@ static void flint_mpz_mul(mpz_ptr z, mpz_srcptr x, mpz_srcptr y) { - mp_size_t xn, yn, zn, sgn; - mp_srcptr xd, yd; - mp_ptr zd; - mp_limb_t top; + slong xn, yn, zn, sgn; + nn_srcptr xd, yd; + nn_ptr zd; + ulong top; TMP_INIT; xn = x->_mp_size; @@ -36,7 +36,7 @@ flint_mpz_mul(mpz_ptr z, mpz_srcptr x, mpz_srcptr y) if (xn < yn) { mpz_srcptr t; - mp_size_t tn; + slong tn; t = x; x = y; @@ -60,7 +60,7 @@ flint_mpz_mul(mpz_ptr z, mpz_srcptr x, mpz_srcptr y) { if (xn == 2) { - mp_limb_t r3, r2, r1, r0; + ulong r3, r2, r1, r0; FLINT_MPN_MUL_2X2(r3, r2, r1, r0, xd[1], xd[0], yd[1], yd[0]); zd[0] = r0; zd[1] = r1; @@ -73,7 +73,7 @@ flint_mpz_mul(mpz_ptr z, mpz_srcptr x, mpz_srcptr y) if (xn == 1) { - mp_limb_t hi, lo; + ulong hi, lo; umul_ppmm(hi, lo, xd[0], yd[0]); zd[0] = lo; zd[1] = hi; @@ -92,7 +92,7 @@ flint_mpz_mul(mpz_ptr z, mpz_srcptr x, mpz_srcptr y) { if (xn == 2) { - mp_limb_t r2, r1, r0; + ulong r2, r1, r0; FLINT_MPN_MUL_2X1(r2, r1, r0, xd[1], xd[0], yd[0]); zd[0] = r0; zd[1] = r1; @@ -114,13 +114,13 @@ flint_mpz_mul(mpz_ptr z, mpz_srcptr x, mpz_srcptr y) we do not overwrite it during the multiplication. 
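The aliasing guard referred to in this comment can be seen in standalone form below. This is a rough sketch (the function, the reversal operation and all names are invented for illustration): when the output buffer aliases an input, the input is first copied into TMP scratch space so the result can be written safely.

#include "flint.h"

/* Illustrative only: reverse x into z, guarding against z aliasing x. */
void reverse_limbs_safe(nn_ptr z, nn_srcptr x, slong n)
{
    slong i;
    TMP_INIT;

    TMP_START;
    if (z == x)   /* output aliases the input: copy x aside first */
    {
        nn_ptr tmp = TMP_ALLOC(n * sizeof(ulong));
        flint_mpn_copyi(tmp, x, n);
        x = tmp;
    }

    for (i = 0; i < n; i++)   /* reading x while writing z is now safe */
        z[i] = x[n - 1 - i];

    TMP_END;
}

In the actual hunk that follows, the copy is only made for whichever operand aliases the output.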
*/ if (zd == xd) { - mp_ptr tmp = TMP_ALLOC(xn * sizeof(mp_limb_t)); + nn_ptr tmp = TMP_ALLOC(xn * sizeof(ulong)); flint_mpn_copyi(tmp, xd, xn); xd = tmp; } else if (zd == yd) { - mp_ptr tmp = TMP_ALLOC(yn * sizeof(mp_limb_t)); + nn_ptr tmp = TMP_ALLOC(yn * sizeof(ulong)); flint_mpn_copyi(tmp, yd, yn); yd = tmp; } @@ -200,7 +200,7 @@ fmpz_mul_si(fmpz_t f, const fmpz_t g, slong x) if (!COEFF_IS_MPZ(c2)) /* c2 is small */ { - mp_limb_t th, tl; + ulong th, tl; /* limb by limb multiply (assembly for most CPU's) */ smul_ppmm(th, tl, c2, x); @@ -243,8 +243,8 @@ fmpz_mul_ui(fmpz_t f, const fmpz_t g, ulong x) if (!COEFF_IS_MPZ(c2)) /* c2 is small */ { - mp_limb_t th, tl; - mp_limb_t uc2 = FLINT_ABS(c2); + ulong th, tl; + ulong uc2 = FLINT_ABS(c2); /* unsigned limb by limb multiply (assembly for most CPU's) */ umul_ppmm(th, tl, uc2, x); diff --git a/src/fmpz/mul_2exp.c b/src/fmpz/mul_2exp.c index 5353de4613..949b85e804 100644 --- a/src/fmpz/mul_2exp.c +++ b/src/fmpz/mul_2exp.c @@ -40,7 +40,7 @@ fmpz_mul_2exp(fmpz_t f, const fmpz_t g, ulong exp) { ulong expred = exp % FLINT_BITS; int alloc = 1 + exp / FLINT_BITS + ((c1bits + expred) > FLINT_BITS); - mp_limb_t * limbs; + ulong * limbs; /* Ensure enough limbs are allocated for f */ if (!COEFF_IS_MPZ(*f)) @@ -59,7 +59,7 @@ fmpz_mul_2exp(fmpz_t f, const fmpz_t g, ulong exp) } limbs = mf->_mp_d; mf->_mp_size = (c1 > 0) ? alloc : -alloc; - memset(limbs, 0, sizeof(mp_limb_t) * alloc); + memset(limbs, 0, sizeof(ulong) * alloc); if (c1bits + expred <= FLINT_BITS) { diff --git a/src/fmpz/mul_si_tdiv_q_2exp.c b/src/fmpz/mul_si_tdiv_q_2exp.c index 92c1cbfe38..140d7d17a1 100644 --- a/src/fmpz/mul_si_tdiv_q_2exp.c +++ b/src/fmpz/mul_si_tdiv_q_2exp.c @@ -27,9 +27,9 @@ fmpz_mul_si_tdiv_q_2exp(fmpz_t f, const fmpz_t g, slong x, ulong exp) } else if (!COEFF_IS_MPZ(c2)) /* c2 is small */ { - mp_limb_t prod[2]; - mp_limb_t uc2; - mp_limb_t ux; + ulong prod[2]; + ulong uc2; + ulong ux; if (exp >= 2 * FLINT_BITS) { @@ -69,7 +69,7 @@ fmpz_mul_si_tdiv_q_2exp(fmpz_t f, const fmpz_t g, slong x, ulong exp) /* two limbs, least significant first, native endian, no nails, stored in prod */ - mpz_import(mf, 2, -1, sizeof(mp_limb_t), 0, 0, prod); + mpz_import(mf, 2, -1, sizeof(ulong), 0, 0, prod); if ((c2 ^ x) < WORD(0)) mpz_neg(mf, mf); } diff --git a/src/fmpz/multi_CRT.c b/src/fmpz/multi_CRT.c index c99eaeb15d..75eb7439de 100644 --- a/src/fmpz/multi_CRT.c +++ b/src/fmpz/multi_CRT.c @@ -446,14 +446,14 @@ int fmpz_multi_CRT_precompute( #define MAC(h, l, a, b) \ do { \ - mp_limb_t p1, p0; \ + ulong p1, p0; \ umul_ppmm(p1, p0, a, b); \ add_ssaaaa(h, l, h, l, p1, p0); \ } while (0) void fmpz_multi_CRT_ui( fmpz_t b, - mp_srcptr in, + nn_srcptr in, const fmpz_comb_t C, fmpz_comb_temp_t CT, int sign) @@ -465,10 +465,10 @@ void fmpz_multi_CRT_ui( fmpz * T = CT->T; fmpz * A = CT->A; slong * offsets = C->crt_offsets; - const mp_limb_t * md = C->packed_multipliers; + const ulong * md = C->packed_multipliers; mpz_ptr az; - mp_limb_t * ad; - mp_limb_t hi, lo, t; + ulong * ad; + ulong hi, lo, t; for (k = 0, i = 0, l = 0; k < klen; k++) { diff --git a/src/fmpz/multi_mod.c b/src/fmpz/multi_mod.c index 1f9cba39dc..23d639a454 100644 --- a/src/fmpz/multi_mod.c +++ b/src/fmpz/multi_mod.c @@ -341,7 +341,7 @@ int fmpz_multi_mod_precompute( } void fmpz_multi_mod_ui( - mp_limb_t * out, + ulong * out, const fmpz_t input, const fmpz_comb_t C, fmpz_comb_temp_t CT) @@ -375,7 +375,7 @@ void fmpz_multi_mod_ui( for ( ; i < j; i++) { /* mid level split: depends on FMPZ_MOD_UI_CUTOFF */ - mp_limb_t t = 
fmpz_get_nmod(A + k, lu[i].mod); + ulong t = fmpz_get_nmod(A + k, lu[i].mod); /* low level split: 1, 2, or 3 small primes */ if (lu[i].mod2.n != 0) diff --git a/src/fmpz/pow.c b/src/fmpz/pow.c index 58d4290ebd..ede1ca6cf8 100644 --- a/src/fmpz/pow.c +++ b/src/fmpz/pow.c @@ -99,7 +99,7 @@ fmpz_ui_pow_ui(fmpz_t x, ulong b, ulong e) } else if (e == 2) { - mp_limb_t t[2]; + ulong t[2]; umul_ppmm(t[1], t[0], b, b); fmpz_set_uiui(x, t[1], t[0]); } diff --git a/src/fmpz/powm.c b/src/fmpz/powm.c index 5052827ccd..d70c439fb2 100644 --- a/src/fmpz/powm.c +++ b/src/fmpz/powm.c @@ -248,7 +248,7 @@ void fmpz_powm_ui(fmpz_t f, const fmpz_t g, ulong e, const fmpz_t m) { if (!COEFF_IS_MPZ(g2)) /* g is small */ { - mp_limb_t minv = n_preinvert_limb(m2); + ulong minv = n_preinvert_limb(m2); _fmpz_demote(f); diff --git a/src/fmpz/preinvn.c b/src/fmpz/preinvn.c index edc0546b65..671c64cf3a 100644 --- a/src/fmpz/preinvn.c +++ b/src/fmpz/preinvn.c @@ -16,28 +16,28 @@ void fmpz_preinvn_init(fmpz_preinvn_t inv, const fmpz_t f) { fmpz c = *f; flint_bitcnt_t norm; - mp_ptr t; + nn_ptr t; if (c == 0) { flint_throw(FLINT_ERROR, "Exception (fmpz_preinvn_init). Division by zero.\n"); } else if (!COEFF_IS_MPZ(c)) /* c is small */ { - inv->dinv = flint_malloc(sizeof(mp_limb_t)); + inv->dinv = flint_malloc(sizeof(ulong)); if (c < 0) c = -c; norm = flint_clz(c); if (norm) c <<= norm; - flint_mpn_preinvn(inv->dinv, (mp_ptr) &c, 1); + flint_mpn_preinvn(inv->dinv, (nn_ptr) &c, 1); inv->n = 1; } else /* c is big */ { mpz_ptr mc = COEFF_TO_PTR(c); slong size = FLINT_ABS(mc->_mp_size); - inv->dinv = flint_malloc(size*sizeof(mp_limb_t)); + inv->dinv = flint_malloc(size*sizeof(ulong)); norm = flint_clz(mc->_mp_d[size - 1]); if (norm) { - t = flint_malloc(size*sizeof(mp_limb_t)); + t = flint_malloc(size*sizeof(ulong)); mpn_lshift(t, mc->_mp_d, size, norm); } else t = mc->_mp_d; diff --git a/src/fmpz/primorial.c b/src/fmpz/primorial.c index 098b469ad0..ed81714af4 100644 --- a/src/fmpz/primorial.c +++ b/src/fmpz/primorial.c @@ -55,11 +55,11 @@ const ulong ULONG_PRIMORIALS[] = #define PROD_LIMBS_DIRECT_CUTOFF 50 -mp_size_t mpn_prod_limbs_direct(mp_limb_t * result, const mp_limb_t * factors, - mp_size_t n) +slong mpn_prod_limbs_direct(ulong * result, const ulong * factors, + slong n) { - mp_size_t k, len; - mp_limb_t top; + slong k, len; + ulong top; if (n < 1) { result[0] = UWORD(1); @@ -79,11 +79,11 @@ mp_size_t mpn_prod_limbs_direct(mp_limb_t * result, const mp_limb_t * factors, return len; } -mp_size_t mpn_prod_limbs_balanced(mp_limb_t * result, mp_limb_t * scratch, - const mp_limb_t * factors, mp_size_t n, ulong bits) +slong mpn_prod_limbs_balanced(ulong * result, ulong * scratch, + const ulong * factors, slong n, ulong bits) { - mp_size_t an, bn, alen, blen, len; - mp_limb_t top; + slong an, bn, alen, blen, len; + ulong top; if (n < PROD_LIMBS_DIRECT_CUTOFF) return mpn_prod_limbs_direct(result, factors, n); @@ -112,18 +112,18 @@ mp_size_t mpn_prod_limbs_balanced(mp_limb_t * result, mp_limb_t * scratch, bits must be set to some bound on the bit size of the entries in factors. If no bound is known, simply use FLINT_BITS. 
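As in the fmpz_ui_pow_ui hunk above, a double-word product is now held in a pair of ulong limbs. A small self-contained illustration of that pattern (the wrapper function is invented; the value of b is arbitrary):

#include "flint.h"
#include "fmpz.h"

/* Illustrative only: set x = b^2 via a 2-limb product. */
void square_to_fmpz_example(fmpz_t x, ulong b)
{
    ulong t[2];

    umul_ppmm(t[1], t[0], b, b);    /* b*b: high limb in t[1], low limb in t[0] */
    fmpz_set_uiui(x, t[1], t[0]);   /* assemble the fmpz from high/low limbs */
}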
*/ -mp_size_t mpn_prod_limbs(mp_limb_t * result, const mp_limb_t * factors, - mp_size_t n, ulong bits) +slong mpn_prod_limbs(ulong * result, const ulong * factors, + slong n, ulong bits) { - mp_size_t len, limbs; - mp_limb_t * scratch; + slong len, limbs; + ulong * scratch; if (n < PROD_LIMBS_DIRECT_CUTOFF) return mpn_prod_limbs_direct(result, factors, n); limbs = (n * bits - 1)/FLINT_BITS + 2; - scratch = flint_malloc(sizeof(mp_limb_t) * limbs); + scratch = flint_malloc(sizeof(ulong) * limbs); len = mpn_prod_limbs_balanced(result, scratch, factors, n, bits); flint_free(scratch); @@ -133,10 +133,10 @@ mp_size_t mpn_prod_limbs(mp_limb_t * result, const mp_limb_t * factors, void fmpz_primorial(fmpz_t res, ulong n) { - mp_size_t len, pi; + slong len, pi; ulong bits; mpz_ptr mres; - const mp_limb_t * primes; + const ulong * primes; if (n <= LARGEST_ULONG_PRIMORIAL) { diff --git a/src/fmpz/profile/p-aors_ui.c b/src/fmpz/profile/p-aors_ui.c index 767d34660a..496c8c55db 100644 --- a/src/fmpz/profile/p-aors_ui.c +++ b/src/fmpz/profile/p-aors_ui.c @@ -21,10 +21,10 @@ #if OLD_ALBIN static void -_fmpz_add_mpn_1(fmpz_t f, const mp_limb_t * glimbs, mp_size_t gsz, mp_limb_t x); +_fmpz_add_mpn_1(fmpz_t f, const ulong * glimbs, slong gsz, ulong x); static void -_fmpz_sub_mpn_1(fmpz_t f, const mp_limb_t * glimbs, mp_size_t gsz, mp_limb_t x); +_fmpz_sub_mpn_1(fmpz_t f, const ulong * glimbs, slong gsz, ulong x); void fmpz_add_ui_old(fmpz_t f, const fmpz_t g, ulong x) @@ -35,7 +35,7 @@ fmpz_add_ui_old(fmpz_t f, const fmpz_t g, ulong x) if (!COEFF_IS_MPZ(g1)) /* g is small */ { - mp_size_t sz = 2; + slong sz = 2; if (g1 >= 0) { { /* add with jump if carry */ @@ -101,8 +101,8 @@ carry: if (COEFF_IS_MPZ(f1)) else { mpz_ptr mg = COEFF_TO_PTR(g1); - mp_size_t gsz = mg->_mp_size; - mp_limb_t * glimbs = mg->_mp_d; + slong gsz = mg->_mp_size; + ulong * glimbs = mg->_mp_d; if (gsz > 0) _fmpz_add_mpn_1(f, glimbs, gsz, x); @@ -113,11 +113,11 @@ carry: if (COEFF_IS_MPZ(f1)) /* "Add" two number with same sign. Decide sign from g. */ static void -_fmpz_add_mpn_1(fmpz_t f, const mp_limb_t * glimbs, mp_size_t gsz, mp_limb_t x) +_fmpz_add_mpn_1(fmpz_t f, const ulong * glimbs, slong gsz, ulong x) { mpz_ptr mf; - mp_limb_t * flimbs; - mp_size_t gabssz = FLINT_ABS(gsz); + ulong * flimbs; + slong gabssz = FLINT_ABS(gsz); /* Promote f as it is guaranteed to be large */ if (COEFF_IS_MPZ(*f)) @@ -131,7 +131,7 @@ _fmpz_add_mpn_1(fmpz_t f, const mp_limb_t * glimbs, mp_size_t gsz, mp_limb_t x) if (mf->_mp_alloc < (gabssz + 1)) /* Ensure result fits */ { - mp_limb_t * tmp = flimbs; + ulong * tmp = flimbs; flimbs = _mpz_realloc(mf, gabssz + 1); /* If f and g are aliased, then we need to change glimbs as well. */ @@ -154,11 +154,11 @@ _fmpz_add_mpn_1(fmpz_t f, const mp_limb_t * glimbs, mp_size_t gsz, mp_limb_t x) /* Subtract two limbs (they have different sign) and decide the sign via g. */ static void -_fmpz_sub_mpn_1(fmpz_t f, const mp_limb_t * glimbs, mp_size_t gsz, mp_limb_t x) +_fmpz_sub_mpn_1(fmpz_t f, const ulong * glimbs, slong gsz, ulong x) { mpz_ptr mf; - mp_limb_t * flimbs; - mp_size_t gabssz = FLINT_ABS(gsz); + ulong * flimbs; + slong gabssz = FLINT_ABS(gsz); /* If size of g is 1, we have a higher probability of the result being * small. 
*/ @@ -267,7 +267,7 @@ fmpz_sub_ui_old(fmpz_t f, const fmpz_t g, ulong x) if (!COEFF_IS_MPZ(g1)) /* g is small */ { - mp_size_t sz = -2; + slong sz = -2; if (g1 <= 0) { /* "add" with jump if carry */ @@ -332,8 +332,8 @@ carry: if (COEFF_IS_MPZ(f1)) else { mpz_ptr mg = COEFF_TO_PTR(g1); - mp_size_t gsz = mg->_mp_size; - mp_limb_t * glimbs = mg->_mp_d; + slong gsz = mg->_mp_size; + ulong * glimbs = mg->_mp_d; if (gsz > 0) _fmpz_sub_mpn_1(f, glimbs, gsz, x); @@ -350,7 +350,7 @@ void fmpz_add_ui_old(fmpz_t f, const fmpz_t g, ulong x) if (!COEFF_IS_MPZ(c)) /* g is small */ { - mp_limb_t sum[2]; + ulong sum[2]; if (c >= WORD(0)) /* both operands non-negative */ { add_ssaaaa(sum[1], sum[0], 0, c, 0, x); @@ -380,7 +380,7 @@ fmpz_sub_ui_old(fmpz_t f, const fmpz_t g, ulong x) if (!COEFF_IS_MPZ(c)) /* coeff is small */ { - mp_limb_t sum[2]; + ulong sum[2]; if (c < WORD(0)) /* g negative, x positive, so difference is negative */ { add_ssaaaa(sum[1], sum[0], 0, -c, 0, x); @@ -418,7 +418,7 @@ sample_add_new(void * arg, ulong count) res = _fmpz_vec_init(ntests); a = _fmpz_vec_init(ntests); - b = flint_malloc(sizeof(mp_limb_t) * ntests); + b = flint_malloc(sizeof(ulong) * ntests); for (ix = 0; ix < 10 * count; ix++) { @@ -453,7 +453,7 @@ sample_add_old(void * arg, ulong count) res = _fmpz_vec_init(ntests); a = _fmpz_vec_init(ntests); - b = flint_malloc(sizeof(mp_limb_t) * ntests); + b = flint_malloc(sizeof(ulong) * ntests); for (ix = 0; ix < 10 * count; ix++) { @@ -488,7 +488,7 @@ sample_sub_new(void * arg, ulong count) res = _fmpz_vec_init(ntests); a = _fmpz_vec_init(ntests); - b = flint_malloc(sizeof(mp_limb_t) * ntests); + b = flint_malloc(sizeof(ulong) * ntests); for (ix = 0; ix < 10 * count; ix++) { @@ -523,7 +523,7 @@ sample_sub_old(void * arg, ulong count) res = _fmpz_vec_init(ntests); a = _fmpz_vec_init(ntests); - b = flint_malloc(sizeof(mp_limb_t) * ntests); + b = flint_malloc(sizeof(ulong) * ntests); for (ix = 0; ix < 10 * count; ix++) { diff --git a/src/fmpz/profile/p-crt.c b/src/fmpz/profile/p-crt.c index 7e0b6611e9..54b552e19a 100644 --- a/src/fmpz/profile/p-crt.c +++ b/src/fmpz/profile/p-crt.c @@ -17,7 +17,7 @@ _fmpz_crt_combine(fmpz_t r1r2, fmpz_t m1m2, const fmpz_t r1, const fmpz_t m1, co void _fmpz_crt_combine_uiui(fmpz_t r1r2, fmpz_t m1m2, ulong r1, ulong m1, ulong r2, ulong m2) { - mp_limb_t M[2]; + ulong M[2]; /* M = m1 * m2 @@ -31,7 +31,7 @@ _fmpz_crt_combine_uiui(fmpz_t r1r2, fmpz_t m1m2, ulong r1, ulong m1, ulong r2, u if (M[1] == 0) { - mp_limb_t c, v; + ulong c, v; c = n_invmod(m1, m2) * m1; @@ -47,7 +47,7 @@ _fmpz_crt_combine_uiui(fmpz_t r1r2, fmpz_t m1m2, ulong r1, ulong m1, ulong r2, u } else { - mp_limb_t c[2], t[4], q[3], r[3]; + ulong c[2], t[4], q[3], r[3]; umul_ppmm(c[1], c[0], n_invmod(m1, m2), m1); @@ -72,7 +72,7 @@ _fmpz_crt_combine_uiui(fmpz_t r1r2, fmpz_t m1m2, ulong r1, ulong m1, ulong r2, u } void -tree_crt(fmpz_t r, fmpz_t m, mp_srcptr residues, mp_srcptr primes, slong len) +tree_crt(fmpz_t r, fmpz_t m, nn_srcptr residues, nn_srcptr primes, slong len) { if (len == 0) { @@ -120,13 +120,13 @@ benchmark(slong num_primes, slong prime_bits) flint_rand_t state; fmpz_comb_temp_t temp; fmpz_comb_t comb; - mp_ptr primes, residues; + nn_ptr primes, residues; fmpz_t res; slong k; flint_rand_init(state); - primes = flint_malloc(num_primes * sizeof(mp_limb_t)); - residues = flint_malloc(num_primes * sizeof(mp_limb_t)); + primes = flint_malloc(num_primes * sizeof(ulong)); + residues = flint_malloc(num_primes * sizeof(ulong)); fmpz_init(res); primes[0] = n_nextprime(UWORD(1) << 
(prime_bits - 1), 0); diff --git a/src/fmpz/profile/p-fmma.c b/src/fmpz/profile/p-fmma.c index 2f27fd240e..9be88850b8 100644 --- a/src/fmpz/profile/p-fmma.c +++ b/src/fmpz/profile/p-fmma.c @@ -39,7 +39,7 @@ fmpz_fmma_old(fmpz_t f, const fmpz_t a, const fmpz_t b, if (!COEFF_IS_MPZ(s) && !COEFF_IS_MPZ(t) && !COEFF_IS_MPZ(u) && !COEFF_IS_MPZ(v)) { - mp_limb_t sh, sl, th, tl; + ulong sh, sl, th, tl; smul_ppmm(sh, sl, s, t); smul_ppmm(th, tl, u, v); diff --git a/src/fmpz/profile/p-gcd3.c b/src/fmpz/profile/p-gcd3.c index d213b55c17..bc2be2bf6f 100644 --- a/src/fmpz/profile/p-gcd3.c +++ b/src/fmpz/profile/p-gcd3.c @@ -84,7 +84,7 @@ fmpz_gcd3_old(fmpz_t res, const fmpz_t a, const fmpz_t b, const fmpz_t c) { /* Three-way mpz_gcd. */ mpz_ptr rp, ap, bp, cp, tp; - mp_size_t an, bn, cn, mn; + slong an, bn, cn, mn; /* If res is small, it cannot be aliased with a, b, c, so promoting is fine. */ rp = _fmpz_promote(res); @@ -126,7 +126,7 @@ fmpz_gcd3_old(fmpz_t res, const fmpz_t a, const fmpz_t b, const fmpz_t c) /* It would be more efficient to allocate temporary space for gcd(a, b), but we can't be sure that mpz_gcd never attempts to reallocate the output. */ - t->_mp_d = TMP_ALLOC(sizeof(mp_limb_t) * cn); + t->_mp_d = TMP_ALLOC(sizeof(ulong) * cn); t->_mp_size = t->_mp_alloc = cn; flint_mpn_copyi(t->_mp_d, cp->_mp_d, cn); diff --git a/src/fmpz/profile/p-mul_ui.c b/src/fmpz/profile/p-mul_ui.c index d14fb1196f..b49a80c009 100644 --- a/src/fmpz/profile/p-mul_ui.c +++ b/src/fmpz/profile/p-mul_ui.c @@ -22,8 +22,8 @@ fmpz_mul_ui_old(fmpz_t f, const fmpz_t g, ulong x) if (!COEFF_IS_MPZ(c2)) /* c2 is small */ { - mp_limb_t th, tl; - mp_limb_t uc2 = FLINT_ABS(c2); + ulong th, tl; + ulong uc2 = FLINT_ABS(c2); /* unsigned limb by limb multiply (assembly for most CPU's) */ umul_ppmm(th, tl, uc2, x); diff --git a/src/fmpz/remove.c b/src/fmpz/remove.c index 70570e9a7c..eb4c37f3e3 100644 --- a/src/fmpz/remove.c +++ b/src/fmpz/remove.c @@ -25,7 +25,7 @@ slong _fmpz_remove(fmpz_t x, const fmpz_t f, double finv) { if (y > 0) { - return n_remove2_precomp((mp_limb_t *) x, q, finv); + return n_remove2_precomp((ulong *) x, q, finv); } else { diff --git a/src/fmpz/root.c b/src/fmpz/root.c index b9748d724b..51fe8dbe59 100644 --- a/src/fmpz/root.c +++ b/src/fmpz/root.c @@ -32,7 +32,7 @@ fmpz_root(fmpz_t r, const fmpz_t f, slong n) if (!COEFF_IS_MPZ(c)) /* f is small */ { - mp_limb_t rem, root; + ulong rem, root; int sgn = c < 0; if (n == 2) diff --git a/src/fmpz/set.c b/src/fmpz/set.c index 185f3be1d3..6494440bd1 100644 --- a/src/fmpz/set.c +++ b/src/fmpz/set.c @@ -160,7 +160,7 @@ void fmpz_set_signed_ui_array(fmpz_t f, const ulong * c, slong n) else { mpz_ptr z = _fmpz_promote(f); - mp_limb_t * zd = FLINT_MPZ_REALLOC(z, n); + ulong * zd = FLINT_MPZ_REALLOC(z, n); if (csign == 0) { diff --git a/src/fmpz/set_str.c b/src/fmpz/set_str.c index 9a5b7517f1..7a43c03fe0 100644 --- a/src/fmpz/set_str.c +++ b/src/fmpz/set_str.c @@ -55,16 +55,16 @@ worker(void * arg) static void _fmpz_set_str_basecase(fmpz_t res, const char * s, slong slen) { - mp_ptr tmp; + nn_ptr tmp; unsigned char * stmp; - mp_size_t n; + slong n; slong i; TMP_INIT; TMP_START; stmp = TMP_ALLOC(sizeof(char) * slen); - tmp = TMP_ALLOC(sizeof(mp_limb_t) * (slen / DIGITS_PER_LIMB + 2)); + tmp = TMP_ALLOC(sizeof(ulong) * (slen / DIGITS_PER_LIMB + 2)); for (i = 0; i < slen; i++) stmp[i] = s[i] - '0'; diff --git a/src/fmpz/size.c b/src/fmpz/size.c index c6cdfbdf8a..2221d3a7e7 100644 --- a/src/fmpz/size.c +++ b/src/fmpz/size.c @@ -13,7 +13,7 @@ #include "ulong_extras.h" 
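The p-crt benchmark above drives the comb interface with ulong prime and residue arrays. A minimal self-contained sketch of the same multi-mod/CRT round trip using the ulong-based signatures from the fmpz.h hunk (the number of primes, their sizes and the test value are chosen arbitrarily):

#include "flint.h"
#include "ulong_extras.h"
#include "fmpz.h"

int main(void)
{
    slong i, num_primes = 4;
    ulong primes[4], residues[4];
    fmpz_t x, y;
    fmpz_comb_t comb;
    fmpz_comb_temp_t temp;

    primes[0] = n_nextprime(UWORD(1) << 30, 0);
    for (i = 1; i < num_primes; i++)
        primes[i] = n_nextprime(primes[i - 1], 0);

    fmpz_init(x);
    fmpz_init(y);
    fmpz_set_ui(x, UWORD(123456789));

    fmpz_comb_init(comb, primes, num_primes);
    fmpz_comb_temp_init(temp, comb);

    fmpz_multi_mod_ui(residues, x, comb, temp);      /* x mod each prime */
    fmpz_multi_CRT_ui(y, residues, comb, temp, 0);   /* reconstruct, unsigned */

    /* here fmpz_equal(x, y) should hold since x is below the primes' product */

    fmpz_comb_temp_clear(temp);
    fmpz_comb_clear(comb);
    fmpz_clear(x);
    fmpz_clear(y);
    return 0;
}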
#include "fmpz.h" -mp_size_t +slong fmpz_size(const fmpz_t f) { fmpz d = *f; diff --git a/src/fmpz/sqrtmod.c b/src/fmpz/sqrtmod.c index a9a662977a..f132cf7861 100644 --- a/src/fmpz/sqrtmod.c +++ b/src/fmpz/sqrtmod.c @@ -166,7 +166,7 @@ int fmpz_sqrtmod(fmpz_t b, const fmpz_t a, const fmpz_t p) if (!COEFF_IS_MPZ(*p)) /* p, and b are small */ { - mp_limb_t ans; + ulong ans; ans = n_sqrtmod(*b, *p); if (ans) diff --git a/src/fmpz/sqrtrem.c b/src/fmpz/sqrtrem.c index 0a1c9a145f..41e5b83c5c 100644 --- a/src/fmpz/sqrtrem.c +++ b/src/fmpz/sqrtrem.c @@ -24,7 +24,7 @@ void fmpz_sqrtrem(fmpz_t f, fmpz_t r, const fmpz_t g) { if (COEFF_IS_MPZ(*r)) _fmpz_clear_mpz(*r); - fmpz_set_ui(f, n_sqrtrem((mp_limb_t *) r, *g)); + fmpz_set_ui(f, n_sqrtrem((ulong *) r, *g)); } else { diff --git a/src/fmpz/test/main.c b/src/fmpz/test/main.c index e79fcc4b17..17381eff6d 100644 --- a/src/fmpz/test/main.c +++ b/src/fmpz/test/main.c @@ -20,8 +20,6 @@ # undef ulong #endif -#include -#include #include #include diff --git a/src/fmpz/test/t-abs_lbound_ui_2exp.c b/src/fmpz/test/t-abs_lbound_ui_2exp.c index f0c94a4b6b..53930fc58a 100644 --- a/src/fmpz/test/t-abs_lbound_ui_2exp.c +++ b/src/fmpz/test/t-abs_lbound_ui_2exp.c @@ -13,12 +13,12 @@ #include "ulong_extras.h" #include "fmpz.h" -static mp_limb_t +static ulong refimpl(slong * exp, const fmpz_t x, int bits) { fmpz_t t; slong xbits; - mp_limb_t m; + ulong m; xbits = fmpz_bits(x); @@ -47,7 +47,7 @@ TEST_FUNCTION_START(fmpz_abs_lbound_ui_2exp, state) fmpz_t x; slong bits; slong exp, yexp; - mp_limb_t yman, man; + ulong yman, man; fmpz_init(x); fmpz_randtest_not_zero(x, state, 1 + n_randint(state, 400)); diff --git a/src/fmpz/test/t-abs_ubound_ui_2exp.c b/src/fmpz/test/t-abs_ubound_ui_2exp.c index 8d6fb96f12..8c2afa60f2 100644 --- a/src/fmpz/test/t-abs_ubound_ui_2exp.c +++ b/src/fmpz/test/t-abs_ubound_ui_2exp.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(fmpz_abs_ubound_ui_2exp, state) fmpz_t x, y; slong bits, yexp; slong exp; - mp_limb_t man; + ulong man; fmpz_init(x); fmpz_init(y); diff --git a/src/fmpz/test/t-bit_pack.c b/src/fmpz/test/t-bit_pack.c index 067a8ebff7..59bae95cef 100644 --- a/src/fmpz/test/t-bit_pack.c +++ b/src/fmpz/test/t-bit_pack.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(fmpz_bit_pack, state) fmpz_t a, b; flint_bitcnt_t bits = n_randint(state, 300) + 1; ulong space = (300 - 1) / FLINT_BITS + 2; /* 2 to accommodate shift */ - mp_ptr arr = (mp_ptr) flint_calloc(space, sizeof(mp_limb_t)); + nn_ptr arr = (nn_ptr) flint_calloc(space, sizeof(ulong)); flint_bitcnt_t shift = n_randint(state, FLINT_BITS); int negate = (int) -n_randint(state, 2); @@ -57,7 +57,7 @@ TEST_FUNCTION_START(fmpz_bit_pack, state) fmpz_t a, b; flint_bitcnt_t bits = n_randint(state, 300) + 1; ulong space = (300 - 1) / FLINT_BITS + 2; /* 2 to accommodate shift */ - mp_ptr arr = (mp_ptr) flint_calloc(space, sizeof(mp_limb_t)); + nn_ptr arr = (nn_ptr) flint_calloc(space, sizeof(ulong)); flint_bitcnt_t shift = n_randint(state, FLINT_BITS); fmpz_init(a); diff --git a/src/fmpz/test/t-comb_init_clear.c b/src/fmpz/test/t-comb_init_clear.c index f2c25ee6fe..393c999694 100644 --- a/src/fmpz/test/t-comb_init_clear.c +++ b/src/fmpz/test/t-comb_init_clear.c @@ -17,17 +17,17 @@ TEST_FUNCTION_START(fmpz_comb_init_clear, state) { slong i, j; - mp_limb_t n; + ulong n; slong num_primes; - mp_limb_t * primes; - mp_limb_t p; + ulong * primes; + ulong p; fmpz_comb_t comb; for (i = 0; i < 100 * flint_test_multiplier(); i++) { n = n_randint(state, 10); num_primes = (WORD(1) << n); - primes = (mp_limb_t *) 
flint_malloc(num_primes * sizeof(mp_limb_t)); + primes = (ulong *) flint_malloc(num_primes * sizeof(ulong)); p = n_nextprime((UWORD(1) << (FLINT_BITS-1)) - WORD(10000000), 0); for (j = 0; j < num_primes; j++) diff --git a/src/fmpz/test/t-get_mpn.c b/src/fmpz/test/t-get_mpn.c index 4216a00233..dc09535d7b 100644 --- a/src/fmpz/test/t-get_mpn.c +++ b/src/fmpz/test/t-get_mpn.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(fmpz_get_mpn, state) fmpz_t a, b, mmin; int i, j, k; - mp_ptr mpna; + nn_ptr mpna; fmpz_init(a); fmpz_init(b); diff --git a/src/fmpz/test/t-is_prime_morrison.c b/src/fmpz/test/t-is_prime_morrison.c index 0ff212d1f8..28cf02f063 100644 --- a/src/fmpz/test/t-is_prime_morrison.c +++ b/src/fmpz/test/t-is_prime_morrison.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(fmpz_is_prime_morrison, state) for (i = 0; i < 30 * flint_test_multiplier(); i++) { fmpz_t p, F, R; - mp_ptr pp1; + nn_ptr pp1; slong num_pp1; double logd; ulong limit; diff --git a/src/fmpz/test/t-is_prime_pocklington.c b/src/fmpz/test/t-is_prime_pocklington.c index 39bf6711d9..b6d96c8d01 100644 --- a/src/fmpz/test/t-is_prime_pocklington.c +++ b/src/fmpz/test/t-is_prime_pocklington.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(fmpz_is_prime_pocklington, state) for (i = 0; i < 30 * flint_test_multiplier(); i++) { fmpz_t p, F, R; - mp_ptr pm1; + nn_ptr pm1; slong num_pm1; ulong limit; double logd; diff --git a/src/fmpz/test/t-multi_CRT_ui.c b/src/fmpz/test/t-multi_CRT_ui.c index 2d71237462..b4800accf6 100644 --- a/src/fmpz/test/t-multi_CRT_ui.c +++ b/src/fmpz/test/t-multi_CRT_ui.c @@ -18,10 +18,10 @@ TEST_FUNCTION_START(fmpz_multi_CRT_ui, state) { fmpz_t input, temp, prod; - mp_limb_t * output; + ulong * output; slong i, j, k; flint_bitcnt_t bits1, bits2, bits; - mp_limb_t * primes; + ulong * primes; fmpz * primes2; slong num_primes; fmpz_comb_t comb; @@ -36,8 +36,8 @@ TEST_FUNCTION_START(fmpz_multi_CRT_ui, state) fmpz_init(temp); fmpz_init(input); fmpz_init(prod); - output = FLINT_ARRAY_ALLOC(num_primes, mp_limb_t); - primes = FLINT_ARRAY_ALLOC(num_primes, mp_limb_t); + output = FLINT_ARRAY_ALLOC(num_primes, ulong); + primes = FLINT_ARRAY_ALLOC(num_primes, ulong); primes2 = _fmpz_vec_init(num_primes); try_again: diff --git a/src/fmpz/test/t-rfac_uiui.c b/src/fmpz/test/t-rfac_uiui.c index 8d02147321..3468608a05 100644 --- a/src/fmpz/test/t-rfac_uiui.c +++ b/src/fmpz/test/t-rfac_uiui.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(fmpz_rfac_uiui, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { fmpz_t xa, r1, r2, r1r2, r3; - mp_limb_t x; + ulong x; ulong a, b; fmpz_init(xa); diff --git a/src/fmpz/test/t-set_ui_smod.c b/src/fmpz/test/t-set_ui_smod.c index 973f9c0abf..1d2120dbaf 100644 --- a/src/fmpz/test/t-set_ui_smod.c +++ b/src/fmpz/test/t-set_ui_smod.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(fmpz_set_ui_smod, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { fmpz_t a, b, mz; - mp_limb_t m, r; + ulong m, r; fmpz_init(a); fmpz_init(b); diff --git a/src/fmpz/test/t-sgn.c b/src/fmpz/test/t-sgn.c index 2488223997..53b51418e6 100644 --- a/src/fmpz/test/t-sgn.c +++ b/src/fmpz/test/t-sgn.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(fmpz_sgn, state) { fmpz_t a; mpz_t b; - mp_size_t r1, r2; + slong r1, r2; fmpz_init(a); diff --git a/src/fmpz/test/t-size.c b/src/fmpz/test/t-size.c index 9ed05d5e74..bdead0a721 100644 --- a/src/fmpz/test/t-size.c +++ b/src/fmpz/test/t-size.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(fmpz_size, state) { fmpz_t a; mpz_t b; - mp_size_t r1, r2; + slong r1, r2; fmpz_init(a); diff --git a/src/fmpz/test/t-sqrtmod.c 
b/src/fmpz/test/t-sqrtmod.c index c29bac1ce2..7843ffcd51 100644 --- a/src/fmpz/test/t-sqrtmod.c +++ b/src/fmpz/test/t-sqrtmod.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(fmpz_sqrtmod, state) { int ans; fmpz_t a, b, c, p; - mp_limb_t prime; + ulong prime; prime = n_randint(state, UWORD(1) << (FLINT_BITS - 1)); prime = n_nextprime(prime, 1); @@ -61,7 +61,7 @@ TEST_FUNCTION_START(fmpz_sqrtmod, state) { int ans; fmpz_t a, b, c, d, p; - mp_limb_t prime; + ulong prime; prime = n_randint(state, UWORD(1) << (FLINT_BITS - 1)); prime = n_nextprime(prime, 1); diff --git a/src/fmpz/val2.c b/src/fmpz/val2.c index be8b9808b8..32169542d0 100644 --- a/src/fmpz/val2.c +++ b/src/fmpz/val2.c @@ -27,7 +27,7 @@ flint_bitcnt_t fmpz_val2(const fmpz_t x) } else { - mp_limb_t *d = (COEFF_TO_PTR(c))->_mp_d; + ulong *d = (COEFF_TO_PTR(c))->_mp_d; flint_bitcnt_t u; t = 0; diff --git a/src/fmpz/xgcd.c b/src/fmpz/xgcd.c index 76778b482d..dfcdf58cb2 100644 --- a/src/fmpz/xgcd.c +++ b/src/fmpz/xgcd.c @@ -150,7 +150,7 @@ fmpz_xgcd_canonical_bezout(fmpz_t d, fmpz_t a, fmpz_t b, const fmpz_t f, const f mpz_t mf; ulong tf = FLINT_ABS(*f); - mf->_mp_d = (mp_limb_t *) &tf; + mf->_mp_d = (ulong *) &tf; mf->_mp_size = fmpz_sgn(f); _fmpz_promote(d); @@ -165,7 +165,7 @@ fmpz_xgcd_canonical_bezout(fmpz_t d, fmpz_t a, fmpz_t b, const fmpz_t f, const f mpz_t mg; ulong tg = FLINT_ABS(*g); - mg->_mp_d = (mp_limb_t *) &tg; + mg->_mp_d = (ulong *) &tg; mg->_mp_size = fmpz_sgn(g); _fmpz_promote(d); diff --git a/src/fmpz_extras.h b/src/fmpz_extras.h index b38a195438..5cf6413816 100644 --- a/src/fmpz_extras.h +++ b/src/fmpz_extras.h @@ -94,7 +94,7 @@ fmpz_add2_fmpz_si_inline(fmpz_t z, const fmpz_t x, const fmpz_t y, slong c) } static inline void -fmpz_set_mpn_large(fmpz_t z, mp_srcptr src, mp_size_t n, int negative) +fmpz_set_mpn_large(fmpz_t z, nn_srcptr src, slong n, int negative) { mpz_ptr zz; slong i; @@ -142,7 +142,7 @@ _fmpz_sub_small(const fmpz_t x, const fmpz_t y) } } -static inline mp_size_t +static inline slong _fmpz_size(const fmpz_t f) { fmpz d = *f; @@ -202,13 +202,13 @@ fmpz_min(fmpz_t z, const fmpz_t x, const fmpz_t y) (zn) = FLINT_ABS(zn); \ } -void fmpz_lshift_mpn(fmpz_t z, mp_srcptr d, mp_size_t dn, int sgnbit, flint_bitcnt_t shift); +void fmpz_lshift_mpn(fmpz_t z, nn_srcptr d, slong dn, int sgnbit, flint_bitcnt_t shift); static inline slong fmpz_allocated_bytes(const fmpz_t x) { if (COEFF_IS_MPZ(*x)) - return sizeof(__mpz_struct) + COEFF_TO_PTR(*x)->_mp_alloc * sizeof(mp_limb_t); + return sizeof(__mpz_struct) + COEFF_TO_PTR(*x)->_mp_alloc * sizeof(ulong); else return 0; } diff --git a/src/fmpz_extras/lshift_mpn.c b/src/fmpz_extras/lshift_mpn.c index 8ad393fc8f..41cd4ce601 100644 --- a/src/fmpz_extras/lshift_mpn.c +++ b/src/fmpz_extras/lshift_mpn.c @@ -13,11 +13,11 @@ #include "fmpz_extras.h" void -fmpz_lshift_mpn(fmpz_t z, mp_srcptr d, mp_size_t dn, int sgnbit, flint_bitcnt_t shift) +fmpz_lshift_mpn(fmpz_t z, nn_srcptr d, slong dn, int sgnbit, flint_bitcnt_t shift) { mpz_ptr zmpz; - mp_ptr zp; - mp_size_t zn, shift_limbs; + nn_ptr zp; + slong zn, shift_limbs; flint_bitcnt_t shift_bits; zmpz = _fmpz_promote(z); diff --git a/src/fmpz_extras/test/main.c b/src/fmpz_extras/test/main.c index 46edf81dcb..a8896b1ff4 100644 --- a/src/fmpz_extras/test/main.c +++ b/src/fmpz_extras/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-add2_fmpz_si_inline.c" diff --git a/src/fmpz_extras/test/t-lshift_mpn.c b/src/fmpz_extras/test/t-lshift_mpn.c index ef3d672c95..7a6bfffeae 100644 --- a/src/fmpz_extras/test/t-lshift_mpn.c +++ b/src/fmpz_extras/test/t-lshift_mpn.c @@ -20,9 +20,9 @@ TEST_FUNCTION_START(fmpz_lshift_mpn, state) { fmpz_t a, b, c; ulong e; - mp_limb_t atmp; - mp_ptr aptr; - mp_size_t an; + ulong atmp; + nn_ptr aptr; + slong an; int asgnbit; fmpz_init(a); diff --git a/src/fmpz_factor.h b/src/fmpz_factor.h index d1d1d4f8f9..bfafd94fa1 100644 --- a/src/fmpz_factor.h +++ b/src/fmpz_factor.h @@ -34,7 +34,7 @@ void _fmpz_factor_fit_length(fmpz_factor_t factor, slong len); void _fmpz_factor_set_length(fmpz_factor_t factor, slong newlen); void _fmpz_factor_append(fmpz_factor_t factor, const fmpz_t p, ulong exp); -void _fmpz_factor_append_ui(fmpz_factor_t factor, mp_limb_t p, ulong exp); +void _fmpz_factor_append_ui(fmpz_factor_t factor, ulong p, ulong exp); void _fmpz_factor_concat(fmpz_factor_t factor1, fmpz_factor_t factor2, ulong exp); @@ -47,7 +47,7 @@ int fmpz_factor_print(const fmpz_factor_t factor); /* Factoring *****************************************************************/ -void _fmpz_factor_extend_factor_ui(fmpz_factor_t factor, mp_limb_t n); +void _fmpz_factor_extend_factor_ui(fmpz_factor_t factor, ulong n); int fmpz_factor_trial_range(fmpz_factor_t factor, const fmpz_t n, ulong start, ulong num_primes); int fmpz_factor_trial(fmpz_factor_t factor, const fmpz_t n, slong num_primes); @@ -63,11 +63,11 @@ int fmpz_factor_pp1(fmpz_t factor, const fmpz_t n, ulong B1, ulong B2_sqrt, ulon void fmpz_factor_refine(fmpz_factor_t res, const fmpz_factor_t f); -void flint_mpn_sqr_and_add_a(mp_ptr y, mp_ptr a, mp_ptr n, mp_limb_t n_size, mp_ptr ninv, mp_limb_t normbits); +void flint_mpn_sqr_and_add_a(nn_ptr y, nn_ptr a, nn_ptr n, ulong n_size, nn_ptr ninv, ulong normbits); -int flint_mpn_factor_pollard_brent_single(mp_ptr factor, mp_ptr n, mp_ptr ninv, mp_ptr a, mp_ptr y, mp_limb_t n_size, mp_limb_t normbits, mp_limb_t max_iters); -int fmpz_factor_pollard_brent_single(fmpz_t p_factor, fmpz_t n_in, fmpz_t yi, fmpz_t ai, mp_limb_t max_iters); -int fmpz_factor_pollard_brent(fmpz_t factor, flint_rand_t state, fmpz_t n, mp_limb_t max_tries, mp_limb_t max_iters); +int flint_mpn_factor_pollard_brent_single(nn_ptr factor, nn_ptr n, nn_ptr ninv, nn_ptr a, nn_ptr y, ulong n_size, ulong normbits, ulong max_iters); +int fmpz_factor_pollard_brent_single(fmpz_t p_factor, fmpz_t n_in, fmpz_t yi, fmpz_t ai, ulong max_iters); +int fmpz_factor_pollard_brent(fmpz_t factor, flint_rand_t state, fmpz_t n, ulong max_tries, ulong max_iters); /* Expansion *****************************************************************/ @@ -87,33 +87,33 @@ void fmpz_factor_divisor_sigma(fmpz_t res, ulong k, const fmpz_factor_t fac); typedef struct ecm_s { - mp_ptr t, u, v, w; /* temp variables */ - mp_ptr x, z; /* the coordinates */ - mp_ptr a24; /* value (a + 2)/4 */ - mp_ptr ninv; /* invere of n */ - mp_ptr one; /* one shifted */ + nn_ptr t, u, v, w; /* temp variables */ + nn_ptr x, z; /* the coordinates */ + nn_ptr a24; /* value (a + 2)/4 */ + nn_ptr ninv; /* inverse of n */ + nn_ptr one; /* one shifted */ unsigned char *GCD_table; /* checks whether baby step int is coprime to Primorial or not */ unsigned char **prime_table; - mp_limb_t n_size; - mp_limb_t normbits; + ulong n_size; + ulong normbits; } ecm_s; typedef ecm_s ecm_t[1]; -void
fmpz_factor_ecm_init(ecm_t ecm_inf, mp_limb_t sz); +void fmpz_factor_ecm_init(ecm_t ecm_inf, ulong sz); void fmpz_factor_ecm_clear(ecm_t ecm_inf); -void fmpz_factor_ecm_double(mp_ptr x, mp_ptr z, mp_ptr x0, mp_ptr z0, mp_ptr n, ecm_t ecm_inf); -void fmpz_factor_ecm_add(mp_ptr x, mp_ptr z, mp_ptr x1, mp_ptr z1, mp_ptr x2, mp_ptr z2, mp_ptr x0, mp_ptr z0, mp_ptr n, ecm_t ecm_inf); -void fmpz_factor_ecm_mul_montgomery_ladder(mp_ptr x, mp_ptr z, mp_ptr x0, mp_ptr z0, mp_limb_t k, mp_ptr n, ecm_t ecm_inf); -int fmpz_factor_ecm_select_curve(mp_ptr f, mp_ptr sig, mp_ptr n, ecm_t ecm_inf); -int fmpz_factor_ecm_stage_I(mp_ptr f, const mp_limb_t *prime_array, mp_limb_t num, mp_limb_t B1, mp_ptr n, ecm_t ecm_inf); -int fmpz_factor_ecm_stage_II(mp_ptr f, mp_limb_t B1, mp_limb_t B2, mp_limb_t P, mp_ptr n, ecm_t ecm_inf); -int fmpz_factor_ecm(fmpz_t f, mp_limb_t curves, mp_limb_t B1, mp_limb_t B2, flint_rand_t state, const fmpz_t n_in); +void fmpz_factor_ecm_double(nn_ptr x, nn_ptr z, nn_ptr x0, nn_ptr z0, nn_ptr n, ecm_t ecm_inf); +void fmpz_factor_ecm_add(nn_ptr x, nn_ptr z, nn_ptr x1, nn_ptr z1, nn_ptr x2, nn_ptr z2, nn_ptr x0, nn_ptr z0, nn_ptr n, ecm_t ecm_inf); +void fmpz_factor_ecm_mul_montgomery_ladder(nn_ptr x, nn_ptr z, nn_ptr x0, nn_ptr z0, ulong k, nn_ptr n, ecm_t ecm_inf); +int fmpz_factor_ecm_select_curve(nn_ptr f, nn_ptr sig, nn_ptr n, ecm_t ecm_inf); +int fmpz_factor_ecm_stage_I(nn_ptr f, const ulong *prime_array, ulong num, ulong B1, nn_ptr n, ecm_t ecm_inf); +int fmpz_factor_ecm_stage_II(nn_ptr f, ulong B1, ulong B2, ulong P, nn_ptr n, ecm_t ecm_inf); +int fmpz_factor_ecm(fmpz_t f, ulong curves, ulong B1, ulong B2, flint_rand_t state, const fmpz_t n_in); /* Inlines *******************************************************************/ diff --git a/src/fmpz_factor/append.c b/src/fmpz_factor/append.c index 72d409ba6b..894ff2434f 100644 --- a/src/fmpz_factor/append.c +++ b/src/fmpz_factor/append.c @@ -23,7 +23,7 @@ _fmpz_factor_append(fmpz_factor_t factor, const fmpz_t p, ulong exp) } void -_fmpz_factor_append_ui(fmpz_factor_t factor, mp_limb_t p, ulong exp) +_fmpz_factor_append_ui(fmpz_factor_t factor, ulong p, ulong exp) { _fmpz_factor_fit_length(factor, factor->num + 1); fmpz_set_ui(factor->p + factor->num, p); diff --git a/src/fmpz_factor/ecm.c b/src/fmpz_factor/ecm.c index 8732a8cf9b..3cced3c9f2 100644 --- a/src/fmpz_factor/ecm.c +++ b/src/fmpz_factor/ecm.c @@ -44,20 +44,20 @@ ulong n_ecm_primorial[] = #endif int -fmpz_factor_ecm(fmpz_t f, mp_limb_t curves, mp_limb_t B1, mp_limb_t B2, +fmpz_factor_ecm(fmpz_t f, ulong curves, ulong B1, ulong B2, flint_rand_t state, const fmpz_t n_in) { fmpz_t sig, nm8; - mp_limb_t P, num, maxP, mmin, mmax, mdiff, prod, maxj, n_size, cy; + ulong P, num, maxP, mmin, mmax, mdiff, prod, maxj, n_size, cy; ulong i, j; int ret; ecm_t ecm_inf; mpz_ptr fac, mptr; - mp_ptr n, mpsig; + nn_ptr n, mpsig; TMP_INIT; - const mp_limb_t *prime_array; + const ulong *prime_array; n_size = fmpz_size(n_in); if (n_size == 1) @@ -71,8 +71,8 @@ fmpz_factor_ecm(fmpz_t f, mp_limb_t curves, mp_limb_t B1, mp_limb_t B2, TMP_START; - n = TMP_ALLOC(n_size * sizeof(mp_limb_t)); - mpsig = TMP_ALLOC(n_size * sizeof(mp_limb_t)); + n = TMP_ALLOC(n_size * sizeof(ulong)); + mpsig = TMP_ALLOC(n_size * sizeof(ulong)); if ((!COEFF_IS_MPZ(* n_in))) { diff --git a/src/fmpz_factor/ecm_add.c b/src/fmpz_factor/ecm_add.c index e2b8062115..042f8b4002 100644 --- a/src/fmpz_factor/ecm_add.c +++ b/src/fmpz_factor/ecm_add.c @@ -24,8 +24,8 @@ */ void -fmpz_factor_ecm_add(mp_ptr x, mp_ptr z, mp_ptr x1, 
mp_ptr z1, mp_ptr x2, - mp_ptr z2, mp_ptr x0, mp_ptr z0, mp_ptr n, ecm_t ecm_inf) +fmpz_factor_ecm_add(nn_ptr x, nn_ptr z, nn_ptr x1, nn_ptr z1, nn_ptr x2, + nn_ptr z2, nn_ptr x0, nn_ptr z0, nn_ptr n, ecm_t ecm_inf) { if (flint_mpn_zero_p(z1, ecm_inf->n_size)) diff --git a/src/fmpz_factor/ecm_double.c b/src/fmpz_factor/ecm_double.c index ecc47801d9..043c5f15e2 100644 --- a/src/fmpz_factor/ecm_double.c +++ b/src/fmpz_factor/ecm_double.c @@ -22,8 +22,8 @@ */ void -fmpz_factor_ecm_double(mp_ptr x, mp_ptr z, mp_ptr x0, mp_ptr z0, - mp_ptr n, ecm_t ecm_inf) +fmpz_factor_ecm_double(nn_ptr x, nn_ptr z, nn_ptr x0, nn_ptr z0, + nn_ptr n, ecm_t ecm_inf) { if (flint_mpn_zero_p(z0, ecm_inf->n_size)) { diff --git a/src/fmpz_factor/ecm_init.c b/src/fmpz_factor/ecm_init.c index be41608ed8..876f84d175 100644 --- a/src/fmpz_factor/ecm_init.c +++ b/src/fmpz_factor/ecm_init.c @@ -12,31 +12,19 @@ #include "fmpz_factor.h" void -fmpz_factor_ecm_init(ecm_t ecm_inf, mp_limb_t sz) +fmpz_factor_ecm_init(ecm_t ecm_inf, ulong sz) { - ecm_inf->t = flint_malloc(sz * sizeof(mp_limb_t)); - ecm_inf->u = flint_malloc(sz * sizeof(mp_limb_t)); - ecm_inf->v = flint_malloc(sz * sizeof(mp_limb_t)); - ecm_inf->w = flint_malloc(sz * sizeof(mp_limb_t)); + ecm_inf->t = flint_calloc(sz, sizeof(ulong)); + ecm_inf->u = flint_calloc(sz, sizeof(ulong)); + ecm_inf->v = flint_calloc(sz, sizeof(ulong)); + ecm_inf->w = flint_calloc(sz, sizeof(ulong)); - ecm_inf->x = flint_malloc(sz * sizeof(mp_limb_t)); - ecm_inf->z = flint_malloc(sz * sizeof(mp_limb_t)); + ecm_inf->x = flint_calloc(sz, sizeof(ulong)); + ecm_inf->z = flint_calloc(sz, sizeof(ulong)); - ecm_inf->a24 = flint_malloc(sz * sizeof(mp_limb_t)); - ecm_inf->ninv = flint_malloc(sz * sizeof(mp_limb_t)); - ecm_inf->one = flint_malloc(sz * sizeof(mp_limb_t)); - - mpn_zero(ecm_inf->t, sz); - mpn_zero(ecm_inf->u, sz); - mpn_zero(ecm_inf->v, sz); - mpn_zero(ecm_inf->w, sz); - - mpn_zero(ecm_inf->x, sz); - mpn_zero(ecm_inf->z, sz); - - mpn_zero(ecm_inf->a24, sz); - mpn_zero(ecm_inf->ninv, sz); - mpn_zero(ecm_inf->one, sz); + ecm_inf->a24 = flint_calloc(sz, sizeof(ulong)); + ecm_inf->ninv = flint_calloc(sz, sizeof(ulong)); + ecm_inf->one = flint_calloc(sz, sizeof(ulong)); ecm_inf->n_size = sz; } diff --git a/src/fmpz_factor/ecm_mul_montgomery_ladder.c b/src/fmpz_factor/ecm_mul_montgomery_ladder.c index 8d8684412b..8f074063c8 100644 --- a/src/fmpz_factor/ecm_mul_montgomery_ladder.c +++ b/src/fmpz_factor/ecm_mul_montgomery_ladder.c @@ -18,11 +18,11 @@ /* tstbit uses 0 based indexing */ void -fmpz_factor_ecm_mul_montgomery_ladder(mp_ptr x, mp_ptr z, mp_ptr x0, mp_ptr z0, - mp_limb_t k, mp_ptr n, ecm_t ecm_inf) +fmpz_factor_ecm_mul_montgomery_ladder(nn_ptr x, nn_ptr z, nn_ptr x0, nn_ptr z0, + ulong k, nn_ptr n, ecm_t ecm_inf) { - mp_ptr x1, z1, x2, z2; /* Q (x1 : z1), P (x2 : z2) */ - mp_limb_t len; + nn_ptr x1, z1, x2, z2; /* Q (x1 : z1), P (x2 : z2) */ + ulong len; TMP_INIT; @@ -41,10 +41,10 @@ fmpz_factor_ecm_mul_montgomery_ladder(mp_ptr x, mp_ptr z, mp_ptr x0, mp_ptr z0, } TMP_START; - x1 = TMP_ALLOC(ecm_inf->n_size * sizeof(mp_limb_t)); - z1 = TMP_ALLOC(ecm_inf->n_size * sizeof(mp_limb_t)); - x2 = TMP_ALLOC(ecm_inf->n_size * sizeof(mp_limb_t)); - z2 = TMP_ALLOC(ecm_inf->n_size * sizeof(mp_limb_t)); + x1 = TMP_ALLOC(ecm_inf->n_size * sizeof(ulong)); + z1 = TMP_ALLOC(ecm_inf->n_size * sizeof(ulong)); + x2 = TMP_ALLOC(ecm_inf->n_size * sizeof(ulong)); + z2 = TMP_ALLOC(ecm_inf->n_size * sizeof(ulong)); flint_mpn_copyi(x1, x0, ecm_inf->n_size); /* Q <- P0 */ diff --git 
a/src/fmpz_factor/ecm_select_curve.c b/src/fmpz_factor/ecm_select_curve.c index c5c6098f41..035d3b86a4 100644 --- a/src/fmpz_factor/ecm_select_curve.c +++ b/src/fmpz_factor/ecm_select_curve.c @@ -20,21 +20,22 @@ /* Also selects initial point Q0 [x0 :: z0] (z0 = 1) */ int -fmpz_factor_ecm_select_curve(mp_ptr f, mp_ptr sig, mp_ptr n, ecm_t ecm_inf) +fmpz_factor_ecm_select_curve(nn_ptr f, nn_ptr sig, nn_ptr n, ecm_t ecm_inf) { - mp_size_t sz, cy; - mp_size_t invlimbs, gcdlimbs; - mp_ptr temp, tempv, tempn, tempi, tempf; + slong sz, cy; + mp_size_t invlimbs; + slong gcdlimbs; + nn_ptr temp, tempv, tempn, tempi, tempf; int ret; TMP_INIT; TMP_START; - temp = TMP_ALLOC(ecm_inf->n_size * sizeof(mp_limb_t)); - tempv = TMP_ALLOC((ecm_inf->n_size) * sizeof(mp_limb_t)); - tempn = TMP_ALLOC((ecm_inf->n_size) * sizeof(mp_limb_t)); - tempi = TMP_ALLOC((ecm_inf->n_size + 1) * sizeof(mp_limb_t)); - tempf = TMP_ALLOC((ecm_inf->n_size + 1) * sizeof(mp_limb_t)); + temp = TMP_ALLOC(ecm_inf->n_size * sizeof(ulong)); + tempv = TMP_ALLOC((ecm_inf->n_size) * sizeof(ulong)); + tempn = TMP_ALLOC((ecm_inf->n_size) * sizeof(ulong)); + tempi = TMP_ALLOC((ecm_inf->n_size + 1) * sizeof(ulong)); + tempf = TMP_ALLOC((ecm_inf->n_size + 1) * sizeof(ulong)); mpn_zero(tempn, ecm_inf->n_size); mpn_zero(tempv, ecm_inf->n_size); @@ -110,10 +111,12 @@ fmpz_factor_ecm_select_curve(mp_ptr f, mp_ptr sig, mp_ptr n, ecm_t ecm_inf) flint_mpn_copyi(tempv, ecm_inf->v, sz); flint_mpn_copyi(tempn, n, ecm_inf->n_size); + /* NOTE: invlimbs must be mp_size_t since it is strictly different from + * slong on Windows systems. */ gcdlimbs = mpn_gcdext(tempf, tempi, &invlimbs, tempv, sz, tempn, ecm_inf->n_size); if (!(gcdlimbs == 1 && tempf[0] == ecm_inf->one[0]) && - !(gcdlimbs == (mp_size_t) ecm_inf->n_size && mpn_cmp(tempf, n, ecm_inf->n_size) == 0)) + !(gcdlimbs == (slong) ecm_inf->n_size && mpn_cmp(tempf, n, ecm_inf->n_size) == 0)) { /* Found factor */ flint_mpn_copyi(f, tempf, gcdlimbs); diff --git a/src/fmpz_factor/ecm_stage_I.c b/src/fmpz_factor/ecm_stage_I.c index b2975dac40..070df94ee4 100644 --- a/src/fmpz_factor/ecm_stage_I.c +++ b/src/fmpz_factor/ecm_stage_I.c @@ -16,11 +16,11 @@ /* Implementation of the stage I of ECM */ int -fmpz_factor_ecm_stage_I(mp_ptr f, const mp_limb_t *prime_array, mp_limb_t num, - mp_limb_t B1, mp_ptr n, ecm_t ecm_inf) +fmpz_factor_ecm_stage_I(nn_ptr f, const ulong *prime_array, ulong num, + ulong B1, nn_ptr n, ecm_t ecm_inf) { - mp_limb_t times; - mp_size_t sz, gcdlimbs; + ulong times; + slong sz, gcdlimbs; int j, p; ulong i; @@ -50,7 +50,7 @@ fmpz_factor_ecm_stage_I(mp_ptr f, const mp_limb_t *prime_array, mp_limb_t num, if neither is true, factor found */ if (!(gcdlimbs == 1 && f[0] == ecm_inf->one[0]) && - !(gcdlimbs == (mp_size_t) ecm_inf->n_size && mpn_cmp(f, n, ecm_inf->n_size) == 0)) + !(gcdlimbs == (slong) ecm_inf->n_size && mpn_cmp(f, n, ecm_inf->n_size) == 0)) { /* Found factor in stage I */ return gcdlimbs; diff --git a/src/fmpz_factor/ecm_stage_II.c b/src/fmpz_factor/ecm_stage_II.c index 66f5da70b3..2da3320b11 100644 --- a/src/fmpz_factor/ecm_stage_II.c +++ b/src/fmpz_factor/ecm_stage_II.c @@ -15,15 +15,15 @@ /* Implementation of the stage II of ECM */ int -fmpz_factor_ecm_stage_II(mp_ptr f, mp_limb_t B1, mp_limb_t B2, mp_limb_t P, - mp_ptr n, ecm_t ecm_inf) +fmpz_factor_ecm_stage_II(nn_ptr f, ulong B1, ulong B2, ulong P, + nn_ptr n, ecm_t ecm_inf) { - mp_ptr Qx, Qz, Rx, Rz, Qdx, Qdz, a, b, g; - mp_limb_t mmin, mmax, maxj, sz, gcdlimbs; + nn_ptr Qx, Qz, Rx, Rz, Qdx, Qdz, a, b, g; + ulong mmin, 
mmax, maxj, sz, gcdlimbs; ulong i, j; int ret; - mp_ptr arrx, arrz, Q0x2, Q0z2; + nn_ptr arrx, arrz, Q0x2, Q0z2; TMP_INIT; @@ -32,19 +32,19 @@ fmpz_factor_ecm_stage_II(mp_ptr f, mp_limb_t B1, mp_limb_t B2, mp_limb_t P, maxj = (P + 1)/2; TMP_START; - Qx = TMP_ALLOC(ecm_inf->n_size * sizeof(mp_limb_t)); - Qz = TMP_ALLOC(ecm_inf->n_size * sizeof(mp_limb_t)); - Rx = TMP_ALLOC(ecm_inf->n_size * sizeof(mp_limb_t)); - Rz = TMP_ALLOC(ecm_inf->n_size * sizeof(mp_limb_t)); - Qdx = TMP_ALLOC(ecm_inf->n_size * sizeof(mp_limb_t)); - Qdz = TMP_ALLOC(ecm_inf->n_size * sizeof(mp_limb_t)); - Q0x2 = TMP_ALLOC(ecm_inf->n_size * sizeof(mp_limb_t)); - Q0z2 = TMP_ALLOC(ecm_inf->n_size * sizeof(mp_limb_t)); - a = TMP_ALLOC(ecm_inf->n_size * sizeof(mp_limb_t)); - b = TMP_ALLOC(ecm_inf->n_size * sizeof(mp_limb_t)); - g = TMP_ALLOC(ecm_inf->n_size * sizeof(mp_limb_t)); - arrx = flint_malloc(((maxj >> 1) + 1) * ecm_inf->n_size * sizeof(mp_limb_t)); - arrz = flint_malloc(((maxj >> 1) + 1) * ecm_inf->n_size * sizeof(mp_limb_t)); + Qx = TMP_ALLOC(ecm_inf->n_size * sizeof(ulong)); + Qz = TMP_ALLOC(ecm_inf->n_size * sizeof(ulong)); + Rx = TMP_ALLOC(ecm_inf->n_size * sizeof(ulong)); + Rz = TMP_ALLOC(ecm_inf->n_size * sizeof(ulong)); + Qdx = TMP_ALLOC(ecm_inf->n_size * sizeof(ulong)); + Qdz = TMP_ALLOC(ecm_inf->n_size * sizeof(ulong)); + Q0x2 = TMP_ALLOC(ecm_inf->n_size * sizeof(ulong)); + Q0z2 = TMP_ALLOC(ecm_inf->n_size * sizeof(ulong)); + a = TMP_ALLOC(ecm_inf->n_size * sizeof(ulong)); + b = TMP_ALLOC(ecm_inf->n_size * sizeof(ulong)); + g = TMP_ALLOC(ecm_inf->n_size * sizeof(ulong)); + arrx = flint_malloc(((maxj >> 1) + 1) * ecm_inf->n_size * sizeof(ulong)); + arrz = flint_malloc(((maxj >> 1) + 1) * ecm_inf->n_size * sizeof(ulong)); mpn_zero(arrx, ((maxj >> 1) + 1) * ecm_inf->n_size); mpn_zero(arrz, ((maxj >> 1) + 1) * ecm_inf->n_size); diff --git a/src/fmpz_factor/extend_factor_ui.c b/src/fmpz_factor/extend_factor_ui.c index 291c720768..06bab4aa9d 100644 --- a/src/fmpz_factor/extend_factor_ui.c +++ b/src/fmpz_factor/extend_factor_ui.c @@ -15,7 +15,7 @@ #include "fmpz_factor.h" void -_fmpz_factor_extend_factor_ui(fmpz_factor_t factor, mp_limb_t n) +_fmpz_factor_extend_factor_ui(fmpz_factor_t factor, ulong n) { slong i, len; n_factor_t nfac; diff --git a/src/fmpz_factor/factor.c b/src/fmpz_factor/factor.c index eb7bcdd389..03ae5a2617 100644 --- a/src/fmpz_factor/factor.c +++ b/src/fmpz_factor/factor.c @@ -18,10 +18,10 @@ void fmpz_factor(fmpz_factor_t factor, const fmpz_t n) { ulong exp; - mp_limb_t p; + ulong p; mpz_ptr xsrc; - mp_ptr xd; - mp_size_t xsize; + nn_ptr xd; + slong xsize; slong found; slong trial_start, trial_stop; TMP_INIT; @@ -56,7 +56,7 @@ fmpz_factor(fmpz_factor_t factor, const fmpz_t n) /* Create a temporary copy to be mutated */ TMP_START; - xd = TMP_ALLOC(xsize * sizeof(mp_limb_t)); + xd = TMP_ALLOC(xsize * sizeof(ulong)); flint_mpn_copyi(xd, xsrc->_mp_d, xsize); /* Factor out powers of two */ diff --git a/src/fmpz_factor/factor_pp1.c b/src/fmpz_factor/factor_pp1.c index b58cc735d2..655e4d6c64 100644 --- a/src/fmpz_factor/factor_pp1.c +++ b/src/fmpz_factor/factor_pp1.c @@ -43,14 +43,14 @@ ulong pp1_primorial[9] = #define num_primorials 9 #endif -void pp1_set(mp_ptr x1, mp_ptr y1, - mp_srcptr x2, mp_srcptr y2, mp_size_t nn) +void pp1_set(nn_ptr x1, nn_ptr y1, + nn_srcptr x2, nn_srcptr y2, slong nn) { flint_mpn_copyi(x1, x2, nn); flint_mpn_copyi(y1, y2, nn); } -void pp1_set_ui(mp_ptr x, mp_size_t nn, ulong norm, ulong c) +void pp1_set_ui(nn_ptr x, slong nn, ulong norm, ulong c) { mpn_zero(x, nn); x[0] 
= (c << norm); @@ -58,8 +58,8 @@ void pp1_set_ui(mp_ptr x, mp_size_t nn, ulong norm, ulong c) x[1] = (c >> (FLINT_BITS - norm)); } -void pp1_2k(mp_ptr x, mp_ptr y, mp_size_t nn, mp_srcptr n, - mp_srcptr ninv, mp_srcptr x0, ulong norm) +void pp1_2k(nn_ptr x, nn_ptr y, slong nn, nn_srcptr n, + nn_srcptr ninv, nn_srcptr x0, ulong norm) { pp1_mulmod(y, y, x, nn, n, ninv, norm); if (mpn_sub_n(y, y, x0, nn)) @@ -70,8 +70,8 @@ void pp1_2k(mp_ptr x, mp_ptr y, mp_size_t nn, mp_srcptr n, mpn_add_n(x, x, n, nn); } -void pp1_2kp1(mp_ptr x, mp_ptr y, mp_size_t nn, mp_srcptr n, - mp_srcptr ninv, mp_srcptr x0, ulong norm) +void pp1_2kp1(nn_ptr x, nn_ptr y, slong nn, nn_srcptr n, + nn_srcptr ninv, nn_srcptr x0, ulong norm) { pp1_mulmod(x, x, y, nn, n, ninv, norm); if (mpn_sub_n(x, x, x0, nn)) @@ -82,15 +82,15 @@ void pp1_2kp1(mp_ptr x, mp_ptr y, mp_size_t nn, mp_srcptr n, mpn_add_n(y, y, n, nn); } -void pp1_pow_ui(mp_ptr x, mp_ptr y, mp_size_t nn, - ulong exp, mp_srcptr n, mp_srcptr ninv, ulong norm) +void pp1_pow_ui(nn_ptr x, nn_ptr y, slong nn, + ulong exp, nn_srcptr n, nn_srcptr ninv, ulong norm) { - mp_limb_t t[30]; - mp_ptr x0 = t; + ulong t[30]; + nn_ptr x0 = t; ulong bit = ((UWORD(1) << FLINT_BIT_COUNT(exp)) >> 2); if (nn > 30) - x0 = flint_malloc(nn*sizeof(mp_limb_t)); + x0 = flint_malloc(nn*sizeof(ulong)); flint_mpn_copyi(x0, x, nn); pp1_mulmod(y, x, x, nn, n, ninv, norm); @@ -111,13 +111,13 @@ void pp1_pow_ui(mp_ptr x, mp_ptr y, mp_size_t nn, flint_free(x0); } -mp_size_t pp1_factor(mp_ptr factor, mp_srcptr n, - mp_srcptr x, mp_size_t nn, ulong norm) +slong pp1_factor(nn_ptr factor, nn_srcptr n, + nn_srcptr x, slong nn, ulong norm) { - mp_size_t ret = 0, xn = nn; + slong ret = 0, xn = nn; - mp_ptr n2 = flint_malloc(nn*sizeof(mp_limb_t)); - mp_ptr x2 = flint_malloc(nn*sizeof(mp_limb_t)); + nn_ptr n2 = flint_malloc(nn*sizeof(ulong)); + nn_ptr x2 = flint_malloc(nn*sizeof(ulong)); if (norm) mpn_rshift(n2, n, nn, norm); @@ -147,10 +147,10 @@ mp_size_t pp1_factor(mp_ptr factor, mp_srcptr n, return ret; } -mp_size_t pp1_find_power(mp_ptr factor, mp_ptr x, mp_ptr y, mp_size_t nn, - ulong p, mp_srcptr n, mp_srcptr ninv, ulong norm) +slong pp1_find_power(nn_ptr factor, nn_ptr x, nn_ptr y, slong nn, + ulong p, nn_srcptr n, nn_srcptr ninv, ulong norm) { - mp_size_t ret; + slong ret; do { @@ -165,8 +165,8 @@ int fmpz_factor_pp1(fmpz_t fac, const fmpz_t n_in, ulong B1, ulong B2sqrt, ulong { slong i, j; int ret = 0; - mp_size_t nn = fmpz_size(n_in), r; - mp_ptr x, y, oldx, oldy, n, ninv, factor, ptr_0, ptr_1, ptr_2, ptr_k; + slong nn = fmpz_size(n_in), r; + nn_ptr x, y, oldx, oldy, n, ninv, factor, ptr_0, ptr_1, ptr_2, ptr_k; ulong pr, oldpr, sqrt, bits0, norm; n_primes_t iter; @@ -185,13 +185,13 @@ int fmpz_factor_pp1(fmpz_t fac, const fmpz_t n_in, ulong B1, ulong B2sqrt, ulong sqrt = n_sqrt(B1); bits0 = FLINT_BIT_COUNT(B1); - x = flint_malloc(nn*sizeof(mp_limb_t)); - y = flint_malloc(nn*sizeof(mp_limb_t)); - oldx = flint_malloc(nn*sizeof(mp_limb_t)); - oldy = flint_malloc(nn*sizeof(mp_limb_t)); - n = flint_malloc(nn*sizeof(mp_limb_t)); - ninv = flint_malloc(nn*sizeof(mp_limb_t)); - factor = flint_malloc(nn*sizeof(mp_limb_t)); + x = flint_malloc(nn*sizeof(ulong)); + y = flint_malloc(nn*sizeof(ulong)); + oldx = flint_malloc(nn*sizeof(ulong)); + oldy = flint_malloc(nn*sizeof(ulong)); + n = flint_malloc(nn*sizeof(ulong)); + ninv = flint_malloc(nn*sizeof(ulong)); + factor = flint_malloc(nn*sizeof(ulong)); if (nn == 1) { @@ -200,7 +200,7 @@ int fmpz_factor_pp1(fmpz_t fac, const fmpz_t n_in, ulong B1, ulong B2sqrt, 
ulong n[0] <<= norm; } else { - mp_ptr np = COEFF_TO_PTR(*n_in)->_mp_d; + nn_ptr np = COEFF_TO_PTR(*n_in)->_mp_d; norm = flint_clz(np[nn - 1]); if (norm) mpn_lshift(n, np, nn, norm); @@ -278,7 +278,7 @@ int fmpz_factor_pp1(fmpz_t fac, const fmpz_t n_in, ulong B1, ulong B2sqrt, ulong int num; char * sieve = flint_malloc(32768); slong * sieve_index = flint_malloc(32768*sizeof(slong)); - mp_ptr diff = flint_malloc(16384*nn*sizeof(mp_limb_t)); + nn_ptr diff = flint_malloc(16384*nn*sizeof(ulong)); ulong offset[15], num_roots; ulong s; slong k, index = 0; @@ -469,7 +469,7 @@ int fmpz_factor_pp1(fmpz_t fac, const fmpz_t n_in, ulong B1, ulong B2sqrt, ulong for (i = 0; (ulong) i < num_roots; i++) { - mp_size_t sn; + slong sn; mpz_ptr m1 = COEFF_TO_PTR(roots[i]); mpz_ptr m2 = COEFF_TO_PTR(roots2[i]); diff --git a/src/fmpz_factor/factor_smooth.c b/src/fmpz_factor/factor_smooth.c index f116f51d12..c62a4d045b 100644 --- a/src/fmpz_factor/factor_smooth.c +++ b/src/fmpz_factor/factor_smooth.c @@ -66,15 +66,15 @@ int fmpz_factor_smooth(fmpz_factor_t factor, const fmpz_t n, slong bits, int proved) { ulong exp; - mp_limb_t p; + ulong p; mpz_ptr xsrc; - mp_ptr xd; - mp_size_t xsize; + nn_ptr xd; + slong xsize; slong found; slong trial_stop; slong * idx; slong i, b, bits2, istride; - const mp_limb_t * primes; + const ulong * primes; int ret = 0; TMP_INIT; @@ -109,7 +109,7 @@ int fmpz_factor_smooth(fmpz_factor_t factor, const fmpz_t n, /* Create a temporary copy to be mutated */ TMP_START; - xd = TMP_ALLOC(xsize * sizeof(mp_limb_t)); + xd = TMP_ALLOC(xsize * sizeof(ulong)); flint_mpn_copyi(xd, xsrc->_mp_d, xsize); /* Factor out powers of two */ diff --git a/src/fmpz_factor/factor_trial.c b/src/fmpz_factor/factor_trial.c index c99062832c..7204bab087 100644 --- a/src/fmpz_factor/factor_trial.c +++ b/src/fmpz_factor/factor_trial.c @@ -19,15 +19,15 @@ int fmpz_factor_trial(fmpz_factor_t factor, const fmpz_t n, slong num_primes) { ulong exp; - mp_limb_t p; + ulong p; mpz_t x; - mp_ptr xd; - mp_size_t xsize; + nn_ptr xd; + slong xsize; slong found; int ret = 1; slong * idx; slong bits, i; - const mp_limb_t * primes; + const ulong * primes; if (num_primes > 3512 || num_primes < 0) { diff --git a/src/fmpz_factor/factor_trial_range.c b/src/fmpz_factor/factor_trial_range.c index db5ed9d767..1c6b235157 100644 --- a/src/fmpz_factor/factor_trial_range.c +++ b/src/fmpz_factor/factor_trial_range.c @@ -18,10 +18,10 @@ int fmpz_factor_trial_range(fmpz_factor_t factor, const fmpz_t n, ulong start, ulong num_primes) { ulong exp; - mp_limb_t p; + ulong p; mpz_t x; - mp_ptr xd; - mp_size_t xsize; + nn_ptr xd; + slong xsize; ulong found; ulong trial_start, trial_stop; int ret = 1; diff --git a/src/fmpz_factor/fit_length.c b/src/fmpz_factor/fit_length.c index 8445d8e8a0..3f9fa7a88e 100644 --- a/src/fmpz_factor/fit_length.c +++ b/src/fmpz_factor/fit_length.c @@ -26,8 +26,8 @@ _fmpz_factor_fit_length(fmpz_factor_t factor, slong len) if (len > factor->alloc) { - flint_mpn_zero((mp_ptr)(factor->p + factor->alloc), len-factor->alloc); - flint_mpn_zero((mp_ptr)(factor->exp + factor->alloc), len-factor->alloc); + flint_mpn_zero((nn_ptr)(factor->p + factor->alloc), len-factor->alloc); + flint_mpn_zero((nn_ptr)(factor->exp + factor->alloc), len-factor->alloc); } factor->alloc = len; diff --git a/src/fmpz_factor/pollard_brent.c b/src/fmpz_factor/pollard_brent.c index 11cf7d2bff..ff0f34d986 100644 --- a/src/fmpz_factor/pollard_brent.c +++ b/src/fmpz_factor/pollard_brent.c @@ -21,11 +21,11 @@ int fmpz_factor_pollard_brent(fmpz_t p_factor, 
flint_rand_t state, fmpz_t n_in, - mp_limb_t max_tries, mp_limb_t max_iters) + ulong max_tries, ulong max_iters) { fmpz_t fa, fy, maxa, maxy; - mp_ptr a, y, n, ninv, temp; - mp_limb_t n_size, normbits, ans, val, size, cy; + nn_ptr a, y, n, ninv, temp; + ulong n_size, normbits, ans, val, size, cy; mpz_ptr fac, mptr; int ret; @@ -55,10 +55,10 @@ fmpz_factor_pollard_brent(fmpz_t p_factor, flint_rand_t state, fmpz_t n_in, fmpz_sub_ui(maxy, n_in, 1); /* 1 <= y <= n - 1 */ TMP_START; - a = TMP_ALLOC(n_size * sizeof(mp_limb_t)); - y = TMP_ALLOC(n_size * sizeof(mp_limb_t)); - n = TMP_ALLOC(n_size * sizeof(mp_limb_t)); - ninv = TMP_ALLOC(n_size * sizeof(mp_limb_t)); + a = TMP_ALLOC(n_size * sizeof(ulong)); + y = TMP_ALLOC(n_size * sizeof(ulong)); + n = TMP_ALLOC(n_size * sizeof(ulong)); + ninv = TMP_ALLOC(n_size * sizeof(ulong)); /* copying n_in onto n, and normalizing */ diff --git a/src/fmpz_factor/pollard_brent_single.c b/src/fmpz_factor/pollard_brent_single.c index 59360f2a95..9fb1d81f87 100644 --- a/src/fmpz_factor/pollard_brent_single.c +++ b/src/fmpz_factor/pollard_brent_single.c @@ -21,10 +21,10 @@ /* Sets y to (y^2 + a) % n */ void -flint_mpn_sqr_and_add_a(mp_ptr y, mp_ptr a, mp_ptr n, mp_limb_t n_size, mp_ptr ninv, - mp_limb_t normbits) +flint_mpn_sqr_and_add_a(nn_ptr y, nn_ptr a, nn_ptr n, ulong n_size, nn_ptr ninv, + ulong normbits) { - mp_limb_t cy; + ulong cy; flint_mpn_mulmod_preinvn(y, y, y, n_size, n, ninv, normbits); /* y^2 mod n */ cy = mpn_add_n(y, y, a, n_size); @@ -40,24 +40,24 @@ flint_mpn_sqr_and_add_a(mp_ptr y, mp_ptr a, mp_ptr n, mp_limb_t n_size, mp_ptr n } int -flint_mpn_factor_pollard_brent_single(mp_ptr factor, mp_ptr n, mp_ptr ninv, mp_ptr a, mp_ptr y, - mp_limb_t n_size, mp_limb_t normbits, mp_limb_t max_iters) +flint_mpn_factor_pollard_brent_single(nn_ptr factor, nn_ptr n, nn_ptr ninv, nn_ptr a, nn_ptr y, + ulong n_size, ulong normbits, ulong max_iters) { /* n_size >= 2, one limb fmpz_t's are passed on to n_factor_pollard_brent in outer function */ - mp_ptr x, q, ys, subval; - mp_limb_t iter, i, k, minval, m, one_shift_norm, gcdlimbs; - mp_limb_t j; + nn_ptr x, q, ys, subval; + ulong iter, i, k, minval, m, one_shift_norm, gcdlimbs; + ulong j; int ret; TMP_INIT; TMP_START; - x = TMP_ALLOC(n_size * sizeof(mp_limb_t)); /* initial value to evaluate f(x) */ - q = TMP_ALLOC(n_size * sizeof(mp_limb_t)); /* product of gcd's */ - ys = TMP_ALLOC(n_size * sizeof(mp_limb_t)); - subval = TMP_ALLOC(n_size * sizeof(mp_limb_t)); + x = TMP_ALLOC(n_size * sizeof(ulong)); /* initial value to evaluate f(x) */ + q = TMP_ALLOC(n_size * sizeof(ulong)); /* product of gcd's */ + ys = TMP_ALLOC(n_size * sizeof(ulong)); + subval = TMP_ALLOC(n_size * sizeof(ulong)); /* one shifted by normbits, used for comparisons */ one_shift_norm = UWORD(1) << normbits; @@ -166,11 +166,11 @@ flint_mpn_factor_pollard_brent_single(mp_ptr factor, mp_ptr n, mp_ptr ninv, mp_p int fmpz_factor_pollard_brent_single(fmpz_t p_factor, fmpz_t n_in, fmpz_t yi, - fmpz_t ai, mp_limb_t max_iters) + fmpz_t ai, ulong max_iters) { - mp_ptr a, y, n, ninv, temp; - mp_limb_t n_size, normbits, ans, size, cy; - mp_limb_t al, yl, val, valinv; + nn_ptr a, y, n, ninv, temp; + ulong n_size, normbits, ans, size, cy; + ulong al, yl, val, valinv; mpz_ptr fac, mptr; int ret; @@ -207,10 +207,10 @@ fmpz_factor_pollard_brent_single(fmpz_t p_factor, fmpz_t n_in, fmpz_t yi, normbits = flint_clz(temp[n_size - 1]); TMP_START; - a = TMP_ALLOC(n_size * sizeof(mp_limb_t)); - y = TMP_ALLOC(n_size * sizeof(mp_limb_t)); - n = TMP_ALLOC(n_size * 
sizeof(mp_limb_t)); - ninv = TMP_ALLOC(n_size * sizeof(mp_limb_t)); + a = TMP_ALLOC(n_size * sizeof(ulong)); + y = TMP_ALLOC(n_size * sizeof(ulong)); + n = TMP_ALLOC(n_size * sizeof(ulong)); + ninv = TMP_ALLOC(n_size * sizeof(ulong)); /* copying n_in onto n, and normalizing */ diff --git a/src/fmpz_factor/test/main.c b/src/fmpz_factor/test/main.c index 4fb18f59b3..33bc8454ca 100644 --- a/src/fmpz_factor/test/main.c +++ b/src/fmpz_factor/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-ecm.c" diff --git a/src/fmpz_factor/test/t-pollard_brent_single.c b/src/fmpz_factor/test/t-pollard_brent_single.c index 64b1bb59db..ab250364a9 100644 --- a/src/fmpz_factor/test/t-pollard_brent_single.c +++ b/src/fmpz_factor/test/t-pollard_brent_single.c @@ -35,7 +35,7 @@ TEST_FUNCTION_START(fmpz_factor_pollard_brent_single, state) for (i = 5; i < 36 && i <= FLINT_BITS; i += 5) { - mp_limb_t maxiter = UWORD(1) << FLINT_MIN(i, FLINT_BITS - 1); + ulong maxiter = UWORD(1) << FLINT_MIN(i, FLINT_BITS - 1); for (j = 0; j < 10 * flint_test_multiplier(); j++) { diff --git a/src/fmpz_lll/test/main.c b/src/fmpz_lll/test/main.c index 53c09ec59e..dca503adcc 100644 --- a/src/fmpz_lll/test/main.c +++ b/src/fmpz_lll/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-heuristic_dot.c" diff --git a/src/fmpz_mat.h b/src/fmpz_mat.h index e5e5762308..f4bb206821 100644 --- a/src/fmpz_mat.h +++ b/src/fmpz_mat.h @@ -383,18 +383,18 @@ int fmpz_mat_solve_fflu(fmpz_mat_t X, fmpz_t den, int fmpz_mat_solve_fflu_precomp(fmpz_mat_t X, const slong * perm, const fmpz_mat_t FFLU, const fmpz_mat_t B); -mp_limb_t +ulong fmpz_mat_find_good_prime_and_invert(nmod_mat_t Ainv, const fmpz_mat_t A, const fmpz_t det_bound); -mp_limb_t * +ulong * fmpz_mat_dixon_get_crt_primes(slong * num_primes, - const fmpz_mat_t A, mp_limb_t p); + const fmpz_mat_t A, ulong p); void _fmpz_mat_solve_dixon(fmpz_mat_t X, fmpz_t mod, const fmpz_mat_t A, const fmpz_mat_t B, - const nmod_mat_t Ainv, mp_limb_t p, + const nmod_mat_t Ainv, ulong p, const fmpz_t N, const fmpz_t D); int fmpz_mat_solve_dixon(fmpz_mat_t X, fmpz_t mod, @@ -403,7 +403,7 @@ int fmpz_mat_solve_dixon(fmpz_mat_t X, fmpz_t mod, void _fmpz_mat_solve_dixon_den(fmpz_mat_t X, fmpz_t den, const fmpz_mat_t A, const fmpz_mat_t B, - const nmod_mat_t Ainv, mp_limb_t p, + const nmod_mat_t Ainv, ulong p, const fmpz_t N, const fmpz_t D); int diff --git a/src/fmpz_mat/CRT_ui.c b/src/fmpz_mat/CRT_ui.c index ab51527839..0f534e4ccf 100644 --- a/src/fmpz_mat/CRT_ui.c +++ b/src/fmpz_mat/CRT_ui.c @@ -19,9 +19,9 @@ fmpz_mat_CRT_ui(fmpz_mat_t res, const fmpz_mat_t mat1, const fmpz_t m1, const nmod_mat_t mat2, int sign) { slong i, j; - mp_limb_t c; - mp_limb_t m2 = mat2->mod.n; - mp_limb_t m2inv = mat2->mod.ninv; + ulong c; + ulong m2 = mat2->mod.n; + ulong m2inv = mat2->mod.ninv; fmpz_t m1m2; c = fmpz_fdiv_ui(m1, m2); diff --git a/src/fmpz_mat/charpoly.c b/src/fmpz_mat/charpoly.c index aa3656f862..9a9994cb3c 100644 --- a/src/fmpz_mat/charpoly.c +++ b/src/fmpz_mat/charpoly.c @@ -134,7 +134,7 @@ void _fmpz_mat_charpoly_modular(fmpz * rop, const fmpz_mat_t op) slong bound; slong pbits = FLINT_BITS - 1; - mp_limb_t p = (UWORD(1) << pbits); + ulong p = (UWORD(1) << pbits); fmpz_t m; diff --git a/src/fmpz_mat/det_modular_given_divisor.c 
b/src/fmpz_mat/det_modular_given_divisor.c index 9ae03e07d2..c272cbbde7 100644 --- a/src/fmpz_mat/det_modular_given_divisor.c +++ b/src/fmpz_mat/det_modular_given_divisor.c @@ -18,10 +18,10 @@ #define DEBUG_USE_SMALL_PRIMES 0 -static mp_limb_t -next_good_prime(const fmpz_t d, mp_limb_t p) +static ulong +next_good_prime(const fmpz_t d, ulong p) { - mp_limb_t r = 0; + ulong r = 0; while (r == 0) { @@ -38,7 +38,7 @@ fmpz_mat_det_modular_given_divisor(fmpz_t det, const fmpz_mat_t A, const fmpz_t d, int proved) { fmpz_t bound, prod, stable_prod, x, xnew; - mp_limb_t p, xmod; + ulong p, xmod; nmod_mat_t Amod; slong n = A->r; diff --git a/src/fmpz_mat/hadamard.c b/src/fmpz_mat/hadamard.c index 60fe0a3dd2..6b0101d6f1 100644 --- a/src/fmpz_mat/hadamard.c +++ b/src/fmpz_mat/hadamard.c @@ -45,7 +45,7 @@ _fq_nmod_unrank(fq_nmod_t x, ulong r, const fq_nmod_ctx_t ctx) } static int -n_is_prime_power(mp_limb_t * p, mp_limb_t n) +n_is_prime_power(ulong * p, ulong n) { n_factor_t fac; @@ -128,7 +128,7 @@ fmpz_mat_jacobsthal(fmpz_mat_t Q) /* 2 -- n = 2^v * 2*(p^e + 1) */ /* 3 -- n = 2^v */ static int -paley_construction(mp_limb_t * q, mp_limb_t n) +paley_construction(ulong * q, ulong n) { int i, v; @@ -169,7 +169,7 @@ fmpz_mat_set2x2(fmpz_mat_t A, slong i, slong j, int fmpz_mat_hadamard(fmpz_mat_t A) { - mp_limb_t m, n, q; + ulong m, n, q; int kind; n = fmpz_mat_nrows(A); diff --git a/src/fmpz_mat/hnf_modular_eldiv.c b/src/fmpz_mat/hnf_modular_eldiv.c index 952758bc13..819129736b 100644 --- a/src/fmpz_mat/hnf_modular_eldiv.c +++ b/src/fmpz_mat/hnf_modular_eldiv.c @@ -17,7 +17,7 @@ void fmpz_mat_hnf_modular_eldiv(fmpz_mat_t A, const fmpz_t D) { slong i; - mp_limb_t Dlimbt; + ulong Dlimbt; nmod_mat_t AmodD; if (fmpz_mat_is_empty(A)) diff --git a/src/fmpz_mat/hnf_pernet_stein.c b/src/fmpz_mat/hnf_pernet_stein.c index 6358851275..88e1c27ea5 100644 --- a/src/fmpz_mat/hnf_pernet_stein.c +++ b/src/fmpz_mat/hnf_pernet_stein.c @@ -262,7 +262,7 @@ double_det(fmpz_t d1, fmpz_t d2, const fmpz_mat_t B, const fmpz_mat_t c, { slong i, j, n; slong *P; - mp_limb_t p, u1mod, u2mod, v1mod, v2mod; + ulong p, u1mod, u2mod, v1mod, v2mod; fmpz_t bound, prod, s1, s2, t, u1, u2, v1, v2; fmpz_mat_t dt, Bt; fmpq_t tmpq; diff --git a/src/fmpz_mat/minpoly_modular.c b/src/fmpz_mat/minpoly_modular.c index 43c1a31c91..ce11eabc4e 100644 --- a/src/fmpz_mat/minpoly_modular.c +++ b/src/fmpz_mat/minpoly_modular.c @@ -134,7 +134,7 @@ slong _fmpz_mat_minpoly_modular(fmpz * rop, const fmpz_mat_t op) double b1, b2, b3, bb; slong pbits = FLINT_BITS - 1, i, j; - mp_limb_t p = (UWORD(1) << pbits); + ulong p = (UWORD(1) << pbits); ulong * P, * Q; fmpz_mat_t v1, v2, v3; diff --git a/src/fmpz_mat/mul.c b/src/fmpz_mat/mul.c index 3e08bbcfbf..fb59a2e8b1 100644 --- a/src/fmpz_mat/mul.c +++ b/src/fmpz_mat/mul.c @@ -51,7 +51,7 @@ void _fmpz_mat_mul_small_2a(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B { for (j = 0; j < bc; j++) { - mp_limb_t hi, lo, shi, slo; + ulong hi, lo, shi, slo; slong x, y; shi = slo = 0; @@ -83,7 +83,7 @@ void _fmpz_mat_mul_small_2b(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B { for (j = 0; j < bc; j++) { - mp_limb_t hi, lo, shi, smid, slo; + ulong hi, lo, shi, smid, slo; slong x, y; shi = smid = slo = 0; diff --git a/src/fmpz_mat/mul_blas.c b/src/fmpz_mat/mul_blas.c index a6010a2c5c..853ab28e27 100644 --- a/src/fmpz_mat/mul_blas.c +++ b/src/fmpz_mat/mul_blas.c @@ -161,9 +161,9 @@ static void _lift_vec(double * a, const uint32_t * b, slong len, uint32_t n) a[i] = (int32_t)(b[i] - (n & (-(uint32_t)((int32_t)(n/2 - b[i]) 
< 0)))); } -static uint32_t _reduce_uint32(mp_limb_t a, nmod_t mod) +static uint32_t _reduce_uint32(ulong a, nmod_t mod) { - mp_limb_t r; + ulong r; NMOD_RED(r, a, mod); return (uint32_t)r; } @@ -202,7 +202,7 @@ static void fmpz_multi_mod_uint32_stride( for ( ; i < j; i++) { /* mid level split: depends on FMPZ_MOD_UI_CUTOFF */ - mp_limb_t t = fmpz_get_nmod(A + k, lu[i].mod); + ulong t = fmpz_get_nmod(A + k, lu[i].mod); /* low level split: 1, 2, or 3 small primes */ if (lu[i].mod2.n != 0) @@ -235,7 +235,7 @@ static void fmpz_multi_mod_uint32_stride( /* workers */ typedef struct { - mp_limb_t prime; + ulong prime; slong l; slong num_primes; slong m; @@ -340,9 +340,9 @@ void _fromd_worker(void * arg_ptr) { for (j = 0; j < n; j++) { - mp_limb_t r; + ulong r; slong a = (slong) dC[i*n + j]; - mp_limb_t b = (a < 0) ? a + shift : a; + ulong b = (a < 0) ? a + shift : a; NMOD_RED(r, b, mod); bigC[n*(num_primes*i + l) + j] = r; } @@ -361,11 +361,11 @@ void _crt_worker(void * arg_ptr) fmpz ** Crows = arg->Crows; const fmpz_comb_struct * comb = arg->comb; fmpz_comb_temp_t comb_temp; - mp_limb_t * r; + ulong * r; int sign = arg->sign; fmpz_comb_temp_init(comb_temp, comb); - r = FLINT_ARRAY_ALLOC(num_primes, mp_limb_t); + r = FLINT_ARRAY_ALLOC(num_primes, ulong); for (i = Cstartrow; i < Cstoprow; i++) { @@ -382,14 +382,14 @@ void _crt_worker(void * arg_ptr) fmpz_comb_temp_clear(comb_temp); } -static mp_limb_t * _calculate_primes( +static ulong * _calculate_primes( slong * num_primes_, flint_bitcnt_t bits, slong k) { slong num_primes, primes_alloc; - mp_limb_t * primes; - mp_limb_t p; + ulong * primes; + ulong p; fmpz_t prod; p = 2 + 2*n_sqrt((MAX_BLAS_DP_INT - 1)/(ulong)k); @@ -400,7 +400,7 @@ static mp_limb_t * _calculate_primes( } primes_alloc = 1 + bits/FLINT_BIT_COUNT(p); - primes = FLINT_ARRAY_ALLOC(primes_alloc, mp_limb_t); + primes = FLINT_ARRAY_ALLOC(primes_alloc, ulong); num_primes = 0; fmpz_init_set_ui(prod, 1); @@ -420,7 +420,7 @@ static mp_limb_t * _calculate_primes( if (num_primes + 1 > primes_alloc) { primes_alloc = FLINT_MAX(num_primes + 1, primes_alloc*5/4); - primes = FLINT_ARRAY_REALLOC(primes, primes_alloc, mp_limb_t); + primes = FLINT_ARRAY_REALLOC(primes, primes_alloc, ulong); } primes[num_primes] = p; @@ -457,7 +457,7 @@ int _fmpz_mat_mul_blas( slong n = B->c; uint32_t * bigC, * bigA, * bigB; double * dC, * dA, * dB; - mp_limb_t * primes; + ulong * primes; slong num_primes; fmpz_comb_t comb; thread_pool_handle * handles; diff --git a/src/fmpz_mat/mul_double_word.c b/src/fmpz_mat/mul_double_word.c index 5196407bbb..9b92136875 100644 --- a/src/fmpz_mat/mul_double_word.c +++ b/src/fmpz_mat/mul_double_word.c @@ -26,15 +26,15 @@ /* 2x2 -> 4 signed addmul */ static void _do_row_22_4_signed_branchy( fmpz * CR, - const mp_limb_t * AR, - const mp_limb_t * B, + const ulong * AR, + const ulong * B, slong br, slong bc) { slong j, k, l; - mp_limb_t s[4], t3, t2, t1, t0, w3, w2, w1, w0; - mp_limb_t A0, A1, B0, B1; - mp_limb_t u2, u1, u0; + ulong s[4], t3, t2, t1, t0, w3, w2, w1, w0; + ulong A0, A1, B0, B1; + ulong u2, u1, u0; for (j = 0, l = 0; j < bc; j++) { @@ -83,15 +83,15 @@ static void _do_row_22_4_signed_branchy( /* 2x2 -> 4 signed addmul */ static void _do_row_22_4_signed( fmpz * CR, - const mp_limb_t * AR, - const mp_limb_t * B, + const ulong * AR, + const ulong * B, slong br, slong bc) { slong j, k, l; - mp_limb_t s[4], t3, t2, t1, t0, w3, w2, w1, w0; - mp_limb_t A0, A1, B0, B1; - mp_limb_t v3, v2, u2, u1, u0; + ulong s[4], t3, t2, t1, t0, w3, w2, w1, w0; + ulong A0, A1, B0, B1; + ulong 
v3, v2, u2, u1, u0; for (j = 0, l = 0; j < bc; j++) { @@ -140,18 +140,18 @@ static void _do_row_22_4_signed( /* 2x2 -> 5 signed addmul */ static void _do_row_22_5_signed( fmpz * CR, - const mp_limb_t * AR, - const mp_limb_t * B, + const ulong * AR, + const ulong * B, slong br, slong bc) { slong j, k, l; - mp_limb_t s[5]; - mp_limb_t A0, A1, B0, B1; - mp_limb_t s4, s3, s2, s1, s0; - mp_limb_t p3, p2, p1, p0; - mp_limb_t u3, u2; - mp_limb_t v2; + ulong s[5]; + ulong A0, A1, B0, B1; + ulong s4, s3, s2, s1, s0; + ulong p3, p2, p1, p0; + ulong u3, u2; + ulong v2; for (j = 0, l = 0; j < bc; j++) { @@ -199,18 +199,18 @@ static void _do_row_22_5_signed( /* 2x2 -> 4 unsigned addmul */ static void _do_row_22_4_unsigned( fmpz * CR, - const mp_limb_t * AR, - const mp_limb_t * B, + const ulong * AR, + const ulong * B, slong br, slong bc) { slong j, k, l; - mp_limb_t s[4]; - mp_limb_t A0, A1, B0, B1; - mp_limb_t p3, p2, p1, p0; - mp_limb_t s3, s2, s1, s0; - mp_limb_t u3, u2, u1; - mp_limb_t v3, v2; + ulong s[4]; + ulong A0, A1, B0, B1; + ulong p3, p2, p1, p0; + ulong s3, s2, s1, s0; + ulong u3, u2, u1; + ulong v3, v2; for (j = 0, l = 0; j < bc; j++) { @@ -251,18 +251,18 @@ static void _do_row_22_4_unsigned( /* 2x2 -> 5 unsigned addmul */ static void _do_row_22_5_unsigned( fmpz * CR, - const mp_limb_t * AR, - const mp_limb_t * B, + const ulong * AR, + const ulong * B, slong br, slong bc) { slong j, k, l; - mp_limb_t s[5]; - mp_limb_t A0, A1, B0, B1; - mp_limb_t p3, p2, p1, p0; - mp_limb_t s4, s3, s2, s1, s0; - mp_limb_t u2, u1; - mp_limb_t v3, v2; + ulong s[5]; + ulong A0, A1, B0, B1; + ulong p3, p2, p1, p0; + ulong s4, s3, s2, s1, s0; + ulong u2, u1; + ulong v3, v2; for (j = 0, l = 0; j < bc; j++) { @@ -316,7 +316,7 @@ typedef struct { fmpz ** Crows; fmpz ** Arows; fmpz ** Brows; - mp_limb_t * BL; + ulong * BL; int sign; int words; } _worker_arg; @@ -328,7 +328,7 @@ static void _red_worker(void * varg) slong Bstopcol = arg->Bstopcol; slong br = arg->br; fmpz ** Brows = arg->Brows; - mp_limb_t * BL = arg->BL; + ulong * BL = arg->BL; int sign = arg->sign; slong i, j; @@ -358,10 +358,10 @@ static void _mul_worker(void * varg) slong bc = arg->bc; fmpz ** Crows = arg->Crows; fmpz ** Arows = arg->Arows; - mp_limb_t * BL = arg->BL; + ulong * BL = arg->BL; int sign = arg->sign; int words = arg->words; - mp_limb_t * AL; + ulong * AL; slong i, j; TMP_INIT; @@ -370,7 +370,7 @@ static void _mul_worker(void * varg) TMP_START; - AL = TMP_ARRAY_ALLOC(2*ac, mp_limb_t); + AL = TMP_ARRAY_ALLOC(2*ac, ulong); if (sign) { @@ -453,7 +453,7 @@ void _fmpz_mat_mul_double_word_internal( mainarg.Crows = C->rows; mainarg.Arows = A->rows; mainarg.Brows = B->rows; - mainarg.BL = TMP_ARRAY_ALLOC(br*bc*2, mp_limb_t); + mainarg.BL = TMP_ARRAY_ALLOC(br*bc*2, ulong); mainarg.sign = sign; if (bits + sign <= 4*FLINT_BITS) diff --git a/src/fmpz_mat/mul_fft.c b/src/fmpz_mat/mul_fft.c index 8a3137dd65..aeec15e216 100644 --- a/src/fmpz_mat/mul_fft.c +++ b/src/fmpz_mat/mul_fft.c @@ -25,23 +25,23 @@ The behaviour of this function does NOT depend on the initial value of z. 
*/ -static mp_limb_t fft_combine_bits_signed( - mp_limb_t * z, - mp_limb_t ** a, mp_size_t alen, +static ulong fft_combine_bits_signed( + ulong * z, + ulong ** a, slong alen, flint_bitcnt_t bits, - mp_size_t limbs, - mp_size_t zn) + slong limbs, + slong zn) { - mp_size_t i, zout; - mp_limb_t * t; - mp_limb_t f; + slong i, zout; + ulong * t; + ulong f; TMP_INIT; FLINT_ASSERT(bits > 1); TMP_START; - t = TMP_ARRAY_ALLOC((limbs + 1), mp_limb_t); + t = TMP_ARRAY_ALLOC((limbs + 1), ulong); f = 0; zout = 0; @@ -51,8 +51,8 @@ static mp_limb_t fft_combine_bits_signed( /* add the i^th coeffs a[i] */ slong q = (bits*i)/FLINT_BITS; slong r = (bits*i)%FLINT_BITS; - mp_limb_t s; - mp_limb_t halflimb = UWORD(1) << (FLINT_BITS - 1); + ulong s; + ulong halflimb = UWORD(1) << (FLINT_BITS - 1); if (a[i][limbs] | (a[i][limbs - 1] > halflimb)) { @@ -108,25 +108,25 @@ static mp_limb_t fft_combine_bits_signed( Split into coefficients from |x| evaluated at 2^bits, and do a negmod on each coefficient for x < 0. */ -static mp_size_t fft_split_bits_fmpz( - mp_limb_t ** poly, +static slong fft_split_bits_fmpz( + ulong ** poly, const fmpz_t x, flint_bitcnt_t bits, - mp_size_t limbs) + slong limbs) { - mp_size_t len; + slong len; int x_is_neg = 0; if (COEFF_IS_MPZ(*x)) { - mp_size_t s = COEFF_TO_PTR(*x)->_mp_size; + slong s = COEFF_TO_PTR(*x)->_mp_size; x_is_neg = s < 0; len = fft_split_bits(poly, COEFF_TO_PTR(*x)->_mp_d, x_is_neg ? -s : s, bits, limbs); } else if (!fmpz_is_zero(x)) { - mp_limb_t ux; + ulong ux; x_is_neg = *x < 0; ux = x_is_neg ? -*x : *x; len = fft_split_bits(poly, &ux, 1, bits, limbs); @@ -138,7 +138,7 @@ static mp_size_t fft_split_bits_fmpz( if (x_is_neg) { - mp_size_t i; + slong i; for (i = 0; i < len; i++) mpn_negmod_2expp1(poly[i], poly[i], limbs); } @@ -148,14 +148,14 @@ static mp_size_t fft_split_bits_fmpz( static void fft_combine_bits_fmpz( fmpz_t x, - mp_limb_t ** poly, slong length, + ulong ** poly, slong length, flint_bitcnt_t bits, - mp_size_t limbs, - mp_size_t total_limbs, + slong limbs, + slong total_limbs, int sign) { mpz_ptr mx = _fmpz_promote(x); - mp_limb_t * d = FLINT_MPZ_REALLOC(mx, total_limbs); + ulong * d = FLINT_MPZ_REALLOC(mx, total_limbs); if (sign) { if (fft_combine_bits_signed(d, poly, length, bits, limbs, total_limbs)) @@ -302,7 +302,7 @@ void _fmpz_mat_mul_truncate_sqrt2( K*N*4*n arrays of length size for B's fft rep 4*n arrays of length size for C's fft rep */ - temp = FLINT_ARRAY_ALLOC((6 + 4*n*(M*K + K*N + 1))*size, mp_limb_t); + temp = FLINT_ARRAY_ALLOC((6 + 4*n*(M*K + K*N + 1))*size, ulong); t = temp + 2*size; t1 = t + size; t2 = t1 + size; @@ -316,7 +316,7 @@ void _fmpz_mat_mul_truncate_sqrt2( K*N arrays of pointers of length 4*n for B's coeffs 1 array of pointers of length 4*n for C's coeffs */ - coeffs = FLINT_ARRAY_ALLOC(4*n*(M*K + K*N + 1), mp_limb_t*); + coeffs = FLINT_ARRAY_ALLOC(4*n*(M*K + K*N + 1), ulong*); Acoeffs = coeffs; Bcoeffs = Acoeffs + 4*n*M*K; Ccoeffs = Bcoeffs + 4*n*K*N; diff --git a/src/fmpz_mat/mul_multi_mod.c b/src/fmpz_mat/mul_multi_mod.c index 04c1385306..48872a253e 100644 --- a/src/fmpz_mat/mul_multi_mod.c +++ b/src/fmpz_mat/mul_multi_mod.c @@ -35,7 +35,7 @@ typedef struct { nmod_mat_t * mod_C; const fmpz_comb_struct * comb; slong num_primes; - mp_ptr primes; + nn_ptr primes; int sign; } _worker_arg; @@ -59,10 +59,10 @@ static void _mod_worker(void * varg) if (comb != NULL) { - mp_limb_t * residues; + ulong * residues; fmpz_comb_temp_t comb_temp; - residues = FLINT_ARRAY_ALLOC(num_primes, mp_limb_t); + residues = 
FLINT_ARRAY_ALLOC(num_primes, ulong); fmpz_comb_temp_init(comb_temp, comb); for (i = Astartrow; i < Astoprow; i++) @@ -127,7 +127,7 @@ static void _crt_worker(void * varg) slong Cstoprow = arg->Cstoprow; fmpz ** Crows = arg->Crows; nmod_mat_t * mod_C = arg->mod_C; - mp_limb_t * primes = arg->primes; + ulong * primes = arg->primes; slong num_primes = arg->num_primes; const fmpz_comb_struct * comb = arg->comb; int sign = arg->sign; @@ -136,10 +136,10 @@ static void _crt_worker(void * varg) if (comb != NULL) { - mp_limb_t * residues; + ulong * residues; fmpz_comb_temp_t comb_temp; - residues = FLINT_ARRAY_ALLOC(num_primes, mp_limb_t); + residues = FLINT_ARRAY_ALLOC(num_primes, ulong); fmpz_comb_temp_init(comb_temp, comb); for (i = Cstartrow; i < Cstoprow; i++) @@ -156,7 +156,7 @@ static void _crt_worker(void * varg) } else if (num_primes == 1) { - mp_limb_t r, t, p = primes[0]; + ulong r, t, p = primes[0]; if (sign) { @@ -183,7 +183,7 @@ static void _crt_worker(void * varg) } else if (num_primes == 2) { - mp_limb_t r0, r1, i0, i1, m0, m1, M[2], t[2], u[2]; + ulong r0, r1, i0, i1, m0, m1, M[2], t[2], u[2]; m0 = primes[0]; m1 = primes[1]; i0 = n_invmod(m1 % m0, m0); @@ -230,11 +230,11 @@ static void _crt_worker(void * varg) } else { - mp_ptr M, Ns, T, U; - mp_size_t Msize, Nsize; - mp_limb_t cy, ri; + nn_ptr M, Ns, T, U; + slong Msize, Nsize; + ulong cy, ri; - M = FLINT_ARRAY_ALLOC(num_primes + 1, mp_limb_t); + M = FLINT_ARRAY_ALLOC(num_primes + 1, ulong); M[0] = primes[0]; Msize = 1; @@ -250,9 +250,9 @@ static void _crt_worker(void * varg) do not require an extra limb. */ Nsize = Msize + 2; - Ns = FLINT_ARRAY_ALLOC(Nsize*num_primes, mp_limb_t); - T = FLINT_ARRAY_ALLOC(Nsize, mp_limb_t); - U = FLINT_ARRAY_ALLOC(Nsize, mp_limb_t); + Ns = FLINT_ARRAY_ALLOC(Nsize*num_primes, ulong); + T = FLINT_ARRAY_ALLOC(Nsize, ulong); + U = FLINT_ARRAY_ALLOC(Nsize, ulong); for (i = 0; i < num_primes; i++) { @@ -351,7 +351,7 @@ void _fmpz_mat_mul_multi_mod( /* Initialize */ mainarg.sign = sign; - mainarg.primes = FLINT_ARRAY_ALLOC(mainarg.num_primes, mp_limb_t); + mainarg.primes = FLINT_ARRAY_ALLOC(mainarg.num_primes, ulong); mainarg.primes[0] = first_prime; if (mainarg.num_primes > 1) { diff --git a/src/fmpz_mat/multi_CRT_ui.c b/src/fmpz_mat/multi_CRT_ui.c index 4603ebd2cb..5e86379545 100644 --- a/src/fmpz_mat/multi_CRT_ui.c +++ b/src/fmpz_mat/multi_CRT_ui.c @@ -20,7 +20,7 @@ fmpz_mat_multi_CRT_ui_precomp(fmpz_mat_t mat, const fmpz_comb_t comb, fmpz_comb_temp_t temp, int sign) { slong i, j, k; - mp_ptr r; + nn_ptr r; r = _nmod_vec_init(nres); @@ -43,7 +43,7 @@ fmpz_mat_multi_CRT_ui(fmpz_mat_t mat, nmod_mat_t * const residues, { fmpz_comb_t comb; fmpz_comb_temp_t temp; - mp_ptr primes; + nn_ptr primes; slong i; primes = _nmod_vec_init(nres); diff --git a/src/fmpz_mat/multi_mod_ui.c b/src/fmpz_mat/multi_mod_ui.c index 925b011a14..6dd645e67f 100644 --- a/src/fmpz_mat/multi_mod_ui.c +++ b/src/fmpz_mat/multi_mod_ui.c @@ -19,7 +19,7 @@ fmpz_mat_multi_mod_ui_precomp(nmod_mat_t * residues, slong nres, const fmpz_mat_t mat, const fmpz_comb_t comb, fmpz_comb_temp_t temp) { slong i, j, k; - mp_ptr r; + nn_ptr r; r = _nmod_vec_init(nres); @@ -41,7 +41,7 @@ fmpz_mat_multi_mod_ui(nmod_mat_t * residues, slong nres, const fmpz_mat_t mat) { fmpz_comb_t comb; fmpz_comb_temp_t temp; - mp_ptr primes; + nn_ptr primes; slong i; primes = _nmod_vec_init(nres); diff --git a/src/fmpz_mat/next_col_van_hoeij.c b/src/fmpz_mat/next_col_van_hoeij.c index eba5a94e10..5022763cab 100644 --- a/src/fmpz_mat/next_col_van_hoeij.c +++ 
b/src/fmpz_mat/next_col_van_hoeij.c @@ -20,7 +20,7 @@ void _fmpz_mat_resize_van_hoeij(fmpz_mat_t M, slong r, slong c) M->entries = (fmpz *) flint_realloc(M->entries, r*c*sizeof(fmpz)); - mpn_zero((mp_ptr) M->entries + M->r*M->c, r*c - M->r*M->c); + mpn_zero((nn_ptr) M->entries + M->r*M->c, r*c - M->r*M->c); if (r != M->r) /* we will have an extra row and column */ { diff --git a/src/fmpz_mat/rref_mul.c b/src/fmpz_mat/rref_mul.c index 64442d3f20..f5de35ad5b 100644 --- a/src/fmpz_mat/rref_mul.c +++ b/src/fmpz_mat/rref_mul.c @@ -20,7 +20,7 @@ slong fmpz_mat_rref_mul(fmpz_mat_t R, fmpz_t den, const fmpz_mat_t A) { nmod_mat_t Amod; - mp_limb_t p; + ulong p; slong i, j, m, n, rank, * pivs, * P; fmpz_mat_t B, C, D, E, E2, F, FD; diff --git a/src/fmpz_mat/solve_dixon.c b/src/fmpz_mat/solve_dixon.c index 10c37d191a..92cb291f5d 100644 --- a/src/fmpz_mat/solve_dixon.c +++ b/src/fmpz_mat/solve_dixon.c @@ -14,11 +14,11 @@ #include "fmpz.h" #include "fmpz_mat.h" -mp_limb_t +ulong fmpz_mat_find_good_prime_and_invert(nmod_mat_t Ainv, const fmpz_mat_t A, const fmpz_t det_bound) { - mp_limb_t p; + ulong p; fmpz_t tested; p = UWORD(1) << NMOD_MAT_OPTIMAL_MODULUS_BITS; @@ -52,10 +52,10 @@ fmpz_mat_find_good_prime_and_invert(nmod_mat_t Ainv, #define USE_SLOW_MULTIPLICATION 0 -mp_limb_t * fmpz_mat_dixon_get_crt_primes(slong * num_primes, const fmpz_mat_t A, mp_limb_t p) +ulong * fmpz_mat_dixon_get_crt_primes(slong * num_primes, const fmpz_mat_t A, ulong p) { fmpz_t bound, prod; - mp_limb_t * primes; + ulong * primes; slong i, j; fmpz_init(bound); @@ -70,7 +70,7 @@ mp_limb_t * fmpz_mat_dixon_get_crt_primes(slong * num_primes, const fmpz_mat_t A fmpz_mul_ui(bound, bound, A->r); fmpz_mul_ui(bound, bound, UWORD(2)); /* signs */ - primes = (mp_limb_t *) flint_malloc(sizeof(mp_limb_t) * + primes = (ulong *) flint_malloc(sizeof(ulong) * (fmpz_bits(bound) / (FLINT_BIT_COUNT(p) - 1) + 2)); primes[0] = p; fmpz_set_ui(prod, p); @@ -93,13 +93,13 @@ mp_limb_t * fmpz_mat_dixon_get_crt_primes(slong * num_primes, const fmpz_mat_t A void _fmpz_mat_solve_dixon(fmpz_mat_t X, fmpz_t mod, const fmpz_mat_t A, const fmpz_mat_t B, - const nmod_mat_t Ainv, mp_limb_t p, + const nmod_mat_t Ainv, ulong p, const fmpz_t N, const fmpz_t D) { fmpz_t bound, ppow; fmpz_mat_t x, d, y, Ay; fmpz_t prod; - mp_limb_t * crt_primes; + ulong * crt_primes; nmod_mat_t * A_mod; nmod_mat_t Ay_mod, d_mod, y_mod; slong i, n, cols, num_primes; @@ -212,7 +212,7 @@ fmpz_mat_solve_dixon(fmpz_mat_t X, fmpz_t mod, { nmod_mat_t Ainv; fmpz_t N, D; - mp_limb_t p; + ulong p; if (!fmpz_mat_is_square(A)) { diff --git a/src/fmpz_mat/solve_multi_mod_den.c b/src/fmpz_mat/solve_multi_mod_den.c index af5740f65e..3e40347dd5 100644 --- a/src/fmpz_mat/solve_multi_mod_den.c +++ b/src/fmpz_mat/solve_multi_mod_den.c @@ -15,11 +15,11 @@ #include "fmpz_mat.h" #include "fmpq_mat.h" -mp_limb_t fmpz_mat_find_good_prime_and_solve(nmod_mat_t Xmod, +ulong fmpz_mat_find_good_prime_and_solve(nmod_mat_t Xmod, nmod_mat_t Amod, nmod_mat_t Bmod, const fmpz_mat_t A, const fmpz_mat_t B, const fmpz_t det_bound) { - mp_limb_t p; + ulong p; fmpz_t tested; p = UWORD(1) << NMOD_MAT_OPTIMAL_MODULUS_BITS; diff --git a/src/fmpz_mat/sqr_bodrato.c b/src/fmpz_mat/sqr_bodrato.c index 6098a4b852..fb6c2fb344 100644 --- a/src/fmpz_mat/sqr_bodrato.c +++ b/src/fmpz_mat/sqr_bodrato.c @@ -17,10 +17,10 @@ #define E fmpz_mat_entry static void -local_fmma(fmpz_t f, mp_limb_t a, fmpz b, +local_fmma(fmpz_t f, ulong a, fmpz b, fmpz c, fmpz d) { - mp_limb_t sh, sl, th, tl; + ulong sh, sl, th, tl; smul_ppmm(sh, sl, a, b); 
smul_ppmm(th, tl, c, d); @@ -52,7 +52,7 @@ fmpz_mat_sqr_bodrato(fmpz_mat_t B, const fmpz_mat_t A) if (!COEFF_IS_MPZ(a) && !COEFF_IS_MPZ(b) && !COEFF_IS_MPZ(c) && !COEFF_IS_MPZ(d)) { - mp_limb_t s, t, u, v; + ulong s, t, u, v; smul_ppmm(s, t, a, a); smul_ppmm(u, v, b, c); @@ -95,7 +95,7 @@ fmpz_mat_sqr_bodrato(fmpz_mat_t B, const fmpz_mat_t A) !COEFF_IS_MPZ(*E(A, 2, 0)) && !COEFF_IS_MPZ(*E(A, 2, 1)) && !COEFF_IS_MPZ(*E(A, 2, 2))) { - mp_limb_t s, t, u, v, j, k; + ulong s, t, u, v, j, k; smul_ppmm(s, t, *E(A, 0, 2), *E(A, 2, 0)); smul_ppmm(u, v, *E(A, 0, 1), *E(A, 1, 0)); diff --git a/src/fmpz_mat/test/main.c b/src/fmpz_mat/test/main.c index 3f1c762ff2..bd48cf6415 100644 --- a/src/fmpz_mat/test/main.c +++ b/src/fmpz_mat/test/main.c @@ -20,9 +20,6 @@ # undef ulong #endif -#include -#include - /* Include functions *********************************************************/ #include "t-add_sub.c" diff --git a/src/fmpz_mat/test/t-CRT_ui.c b/src/fmpz_mat/test/t-CRT_ui.c index fe51e1d876..8375d0020f 100644 --- a/src/fmpz_mat/test/t-CRT_ui.c +++ b/src/fmpz_mat/test/t-CRT_ui.c @@ -26,7 +26,7 @@ TEST_FUNCTION_START(fmpz_mat_CRT_ui, state) fmpz_t mod; fmpz_mat_t A, B, C; nmod_mat_t Amod; - mp_limb_t primes[1000]; + ulong primes[1000]; bits = n_randint(state, 500) + 1; rows = n_randint(state, 10); diff --git a/src/fmpz_mat/test/t-CRT_ui_unsigned.c b/src/fmpz_mat/test/t-CRT_ui_unsigned.c index 08f5a9ea68..a4c5673f18 100644 --- a/src/fmpz_mat/test/t-CRT_ui_unsigned.c +++ b/src/fmpz_mat/test/t-CRT_ui_unsigned.c @@ -26,7 +26,7 @@ TEST_FUNCTION_START(fmpz_mat_CRT_ui_unsigned, state) fmpz_t mod; fmpz_mat_t A, B, C; nmod_mat_t Amod; - mp_limb_t primes[1000]; + ulong primes[1000]; bits = n_randint(state, 500) + 1; rows = n_randint(state, 10); diff --git a/src/fmpz_mat/test/t-get_nmod_mat.c b/src/fmpz_mat/test/t-get_nmod_mat.c index defa4acd1c..b67ffaf006 100644 --- a/src/fmpz_mat/test/t-get_nmod_mat.c +++ b/src/fmpz_mat/test/t-get_nmod_mat.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(fmpz_mat_get_nmod_mat, state) fmpz_mat_t A; nmod_mat_t M, M2; slong rows, cols; - mp_limb_t mod; + ulong mod; rows = n_randint(state, 50); cols = n_randint(state, 50); diff --git a/src/fmpz_mat/test/t-multi_CRT_ui.c b/src/fmpz_mat/test/t-multi_CRT_ui.c index 01ac2dd58e..1f396e1e50 100644 --- a/src/fmpz_mat/test/t-multi_CRT_ui.c +++ b/src/fmpz_mat/test/t-multi_CRT_ui.c @@ -26,7 +26,7 @@ TEST_FUNCTION_START(fmpz_mat_multi_CRT_ui, state) fmpz_t mod; fmpz_mat_t A, B, C; nmod_mat_t Amod[1000]; - mp_limb_t primes[1000]; + ulong primes[1000]; bits = n_randint(state, 500) + 1; rows = n_randint(state, 10); diff --git a/src/fmpz_mat/test/t-multi_CRT_ui_unsigned.c b/src/fmpz_mat/test/t-multi_CRT_ui_unsigned.c index f8d32a245e..3835e72fbe 100644 --- a/src/fmpz_mat/test/t-multi_CRT_ui_unsigned.c +++ b/src/fmpz_mat/test/t-multi_CRT_ui_unsigned.c @@ -26,7 +26,7 @@ TEST_FUNCTION_START(fmpz_mat_multi_CRT_ui_unsigned, state) fmpz_t mod; fmpz_mat_t A, B, C; nmod_mat_t Amod[1000]; - mp_limb_t primes[1000]; + ulong primes[1000]; bits = n_randint(state, 500) + 1; rows = n_randint(state, 10); diff --git a/src/fmpz_mod/add.c b/src/fmpz_mod/add.c index eabab5c09e..16bcff94f1 100644 --- a/src/fmpz_mod/add.c +++ b/src/fmpz_mod/add.c @@ -32,7 +32,7 @@ void _fmpz_mod_add1(fmpz_t a, const fmpz_t b, const fmpz_t c, void _fmpz_mod_add2s(fmpz_t a, const fmpz_t b, const fmpz_t c, const fmpz_mod_ctx_t FLINT_UNUSED(ctx)) { - mp_limb_t a0, b0, c0; + ulong a0, b0, c0; FLINT_ASSERT(fmpz_mod_is_canonical(b, ctx)); FLINT_ASSERT(fmpz_mod_is_canonical(c, ctx)); @@ -48,7 +48,7 @@ 
void _fmpz_mod_add2s(fmpz_t a, const fmpz_t b, const fmpz_t c, void _fmpz_mod_add2(fmpz_t a, const fmpz_t b, const fmpz_t c, const fmpz_mod_ctx_t ctx) { - mp_limb_t t2, t1, t0, a2, a1, a0, b1, b0, c1, c0; + ulong t2, t1, t0, a2, a1, a0, b1, b0, c1, c0; FLINT_ASSERT(fmpz_mod_is_canonical(b, ctx)); FLINT_ASSERT(fmpz_mod_is_canonical(c, ctx)); diff --git a/src/fmpz_mod/mul.c b/src/fmpz_mod/mul.c index f74caea3b1..063b0a0185 100644 --- a/src/fmpz_mod/mul.c +++ b/src/fmpz_mod/mul.c @@ -16,7 +16,7 @@ void _fmpz_mod_mul1(fmpz_t a, const fmpz_t b, const fmpz_t c, const fmpz_mod_ctx_t ctx) { - mp_limb_t a0, b0, c0; + ulong a0, b0, c0; FLINT_ASSERT(fmpz_mod_is_canonical(b, ctx)); FLINT_ASSERT(fmpz_mod_is_canonical(c, ctx)); @@ -35,7 +35,7 @@ void _fmpz_mod_mul1(fmpz_t a, const fmpz_t b, const fmpz_t c, void _fmpz_mod_mul2s(fmpz_t a, const fmpz_t b, const fmpz_t c, const fmpz_mod_ctx_t FLINT_UNUSED(ctx)) { - mp_limb_t a0, b0, c0; + ulong a0, b0, c0; FLINT_ASSERT(fmpz_mod_is_canonical(b, ctx)); FLINT_ASSERT(fmpz_mod_is_canonical(c, ctx)); @@ -68,14 +68,14 @@ void _fmpz_mod_mul2s(fmpz_t a, const fmpz_t b, const fmpz_t c, void _fmpz_mod_mul2(fmpz_t a, const fmpz_t b, const fmpz_t c, const fmpz_mod_ctx_t ctx) { - mp_limb_t a1, a0, b1, b0, c1, c0; - mp_limb_t x3, x2, x1, x0; - mp_limb_t q2, q1, q0; - mp_limb_t z4, z3, z2, z1, z0; - mp_limb_t t4, t3, t2, t1; - mp_limb_t s3, s2, s1; - mp_limb_t u4, u3, u2, u1; - mp_limb_t v4, v3, v2, v1; + ulong a1, a0, b1, b0, c1, c0; + ulong x3, x2, x1, x0; + ulong q2, q1, q0; + ulong z4, z3, z2, z1, z0; + ulong t4, t3, t2, t1; + ulong s3, s2, s1; + ulong u4, u3, u2, u1; + ulong v4, v3, v2, v1; FLINT_ASSERT(fmpz_mod_is_canonical(b, ctx)); FLINT_ASSERT(fmpz_mod_is_canonical(c, ctx)); diff --git a/src/fmpz_mod/sub.c b/src/fmpz_mod/sub.c index ad87457ca4..003815dc20 100644 --- a/src/fmpz_mod/sub.c +++ b/src/fmpz_mod/sub.c @@ -16,7 +16,7 @@ void _fmpz_mod_sub1(fmpz_t a, const fmpz_t b, const fmpz_t c, const fmpz_mod_ctx_t ctx) { - mp_limb_t a0, b0, c0; + ulong a0, b0, c0; FLINT_ASSERT(fmpz_mod_is_canonical(b, ctx)); FLINT_ASSERT(fmpz_mod_is_canonical(c, ctx)); @@ -32,7 +32,7 @@ void _fmpz_mod_sub1(fmpz_t a, const fmpz_t b, const fmpz_t c, void _fmpz_mod_sub2s(fmpz_t a, const fmpz_t b, const fmpz_t c, const fmpz_mod_ctx_t FLINT_UNUSED(ctx)) { - mp_limb_t a0, b0, c0; + ulong a0, b0, c0; FLINT_ASSERT(fmpz_mod_is_canonical(b, ctx)); FLINT_ASSERT(fmpz_mod_is_canonical(c, ctx)); @@ -48,7 +48,7 @@ void _fmpz_mod_sub2s(fmpz_t a, const fmpz_t b, const fmpz_t c, void _fmpz_mod_sub2(fmpz_t a, const fmpz_t b, const fmpz_t c, const fmpz_mod_ctx_t ctx) { - mp_limb_t a2, a1, a0, b1, b0, c1, c0; + ulong a2, a1, a0, b1, b0, c1, c0; FLINT_ASSERT(fmpz_mod_is_canonical(b, ctx)); FLINT_ASSERT(fmpz_mod_is_canonical(c, ctx)); diff --git a/src/fmpz_mod/test/main.c b/src/fmpz_mod/test/main.c index 6fd8ddc8e2..6349b53550 100644 --- a/src/fmpz_mod/test/main.c +++ b/src/fmpz_mod/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add_sub_neg.c" diff --git a/src/fmpz_mod_mat/test/main.c b/src/fmpz_mod_mat/test/main.c index fece212e5c..fdc4a3643c 100644 --- a/src/fmpz_mod_mat/test/main.c +++ b/src/fmpz_mod_mat/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
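The _fmpz_mod_add1/_fmpz_mod_sub1 fast paths above act on canonical one-word residues. A plain-C sketch of the underlying word operation they rely on, assuming both operands are already reduced modulo n; the function name add_mod1 is illustrative.

    #include "flint.h"

    /* Sketch: (b + c) mod n for canonical residues b, c < n, any word-size n. */
    static ulong add_mod1(ulong b, ulong c, ulong n)
    {
        ulong a = b + c;       /* may wrap around the word */
        if (a < b || a >= n)   /* wrapped, or landed in [n, 2n) */
            a -= n;
        return a;
    }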
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-add_sub_neg.c" diff --git a/src/fmpz_mod_mpoly/mul_johnson.c b/src/fmpz_mod_mpoly/mul_johnson.c index c39fb6dfc0..7c89d96525 100644 --- a/src/fmpz_mod_mpoly/mul_johnson.c +++ b/src/fmpz_mod_mpoly/mul_johnson.c @@ -32,7 +32,7 @@ fmpz_mod_ctx_get_modulus_mpz_read_only(mpz_t m, const fmpz_mod_ctx_t ctx) { m->_mp_size = 1; m->_mp_alloc = 1; - m->_mp_d = (mp_ptr) p; + m->_mp_d = (nn_ptr) p; } } @@ -57,8 +57,8 @@ void _fmpz_mod_mpoly_mul_johnson1( ulong * Aexps = A->exps; slong Alen; mpz_t t, acc, modulus; - mp_limb_t * Bcoeffs_packed = NULL; - mp_limb_t * Ccoeffs_packed = NULL; + ulong * Bcoeffs_packed = NULL; + ulong * Ccoeffs_packed = NULL; TMP_INIT; TMP_START; @@ -78,7 +78,7 @@ void _fmpz_mod_mpoly_mul_johnson1( if (Blen > 8*n) { - Bcoeffs_packed = FLINT_ARRAY_ALLOC(n*(Blen + Clen), mp_limb_t); + Bcoeffs_packed = FLINT_ARRAY_ALLOC(n*(Blen + Clen), ulong); Ccoeffs_packed = Bcoeffs_packed + n*Blen; for (i = 0; i < Blen; i++) fmpz_get_ui_array(Bcoeffs_packed + n*i, n, Bcoeffs + i); @@ -106,7 +106,7 @@ void _fmpz_mod_mpoly_mul_johnson1( if (Bcoeffs_packed) { - mp_limb_t * acc_d, * t_d; + ulong * acc_d, * t_d; slong acc_len; FLINT_MPZ_REALLOC(acc, 2*n+1); @@ -244,8 +244,8 @@ static void _fmpz_mod_mpoly_mul_johnson( ulong * Aexps = A->exps; slong Alen; mpz_t t, acc, modulus; - mp_limb_t * Bcoeffs_packed = NULL; - mp_limb_t * Ccoeffs_packed = NULL; + ulong * Bcoeffs_packed = NULL; + ulong * Ccoeffs_packed = NULL; TMP_INIT; FLINT_ASSERT(Blen > 0); @@ -281,7 +281,7 @@ static void _fmpz_mod_mpoly_mul_johnson( if (Blen > 8*n) { - Bcoeffs_packed = FLINT_ARRAY_ALLOC(n*(Blen + Clen), mp_limb_t); + Bcoeffs_packed = FLINT_ARRAY_ALLOC(n*(Blen + Clen), ulong); Ccoeffs_packed = Bcoeffs_packed + n*Blen; for (i = 0; i < Blen; i++) fmpz_get_ui_array(Bcoeffs_packed + n*i, n, Bcoeffs + i); @@ -315,7 +315,7 @@ static void _fmpz_mod_mpoly_mul_johnson( if (Bcoeffs_packed) { - mp_limb_t * acc_d, * t_d; + ulong * acc_d, * t_d; slong acc_len; FLINT_MPZ_REALLOC(acc, 2*n+1); diff --git a/src/fmpz_mod_mpoly/test/main.c b/src/fmpz_mod_mpoly/test/main.c index cfa2187034..c29f6419ec 100644 --- a/src/fmpz_mod_mpoly/test/main.c +++ b/src/fmpz_mod_mpoly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
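For multi-word moduli, the mul_johnson hunks above pack each reduced, nonnegative coefficient into a fixed count of n words via fmpz_get_ui_array so the heap loop can accumulate with delayed reduction. A hedged sketch of that packing step; pack_coeffs is an illustrative name.

    #include "flint.h"
    #include "fmpz.h"

    /* Sketch: flatten len nonnegative coefficients into n words each,
       zero-padded, in one contiguous allocation. */
    static ulong * pack_coeffs(const fmpz * coeffs, slong len, slong n)
    {
        slong i;
        ulong * packed = FLINT_ARRAY_ALLOC(n * len, ulong);

        for (i = 0; i < len; i++)
            fmpz_get_ui_array(packed + n * i, n, coeffs + i);

        return packed;
    }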
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-add_sub.c" diff --git a/src/fmpz_mod_mpoly/test/t-divides.c b/src/fmpz_mod_mpoly/test/t-divides.c index 4e9a141d96..bd799fc1a5 100644 --- a/src/fmpz_mod_mpoly/test/t-divides.c +++ b/src/fmpz_mod_mpoly/test/t-divides.c @@ -94,7 +94,7 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides, state) fmpz_mod_mpoly_ctx_t ctx; fmpz_mod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; slong n; fmpz_mod_mpoly_ctx_init_rand_bits_prime(ctx, state, 6, 200); @@ -110,9 +110,9 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 150/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); @@ -164,7 +164,7 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides, state) fmpz_mod_mpoly_ctx_t ctx; fmpz_mod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; fmpz * shifts, * strides; slong n; @@ -181,9 +181,9 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 20/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); shifts = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); strides = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); for (j = 0; j < ctx->minfo->nvars; j++) @@ -252,7 +252,7 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides, state) fmpz_mod_mpoly_ctx_t ctx; fmpz_mod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; slong n; fmpz_mod_mpoly_ctx_init_rand_bits_prime(ctx, state, 6, 200); @@ -268,9 +268,9 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 100/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); @@ -323,7 +323,7 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides, state) fmpz_mod_mpoly_ctx_t ctx; 
fmpz_mod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; slong n; fmpz_mod_mpoly_ctx_init_rand_bits_prime(ctx, state, 6, 200); @@ -339,9 +339,9 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 100/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); diff --git a/src/fmpz_mod_mpoly/test/t-divides_dense.c b/src/fmpz_mod_mpoly/test/t-divides_dense.c index 40431258f5..7fee337f46 100644 --- a/src/fmpz_mod_mpoly/test/t-divides_dense.c +++ b/src/fmpz_mod_mpoly/test/t-divides_dense.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides_dense, state) fmpz_mod_mpoly_ctx_t ctx; fmpz_mod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; slong n; fmpz_mod_mpoly_ctx_init_rand_bits_prime(ctx, state, 6, 200); @@ -39,9 +39,9 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides_dense, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 100/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); @@ -92,7 +92,7 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides_dense, state) fmpz_mod_mpoly_ctx_t ctx; fmpz_mod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; slong n; fmpz_mod_mpoly_ctx_init_rand_bits_prime(ctx, state, 6, 200); @@ -108,9 +108,9 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides_dense, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 20/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); @@ -160,7 +160,7 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides_dense, state) fmpz_mod_mpoly_ctx_t ctx; fmpz_mod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; slong n; fmpz_mod_mpoly_ctx_init_rand_bits_prime(ctx, 
state, 6, 200); @@ -176,9 +176,9 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides_dense, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 100/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); @@ -230,7 +230,7 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides_dense, state) fmpz_mod_mpoly_ctx_t ctx; fmpz_mod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; slong n; fmpz_mod_mpoly_ctx_init_rand_bits_prime(ctx, state, 6, 200); @@ -246,9 +246,9 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides_dense, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 100/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); diff --git a/src/fmpz_mod_mpoly/test/t-divides_monagan_pearce.c b/src/fmpz_mod_mpoly/test/t-divides_monagan_pearce.c index c73e6a4e8c..6cddd72e73 100644 --- a/src/fmpz_mod_mpoly/test/t-divides_monagan_pearce.c +++ b/src/fmpz_mod_mpoly/test/t-divides_monagan_pearce.c @@ -94,7 +94,7 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides_monagan_pearce, state) fmpz_mod_mpoly_ctx_t ctx; fmpz_mod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; slong n; fmpz_mod_mpoly_ctx_init_rand_bits_prime(ctx, state, 6, 200); @@ -110,9 +110,9 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides_monagan_pearce, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 150/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); @@ -164,7 +164,7 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides_monagan_pearce, state) fmpz_mod_mpoly_ctx_t ctx; fmpz_mod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; fmpz * shifts, * strides; slong n; @@ -181,9 +181,9 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides_monagan_pearce, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 20/n; - exp_bound = (mp_limb_t 
*) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); shifts = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); strides = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); for (j = 0; j < ctx->minfo->nvars; j++) @@ -252,7 +252,7 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides_monagan_pearce, state) fmpz_mod_mpoly_ctx_t ctx; fmpz_mod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; slong n; fmpz_mod_mpoly_ctx_init_rand_bits_prime(ctx, state, 6, 200); @@ -268,9 +268,9 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides_monagan_pearce, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 100/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); @@ -323,7 +323,7 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides_monagan_pearce, state) fmpz_mod_mpoly_ctx_t ctx; fmpz_mod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; slong n; fmpz_mod_mpoly_ctx_init_rand_bits_prime(ctx, state, 6, 200); @@ -339,9 +339,9 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_divides_monagan_pearce, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 100/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); diff --git a/src/fmpz_mod_mpoly/test/t-gcd_cofactors.c b/src/fmpz_mod_mpoly/test/t-gcd_cofactors.c index ba81efbfee..eeda2b71c5 100644 --- a/src/fmpz_mod_mpoly/test/t-gcd_cofactors.c +++ b/src/fmpz_mod_mpoly/test/t-gcd_cofactors.c @@ -524,7 +524,7 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_gcd_cofactors, state) fmpz_mod_mpoly_ctx_t ctx; fmpz_mod_mpoly_t a, b, g, abar, bbar, t1, t2; slong len, len1, len2; - mp_limb_t exp_bound, exp_bound1, exp_bound2; + ulong exp_bound, exp_bound1, exp_bound2; fmpz_mod_mpoly_ctx_init_rand_bits_prime(ctx, state, 10, 150); @@ -629,7 +629,7 @@ TEST_FUNCTION_START(fmpz_mod_mpoly_gcd_cofactors, state) { fmpz_mod_mpoly_ctx_t ctx; fmpz_mod_mpoly_t a, b, g, abar, bbar, t; - mp_limb_t rlimb; + ulong rlimb; flint_bitcnt_t newbits; slong len, len1, len2; slong degbound; @@ -843,7 +843,7 @@ 
TEST_FUNCTION_START(fmpz_mod_mpoly_gcd_cofactors, state) { fmpz_mod_mpoly_ctx_t ctx; fmpz_mod_mpoly_t a, b, g, abar, bbar, t; - mp_limb_t rlimb; + ulong rlimb; flint_bitcnt_t newbits; slong len1, len2, len3, len4; ulong degbounds1[4]; diff --git a/src/fmpz_mod_mpoly_factor/fmpz_mod_bpoly.c b/src/fmpz_mod_mpoly_factor/fmpz_mod_bpoly.c index bf8eb483f2..cdf4a56e49 100644 --- a/src/fmpz_mod_mpoly_factor/fmpz_mod_bpoly.c +++ b/src/fmpz_mod_mpoly_factor/fmpz_mod_bpoly.c @@ -603,7 +603,7 @@ int fmpz_mod_bpoly_divides( { for (j = order - 1; j >= 0; j--) { - mp_limb_t qc = n_poly_get_coeff(q, order*i + j); + ulong qc = n_poly_get_coeff(q, order*i + j); if (qc == 0) continue; diff --git a/src/fmpz_mod_mpoly_factor/test/main.c b/src/fmpz_mod_mpoly_factor/test/main.c index 0661011037..b2de98e03a 100644 --- a/src/fmpz_mod_mpoly_factor/test/main.c +++ b/src/fmpz_mod_mpoly_factor/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-factor.c" diff --git a/src/fmpz_mod_poly/fit_length.c b/src/fmpz_mod_poly/fit_length.c index f7677fd8de..583fac5568 100644 --- a/src/fmpz_mod_poly/fit_length.c +++ b/src/fmpz_mod_poly/fit_length.c @@ -19,7 +19,7 @@ void _fmpz_mod_poly_fit_length(fmpz_mod_poly_t f, slong len) { slong alloc = FLINT_MAX(2*f->alloc, len); f->coeffs = FLINT_ARRAY_REALLOC(f->coeffs, alloc, fmpz); - flint_mpn_zero((mp_ptr) (f->coeffs + f->alloc), alloc - f->alloc); + flint_mpn_zero((nn_ptr) (f->coeffs + f->alloc), alloc - f->alloc); f->alloc = alloc; } } diff --git a/src/fmpz_mod_poly/inv_series_newton_f.c b/src/fmpz_mod_poly/inv_series_newton_f.c index 9aceaa3020..e906657930 100644 --- a/src/fmpz_mod_poly/inv_series_newton_f.c +++ b/src/fmpz_mod_poly/inv_series_newton_f.c @@ -34,7 +34,7 @@ void fmpz_mod_poly_inv_series_f(fmpz_t f, fmpz_mod_poly_t Qinv, Qcopy = (fmpz *) flint_malloc(n * sizeof(fmpz)); for (i = 0; i < Q->length; i++) Qcopy[i] = Q->coeffs[i]; - flint_mpn_zero((mp_ptr) Qcopy + i, n - i); + flint_mpn_zero((nn_ptr) Qcopy + i, n - i); Qalloc = 1; } diff --git a/src/fmpz_mod_poly/radix.c b/src/fmpz_mod_poly/radix.c index 7b64a803ea..0052a6932b 100644 --- a/src/fmpz_mod_poly/radix.c +++ b/src/fmpz_mod_poly/radix.c @@ -178,7 +178,7 @@ void fmpz_mod_poly_radix(fmpz_mod_poly_struct **B, const fmpz_mod_poly_t F, G = flint_malloc(lenG * sizeof(fmpz)); for (i = 0; i < lenF; i++) G[i] = F->coeffs[i]; - flint_mpn_zero((mp_ptr) G + lenF, lenG - lenF); + flint_mpn_zero((nn_ptr) G + lenF, lenG - lenF); T = t ? 
_fmpz_vec_init(t * degR) : NULL; } diff --git a/src/fmpz_mod_poly/realloc.c b/src/fmpz_mod_poly/realloc.c index 1a75583fca..018503bd6a 100644 --- a/src/fmpz_mod_poly/realloc.c +++ b/src/fmpz_mod_poly/realloc.c @@ -34,7 +34,7 @@ void fmpz_mod_poly_realloc(fmpz_mod_poly_t poly, slong alloc, poly->coeffs = (fmpz *) flint_realloc(poly->coeffs, alloc * sizeof(fmpz)); if (alloc > poly->alloc) - flint_mpn_zero((mp_ptr) (poly->coeffs + poly->alloc), + flint_mpn_zero((nn_ptr) (poly->coeffs + poly->alloc), alloc - poly->alloc); } else /* Nothing allocated already so do it now */ diff --git a/src/fmpz_mod_poly/set_get_coeff.c b/src/fmpz_mod_poly/set_get_coeff.c index 2c06e9245c..08a35999ea 100644 --- a/src/fmpz_mod_poly/set_get_coeff.c +++ b/src/fmpz_mod_poly/set_get_coeff.c @@ -24,7 +24,7 @@ void fmpz_mod_poly_set_coeff_si(fmpz_mod_poly_t poly, slong n, slong x, if (n + 1 > poly->length) { - flint_mpn_zero((mp_ptr) (poly->coeffs + poly->length), n - poly->length); + flint_mpn_zero((nn_ptr) (poly->coeffs + poly->length), n - poly->length); poly->length = n + 1; } @@ -49,7 +49,7 @@ void fmpz_mod_poly_set_coeff_ui(fmpz_mod_poly_t poly, slong n, ulong x, if (n + 1 > poly->length) { - flint_mpn_zero((mp_ptr) (poly->coeffs + poly->length), n - poly->length); + flint_mpn_zero((nn_ptr) (poly->coeffs + poly->length), n - poly->length); poly->length = n + 1; } @@ -77,7 +77,7 @@ void fmpz_mod_poly_set_coeff_fmpz(fmpz_mod_poly_t poly, slong n, const fmpz_t x, if (n + 1 > poly->length) { - flint_mpn_zero((mp_ptr) (poly->coeffs + poly->length), n - poly->length); + flint_mpn_zero((nn_ptr) (poly->coeffs + poly->length), n - poly->length); poly->length = n + 1; } diff --git a/src/fmpz_mod_poly/test/main.c b/src/fmpz_mod_poly/test/main.c index 514511f80a..00cf0df871 100644 --- a/src/fmpz_mod_poly/test/main.c +++ b/src/fmpz_mod_poly/test/main.c @@ -20,9 +20,6 @@ # undef ulong #endif -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/fmpz_mod_poly_factor/test/main.c b/src/fmpz_mod_poly_factor/test/main.c index 00e6d2ffe3..dc7fddc0c3 100644 --- a/src/fmpz_mod_poly_factor/test/main.c +++ b/src/fmpz_mod_poly_factor/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
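The fit_length/realloc/set_coeff hunks above zero-initialise freshly allocated fmpz slots by zeroing raw words, which works because an fmpz holding the value 0 is represented by the word 0. A minimal sketch under that assumption; grow_coeffs is an illustrative name.

    #include "flint.h"
    #include "fmpz.h"

    /* Sketch: enlarge an fmpz coefficient array from old_alloc to new_alloc
       entries and leave the new entries initialised to the integer 0. */
    static fmpz * grow_coeffs(fmpz * coeffs, slong old_alloc, slong new_alloc)
    {
        coeffs = flint_realloc(coeffs, new_alloc * sizeof(fmpz));
        flint_mpn_zero((nn_ptr) (coeffs + old_alloc), new_alloc - old_alloc);
        return coeffs;
    }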
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-factor_berlekamp.c" diff --git a/src/fmpz_mod_poly_factor/test/t-is_squarefree.c b/src/fmpz_mod_poly_factor/test/t-is_squarefree.c index 13469c33f8..446633c387 100644 --- a/src/fmpz_mod_poly_factor/test/t-is_squarefree.c +++ b/src/fmpz_mod_poly_factor/test/t-is_squarefree.c @@ -28,7 +28,7 @@ TEST_FUNCTION_START(fmpz_mod_poly_factor_is_squarefree, state) { fmpz_mod_poly_t poly, Q, R, t; fmpz_t modulus; - mp_limb_t mod; + ulong mod; slong i, num_factors, exp, max_exp; int v, result; diff --git a/src/fmpz_mod_types.h b/src/fmpz_mod_types.h index 1c088ff499..0cdb3f5025 100644 --- a/src/fmpz_mod_types.h +++ b/src/fmpz_mod_types.h @@ -64,7 +64,7 @@ typedef struct ulong * exps; slong length; flint_bitcnt_t bits; /* number of bits per exponent */ - slong coeffs_alloc; /* abs size in mp_limb_t units */ + slong coeffs_alloc; /* abs size in ulong units */ slong exps_alloc; /* abs size in ulong units */ } fmpz_mod_mpoly_struct; diff --git a/src/fmpz_mpoly.h b/src/fmpz_mpoly.h index 7d80c1330a..1282ce497d 100644 --- a/src/fmpz_mpoly.h +++ b/src/fmpz_mpoly.h @@ -549,8 +549,8 @@ int _fmpz_pow_fmpz_is_not_feasible(flint_bitcnt_t bbits, const fmpz_t e); int fmpz_mpoly_evaluate_all_fmpz(fmpz_t ev, const fmpz_mpoly_t A, fmpz * const * vals, const fmpz_mpoly_ctx_t ctx); -mp_limb_t fmpz_mpoly_evaluate_all_nmod(const fmpz_mpoly_t A, - const mp_limb_t * alphas, const fmpz_mpoly_ctx_t ctx, nmod_t fpctx); +ulong fmpz_mpoly_evaluate_all_nmod(const fmpz_mpoly_t A, + const ulong * alphas, const fmpz_mpoly_ctx_t ctx, nmod_t fpctx); void fmpz_mpoly_evaluate_all_fmpz_mod(fmpz_t ev, const fmpz_mpoly_t A, const fmpz * alphas, diff --git a/src/fmpz_mpoly/evaluate_all_nmod.c b/src/fmpz_mpoly/evaluate_all_nmod.c index 6827805e6b..cb8b7fcd18 100644 --- a/src/fmpz_mpoly/evaluate_all_nmod.c +++ b/src/fmpz_mpoly/evaluate_all_nmod.c @@ -13,18 +13,18 @@ #include "nmod_mpoly.h" #include "fmpz_mpoly.h" -mp_limb_t fmpz_mpoly_evaluate_all_nmod( +ulong fmpz_mpoly_evaluate_all_nmod( const fmpz_mpoly_t A, - const mp_limb_t * alphas, + const ulong * alphas, const fmpz_mpoly_ctx_t ctx, nmod_t fpctx) { - mp_limb_t eval, * t; + ulong eval, * t; TMP_INIT; TMP_START; - t = TMP_ARRAY_ALLOC(A->length, mp_limb_t); + t = TMP_ARRAY_ALLOC(A->length, ulong); _fmpz_vec_get_nmod_vec(t, A->coeffs, A->length, fpctx); eval = _nmod_mpoly_eval_all_ui(t, A->exps, A->length, A->bits, alphas, ctx->minfo, fpctx); diff --git a/src/fmpz_mpoly/pow_fps.c b/src/fmpz_mpoly/pow_fps.c index 5614d7bb4b..01275e8b7e 100644 --- a/src/fmpz_mpoly/pow_fps.c +++ b/src/fmpz_mpoly/pow_fps.c @@ -253,7 +253,7 @@ static slong _fmpz_mpoly_pow_fps1( { Gexps = FLINT_ARRAY_REALLOC(Gexps, 2*Galloc, ulong); Gcoeffs = FLINT_ARRAY_REALLOC(Gcoeffs, 2*Galloc, fmpz); - flint_mpn_zero((mp_ptr) Gcoeffs + Galloc, Galloc); + flint_mpn_zero((nn_ptr) Gcoeffs + Galloc, Galloc); Galloc *= 2; } } @@ -490,7 +490,7 @@ static slong _fmpz_mpoly_pow_fps( { Gexps = FLINT_ARRAY_REALLOC(Gexps, 2*N*Galloc, ulong); Gcoeffs = FLINT_ARRAY_REALLOC(Gcoeffs, 2*Galloc, fmpz); - flint_mpn_zero((mp_ptr) Gcoeffs + Galloc, Galloc); + flint_mpn_zero((nn_ptr) Gcoeffs + Galloc, Galloc); Galloc *= 2; } } diff --git a/src/fmpz_mpoly/quasidiv_heap.c b/src/fmpz_mpoly/quasidiv_heap.c index 609832494a..888aac51b2 100644 --- a/src/fmpz_mpoly/quasidiv_heap.c +++ b/src/fmpz_mpoly/quasidiv_heap.c @@ -104,7 +104,7 @@ slong _fmpz_mpoly_quasidiv_heap1(fmpz_t scale, len = FLINT_MAX(q_len + 1, 2*qs_alloc); qs = (fmpz *) 
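fmpz_mpoly_evaluate_all_nmod, whose signature changes above, reduces the coefficients modulo a word prime and evaluates at word-size points. A small usage sketch under the new signature; eval_mod_p and its arguments are illustrative.

    #include "flint.h"
    #include "nmod.h"
    #include "fmpz_mpoly.h"

    /* Sketch: evaluate A at alphas[0..nvars-1] modulo the word prime p. */
    static ulong eval_mod_p(const fmpz_mpoly_t A, const ulong * alphas,
                            const fmpz_mpoly_ctx_t ctx, ulong p)
    {
        nmod_t mod;
        nmod_init(&mod, p);
        return fmpz_mpoly_evaluate_all_nmod(A, alphas, ctx, mod);
    }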
flint_realloc(qs, len*sizeof(fmpz)); if (len > qs_alloc) - flint_mpn_zero((mp_ptr) (qs + qs_alloc), len - qs_alloc); + flint_mpn_zero((nn_ptr) (qs + qs_alloc), len - qs_alloc); qs_alloc = len; } @@ -485,7 +485,7 @@ slong _fmpz_mpoly_quasidiv_heap(fmpz_t scale, len = FLINT_MAX(q_len + 1, 2*qs_alloc); qs = (fmpz *) flint_realloc(qs, len*sizeof(fmpz)); if (len > qs_alloc) - flint_mpn_zero((mp_ptr) (qs + qs_alloc), len - qs_alloc); + flint_mpn_zero((nn_ptr) (qs + qs_alloc), len - qs_alloc); qs_alloc = len; } diff --git a/src/fmpz_mpoly/quasidivrem_heap.c b/src/fmpz_mpoly/quasidivrem_heap.c index 790426ebfd..91ce5a652a 100644 --- a/src/fmpz_mpoly/quasidivrem_heap.c +++ b/src/fmpz_mpoly/quasidivrem_heap.c @@ -108,7 +108,7 @@ slong _fmpz_mpoly_quasidivrem_heap1(fmpz_t scale, slong * lenr, len = FLINT_MAX(q_len + 1, 2*qs_alloc); qs = (fmpz *) flint_realloc(qs, len*sizeof(fmpz)); if (len > qs_alloc) - flint_mpn_zero((mp_ptr) (qs + qs_alloc), len - qs_alloc); + flint_mpn_zero((nn_ptr) (qs + qs_alloc), len - qs_alloc); qs_alloc = len; } /* make sure remainder array has space for r_len + 1 entries */ @@ -119,7 +119,7 @@ slong _fmpz_mpoly_quasidivrem_heap1(fmpz_t scale, slong * lenr, len = FLINT_MAX(r_len + 1, 2*rs_alloc); rs = (fmpz *) flint_realloc(rs, len*sizeof(fmpz)); if (len > rs_alloc) - flint_mpn_zero((mp_ptr) (rs + rs_alloc), len - rs_alloc); + flint_mpn_zero((nn_ptr) (rs + rs_alloc), len - rs_alloc); rs_alloc = len; } @@ -470,7 +470,7 @@ slong _fmpz_mpoly_quasidivrem_heap(fmpz_t scale, slong * lenr, len = FLINT_MAX(q_len + 1, 2*qs_alloc); qs = (fmpz *) flint_realloc(qs, len*sizeof(fmpz)); if (len > qs_alloc) - flint_mpn_zero((mp_ptr) (qs + qs_alloc), len - qs_alloc); + flint_mpn_zero((nn_ptr) (qs + qs_alloc), len - qs_alloc); qs_alloc = len; } /* make sure remainder array has space for r_len + 1 entries */ @@ -481,7 +481,7 @@ slong _fmpz_mpoly_quasidivrem_heap(fmpz_t scale, slong * lenr, len = FLINT_MAX(r_len + 1, 2*rs_alloc); rs = (fmpz *) flint_realloc(rs, len*sizeof(fmpz)); if (len > rs_alloc) - flint_mpn_zero((mp_ptr) (rs + rs_alloc), len - rs_alloc); + flint_mpn_zero((nn_ptr) (rs + rs_alloc), len - rs_alloc); rs_alloc = len; } diff --git a/src/fmpz_mpoly/quasidivrem_ideal_heap.c b/src/fmpz_mpoly/quasidivrem_ideal_heap.c index 132d4061ee..b053de4bdd 100644 --- a/src/fmpz_mpoly/quasidivrem_ideal_heap.c +++ b/src/fmpz_mpoly/quasidivrem_ideal_heap.c @@ -190,7 +190,7 @@ slong _fmpz_mpoly_quasidivrem_ideal_heap1(fmpz_t scale, fmpz_mpoly_struct ** pol { slong len = FLINT_MAX(q_len[w] + 1, 2*qs_alloc[w]); qs[w] = (fmpz *) flint_realloc(qs[w], len*sizeof(fmpz)); - flint_mpn_zero((mp_ptr) (qs[w] + qs_alloc[w]), len - qs_alloc[w]); + flint_mpn_zero((nn_ptr) (qs[w] + qs_alloc[w]), len - qs_alloc[w]); qs_alloc[w] = len; } @@ -232,7 +232,7 @@ slong _fmpz_mpoly_quasidivrem_ideal_heap1(fmpz_t scale, fmpz_mpoly_struct ** pol { slong len = FLINT_MAX(r_len + 1, 2*rs_alloc); rs = (fmpz *) flint_realloc(rs, len*sizeof(fmpz)); - flint_mpn_zero((mp_ptr) (rs + rs_alloc), len - rs_alloc); + flint_mpn_zero((nn_ptr) (rs + rs_alloc), len - rs_alloc); rs_alloc = len; } fmpz_set(r_coeff + r_len, acc_lg); @@ -509,7 +509,7 @@ slong _fmpz_mpoly_quasidivrem_ideal_heap(fmpz_t scale, fmpz_mpoly_struct ** poly { slong len = FLINT_MAX(q_len[w] + 1, 2*qs_alloc[w]); qs[w] = (fmpz *) flint_realloc(qs[w], len*sizeof(fmpz)); - flint_mpn_zero((mp_ptr) (qs[w] + qs_alloc[w]), len - qs_alloc[w]); + flint_mpn_zero((nn_ptr) (qs[w] + qs_alloc[w]), len - qs_alloc[w]); qs_alloc[w] = len; } @@ -553,7 +553,7 @@ slong 
_fmpz_mpoly_quasidivrem_ideal_heap(fmpz_t scale, fmpz_mpoly_struct ** poly { slong len = FLINT_MAX(r_len + 1, 2*rs_alloc); rs = (fmpz *) flint_realloc(rs, len*sizeof(fmpz)); - flint_mpn_zero((mp_ptr) (rs + rs_alloc), len - rs_alloc); + flint_mpn_zero((nn_ptr) (rs + rs_alloc), len - rs_alloc); rs_alloc = len; } fmpz_set(r_coeff + r_len, acc_lg); diff --git a/src/fmpz_mpoly/sqrt_heap.c b/src/fmpz_mpoly/sqrt_heap.c index 85055b7b76..53c088f60b 100644 --- a/src/fmpz_mpoly/sqrt_heap.c +++ b/src/fmpz_mpoly/sqrt_heap.c @@ -69,7 +69,7 @@ static mpz_srcptr _fmpz_mpoly_get_mpz_signed_uiuiui(ulong * sm, fmpz x, mpz_ptr /* try to prove that A is not a square */ static int _is_proved_not_square( int count, - mp_limb_t * p, + ulong * p, flint_rand_t state, const fmpz * Acoeffs, const ulong * Aexps, @@ -79,7 +79,7 @@ static int _is_proved_not_square( { int success = 0; slong i, N = mpoly_words_per_exp(Abits, mctx); - mp_limb_t eval, * alphas; + ulong eval, * alphas; nmod_t mod; ulong * t; TMP_INIT; @@ -99,7 +99,7 @@ static int _is_proved_not_square( /* try at most 3*count evaluations */ count *= 3; - alphas = (mp_limb_t *) TMP_ALLOC(mctx->nvars*sizeof(mp_limb_t)); + alphas = (ulong *) TMP_ALLOC(mctx->nvars*sizeof(ulong)); next_p: @@ -164,7 +164,7 @@ slong _fmpz_mpoly_sqrt_heap1( ulong acc_sm[3], acc_sm2[3], pp[3]; int lt_divides, q_rest_small; flint_rand_t heuristic_state; - mp_limb_t heuristic_p = UWORD(1) << (SMALL_FMPZ_BITCOUNT_MAX); + ulong heuristic_p = UWORD(1) << (SMALL_FMPZ_BITCOUNT_MAX); int heuristic_count = 0; ulong lc_abs = 0; /* 2*sqrt(lc) if it fits in ulong, otherwise 0 */ ulong lc_norm = 0; @@ -612,7 +612,7 @@ slong _fmpz_mpoly_sqrt_heap( ulong acc_sm[3], acc_sm2[3], pp[3]; int halves, use_heap, lt_divides, q_rest_small; flint_rand_t heuristic_state; - mp_limb_t heuristic_p = UWORD(1) << (SMALL_FMPZ_BITCOUNT_MAX); + ulong heuristic_p = UWORD(1) << (SMALL_FMPZ_BITCOUNT_MAX); int heuristic_count = 0; ulong lc_abs = 0; /* 2*sqrt(lc) if it fits in ulong, otherwise 0 */ ulong lc_norm = 0; diff --git a/src/fmpz_mpoly/test/main.c b/src/fmpz_mpoly/test/main.c index e7d7b12cc1..7ce8ca8a1f 100644 --- a/src/fmpz_mpoly/test/main.c +++ b/src/fmpz_mpoly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
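_is_proved_not_square above tries random evaluations of the candidate modulo a word prime: a single value that is a quadratic non-residue already certifies that the input is not a square. A hedged sketch of that certificate built on n_sqrtmod; the helper name is illustrative and it assumes p is an odd prime.

    #include "flint.h"
    #include "ulong_extras.h"

    /* Sketch: return 1 if v (an evaluation of the candidate, reduced mod the
       odd prime p) proves the candidate is not a square, 0 if inconclusive. */
    static int value_rules_out_square(ulong v, ulong p)
    {
        return v != 0 && n_sqrtmod(v, p) == 0;
    }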
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-add_sub.c" diff --git a/src/fmpz_mpoly/test/t-div_monagan_pearce.c b/src/fmpz_mpoly/test/t-div_monagan_pearce.c index a458a20a2d..3afd5662ea 100644 --- a/src/fmpz_mpoly/test/t-div_monagan_pearce.c +++ b/src/fmpz_mpoly/test/t-div_monagan_pearce.c @@ -113,7 +113,7 @@ TEST_FUNCTION_START(fmpz_mpoly_div_monagan_pearce, state) fmpz_mpoly_ctx_t ctx; fmpz_mpoly_t f, g, h, k, r; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; flint_bitcnt_t coeff_bits; fmpz * shifts, * strides; slong n; @@ -134,9 +134,9 @@ TEST_FUNCTION_START(fmpz_mpoly_div_monagan_pearce, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 500/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); shifts = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); strides = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); for (j = 0; j < ctx->minfo->nvars; j++) @@ -203,7 +203,7 @@ TEST_FUNCTION_START(fmpz_mpoly_div_monagan_pearce, state) fmpz_mpoly_ctx_t ctx; fmpz_mpoly_t f, g, h, r; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; flint_bitcnt_t coeff_bits; fmpz * shifts, * strides; slong n; @@ -223,9 +223,9 @@ TEST_FUNCTION_START(fmpz_mpoly_div_monagan_pearce, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 500/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); shifts = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); strides = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); for (j = 0; j < ctx->minfo->nvars; j++) @@ -290,7 +290,7 @@ TEST_FUNCTION_START(fmpz_mpoly_div_monagan_pearce, state) fmpz_mpoly_ctx_t ctx; fmpz_mpoly_t f, g, h, r; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; flint_bitcnt_t coeff_bits; fmpz * shifts, * strides; slong n; @@ -310,9 +310,9 @@ TEST_FUNCTION_START(fmpz_mpoly_div_monagan_pearce, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 500/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); shifts = (fmpz *) 
flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); strides = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); for (j = 0; j < ctx->minfo->nvars; j++) diff --git a/src/fmpz_mpoly/test/t-divides.c b/src/fmpz_mpoly/test/t-divides.c index 4defefef92..f0f571ce62 100644 --- a/src/fmpz_mpoly/test/t-divides.c +++ b/src/fmpz_mpoly/test/t-divides.c @@ -98,7 +98,7 @@ TEST_FUNCTION_START(fmpz_mpoly_divides, state) fmpz_mpoly_ctx_t ctx; fmpz_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2, coeff_bits; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2, coeff_bits; fmpz * shifts, * strides; slong n; @@ -117,9 +117,9 @@ TEST_FUNCTION_START(fmpz_mpoly_divides, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 20/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); shifts = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); strides = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); for (j = 0; j < ctx->minfo->nvars; j++) @@ -191,7 +191,7 @@ TEST_FUNCTION_START(fmpz_mpoly_divides, state) fmpz_mpoly_ctx_t ctx; fmpz_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; flint_bitcnt_t coeff_bits; slong n; @@ -210,9 +210,9 @@ TEST_FUNCTION_START(fmpz_mpoly_divides, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 100/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); @@ -266,7 +266,7 @@ TEST_FUNCTION_START(fmpz_mpoly_divides, state) fmpz_mpoly_ctx_t ctx; fmpz_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; flint_bitcnt_t coeff_bits; slong n; @@ -285,9 +285,9 @@ TEST_FUNCTION_START(fmpz_mpoly_divides, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 100/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); diff --git a/src/fmpz_mpoly/test/t-gcd.c b/src/fmpz_mpoly/test/t-gcd.c index 304941a1ea..4efd46d0bd 100644 --- a/src/fmpz_mpoly/test/t-gcd.c +++ b/src/fmpz_mpoly/test/t-gcd.c @@ -389,7 +389,7 
@@ TEST_FUNCTION_START(fmpz_mpoly_gcd, state) fmpz_mpoly_ctx_t ctx; fmpz_mpoly_t a, b, g, t1, t2; slong len, len1, len2; - mp_limb_t exp_bound, exp_bound1, exp_bound2; + ulong exp_bound, exp_bound1, exp_bound2; flint_bitcnt_t coeff_bits; fmpz_mpoly_ctx_init_rand(ctx, state, 10); @@ -492,7 +492,7 @@ TEST_FUNCTION_START(fmpz_mpoly_gcd, state) { fmpz_mpoly_ctx_t ctx; fmpz_mpoly_t a, b, g, t; - mp_limb_t rlimb; + ulong rlimb; flint_bitcnt_t coeff_bits, newbits; slong len, len1, len2; slong degbound; @@ -704,7 +704,7 @@ TEST_FUNCTION_START(fmpz_mpoly_gcd, state) { fmpz_mpoly_ctx_t ctx; fmpz_mpoly_t a, b, g, t; - mp_limb_t rlimb; + ulong rlimb; flint_bitcnt_t newbits; flint_bitcnt_t coeff_bits1, coeff_bits2, coeff_bits3, coeff_bits4; slong len1, len2, len3, len4; diff --git a/src/fmpz_mpoly/test/t-gcd_cofactors.c b/src/fmpz_mpoly/test/t-gcd_cofactors.c index b87030e952..ad0a8cbce2 100644 --- a/src/fmpz_mpoly/test/t-gcd_cofactors.c +++ b/src/fmpz_mpoly/test/t-gcd_cofactors.c @@ -529,7 +529,7 @@ TEST_FUNCTION_START(fmpz_mpoly_gcd_cofactors, state) fmpz_mpoly_ctx_t ctx; fmpz_mpoly_t a, b, g, abar, bbar, t1, t2; slong len, len1, len2; - mp_limb_t exp_bound, exp_bound1, exp_bound2; + ulong exp_bound, exp_bound1, exp_bound2; flint_bitcnt_t coeff_bits; fmpz_mpoly_ctx_init_rand(ctx, state, 10); @@ -640,7 +640,7 @@ TEST_FUNCTION_START(fmpz_mpoly_gcd_cofactors, state) { fmpz_mpoly_ctx_t ctx; fmpz_mpoly_t a, b, g, abar, bbar, t; - mp_limb_t rlimb; + ulong rlimb; flint_bitcnt_t coeff_bits, newbits; slong len, len1, len2; slong degbound; @@ -864,7 +864,7 @@ TEST_FUNCTION_START(fmpz_mpoly_gcd_cofactors, state) { fmpz_mpoly_ctx_t ctx; fmpz_mpoly_t a, b, g, abar, bbar, t; - mp_limb_t rlimb; + ulong rlimb; flint_bitcnt_t newbits; flint_bitcnt_t coeff_bits1, coeff_bits2, coeff_bits3, coeff_bits4; slong len1, len2, len3, len4; diff --git a/src/fmpz_mpoly/test/t-quasidiv_heap.c b/src/fmpz_mpoly/test/t-quasidiv_heap.c index e0475c653b..e0506728f9 100644 --- a/src/fmpz_mpoly/test/t-quasidiv_heap.c +++ b/src/fmpz_mpoly/test/t-quasidiv_heap.c @@ -85,7 +85,7 @@ TEST_FUNCTION_START(fmpz_mpoly_quasidiv_heap, state) fmpz_mpoly_ctx_t ctx; fmpz_mpoly_t f, g, h, k, r; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; flint_bitcnt_t coeff_bits; fmpz * shifts, * strides; slong n; @@ -108,9 +108,9 @@ TEST_FUNCTION_START(fmpz_mpoly_quasidiv_heap, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 200/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); shifts = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); strides = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); for (j = 0; j < ctx->minfo->nvars; j++) @@ -182,7 +182,7 @@ TEST_FUNCTION_START(fmpz_mpoly_quasidiv_heap, state) fmpz_mpoly_ctx_t ctx; fmpz_mpoly_t f, g, h, r; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; flint_bitcnt_t coeff_bits; fmpz * shifts, * strides; slong n; @@ -204,9 +204,9 @@ TEST_FUNCTION_START(fmpz_mpoly_quasidiv_heap, state) n = 
FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 200/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); shifts = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); strides = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); for (j = 0; j < ctx->minfo->nvars; j++) @@ -277,7 +277,7 @@ TEST_FUNCTION_START(fmpz_mpoly_quasidiv_heap, state) fmpz_mpoly_ctx_t ctx; fmpz_mpoly_t f, g, h, r; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; flint_bitcnt_t coeff_bits; fmpz * shifts, * strides; slong n; @@ -299,9 +299,9 @@ TEST_FUNCTION_START(fmpz_mpoly_quasidiv_heap, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 200/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); shifts = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); strides = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); for (j = 0; j < ctx->minfo->nvars; j++) diff --git a/src/fmpz_mpoly_factor/gcd_algo.c b/src/fmpz_mpoly_factor/gcd_algo.c index 2ff1e29b8a..2f0e106035 100644 --- a/src/fmpz_mpoly_factor/gcd_algo.c +++ b/src/fmpz_mpoly_factor/gcd_algo.c @@ -34,7 +34,7 @@ void fmpz_mpoly_evals( ulong * Amin_exp, ulong * Amax_exp, ulong * Astride, - mp_limb_t * alpha, + ulong * alpha, const fmpz_mpoly_ctx_t ctx) { slong i, j; @@ -47,8 +47,8 @@ void fmpz_mpoly_evals( slong N = mpoly_words_per_exp_sp(A->bits, ctx->minfo); ulong * Aexp = A->exps; fmpz * Acoeff = A->coeffs; - mp_limb_t meval; - mp_limb_t t; + ulong meval; + ulong t; FLINT_ASSERT(A->bits <= FLINT_BITS); @@ -82,20 +82,20 @@ void fmpz_mpoly_evals( if (use_direct_LUT) { slong off; - mp_limb_t * LUT, ** LUTvalue, ** LUTvalueinv; + ulong * LUT, ** LUTvalue, ** LUTvalueinv; /* value of powers of alpha[j] */ - LUT = (mp_limb_t *) flint_malloc(2*total_length*sizeof(mp_limb_t)); + LUT = (ulong *) flint_malloc(2*total_length*sizeof(ulong)); /* pointers into LUT */ - LUTvalue = (mp_limb_t **) flint_malloc(nvars*sizeof(mp_limb_t *)); - LUTvalueinv = (mp_limb_t **) flint_malloc(nvars*sizeof(mp_limb_t *)); + LUTvalue = (ulong **) flint_malloc(nvars*sizeof(ulong *)); + LUTvalueinv = (ulong **) flint_malloc(nvars*sizeof(ulong *)); off = 0; for (j = 0; j < nvars; j++) { ulong k; - mp_limb_t alphainvj = nmod_inv(alpha[j], (out + 0)->mod); + ulong alphainvj = nmod_inv(alpha[j], (out + 0)->mod); LUTvalue[j] = LUT + off; LUTvalueinv[j] = LUT + total_length + off; @@ -154,17 +154,17 @@ void fmpz_mpoly_evals( slong LUTlen; ulong * LUTmask; slong * LUToffset, * LUTvar; - mp_limb_t * LUTvalue, * LUTvalueinv; - mp_limb_t * vieval; - mp_limb_t t, xpoweval, xinvpoweval; + ulong * LUTvalue, * LUTvalueinv; + ulong * vieval; + ulong t, xpoweval, xinvpoweval; LUToffset = 
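fmpz_mpoly_evals above builds lookup tables of the powers alpha^k and alpha^{-k} modulo the current prime so each monomial can be evaluated by table lookups. A minimal sketch of filling such a table with nmod_inv/nmod_mul, assuming alpha is invertible modulo the prime; fill_power_table is an illustrative name.

    #include "flint.h"
    #include "nmod.h"

    /* Sketch: powtab[k-1] = alpha^k, powinvtab[k-1] = alpha^(-k) mod mod.n,
       for k = 1..d (d >= 1), assuming 0 < alpha < mod.n with mod.n prime. */
    static void fill_power_table(ulong * powtab, ulong * powinvtab, slong d,
                                 ulong alpha, nmod_t mod)
    {
        slong k;
        ulong alphainv = nmod_inv(alpha, mod);

        powtab[0] = alpha;
        powinvtab[0] = alphainv;
        for (k = 1; k < d; k++)
        {
            powtab[k] = nmod_mul(powtab[k - 1], alpha, mod);
            powinvtab[k] = nmod_mul(powinvtab[k - 1], alphainv, mod);
        }
    }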
(slong *) flint_malloc(N*FLINT_BITS*sizeof(slong)); LUTmask = (ulong *) flint_malloc(N*FLINT_BITS*sizeof(ulong)); - LUTvalue = (mp_limb_t *) flint_malloc(N*FLINT_BITS*sizeof(mp_limb_t)); + LUTvalue = (ulong *) flint_malloc(N*FLINT_BITS*sizeof(ulong)); LUTvar = (slong *) flint_malloc(N*FLINT_BITS*sizeof(slong)); - LUTvalueinv = (mp_limb_t *) flint_malloc(N*FLINT_BITS*sizeof(mp_limb_t)); + LUTvalueinv = (ulong *) flint_malloc(N*FLINT_BITS*sizeof(ulong)); - vieval = (mp_limb_t *) flint_malloc(nvars*sizeof(mp_limb_t)); + vieval = (ulong *) flint_malloc(nvars*sizeof(ulong)); LUTlen = 0; for (j = nvars - 1; j >= 0; j--) @@ -244,8 +244,8 @@ void _set_estimates( slong i, j; nmod_poly_t Geval; nmod_poly_struct * Aevals, * Bevals; - mp_limb_t p = UWORD(1) << (FLINT_BITS - 1); - mp_limb_t * alpha; + ulong p = UWORD(1) << (FLINT_BITS - 1); + ulong * alpha; flint_rand_t randstate; slong ignore_limit; int * ignore; @@ -253,7 +253,7 @@ void _set_estimates( flint_rand_init(randstate); ignore = (int *) flint_malloc(ctx->minfo->nvars*sizeof(int)); - alpha = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + alpha = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); Aevals = (nmod_poly_struct *) flint_malloc( ctx->minfo->nvars*sizeof(nmod_poly_struct)); Bevals = (nmod_poly_struct *) flint_malloc( diff --git a/src/fmpz_mpoly_factor/gcd_brown.c b/src/fmpz_mpoly_factor/gcd_brown.c index b4fedec3fb..a971c8660a 100644 --- a/src/fmpz_mpoly_factor/gcd_brown.c +++ b/src/fmpz_mpoly_factor/gcd_brown.c @@ -78,7 +78,7 @@ int fmpz_mpolyl_gcd_brown( int success; fmpz_t bound; slong offset, shift; - mp_limb_t p, gammared; + ulong p, gammared; fmpz_t gamma, modulus; fmpz_t gnm, gns, anm, ans, bnm, bns; fmpz_t cA, cB, cG, cAbar, cBbar; diff --git a/src/fmpz_mpoly_factor/gcd_brown_threaded.c b/src/fmpz_mpoly_factor/gcd_brown_threaded.c index 6b5e124aed..a62c4e156a 100644 --- a/src/fmpz_mpoly_factor/gcd_brown_threaded.c +++ b/src/fmpz_mpoly_factor/gcd_brown_threaded.c @@ -23,7 +23,7 @@ typedef struct { volatile int gcd_is_one; - volatile mp_limb_t p; + volatile ulong p; #if FLINT_USES_PTHREAD pthread_mutex_t mutex; #endif @@ -99,7 +99,7 @@ static void _splitworker(void * varg) slong N = mpoly_words_per_exp_sp(bits, ctx->minfo); slong offset, shift; int success; - mp_limb_t p, gammared; + ulong p, gammared; nmod_poly_stack_t Sp; mpoly_gen_offset_shift_sp(&offset, &shift, diff --git a/src/fmpz_mpoly_factor/gcd_zippel.c b/src/fmpz_mpoly_factor/gcd_zippel.c index 18490e15be..71449158b0 100644 --- a/src/fmpz_mpoly_factor/gcd_zippel.c +++ b/src/fmpz_mpoly_factor/gcd_zippel.c @@ -67,7 +67,7 @@ int fmpz_mpolyl_gcd_zippel( flint_bitcnt_t bits = G->bits; int success, changed; slong i, j, Gdegbound, Gdeg, req_zip_images; - mp_limb_t p, t, gammap; + ulong p, t, gammap; fmpz_t c, gamma, modulus; nmod_mpoly_t Ap, Bp, Gp, Abarp, Bbarp; nmod_mpoly_ctx_t ctxp; diff --git a/src/fmpz_mpoly_factor/gcd_zippel2.c b/src/fmpz_mpoly_factor/gcd_zippel2.c index d39cb5990c..36d1b6b18e 100644 --- a/src/fmpz_mpoly_factor/gcd_zippel2.c +++ b/src/fmpz_mpoly_factor/gcd_zippel2.c @@ -75,7 +75,7 @@ static void mpoly2_nmod_monomial_evals( slong start, stop, i, j, k, n; slong e0, e1; slong nvars = mctx->nvars; - mp_limb_t * p; + ulong * p; ulong mask = (-UWORD(1)) >> (FLINT_BITS - Abits); slong N = mpoly_words_per_exp_sp(Abits, mctx); slong * off, * shift; @@ -145,7 +145,7 @@ static void mpoly_nmod_monomial_evals( nmod_t fpctx) { slong i, k; - mp_limb_t * p; + ulong * p; ulong mask = (-UWORD(1)) >> (FLINT_BITS - Abits); slong N = 
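_fmpz_vec_crt_nmod above folds one more word-prime image into the multi-modular reconstruction of a coefficient vector. A hedged sketch of that CRT step using the public fmpz_CRT_ui; the wrapper below and its signature are illustrative, not FLINT's internal interface, and it omits the bit-size bookkeeping the internal routine performs.

    #include "flint.h"
    #include "fmpz.h"

    /* Sketch: combine residues b[i] mod p into a[i] mod am, assuming
       gcd(am, p) = 1, then replace the running modulus am by am*p.
       The final argument 1 requests balanced (signed) representatives. */
    static void crt_fold_image(fmpz * a, fmpz_t am, const ulong * b,
                               slong len, ulong p)
    {
        slong i;

        for (i = 0; i < len; i++)
            fmpz_CRT_ui(a + i, a + i, am, b[i], p, 1);

        fmpz_mul_ui(am, am, p);
    }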
mpoly_words_per_exp_sp(Abits, mctx); slong * off, * shift; @@ -250,7 +250,7 @@ static void fmpz_mpoly_fmpz_mod_coeffs( _fmpz_mod_vec_set_fmpz_vec(EH->coeffs, Acoeffs, Alen, fpctx); } -mp_limb_t n_poly_mod_zip_eval_cur_inc_coeff( +ulong n_poly_mod_zip_eval_cur_inc_coeff( n_poly_t Acur, n_poly_t Ainc, n_poly_t Acoeff, @@ -281,7 +281,7 @@ static void n_polyun_mod_zip_eval_cur_inc_coeff( { slong i, Ei; ulong e0, e1; - mp_limb_t c; + ulong c; n_poly_struct * Ec; FLINT_ASSERT(Acur->length > 0); @@ -376,7 +376,7 @@ static void fmpz_mpoly2_eval_fmpz_mod( */ void nmod_mpoly_bma_interpolate_alpha_powers( - mp_limb_t * out, + ulong * out, ulong w, slong m, const mpoly_bma_interpolate_ctx_t Ictx, @@ -670,8 +670,8 @@ static int _nmod_mpoly_bma_get_fmpz_mpoly2( slong i, j, t; slong N = mpoly_words_per_exp_sp(Abits, mctx); ulong new_exp, this_exp; - mp_limb_t * values, * roots; - mp_limb_t T, S, V, V0, V1, V2, p0, p1, r; + ulong * values, * roots; + ulong T, S, V, V0, V1, V2, p0, p1, r; FLINT_ASSERT(mctx->ord == ORD_LEX); @@ -1248,7 +1248,7 @@ static int _random_check_sp( n_polyun_t Geval_sp, n_polyun_t Abareval_sp, n_polyun_t Bbareval_sp, - mp_limb_t * alphas_sp, + ulong * alphas_sp, n_poly_struct * alpha_caches_sp, const fmpz_mpoly_t H, n_poly_t Hmarks, const fmpz_mpoly_t A, n_poly_t Amarks, ulong Abidegree, @@ -1259,7 +1259,7 @@ static int _random_check_sp( flint_rand_t randstate, n_poly_polyun_stack_t St_sp) { - mp_limb_t Gammaeval_sp; + ulong Gammaeval_sp; int success; int point_try_count; slong i; @@ -1419,7 +1419,7 @@ static int _random_check_mp( 1: success */ static int zip_solve( - mp_limb_t * Acoeffs, + ulong * Acoeffs, n_polyun_t Z, n_polyun_t H, n_polyun_t M, @@ -1465,7 +1465,7 @@ int _fmpz_vec_crt_nmod( flint_bitcnt_t * maxbits_, fmpz * a, fmpz_t am, - mp_limb_t * b, + ulong * b, slong len, nmod_t mod) { @@ -1530,9 +1530,9 @@ int fmpz_mpolyl_gcd_zippel2( n_poly_t Gammacur_sp, Gammainc_sp, Gammacoeff_sp; n_polyun_t Acur_sp, Ainc_sp, Acoeff_sp; n_polyun_t Bcur_sp, Binc_sp, Bcoeff_sp; - mp_limb_t p_sp, sshift_sp, last_unlucky_sshift_plus_1_sp, image_count_sp; - mp_limb_t Gammaeval_sp; - mp_limb_t * alphas_sp; + ulong p_sp, sshift_sp, last_unlucky_sshift_plus_1_sp, image_count_sp; + ulong Gammaeval_sp; + ulong * alphas_sp; n_poly_struct * alpha_caches_sp; /* misc */ n_polyun_t HH, MH, ZH; @@ -1645,7 +1645,7 @@ int fmpz_mpolyl_gcd_zippel2( n_polyun_init(Binc_sp); n_polyun_init(Bcoeff_sp); - alphas_sp = FLINT_ARRAY_ALLOC(nvars, mp_limb_t); + alphas_sp = FLINT_ARRAY_ALLOC(nvars, ulong); alpha_caches_sp = FLINT_ARRAY_ALLOC(3*nvars, n_poly_struct); for (i = 0; i < 3*nvars; i++) n_poly_init(alpha_caches_sp + i); diff --git a/src/fmpz_mpoly_factor/interp.c b/src/fmpz_mpoly_factor/interp.c index 7776d554b8..9e49e03219 100644 --- a/src/fmpz_mpoly_factor/interp.c +++ b/src/fmpz_mpoly_factor/interp.c @@ -128,7 +128,7 @@ void fmpz_mpoly_interp_reduce_p_mpolyn( slong N = mpoly_words_per_exp_sp(A->bits, ctx->minfo); slong offset, shift, k; ulong mask; - mp_limb_t v; + ulong v; fmpz * Acoeff = A->coeffs; ulong * Aexp = A->exps; slong Alen = A->length; diff --git a/src/fmpz_mpoly_factor/irred_zippel.c b/src/fmpz_mpoly_factor/irred_zippel.c index cad749337f..6de860663e 100644 --- a/src/fmpz_mpoly_factor/irred_zippel.c +++ b/src/fmpz_mpoly_factor/irred_zippel.c @@ -28,7 +28,7 @@ static void nmod_mpoly_get_eval_helper2( { slong start, Ai, j, k, n; slong e0, e1, EHi; - mp_limb_t * p; + ulong * p; flint_bitcnt_t bits = A->bits; slong Alen = A->length; const ulong * Aexps = A->exps; @@ -81,7 +81,7 @@ static void 
nmod_mpoly_get_eval_helper2( for (j = 0; j < n; j++) { - mp_limb_t meval = 1; + ulong meval = 1; for (k = 2; k < nvars; k++) { @@ -114,7 +114,7 @@ static slong nmod_mpoly_set_eval_helper_and_zip_form2( { slong start, Bi, j, k, n; slong e0, e1, Hi, EHi; - mp_limb_t * p; + ulong * p; slong zip_length = 0; flint_bitcnt_t bits = B->bits; slong Blen = B->length; @@ -172,7 +172,7 @@ static slong nmod_mpoly_set_eval_helper_and_zip_form2( for (j = 0; j < n; j++) { - mp_limb_t meval = 1; + ulong meval = 1; for (k = 2; k < ctx->minfo->nvars; k++) { @@ -229,7 +229,7 @@ static int _fmpz_mpoly_modpk_update_zip( slong N = mpoly_words_per_exp_sp(A->bits, ctx->minfo); ulong start, mask = (-UWORD(1)) >> (FLINT_BITS - A->bits); n_poly_t c, t; - mp_limb_t * ccoeffs; + ulong * ccoeffs; mpoly_gen_offset_shift_sp(&off, &shift, 0, A->bits, ctx->minfo); @@ -397,8 +397,8 @@ static void n_bpoly_mod_eval_step( nmod_t ctx) { slong i, n, Ai; - mp_limb_t * p; - mp_limb_t c; + ulong * p; + ulong c; ulong e0, e1; slong EHlen = EH->length; @@ -444,7 +444,7 @@ static int fmpz_mfactor_lift_prime_power_zippel( flint_rand_t state, const nmod_mpoly_struct * Bp, const fmpz_mpoly_t A, - const mp_limb_t * FLINT_UNUSED(alphap), + const ulong * FLINT_UNUSED(alphap), const fmpz_mpoly_ctx_t ctx, const nmod_mpoly_ctx_t ctxp, slong L) @@ -503,7 +503,7 @@ static int fmpz_mfactor_lift_prime_power_zippel( /* choose betas */ for (i = 2; i < n; i++) { - mp_limb_t bb = n_urandint(state, ctxp->mod.n - 3) + 2; + ulong bb = n_urandint(state, ctxp->mod.n - 3) + 2; nmod_pow_cache_start(bb, beta_caches + 3*i + 0, beta_caches + 3*i + 1, beta_caches + 3*i + 2); } @@ -683,12 +683,12 @@ int fmpz_mpoly_factor_irred_zippel( fmpz_mpoly_t m, mpow; fmpz_mpolyv_t Alc, lc_divs; fmpz_t q, facBound; - mp_limb_t p; + ulong p; nmod_mpoly_ctx_t ctxp; nmod_mpolyv_t facp, tfacp; nmod_mpolyv_t Aevalp, Alcp; nmod_poly_t Aup; - mp_limb_t * alphap; + ulong * alphap; slong r, L; FLINT_ASSERT(n > 1); @@ -710,7 +710,7 @@ int fmpz_mpoly_factor_irred_zippel( fmpz_poly_init(Au); alpha = _fmpz_vec_init(n); - alphap = (mp_limb_t *) flint_malloc(n*sizeof(mp_limb_t)); + alphap = (ulong *) flint_malloc(n*sizeof(ulong)); degs = (slong *) flint_malloc(2*(n + 1)*sizeof(slong)); tdegs = degs + (n + 1); diff --git a/src/fmpz_mpoly_factor/test/main.c b/src/fmpz_mpoly_factor/test/main.c index 6fdfd947f9..227acd657d 100644 --- a/src/fmpz_mpoly_factor/test/main.c +++ b/src/fmpz_mpoly_factor/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-factor.c" diff --git a/src/fmpz_mpoly_q.h b/src/fmpz_mpoly_q.h index 77ce05e26a..9abd34f6c8 100644 --- a/src/fmpz_mpoly_q.h +++ b/src/fmpz_mpoly_q.h @@ -118,7 +118,7 @@ int fmpz_mpoly_q_set_str_pretty(fmpz_mpoly_q_t res, const char * s, const char * /* Random generation */ -void fmpz_mpoly_q_randtest(fmpz_mpoly_q_t res, flint_rand_t state, slong length, mp_limb_t coeff_bits, slong exp_bound, const fmpz_mpoly_ctx_t ctx); +void fmpz_mpoly_q_randtest(fmpz_mpoly_q_t res, flint_rand_t state, slong length, ulong coeff_bits, slong exp_bound, const fmpz_mpoly_ctx_t ctx); /* Comparisons */ diff --git a/src/fmpz_mpoly_q/randtest.c b/src/fmpz_mpoly_q/randtest.c index 1d0f92f7b0..9ad7c12004 100644 --- a/src/fmpz_mpoly_q/randtest.c +++ b/src/fmpz_mpoly_q/randtest.c @@ -13,7 +13,7 @@ void fmpz_mpoly_q_randtest(fmpz_mpoly_q_t res, flint_rand_t state, - slong length, mp_limb_t coeff_bits, slong exp_bound, const fmpz_mpoly_ctx_t ctx) + slong length, ulong coeff_bits, slong exp_bound, const fmpz_mpoly_ctx_t ctx) { length = n_randint(state, length + 1); diff --git a/src/fmpz_mpoly_q/test/main.c b/src/fmpz_mpoly_q/test/main.c index 4be9ab02ab..784ee8b9a2 100644 --- a/src/fmpz_mpoly_q/test/main.c +++ b/src/fmpz_mpoly_q/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/fmpz_poly.h b/src/fmpz_poly.h index 1ebd62ba7f..bd484741d3 100644 --- a/src/fmpz_poly.h +++ b/src/fmpz_poly.h @@ -44,7 +44,7 @@ typedef fmpz_poly_powers_precomp_struct fmpz_poly_powers_precomp_t[1]; typedef struct { - mp_limb_t ** jj; /* used by fft_convolution_precache */ + ulong ** jj; /* used by fft_convolution_precache */ slong n; slong len2; slong loglen; @@ -176,9 +176,9 @@ void fmpz_poly_randtest_not_zero(fmpz_poly_t f, flint_rand_t state, void fmpz_poly_randtest_no_real_root(fmpz_poly_t p, flint_rand_t state, slong len, flint_bitcnt_t bits); -void fmpz_poly_randtest_irreducible1(fmpz_poly_t pol, flint_rand_t state, slong len, mp_bitcnt_t bits); -void fmpz_poly_randtest_irreducible2(fmpz_poly_t pol, flint_rand_t state, slong len, mp_bitcnt_t bits); -void fmpz_poly_randtest_irreducible(fmpz_poly_t pol, flint_rand_t state, slong len, mp_bitcnt_t bits); +void fmpz_poly_randtest_irreducible1(fmpz_poly_t pol, flint_rand_t state, slong len, flint_bitcnt_t bits); +void fmpz_poly_randtest_irreducible2(fmpz_poly_t pol, flint_rand_t state, slong len, flint_bitcnt_t bits); +void fmpz_poly_randtest_irreducible(fmpz_poly_t pol, flint_rand_t state, slong len, flint_bitcnt_t bits); /* Getting and setting coefficients ****************************************/ @@ -331,14 +331,14 @@ void _fmpz_poly_scale_2exp(fmpz * pol, slong len, slong k); /* Bit packing *************************************************************/ -void _fmpz_poly_bit_pack(mp_ptr arr, const fmpz * poly, +void _fmpz_poly_bit_pack(nn_ptr arr, const fmpz * poly, slong len, flint_bitcnt_t bit_size, int negate); int _fmpz_poly_bit_unpack(fmpz * poly, slong len, - mp_srcptr arr, flint_bitcnt_t bit_size, int negate); + nn_srcptr arr, flint_bitcnt_t bit_size, int negate); void _fmpz_poly_bit_unpack_unsigned(fmpz * poly, slong len, - mp_srcptr arr, flint_bitcnt_t bit_size); + nn_srcptr arr, flint_bitcnt_t bit_size); void fmpz_poly_bit_pack(fmpz_t f, const fmpz_poly_t poly, flint_bitcnt_t bit_size); @@ -901,11 +901,11 @@ 
void _fmpz_poly_evaluate_fmpq(fmpz_t rnum, fmpz_t rden, void fmpz_poly_evaluate_fmpq(fmpq_t res, const fmpz_poly_t f, const fmpq_t a); -mp_limb_t _fmpz_poly_evaluate_mod(const fmpz * poly, slong len, - mp_limb_t a, mp_limb_t n, mp_limb_t ninv); +ulong _fmpz_poly_evaluate_mod(const fmpz * poly, slong len, + ulong a, ulong n, ulong ninv); -mp_limb_t fmpz_poly_evaluate_mod(const fmpz_poly_t poly, mp_limb_t a, - mp_limb_t n); +ulong fmpz_poly_evaluate_mod(const fmpz_poly_t poly, ulong a, + ulong n); double _fmpz_poly_evaluate_horner_d(const fmpz * poly, slong n, double d); @@ -1083,12 +1083,12 @@ void fmpz_poly_set_nmod_poly_unsigned(fmpz_poly_t res, const nmod_poly_t poly); void _fmpz_poly_CRT_ui_precomp(fmpz * res, const fmpz * poly1, slong len1, - const fmpz_t m1, mp_srcptr poly2, slong len2, mp_limb_t m2, - mp_limb_t m2inv, fmpz_t m1m2, mp_limb_t c, int sign); + const fmpz_t m1, nn_srcptr poly2, slong len2, ulong m2, + ulong m2inv, fmpz_t m1m2, ulong c, int sign); void _fmpz_poly_CRT_ui(fmpz * res, const fmpz * poly1, slong len1, - const fmpz_t m1, mp_srcptr poly2, slong len2, mp_limb_t m2, - mp_limb_t m2inv, int sign); + const fmpz_t m1, nn_srcptr poly2, slong len2, ulong m2, + ulong m2inv, int sign); void fmpz_poly_CRT_ui(fmpz_poly_t res, const fmpz_poly_t poly1, const fmpz_t m1, const nmod_poly_t poly2, @@ -1197,7 +1197,7 @@ void fmpz_poly_CLD_bound(fmpz_t res, const fmpz_poly_t f, slong n); /* Special polynomials */ -void _fmpz_poly_cyclotomic(fmpz * a, ulong n, mp_ptr factors, +void _fmpz_poly_cyclotomic(fmpz * a, ulong n, nn_ptr factors, slong num_factors, ulong phi); void fmpz_poly_cyclotomic(fmpz_poly_t poly, ulong n); diff --git a/src/fmpz_poly/CRT_ui.c b/src/fmpz_poly/CRT_ui.c index afabad24ff..b4f2e0928c 100644 --- a/src/fmpz_poly/CRT_ui.c +++ b/src/fmpz_poly/CRT_ui.c @@ -15,8 +15,8 @@ void _fmpz_poly_CRT_ui_precomp(fmpz * res, const fmpz * poly1, slong len1, - const fmpz_t m1, mp_srcptr poly2, slong len2, mp_limb_t m2, - mp_limb_t m2inv, fmpz_t m1m2, mp_limb_t c, int sign) + const fmpz_t m1, nn_srcptr poly2, slong len2, ulong m2, + ulong m2inv, fmpz_t m1m2, ulong c, int sign) { slong i; @@ -47,10 +47,10 @@ _fmpz_poly_CRT_ui_precomp(fmpz * res, const fmpz * poly1, slong len1, void _fmpz_poly_CRT_ui(fmpz * res, const fmpz * poly1, slong len1, - const fmpz_t m1, mp_srcptr poly2, slong len2, mp_limb_t m2, - mp_limb_t m2inv, int sign) + const fmpz_t m1, nn_srcptr poly2, slong len2, ulong m2, + ulong m2inv, int sign) { - mp_limb_t c; + ulong c; fmpz_t m1m2; c = fmpz_fdiv_ui(m1, m2); diff --git a/src/fmpz_poly/bit_pack.c b/src/fmpz_poly/bit_pack.c index a5e69ded34..96dc5cf673 100644 --- a/src/fmpz_poly/bit_pack.c +++ b/src/fmpz_poly/bit_pack.c @@ -14,13 +14,13 @@ #include "fmpz_poly.h" void -_fmpz_poly_bit_pack(mp_ptr arr, const fmpz * poly, slong len, +_fmpz_poly_bit_pack(nn_ptr arr, const fmpz * poly, slong len, flint_bitcnt_t bit_size, int negate) { flint_bitcnt_t bits = 0; - mp_size_t limbs = 0; + slong limbs = 0; flint_bitcnt_t b = bit_size % FLINT_BITS; - mp_size_t l = bit_size / FLINT_BITS; + slong l = bit_size / FLINT_BITS; int borrow = 0; slong i; diff --git a/src/fmpz_poly/bit_unpack.c b/src/fmpz_poly/bit_unpack.c index 2e3f347c27..02c489be15 100644 --- a/src/fmpz_poly/bit_unpack.c +++ b/src/fmpz_poly/bit_unpack.c @@ -15,12 +15,12 @@ int _fmpz_poly_bit_unpack(fmpz * poly, slong len, - mp_srcptr arr, flint_bitcnt_t bit_size, int negate) + nn_srcptr arr, flint_bitcnt_t bit_size, int negate) { flint_bitcnt_t bits = 0; - mp_size_t limbs = 0; + slong limbs = 0; flint_bitcnt_t b = 
bit_size % FLINT_BITS; - mp_size_t l = bit_size / FLINT_BITS; + slong l = bit_size / FLINT_BITS; int borrow = 0; slong i; @@ -43,12 +43,12 @@ _fmpz_poly_bit_unpack(fmpz * poly, slong len, void _fmpz_poly_bit_unpack_unsigned(fmpz * poly, slong len, - mp_srcptr arr, flint_bitcnt_t bit_size) + nn_srcptr arr, flint_bitcnt_t bit_size) { flint_bitcnt_t bits = 0; - mp_size_t limbs = 0; + slong limbs = 0; flint_bitcnt_t b = bit_size % FLINT_BITS; - mp_size_t l = bit_size / FLINT_BITS; + slong l = bit_size / FLINT_BITS; slong i; for (i = 0; i < len; i++) diff --git a/src/fmpz_poly/cyclotomic.c b/src/fmpz_poly/cyclotomic.c index dfd29ba4d9..7f1793269c 100644 --- a/src/fmpz_poly/cyclotomic.c +++ b/src/fmpz_poly/cyclotomic.c @@ -14,7 +14,7 @@ #include "fmpz_poly.h" void -_fmpz_poly_cyclotomic(fmpz * a, ulong n, mp_ptr factors, +_fmpz_poly_cyclotomic(fmpz * a, ulong n, nn_ptr factors, slong num_factors, ulong phi) { ulong i; diff --git a/src/fmpz_poly/div_series_basecase.c b/src/fmpz_poly/div_series_basecase.c index ba24004015..23115be6ea 100644 --- a/src/fmpz_poly/div_series_basecase.c +++ b/src/fmpz_poly/div_series_basecase.c @@ -142,7 +142,7 @@ _fmpz_poly_div_series_basecase(fmpz * Q, const fmpz * A, slong Alen, } else if (bits <= 2 * FLINT_BITS - 1) { - mp_limb_t hi, lo, shi, slo; + ulong hi, lo, shi, slo; slong x, y; shi = slo = 0; @@ -160,7 +160,7 @@ _fmpz_poly_div_series_basecase(fmpz * Q, const fmpz * A, slong Alen, } else { - mp_limb_t hi, lo, cy, shh, shi, slo; + ulong hi, lo, cy, shh, shi, slo; slong x, y; shh = shi = slo = 0; diff --git a/src/fmpz_poly/evaluate_mod.c b/src/fmpz_poly/evaluate_mod.c index be8eed1d33..c24a47d9cd 100644 --- a/src/fmpz_poly/evaluate_mod.c +++ b/src/fmpz_poly/evaluate_mod.c @@ -14,10 +14,10 @@ #include "fmpz.h" #include "fmpz_poly.h" -mp_limb_t _fmpz_poly_evaluate_mod(const fmpz * poly, slong len, mp_limb_t a, - mp_limb_t n, mp_limb_t ninv) +ulong _fmpz_poly_evaluate_mod(const fmpz * poly, slong len, ulong a, + ulong n, ulong ninv) { - mp_limb_t c, res = 0; + ulong c, res = 0; while (len--) { @@ -28,21 +28,21 @@ mp_limb_t _fmpz_poly_evaluate_mod(const fmpz * poly, slong len, mp_limb_t a, return res; } -mp_limb_t fmpz_poly_evaluate_mod(const fmpz_poly_t poly, mp_limb_t a, - mp_limb_t n) +ulong fmpz_poly_evaluate_mod(const fmpz_poly_t poly, ulong a, + ulong n) { if (poly->length == 0) return 0; if (a == 0) { - mp_limb_t res; + ulong res; res = fmpz_fdiv_ui(poly->coeffs, n); return res; } else { - mp_limb_t ninv; + ulong ninv; ninv = n_preinvert_limb(n); return _fmpz_poly_evaluate_mod(poly->coeffs, poly->length, a, n, ninv); diff --git a/src/fmpz_poly/gcd_heuristic.c b/src/fmpz_poly/gcd_heuristic.c index a446748696..507219acf9 100644 --- a/src/fmpz_poly/gcd_heuristic.c +++ b/src/fmpz_poly/gcd_heuristic.c @@ -18,7 +18,7 @@ Divide (arrayg, limbsg) by the positive value gc in-place and return the number of limbs written */ -mp_size_t flint_mpn_tdiv_q_fmpz_inplace(mp_ptr arrayg, mp_size_t limbsg, fmpz_t gc) +slong flint_mpn_tdiv_q_fmpz_inplace(nn_ptr arrayg, slong limbsg, fmpz_t gc) { if (fmpz_size(gc) == 1) { @@ -27,10 +27,10 @@ mp_size_t flint_mpn_tdiv_q_fmpz_inplace(mp_ptr arrayg, mp_size_t limbsg, fmpz_t } else { - mp_size_t tlimbs; + slong tlimbs; mpz_ptr mgc = COEFF_TO_PTR(*gc); - mp_ptr temp = flint_malloc(limbsg*sizeof(mp_limb_t)); + nn_ptr temp = flint_malloc(limbsg*sizeof(ulong)); flint_mpn_copyi(temp, arrayg, limbsg); mpn_tdiv_q(arrayg, temp, limbsg, mgc->_mp_d, mgc->_mp_size); @@ -75,7 +75,7 @@ _fmpz_poly_gcd_heuristic(fmpz * res, const fmpz * poly1, slong 
len1, slong sign1, sign2, glen, qlen, qlen2; fmpz_t ac, bc, d, gc; fmpz * A, * B, * G, * Q, * t; - mp_ptr array1, array2, arrayg, q, temp; + nn_ptr array1, array2, arrayg, q, temp; int divides; fmpz_init(ac); @@ -159,9 +159,9 @@ _fmpz_poly_gcd_heuristic(fmpz * res, const fmpz * poly1, slong len1, /* allocate space to pack into */ limbs1 = (pack_bits*len1 - 1)/FLINT_BITS + 1; limbs2 = (pack_bits*len2 - 1)/FLINT_BITS + 1; - array1 = flint_calloc(limbs1, sizeof(mp_limb_t)); - array2 = flint_calloc(limbs2, sizeof(mp_limb_t)); - arrayg = flint_calloc(limbs2, sizeof(mp_limb_t)); + array1 = flint_calloc(limbs1, sizeof(ulong)); + array2 = flint_calloc(limbs2, sizeof(ulong)); + arrayg = flint_calloc(limbs2, sizeof(ulong)); /* pack first poly and normalise */ sign1 = (slong) fmpz_sgn(A + len1 - 1); @@ -206,8 +206,8 @@ _fmpz_poly_gcd_heuristic(fmpz * res, const fmpz * poly1, slong len1, qlimbs2 = limbs2 - limbsg + 1; qlen2 = FLINT_MIN(len2, (slong) ((qlimbs2 * FLINT_BITS) / pack_bits) + 1); qlimbs = (FLINT_MAX(qlen, qlen2)*pack_bits - 1)/FLINT_BITS + 1; - q = flint_calloc(qlimbs, sizeof(mp_limb_t)); - temp = flint_malloc(limbsg*sizeof(mp_limb_t)); + q = flint_calloc(qlimbs, sizeof(ulong)); + temp = flint_malloc(limbsg*sizeof(ulong)); divides = 0; if (flint_mpn_divides(q, array1, limbs1, arrayg, limbsg, temp)) diff --git a/src/fmpz_poly/gcd_modular.c b/src/fmpz_poly/gcd_modular.c index 9bbc291a35..c33963d7c3 100644 --- a/src/fmpz_poly/gcd_modular.c +++ b/src/fmpz_poly/gcd_modular.c @@ -22,8 +22,8 @@ void _fmpz_poly_gcd_modular(fmpz * res, const fmpz * poly1, slong len1, flint_bitcnt_t bits1, bits2, nb1, nb2, bits_small, pbits, curr_bits = 0, new_bits; fmpz_t ac, bc, hc, d, g, l, eval_A, eval_B, eval_GCD, modulus; fmpz * A, * B, * Q, * lead_A, * lead_B; - mp_ptr a, b, h; - mp_limb_t p, h_inv, g_mod; + nn_ptr a, b, h; + ulong p, h_inv, g_mod; nmod_t mod; slong i, n, n0, unlucky, hlen, bound; int g_pm1; diff --git a/src/fmpz_poly/inv_series.c b/src/fmpz_poly/inv_series.c index 714ae141fb..66b62d9d84 100644 --- a/src/fmpz_poly/inv_series.c +++ b/src/fmpz_poly/inv_series.c @@ -163,7 +163,7 @@ _fmpz_poly_inv_series_basecase(fmpz * Qinv, const fmpz * Q, slong Qlen, slong n) } else if (bits <= 2 * FLINT_BITS - 1) { - mp_limb_t hi, lo, shi, slo; + ulong hi, lo, shi, slo; slong x, y; shi = slo = 0; @@ -184,7 +184,7 @@ _fmpz_poly_inv_series_basecase(fmpz * Qinv, const fmpz * Q, slong Qlen, slong n) } else { - mp_limb_t hi, lo, cy, shh, shi, slo; + ulong hi, lo, cy, shh, shi, slo; slong x, y; shh = shi = slo = 0; diff --git a/src/fmpz_poly/io.c b/src/fmpz_poly/io.c index e8a81546b1..4728c212c2 100644 --- a/src/fmpz_poly/io.c +++ b/src/fmpz_poly/io.c @@ -274,7 +274,7 @@ int fmpz_poly_fread_pretty(FILE *file, fmpz_poly_t poly, char **x) fmpz_poly_zero(poly); if (poly->alloc) - flint_mpn_zero((mp_ptr) poly->coeffs, poly->alloc); + flint_mpn_zero((nn_ptr) poly->coeffs, poly->alloc); i = 0; N = 80; diff --git a/src/fmpz_poly/mul.c b/src/fmpz_poly/mul.c index 94c5677fb9..21512dede7 100644 --- a/src/fmpz_poly/mul.c +++ b/src/fmpz_poly/mul.c @@ -44,13 +44,13 @@ _fmpz_poly_mul_tiny2(fmpz * res, const fmpz * poly1, slong len1, const fmpz * poly2, slong len2) { slong i, j, k, c, d; - mp_limb_t hi, lo; - mp_ptr tmp; + ulong hi, lo; + nn_ptr tmp; TMP_INIT; TMP_START; - tmp = TMP_ALLOC(2 * (len1 + len2 - 1) * sizeof(mp_limb_t)); + tmp = TMP_ALLOC(2 * (len1 + len2 - 1) * sizeof(ulong)); flint_mpn_zero(tmp, 2 * (len1 + len2 - 1)); @@ -81,7 +81,7 @@ _fmpz_poly_mul_tiny2(fmpz * res, const fmpz * poly1, lo = tmp[2 * i]; hi = tmp[2 
* i + 1]; - if (((mp_limb_signed_t) hi) >= 0) + if (((slong) hi) >= 0) { fmpz_set_uiui(res + i, hi, lo); } @@ -166,7 +166,7 @@ _fmpz_poly_mul(fmpz * res, const fmpz * poly1, } else { - mp_size_t limbs1, limbs2; + slong limbs1, limbs2; limbs1 = (bits1 + FLINT_BITS - 1) / FLINT_BITS; limbs2 = (bits2 + FLINT_BITS - 1) / FLINT_BITS; diff --git a/src/fmpz_poly/mul_KS.c b/src/fmpz_poly/mul_KS.c index 4fc52c49a0..74dabe545c 100644 --- a/src/fmpz_poly/mul_KS.c +++ b/src/fmpz_poly/mul_KS.c @@ -23,7 +23,7 @@ _fmpz_poly_mul_KS(fmpz * res, const fmpz * poly1, slong len1, int neg1, neg2; slong limbs1, limbs2, loglen; slong bits1, bits2, bits; - mp_limb_t *arr1, *arr2, *arr3; + ulong *arr1, *arr2, *arr3; slong sign = 0; FMPZ_VEC_NORM(poly1, len1); @@ -66,19 +66,19 @@ _fmpz_poly_mul_KS(fmpz * res, const fmpz * poly1, slong len1, if (poly1 == poly2) { - arr1 = (mp_limb_t *) flint_calloc(limbs1, sizeof(mp_limb_t)); + arr1 = (ulong *) flint_calloc(limbs1, sizeof(ulong)); arr2 = arr1; _fmpz_poly_bit_pack(arr1, poly1, len1, bits, neg1); } else { - arr1 = (mp_limb_t *) flint_calloc(limbs1 + limbs2, sizeof(mp_limb_t)); + arr1 = (ulong *) flint_calloc(limbs1 + limbs2, sizeof(ulong)); arr2 = arr1 + limbs1; _fmpz_poly_bit_pack(arr1, poly1, len1, bits, neg1); _fmpz_poly_bit_pack(arr2, poly2, len2, bits, neg2); } - arr3 = (mp_limb_t *) flint_malloc((limbs1 + limbs2) * sizeof(mp_limb_t)); + arr3 = (ulong *) flint_malloc((limbs1 + limbs2) * sizeof(ulong)); if (arr1 == arr2 && limbs1 == limbs2) flint_mpn_sqr(arr3, arr1, limbs1); diff --git a/src/fmpz_poly/mulhigh.c b/src/fmpz_poly/mulhigh.c index 465b1fd6cd..0912d79b6d 100644 --- a/src/fmpz_poly/mulhigh.c +++ b/src/fmpz_poly/mulhigh.c @@ -16,9 +16,9 @@ void _fmpz_poly_mulhigh(fmpz * res, const fmpz * poly1, slong len1, const fmpz * poly2, slong len2, slong start) { - mp_size_t limbs1 = _fmpz_vec_max_limbs(poly1, len1); - mp_size_t limbs2 = _fmpz_vec_max_limbs(poly2, len2); - mp_size_t limbsx = FLINT_MAX(limbs1, limbs2); + slong limbs1 = _fmpz_vec_max_limbs(poly1, len1); + slong limbs2 = _fmpz_vec_max_limbs(poly2, len2); + slong limbsx = FLINT_MAX(limbs1, limbs2); if (start < 5) { diff --git a/src/fmpz_poly/mulhigh_n.c b/src/fmpz_poly/mulhigh_n.c index f9bb98b5dc..0c57008118 100644 --- a/src/fmpz_poly/mulhigh_n.c +++ b/src/fmpz_poly/mulhigh_n.c @@ -16,11 +16,11 @@ void fmpz_poly_mulhigh_n(fmpz_poly_t res, const fmpz_poly_t poly1, const fmpz_poly_t poly2, slong n) { - mp_size_t limbs1 = _fmpz_vec_max_limbs(poly1->coeffs, poly1->length); - mp_size_t limbs2 = _fmpz_vec_max_limbs(poly2->coeffs, poly2->length); - mp_size_t len1 = poly1->length; - mp_size_t len2 = poly2->length; - mp_size_t limbsx = FLINT_MAX(limbs1, limbs2); + slong limbs1 = _fmpz_vec_max_limbs(poly1->coeffs, poly1->length); + slong limbs2 = _fmpz_vec_max_limbs(poly2->coeffs, poly2->length); + slong len1 = poly1->length; + slong len2 = poly2->length; + slong limbsx = FLINT_MAX(limbs1, limbs2); if (n == 0) { diff --git a/src/fmpz_poly/mullow.c b/src/fmpz_poly/mullow.c index 5793bf7fc7..dc93d087a8 100644 --- a/src/fmpz_poly/mullow.c +++ b/src/fmpz_poly/mullow.c @@ -44,13 +44,13 @@ _fmpz_poly_mullow_tiny2(fmpz * res, const fmpz * poly1, slong len1, const fmpz * poly2, slong len2, slong n) { slong i, j, k, c, d; - mp_limb_t hi, lo; - mp_ptr tmp; + ulong hi, lo; + nn_ptr tmp; TMP_INIT; TMP_START; - tmp = TMP_ALLOC(2 * n * sizeof(mp_limb_t)); + tmp = TMP_ALLOC(2 * n * sizeof(ulong)); flint_mpn_zero(tmp, 2 * n); @@ -81,7 +81,7 @@ _fmpz_poly_mullow_tiny2(fmpz * res, const fmpz * poly1, lo = tmp[2 * i]; hi = tmp[2 * 
i + 1]; - if (((mp_limb_signed_t) hi) >= 0) + if (((slong) hi) >= 0) { fmpz_set_uiui(res + i, hi, lo); } @@ -178,7 +178,7 @@ _fmpz_poly_mullow(fmpz * res, const fmpz * poly1, slong len1, } else { - mp_size_t limbs1, limbs2; + slong limbs1, limbs2; limbs1 = (bits1 + FLINT_BITS - 1) / FLINT_BITS; limbs2 = (bits2 + FLINT_BITS - 1) / FLINT_BITS; diff --git a/src/fmpz_poly/mullow_KS.c b/src/fmpz_poly/mullow_KS.c index ff34daf7e1..8760583ea9 100644 --- a/src/fmpz_poly/mullow_KS.c +++ b/src/fmpz_poly/mullow_KS.c @@ -22,7 +22,7 @@ _fmpz_poly_mullow_KS(fmpz * res, const fmpz * poly1, slong len1, int neg1, neg2; slong limbs1, limbs2, loglen; slong bits1, bits2, bits; - mp_limb_t *arr1, *arr2, *arr3; + ulong *arr1, *arr2, *arr3; slong sign = 0; len1 = FLINT_MIN(len1, n); @@ -73,19 +73,19 @@ _fmpz_poly_mullow_KS(fmpz * res, const fmpz * poly1, slong len1, if (poly1 == poly2) { - arr1 = (mp_ptr) flint_calloc(limbs1, sizeof(mp_limb_t)); + arr1 = (nn_ptr) flint_calloc(limbs1, sizeof(ulong)); arr2 = arr1; _fmpz_poly_bit_pack(arr1, poly1, len1, bits, neg1); } else { - arr1 = (mp_ptr) flint_calloc(limbs1 + limbs2, sizeof(mp_limb_t)); + arr1 = (nn_ptr) flint_calloc(limbs1 + limbs2, sizeof(ulong)); arr2 = arr1 + limbs1; _fmpz_poly_bit_pack(arr1, poly1, len1, bits, neg1); _fmpz_poly_bit_pack(arr2, poly2, len2, bits, neg2); } - arr3 = (mp_ptr) flint_malloc((limbs1 + limbs2) * sizeof(mp_limb_t)); + arr3 = (nn_ptr) flint_malloc((limbs1 + limbs2) * sizeof(ulong)); if (arr1 == arr2 && limbs1 == limbs2) flint_mpn_sqr(arr3, arr1, limbs1); diff --git a/src/fmpz_poly/mullow_SS.c b/src/fmpz_poly/mullow_SS.c index b6ff6d3532..0257c66bdd 100644 --- a/src/fmpz_poly/mullow_SS.c +++ b/src/fmpz_poly/mullow_SS.c @@ -20,7 +20,7 @@ void _fmpz_poly_mullow_SS(fmpz * output, const fmpz * input1, slong len1, { slong len_out, loglen, loglen2, n; slong output_bits, limbs, size, i; - mp_limb_t * ptr, ** t1, ** t2, ** tt, ** s1, ** ii, ** jj; + ulong * ptr, ** t1, ** t2, ** tt, ** s1, ** ii, ** jj; slong bits1, bits2; ulong size1, size2; int sign = 0; @@ -61,13 +61,13 @@ void _fmpz_poly_mullow_SS(fmpz * output, const fmpz * input1, slong len1, /* allocate space for ffts */ N = flint_get_num_threads(); - ii = flint_malloc((4*(n + n*size) + 5*size*N)*sizeof(mp_limb_t)); - for (i = 0, ptr = (mp_limb_t *) ii + 4*n; i < 4*n; i++, ptr += size) + ii = flint_malloc((4*(n + n*size) + 5*size*N)*sizeof(ulong)); + for (i = 0, ptr = (ulong *) ii + 4*n; i < 4*n; i++, ptr += size) ii[i] = ptr; - t1 = TMP_ALLOC(N*sizeof(mp_limb_t *)); - t2 = TMP_ALLOC(N*sizeof(mp_limb_t *)); - s1 = TMP_ALLOC(N*sizeof(mp_limb_t *)); - tt = TMP_ALLOC(N*sizeof(mp_limb_t *)); + t1 = TMP_ALLOC(N*sizeof(ulong *)); + t2 = TMP_ALLOC(N*sizeof(ulong *)); + s1 = TMP_ALLOC(N*sizeof(ulong *)); + tt = TMP_ALLOC(N*sizeof(ulong *)); t1[0] = ptr; t2[0] = t1[0] + size*N; @@ -84,8 +84,8 @@ void _fmpz_poly_mullow_SS(fmpz * output, const fmpz * input1, slong len1, if (input1 != input2) { - jj = flint_malloc(4*(n + n*size)*sizeof(mp_limb_t)); - for (i = 0, ptr = (mp_limb_t *) jj + 4*n; i < 4*n; i++, ptr += size) + jj = flint_malloc(4*(n + n*size)*sizeof(ulong)); + for (i = 0, ptr = (ulong *) jj + 4*n; i < 4*n; i++, ptr += size) jj[i] = ptr; } else jj = ii; diff --git a/src/fmpz_poly/mullow_SS_precache.c b/src/fmpz_poly/mullow_SS_precache.c index 1be66d9624..d70bedc334 100644 --- a/src/fmpz_poly/mullow_SS_precache.c +++ b/src/fmpz_poly/mullow_SS_precache.c @@ -21,8 +21,8 @@ void fmpz_poly_mul_SS_precache_init(fmpz_poly_mul_precache_t pre, slong i, len_out, loglen2; slong output_bits, size; 
ulong size1, size2; - mp_limb_t * ptr; - mp_limb_t ** t1, ** t2, ** s1; + ulong * ptr; + ulong ** t1, ** t2, ** s1; int N; pre->len2 = poly2->length; @@ -51,13 +51,13 @@ void fmpz_poly_mul_SS_precache_init(fmpz_poly_mul_precache_t pre, /* allocate space for ffts */ N = flint_get_num_threads(); - pre->jj = (mp_limb_t **) - flint_malloc((4*(pre->n + pre->n*size) + 3*size*N + 3*N)*sizeof(mp_limb_t)); - for (i = 0, ptr = (mp_limb_t *) pre->jj + 4*pre->n; i < 4*pre->n; i++, ptr += size) + pre->jj = (ulong **) + flint_malloc((4*(pre->n + pre->n*size) + 3*size*N + 3*N)*sizeof(ulong)); + for (i = 0, ptr = (ulong *) pre->jj + 4*pre->n; i < 4*pre->n; i++, ptr += size) pre->jj[i] = ptr; - t1 = (mp_limb_t **) ptr; - t2 = (mp_limb_t **) t1 + N; - s1 = (mp_limb_t **) t2 + N; + t1 = (ulong **) ptr; + t2 = (ulong **) t1 + N; + s1 = (ulong **) t2 + N; ptr += 3*N; t1[0] = ptr; @@ -102,8 +102,8 @@ void _fmpz_poly_mullow_SS_precache(fmpz * output, const fmpz * input1, { slong len_out; slong size, i; - mp_limb_t ** ii, ** t1, ** t2, ** s1, ** tt; - mp_limb_t * ptr; + ulong ** ii, ** t1, ** t2, ** s1, ** tt; + ulong * ptr; int N; len_out = FLINT_MAX(len1 + pre->len2 - 1, 2*pre->n + 1); @@ -112,14 +112,14 @@ void _fmpz_poly_mullow_SS_precache(fmpz * output, const fmpz * input1, /* allocate space for ffts */ N = flint_get_num_threads(); - ii = (mp_limb_t **) - flint_malloc((4*(pre->n + pre->n*size) + 5*size*N + 4*N)*sizeof(mp_limb_t)); - for (i = 0, ptr = (mp_limb_t *) ii + 4*pre->n; i < 4*pre->n; i++, ptr += size) + ii = (ulong **) + flint_malloc((4*(pre->n + pre->n*size) + 5*size*N + 4*N)*sizeof(ulong)); + for (i = 0, ptr = (ulong *) ii + 4*pre->n; i < 4*pre->n; i++, ptr += size) ii[i] = ptr; - t1 = (mp_limb_t **) ptr; - t2 = (mp_limb_t **) t1 + N; - s1 = (mp_limb_t **) t2 + N; - tt = (mp_limb_t **) s1 + N; + t1 = (ulong **) ptr; + t2 = (ulong **) t1 + N; + s1 = (ulong **) t2 + N; + tt = (ulong **) s1 + N; ptr += 4*N; t1[0] = ptr; diff --git a/src/fmpz_poly/mullow_karatsuba_n.c b/src/fmpz_poly/mullow_karatsuba_n.c index 1f69108a0b..1db4ec07fe 100644 --- a/src/fmpz_poly/mullow_karatsuba_n.c +++ b/src/fmpz_poly/mullow_karatsuba_n.c @@ -106,7 +106,7 @@ _fmpz_poly_mullow_karatsuba(fmpz * res, const fmpz * poly1, slong len1, copy1 = (fmpz *) flint_malloc(n * sizeof(fmpz)); for (i = 0; i < len1; i++) copy1[i] = poly1[i]; - flint_mpn_zero((mp_ptr) copy1 + len1, n - len1); + flint_mpn_zero((nn_ptr) copy1 + len1, n - len1); clear |= 1; } @@ -117,7 +117,7 @@ _fmpz_poly_mullow_karatsuba(fmpz * res, const fmpz * poly1, slong len1, copy2 = (fmpz *) flint_malloc(n * sizeof(fmpz)); for (i = 0; i < len2; i++) copy2[i] = poly2[i]; - flint_mpn_zero((mp_ptr) copy2 + len2, n - len2); + flint_mpn_zero((nn_ptr) copy2 + len2, n - len2); clear |= 2; } diff --git a/src/fmpz_poly/pow_trunc.c b/src/fmpz_poly/pow_trunc.c index d60c85e06b..4a931d985f 100644 --- a/src/fmpz_poly/pow_trunc.c +++ b/src/fmpz_poly/pow_trunc.c @@ -147,7 +147,7 @@ fmpz_poly_pow_trunc(fmpz_poly_t res, const fmpz_poly_t poly, ulong e, slong n) copy = (fmpz *) flint_malloc(n * sizeof(fmpz)); for (i = 0; i < poly->length; i++) copy[i] = poly->coeffs[i]; - flint_mpn_zero((mp_ptr) copy + poly->length, n - poly->length); + flint_mpn_zero((nn_ptr) copy + poly->length, n - poly->length); clear = 1; } diff --git a/src/fmpz_poly/pseudo_divrem_divconquer.c b/src/fmpz_poly/pseudo_divrem_divconquer.c index 8baa324254..430c2bccdb 100644 --- a/src/fmpz_poly/pseudo_divrem_divconquer.c +++ b/src/fmpz_poly/pseudo_divrem_divconquer.c @@ -46,7 +46,7 @@ 
__fmpz_poly_pseudo_divrem_divconquer(fmpz * Q, fmpz * R, p1 = (fmpz *) flint_malloc((lenA - n1) * sizeof(fmpz)); { slong i; - flint_mpn_zero((mp_ptr) p1, n2 - 1); + flint_mpn_zero((nn_ptr) p1, n2 - 1); for (i = n2 - 1; i < lenA - n1; i++) p1[i] = (A + n1)[i]; } @@ -122,7 +122,7 @@ __fmpz_poly_pseudo_divrem_divconquer(fmpz * Q, fmpz * R, p1 = (fmpz *) flint_malloc((2 * lenB - 1) * sizeof(fmpz)); { slong i; - flint_mpn_zero((mp_ptr) p1, lenB - 1); + flint_mpn_zero((nn_ptr) p1, lenB - 1); for (i = lenB - 1; i < 2*lenB - 1; i++) p1[i] = (A + shift)[i]; } @@ -191,7 +191,7 @@ __fmpz_poly_pseudo_divrem_divconquer(fmpz * Q, fmpz * R, p1 = (fmpz *) flint_malloc((lenA - 2 * n2) * sizeof(fmpz)); { slong i; - flint_mpn_zero((mp_ptr) p1, n1 - 1); + flint_mpn_zero((nn_ptr) p1, n1 - 1); for (i = n1 - 1; i < lenA - 2 * n2; i++) p1[i] = (A + 2 * n2)[i]; } diff --git a/src/fmpz_poly/randtest.c b/src/fmpz_poly/randtest.c index d291157efd..8593581579 100644 --- a/src/fmpz_poly/randtest.c +++ b/src/fmpz_poly/randtest.c @@ -19,7 +19,7 @@ #include "fmpz_mod_poly.h" void -fmpz_poly_randtest_irreducible1(fmpz_poly_t p, flint_rand_t state, slong len, mp_bitcnt_t bits) +fmpz_poly_randtest_irreducible1(fmpz_poly_t p, flint_rand_t state, slong len, flint_bitcnt_t bits) { slong i; fmpz_t c; @@ -57,7 +57,7 @@ fmpz_poly_randtest_irreducible1(fmpz_poly_t p, flint_rand_t state, slong len, mp } void -fmpz_poly_randtest_irreducible2(fmpz_poly_t pol, flint_rand_t state, slong len, mp_bitcnt_t bits) +fmpz_poly_randtest_irreducible2(fmpz_poly_t pol, flint_rand_t state, slong len, flint_bitcnt_t bits) { while (1) { @@ -85,7 +85,7 @@ fmpz_poly_randtest_irreducible2(fmpz_poly_t pol, flint_rand_t state, slong len, } void -fmpz_poly_randtest_irreducible(fmpz_poly_t pol, flint_rand_t state, slong len, mp_bitcnt_t bits) +fmpz_poly_randtest_irreducible(fmpz_poly_t pol, flint_rand_t state, slong len, flint_bitcnt_t bits) { if (n_randint(state, 2)) fmpz_poly_randtest_irreducible1(pol, state, len, bits); diff --git a/src/fmpz_poly/realloc.c b/src/fmpz_poly/realloc.c index 072fb6fbbd..74fca8af1f 100644 --- a/src/fmpz_poly/realloc.c +++ b/src/fmpz_poly/realloc.c @@ -28,7 +28,7 @@ fmpz_poly_realloc(fmpz_poly_t poly, slong alloc) poly->coeffs = (fmpz *) flint_realloc(poly->coeffs, alloc * sizeof(fmpz)); if (alloc > poly->alloc) - flint_mpn_zero((mp_ptr) (poly->coeffs + poly->alloc), + flint_mpn_zero((nn_ptr) (poly->coeffs + poly->alloc), alloc - poly->alloc); } else /* Nothing allocated already so do it now */ diff --git a/src/fmpz_poly/resultant_modular.c b/src/fmpz_poly/resultant_modular.c index 09f6ee4e0d..bed8d5f557 100644 --- a/src/fmpz_poly/resultant_modular.c +++ b/src/fmpz_poly/resultant_modular.c @@ -25,8 +25,8 @@ void _fmpz_poly_resultant_modular(fmpz_t res, const fmpz * poly1, slong len1, fmpz_comb_temp_t comb_temp; fmpz_t ac, bc, l, modulus; fmpz * A, * B, * lead_A, * lead_B; - mp_ptr a, b, rarr, parr; - mp_limb_t p; + nn_ptr a, b, rarr, parr; + ulong p; nmod_t mod; /* special case, one of the polys is a constant */ diff --git a/src/fmpz_poly/resultant_modular_div.c b/src/fmpz_poly/resultant_modular_div.c index 670edac201..2b41bb1544 100644 --- a/src/fmpz_poly/resultant_modular_div.c +++ b/src/fmpz_poly/resultant_modular_div.c @@ -27,8 +27,8 @@ void _fmpz_poly_resultant_modular_div(fmpz_t res, fmpz_comb_temp_t comb_temp; fmpz_t ac, bc, l, modulus, div, la, lb; fmpz * A, * B, * lead_A, * lead_B; - mp_ptr a, b, rarr, parr; - mp_limb_t p, d; + nn_ptr a, b, rarr, parr; + ulong p, d; nmod_t mod; if (fmpz_is_zero(divisor)) diff --git 
a/src/fmpz_poly/sqr.c b/src/fmpz_poly/sqr.c index 4fa13976aa..80fbc473e5 100644 --- a/src/fmpz_poly/sqr.c +++ b/src/fmpz_poly/sqr.c @@ -44,13 +44,13 @@ void _fmpz_poly_sqr_tiny1(fmpz * res, const fmpz * poly, slong len) void _fmpz_poly_sqr_tiny2(fmpz * res, const fmpz * poly, slong len) { slong i, j, k, c, d; - mp_limb_t hi, lo; - mp_ptr tmp; + ulong hi, lo; + nn_ptr tmp; TMP_INIT; TMP_START; - tmp = TMP_ALLOC(2 * (2 * len - 1) * sizeof(mp_limb_t)); + tmp = TMP_ALLOC(2 * (2 * len - 1) * sizeof(ulong)); flint_mpn_zero(tmp, 2 * (2 * len - 1)); @@ -87,7 +87,7 @@ void _fmpz_poly_sqr_tiny2(fmpz * res, const fmpz * poly, slong len) lo = tmp[2 * i]; hi = tmp[2 * i + 1]; - if (((mp_limb_signed_t) hi) >= 0) + if (((slong) hi) >= 0) { fmpz_set_uiui(res + i, hi, lo); } @@ -159,7 +159,7 @@ void _fmpz_poly_sqr(fmpz * res, const fmpz * poly, slong len) } else { - mp_size_t limbs; + slong limbs; limbs = (bits + FLINT_BITS - 1) / FLINT_BITS; diff --git a/src/fmpz_poly/sqr_KS.c b/src/fmpz_poly/sqr_KS.c index 08e6d66337..82372874d2 100644 --- a/src/fmpz_poly/sqr_KS.c +++ b/src/fmpz_poly/sqr_KS.c @@ -21,7 +21,7 @@ _fmpz_poly_sqr_KS(fmpz *rop, const fmpz *op, slong len) const slong in_len = len; int neg; slong bits, limbs, loglen; - mp_limb_t *arr, *arr3; + ulong *arr, *arr3; slong sign = 0; FMPZ_VEC_NORM(op, len); @@ -46,11 +46,11 @@ _fmpz_poly_sqr_KS(fmpz *rop, const fmpz *op, slong len) bits = 2 * bits + loglen + sign; limbs = (bits * len - 1) / FLINT_BITS + 1; - arr = (mp_limb_t *) flint_calloc(limbs, sizeof(mp_limb_t)); + arr = (ulong *) flint_calloc(limbs, sizeof(ulong)); _fmpz_poly_bit_pack(arr, op, len, bits, neg); - arr3 = (mp_limb_t *) flint_malloc((2 * limbs) * sizeof(mp_limb_t)); + arr3 = (ulong *) flint_malloc((2 * limbs) * sizeof(ulong)); flint_mpn_sqr(arr3, arr, limbs); diff --git a/src/fmpz_poly/sqrlow.c b/src/fmpz_poly/sqrlow.c index 9a5255434b..2472c60367 100644 --- a/src/fmpz_poly/sqrlow.c +++ b/src/fmpz_poly/sqrlow.c @@ -46,13 +46,13 @@ void _fmpz_poly_sqrlow_tiny1(fmpz * res, const fmpz * poly, slong len, slong n) void _fmpz_poly_sqrlow_tiny2(fmpz * res, const fmpz * poly, slong len, slong n) { slong i, j, k, c, d; - mp_limb_t hi, lo; - mp_ptr tmp; + ulong hi, lo; + nn_ptr tmp; TMP_INIT; TMP_START; - tmp = TMP_ALLOC(2 * n * sizeof(mp_limb_t)); + tmp = TMP_ALLOC(2 * n * sizeof(ulong)); flint_mpn_zero(tmp, 2 * n); @@ -92,7 +92,7 @@ void _fmpz_poly_sqrlow_tiny2(fmpz * res, const fmpz * poly, slong len, slong n) lo = tmp[2 * i]; hi = tmp[2 * i + 1]; - if (((mp_limb_signed_t) hi) >= 0) + if (((slong) hi) >= 0) { fmpz_set_uiui(res + i, hi, lo); } @@ -167,7 +167,7 @@ void _fmpz_poly_sqrlow(fmpz * res, const fmpz * poly, slong len, slong n) } else { - mp_size_t limbs; + slong limbs; limbs = (bits + FLINT_BITS - 1) / FLINT_BITS; diff --git a/src/fmpz_poly/sqrlow_KS.c b/src/fmpz_poly/sqrlow_KS.c index bf5061bfee..29ee60a91d 100644 --- a/src/fmpz_poly/sqrlow_KS.c +++ b/src/fmpz_poly/sqrlow_KS.c @@ -19,7 +19,7 @@ void _fmpz_poly_sqrlow_KS(fmpz * res, const fmpz * poly, slong len, slong n) { int neg; slong bits, limbs, loglen, sign = 0; - mp_limb_t *arr_in, *arr_out; + ulong *arr_in, *arr_out; len = FLINT_MIN(len, n); @@ -50,8 +50,8 @@ void _fmpz_poly_sqrlow_KS(fmpz * res, const fmpz * poly, slong len, slong n) bits = 2 * bits + loglen + sign; limbs = (bits * len - 1) / FLINT_BITS + 1; - arr_in = flint_calloc(limbs, sizeof(mp_limb_t)); - arr_out = flint_malloc((2 * limbs) * sizeof(mp_limb_t)); + arr_in = flint_calloc(limbs, sizeof(ulong)); + arr_out = flint_malloc((2 * limbs) * sizeof(ulong)); 
_fmpz_poly_bit_pack(arr_in, poly, len, bits, neg); diff --git a/src/fmpz_poly/sqrlow_karatsuba_n.c b/src/fmpz_poly/sqrlow_karatsuba_n.c index 07fc1cf908..98aa620d25 100644 --- a/src/fmpz_poly/sqrlow_karatsuba_n.c +++ b/src/fmpz_poly/sqrlow_karatsuba_n.c @@ -92,7 +92,7 @@ void _fmpz_poly_sqrlow_karatsuba(fmpz * res, const fmpz * poly, slong len, slong copy = flint_malloc(n * sizeof(fmpz)); for (i = 0; i < len; i++) copy[i] = poly[i]; - flint_mpn_zero((mp_ptr) copy + len, n - len); + flint_mpn_zero((nn_ptr) copy + len, n - len); clear = 1; } diff --git a/src/fmpz_poly/sqrt_KS.c b/src/fmpz_poly/sqrt_KS.c index 76a43a145f..9df33b5ebd 100644 --- a/src/fmpz_poly/sqrt_KS.c +++ b/src/fmpz_poly/sqrt_KS.c @@ -22,7 +22,7 @@ _fmpz_poly_sqrt_KS(fmpz *rop, const fmpz *op, slong len) slong i, len2, m, rlimbs; int result = 1; slong bits, bits2, limbs, limbs2, loglen; - mp_limb_t *arr, *arr2, *arr3; + ulong *arr, *arr2, *arr3; /* the degree must be even */ if (len % 2 == 0) @@ -66,14 +66,14 @@ _fmpz_poly_sqrt_KS(fmpz *rop, const fmpz *op, slong len) limbs = (bits * len - 1) / FLINT_BITS + 1; - arr = (mp_limb_t *) flint_calloc(limbs, sizeof(mp_limb_t)); + arr = (ulong *) flint_calloc(limbs, sizeof(ulong)); _fmpz_poly_bit_pack(arr, op, len, bits, 0); limbs2 = (bits * len2 - 1) / FLINT_BITS + 1; - arr2 = (mp_limb_t *) flint_calloc(limbs2, sizeof(mp_limb_t)); + arr2 = (ulong *) flint_calloc(limbs2, sizeof(ulong)); - arr3 = (mp_limb_t *) flint_calloc(limbs, sizeof(mp_limb_t)); + arr3 = (ulong *) flint_calloc(limbs, sizeof(ulong)); while (limbs != 0 && arr[limbs - 1] == 0) limbs--; diff --git a/src/fmpz_poly/taylor_shift_horner.c b/src/fmpz_poly/taylor_shift_horner.c index db945d1a88..3adbb6d05a 100644 --- a/src/fmpz_poly/taylor_shift_horner.c +++ b/src/fmpz_poly/taylor_shift_horner.c @@ -102,11 +102,11 @@ _fmpz_poly_taylor_shift_horner(fmpz * poly, const fmpz_t c, slong n) } else if (n >= 5 && out_bits < 2 * FLINT_BITS) { - mp_ptr t; + nn_ptr t; TMP_INIT; TMP_START; - t = TMP_ALLOC(2 * n * sizeof(mp_limb_t)); + t = TMP_ALLOC(2 * n * sizeof(ulong)); for (i = 0; i < n; i++) fmpz_get_signed_uiui(t + 2*i + 1, t + 2*i + 0, poly + i); @@ -124,12 +124,12 @@ _fmpz_poly_taylor_shift_horner(fmpz * poly, const fmpz_t c, slong n) else if (n >= 3 + (slong) FLINT_BIT_COUNT(bits) && bits <= 100 * FLINT_BITS) { slong B = BLOCK_SIZE, ii, jj, d; - mp_ptr t; + nn_ptr t; TMP_INIT; TMP_START; d = (out_bits + FLINT_BITS - 1) / FLINT_BITS; - t = TMP_ALLOC(d * n * sizeof(mp_limb_t)); + t = TMP_ALLOC(d * n * sizeof(ulong)); for (i = 0; i < n; i++) fmpz_get_signed_ui_array(t + d * i, d, poly + i); diff --git a/src/fmpz_poly/taylor_shift_multi_mod_threaded.c b/src/fmpz_poly/taylor_shift_multi_mod_threaded.c index 65a60180d8..d8e6f96225 100644 --- a/src/fmpz_poly/taylor_shift_multi_mod_threaded.c +++ b/src/fmpz_poly/taylor_shift_multi_mod_threaded.c @@ -21,10 +21,10 @@ typedef struct { fmpz * vec; - mp_ptr * residues; + nn_ptr * residues; slong n0; slong n1; - mp_srcptr primes; + nn_srcptr primes; slong num_primes; int crt; /* reduce if 0, lift if 1 */ } @@ -34,13 +34,13 @@ void _fmpz_vec_multi_mod_ui_worker(void * arg_ptr) { mod_ui_arg_t arg = *((mod_ui_arg_t *) arg_ptr); - mp_ptr tmp; + nn_ptr tmp; slong i, j; fmpz_comb_t comb; fmpz_comb_temp_t comb_temp; - tmp = flint_malloc(sizeof(mp_limb_t) * arg.num_primes); + tmp = flint_malloc(sizeof(ulong) * arg.num_primes); fmpz_comb_init(comb, arg.primes, arg.num_primes); fmpz_comb_temp_init(comb_temp, comb); @@ -66,8 +66,8 @@ _fmpz_vec_multi_mod_ui_worker(void * arg_ptr) } void 
-_fmpz_vec_multi_mod_ui_threaded(mp_ptr * residues, fmpz * vec, slong len, - mp_srcptr primes, slong num_primes, int crt) +_fmpz_vec_multi_mod_ui_threaded(nn_ptr * residues, fmpz * vec, slong len, + nn_srcptr primes, slong num_primes, int crt) { mod_ui_arg_t * args; slong i, num_threads; @@ -84,7 +84,7 @@ _fmpz_vec_multi_mod_ui_threaded(mp_ptr * residues, fmpz * vec, slong len, args[i].residues = residues; args[i].n0 = (len * i) / (num_threads + 1); args[i].n1 = (len * (i + 1)) / (num_threads + 1); - args[i].primes = (mp_ptr) primes; + args[i].primes = (nn_ptr) primes; args[i].num_primes = num_primes; args[i].crt = crt; } @@ -105,9 +105,9 @@ _fmpz_vec_multi_mod_ui_threaded(mp_ptr * residues, fmpz * vec, slong len, typedef struct { - mp_ptr * residues; + nn_ptr * residues; slong len; - mp_srcptr primes; + nn_srcptr primes; slong num_primes; slong p0; slong p1; @@ -124,7 +124,7 @@ _fmpz_poly_multi_taylor_shift_worker(void * arg_ptr) for (i = arg.p0; i < arg.p1; i++) { nmod_t mod; - mp_limb_t p, cm; + ulong p, cm; p = arg.primes[i]; nmod_init(&mod, p); @@ -134,8 +134,8 @@ _fmpz_poly_multi_taylor_shift_worker(void * arg_ptr) } void -_fmpz_poly_multi_taylor_shift_threaded(mp_ptr * residues, slong len, - const fmpz_t c, mp_srcptr primes, slong num_primes) +_fmpz_poly_multi_taylor_shift_threaded(nn_ptr * residues, slong len, + const fmpz_t c, nn_srcptr primes, slong num_primes) { taylor_shift_arg_t * args; slong i, num_threads; @@ -152,7 +152,7 @@ _fmpz_poly_multi_taylor_shift_threaded(mp_ptr * residues, slong len, args[i].len = len; args[i].p0 = (num_primes * i) / (num_threads + 1); args[i].p1 = (num_primes * (i + 1)) / (num_threads + 1); - args[i].primes = (mp_ptr) primes; + args[i].primes = (nn_ptr) primes; args[i].num_primes = num_primes; args[i].c = (fmpz *) c; } @@ -175,8 +175,8 @@ void _fmpz_poly_taylor_shift_multi_mod(fmpz * poly, const fmpz_t c, slong len) { slong xbits, ybits, num_primes, i; - mp_ptr primes; - mp_ptr * residues; + nn_ptr primes; + nn_ptr * residues; if (len <= 1 || fmpz_is_zero(c)) return; @@ -202,15 +202,15 @@ _fmpz_poly_taylor_shift_multi_mod(fmpz * poly, const fmpz_t c, slong len) /* Use primes greater than 2^(FLINT_BITS-1) */ num_primes = (ybits + (FLINT_BITS - 1) - 1) / (FLINT_BITS - 1); - primes = flint_malloc(sizeof(mp_limb_t) * num_primes); + primes = flint_malloc(sizeof(ulong) * num_primes); primes[0] = n_nextprime(UWORD(1) << (FLINT_BITS - 1), 1); for (i = 1; i < num_primes; i++) primes[i] = n_nextprime(primes[i-1], 1); /* Space for poly reduced modulo the primes */ - residues = flint_malloc(sizeof(mp_ptr) * num_primes); + residues = flint_malloc(sizeof(nn_ptr) * num_primes); for (i = 0; i < num_primes; i++) - residues[i] = flint_malloc(sizeof(mp_limb_t) * len); + residues[i] = flint_malloc(sizeof(ulong) * len); _fmpz_vec_multi_mod_ui_threaded(residues, poly, len, primes, num_primes, 0); diff --git a/src/fmpz_poly/test/main.c b/src/fmpz_poly/test/main.c index 8f2579d9cf..872b444432 100644 --- a/src/fmpz_poly/test/main.c +++ b/src/fmpz_poly/test/main.c @@ -20,8 +20,6 @@ # undef ulong #endif -#include -#include #include #include diff --git a/src/fmpz_poly/test/t-CRT_ui.c b/src/fmpz_poly/test/t-CRT_ui.c index c4c85de765..6ce01f3797 100644 --- a/src/fmpz_poly/test/t-CRT_ui.c +++ b/src/fmpz_poly/test/t-CRT_ui.c @@ -26,7 +26,7 @@ TEST_FUNCTION_START(fmpz_poly_CRT_ui, state) fmpz_t mod; fmpz_poly_t A, B, C; nmod_poly_t Amod; - mp_limb_t primes[1000]; + ulong primes[1000]; bits = n_randint(state, 500) + 1; length = n_randint(state, 30) + 1; diff --git 
a/src/fmpz_poly/test/t-CRT_ui_unsigned.c b/src/fmpz_poly/test/t-CRT_ui_unsigned.c index d528cb7b94..81f454f092 100644 --- a/src/fmpz_poly/test/t-CRT_ui_unsigned.c +++ b/src/fmpz_poly/test/t-CRT_ui_unsigned.c @@ -26,7 +26,7 @@ TEST_FUNCTION_START(fmpz_poly_CRT_ui_unsigned, state) fmpz_t mod; fmpz_poly_t A, B, C; nmod_poly_t Amod; - mp_limb_t primes[1000]; + ulong primes[1000]; bits = n_randint(state, 500) + 1; length = n_randint(state, 30) + 1; diff --git a/src/fmpz_poly/test/t-bit_pack.c b/src/fmpz_poly/test/t-bit_pack.c index b94bf08830..8c7c02de61 100644 --- a/src/fmpz_poly/test/t-bit_pack.c +++ b/src/fmpz_poly/test/t-bit_pack.c @@ -24,8 +24,8 @@ TEST_FUNCTION_START(fmpz_poly_bit_pack, state) slong length = n_randint(state, 100) + 1; flint_bitcnt_t bits = n_randint(state, 300) + 2; - mp_ptr arr = (mp_ptr) flint_calloc((length * bits - 1) / FLINT_BITS + 1, - sizeof(mp_limb_t)); + nn_ptr arr = (nn_ptr) flint_calloc((length * bits - 1) / FLINT_BITS + 1, + sizeof(ulong)); int negate; fmpz_poly_init(a); @@ -64,8 +64,8 @@ TEST_FUNCTION_START(fmpz_poly_bit_pack, state) slong length = n_randint(state, 100) + 1; flint_bitcnt_t bits = n_randint(state, 300) + 1; - mp_ptr arr = (mp_ptr) flint_calloc((length * bits - 1) / FLINT_BITS + 1, - sizeof(mp_limb_t)); + nn_ptr arr = (nn_ptr) flint_calloc((length * bits - 1) / FLINT_BITS + 1, + sizeof(ulong)); fmpz_poly_init(a); fmpz_poly_init(b); diff --git a/src/fmpz_poly/test/t-cos_minpoly.c b/src/fmpz_poly/test/t-cos_minpoly.c index 4e8156f35c..2e8dbb7785 100644 --- a/src/fmpz_poly/test/t-cos_minpoly.c +++ b/src/fmpz_poly/test/t-cos_minpoly.c @@ -59,7 +59,7 @@ TEST_FUNCTION_START(fmpz_poly_cos_minpoly, state) for (n = 0; testdata[n] != -1; n++) { - mp_limb_t y; + ulong y; fmpz_poly_randtest(p, state, 20, 1 + n_randint(state, 100)); diff --git a/src/fmpz_poly/test/t-evaluate_mod.c b/src/fmpz_poly/test/t-evaluate_mod.c index 29c17c380f..e75efd67bc 100644 --- a/src/fmpz_poly/test/t-evaluate_mod.c +++ b/src/fmpz_poly/test/t-evaluate_mod.c @@ -24,7 +24,7 @@ TEST_FUNCTION_START(fmpz_poly_evaluate_mod, state) { fmpz_t b, s; fmpz_poly_t f; - mp_limb_t a, n, r; + ulong a, n, r; fmpz_poly_init(f); fmpz_poly_randtest(f, state, n_randint(state, 10), 20); diff --git a/src/fmpz_poly/test/t-get_nmod_poly.c b/src/fmpz_poly/test/t-get_nmod_poly.c index 404ed184b3..34fd7e9198 100644 --- a/src/fmpz_poly/test/t-get_nmod_poly.c +++ b/src/fmpz_poly/test/t-get_nmod_poly.c @@ -24,7 +24,7 @@ TEST_FUNCTION_START(fmpz_poly_get_nmod_poly, state) fmpz_poly_t A; nmod_poly_t M, M2; slong length; - mp_limb_t mod; + ulong mod; length = n_randint(state, 50); diff --git a/src/fmpz_poly/test/t-swinnerton_dyer.c b/src/fmpz_poly/test/t-swinnerton_dyer.c index 837dc8ce35..fbc9a98466 100644 --- a/src/fmpz_poly/test/t-swinnerton_dyer.c +++ b/src/fmpz_poly/test/t-swinnerton_dyer.c @@ -12,7 +12,7 @@ #include "test_helpers.h" #include "fmpz_poly.h" -static const mp_limb_t known_values[] = +static const ulong known_values[] = { UWORD(2147483629), UWORD(1073742093), @@ -30,7 +30,7 @@ static const mp_limb_t known_values[] = TEST_FUNCTION_START(fmpz_poly_swinnerton_dyer, state) { fmpz_poly_t S; - mp_limb_t r; + ulong r; slong n; for (n = 0; n <= 10; n++) diff --git a/src/fmpz_poly/xgcd_modular.c b/src/fmpz_poly/xgcd_modular.c index 16c47f628c..20b14c5dd2 100644 --- a/src/fmpz_poly/xgcd_modular.c +++ b/src/fmpz_poly/xgcd_modular.c @@ -21,10 +21,10 @@ void _fmpz_poly_xgcd_modular(fmpz_t r, fmpz * s, fmpz * t, const fmpz * poly1, slong len1, const fmpz * poly2, slong len2) { - mp_ptr G, S, T, A, B, T1, T2; 
+ nn_ptr G, S, T, A, B, T1, T2; fmpz_t prod; int stabilised = 0, first; - mp_limb_t p; + ulong p; flint_bitcnt_t s_bits = 0, t_bits = 0; /* Compute resultant of input polys */ @@ -55,7 +55,7 @@ void _fmpz_poly_xgcd_modular(fmpz_t r, fmpz * s, fmpz * t, for (;;) { - mp_limb_t R; + ulong R; nmod_t mod; /* Get next prime */ @@ -97,7 +97,7 @@ void _fmpz_poly_xgcd_modular(fmpz_t r, fmpz * s, fmpz * t, if (!stabilised) /* Need to keep computing xgcds mod p */ { - mp_limb_t RGinv; + ulong RGinv; /* Compute xgcd mod p */ _nmod_poly_xgcd(G, S, T, A, len1, B, len2, mod); diff --git a/src/fmpz_poly_factor/factor_cubic.c b/src/fmpz_poly_factor/factor_cubic.c index b57b958b1e..4dd44063a3 100644 --- a/src/fmpz_poly_factor/factor_cubic.c +++ b/src/fmpz_poly_factor/factor_cubic.c @@ -133,7 +133,7 @@ static slong binary_sqrt(fmpz_t z, fmpz_t x, slong p) } -static mp_limb_t fmpz_fdiv_r_2exp_flint_bits(const fmpz_t a) +static ulong fmpz_fdiv_r_2exp_flint_bits(const fmpz_t a) { if (COEFF_IS_MPZ(*a)) { @@ -223,7 +223,7 @@ static slong binary_cubic_lift( { slong n; fmpz_t r2, c, d, t; - mp_limb_t A, B, C, D, INV, R, R2, S, E; + ulong A, B, C, D, INV, R, R2, S, E; /* start with a factorization mod 2^n */ n = 1; @@ -238,7 +238,7 @@ static slong binary_cubic_lift( while (n <= FLINT_BITS/2) { - mp_limb_t mask = (UWORD(1) << n); + ulong mask = (UWORD(1) << n); C = (A - (S - R2*E)) >> n; D = (B - (R*S)) >> n; R += (((D - C*R)*INV) % mask) << n; @@ -353,11 +353,11 @@ static slong binary_cubic_lift_continue( /* return f(0)*...*f(largest_prime - 1) mod prime_product */ -static mp_limb_t eval_product_mod_n( +static ulong eval_product_mod_n( const fmpz_t a, const fmpz_t b, - mp_limb_t prime_product, - mp_limb_t largest_prime) + ulong prime_product, + ulong largest_prime) { nmod_t ctx; ulong A, B, F, G, H, P, i; diff --git a/src/fmpz_poly_factor/factor_zassenhaus.c b/src/fmpz_poly_factor/factor_zassenhaus.c index b0b203e991..f30be998e7 100644 --- a/src/fmpz_poly_factor/factor_zassenhaus.c +++ b/src/fmpz_poly_factor/factor_zassenhaus.c @@ -118,7 +118,7 @@ void _fmpz_poly_factor_zassenhaus(fmpz_poly_factor_t final_fac, { slong i, j; slong r = lenF; - mp_limb_t p = 2; + ulong p = 2; nmod_poly_t d, g, t; nmod_poly_factor_t fac; zassenhaus_prune_t Z; diff --git a/src/fmpz_poly_factor/test/main.c b/src/fmpz_poly_factor/test/main.c index ffa9bec4d1..32821f0337 100644 --- a/src/fmpz_poly_factor/test/main.c +++ b/src/fmpz_poly_factor/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-factor.c" diff --git a/src/fmpz_poly_mat/test/main.c b/src/fmpz_poly_mat/test/main.c index 556a04277c..5bb494bf16 100644 --- a/src/fmpz_poly_mat/test/main.c +++ b/src/fmpz_poly_mat/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/fmpz_poly_q/test/main.c b/src/fmpz_poly_q/test/main.c index 74e3095d50..054746400f 100644 --- a/src/fmpz_poly_q/test/main.c +++ b/src/fmpz_poly_q/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/fmpz_types.h b/src/fmpz_types.h index 6df30067dc..fec885dd82 100644 --- a/src/fmpz_types.h +++ b/src/fmpz_types.h @@ -32,7 +32,7 @@ typedef fmpz_factor_struct fmpz_factor_t[1]; typedef struct { - mp_ptr dinv; + nn_ptr dinv; slong n; flint_bitcnt_t norm; } fmpz_preinvn_struct; diff --git a/src/fmpz_vec.h b/src/fmpz_vec.h index a4601f322e..94c294ab66 100644 --- a/src/fmpz_vec.h +++ b/src/fmpz_vec.h @@ -90,7 +90,7 @@ slong _fmpz_vec_max_bits_ref(const fmpz * vec, slong len); void _fmpz_vec_sum_max_bits(slong * sumabs, slong * maxabs, const fmpz * coeffs, slong length); -mp_size_t _fmpz_vec_max_limbs(const fmpz * vec, slong len); +slong _fmpz_vec_max_limbs(const fmpz * vec, slong len); void _fmpz_vec_height(fmpz_t height, const fmpz * vec, slong len); @@ -111,16 +111,16 @@ int _fmpz_vec_read(fmpz ** vec, slong * len); /* Conversions *************************************************************/ void _fmpz_vec_set_nmod_vec(fmpz * res, - mp_srcptr poly, slong len, nmod_t mod); + nn_srcptr poly, slong len, nmod_t mod); -void _fmpz_vec_get_nmod_vec(mp_ptr res, +void _fmpz_vec_get_nmod_vec(nn_ptr res, const fmpz * poly, slong len, nmod_t mod); -void _fmpz_vec_get_fft(mp_limb_t ** coeffs_f, +void _fmpz_vec_get_fft(ulong ** coeffs_f, const fmpz * coeffs_m, slong l, slong length); void _fmpz_vec_set_fft(fmpz * coeffs_m, slong length, - const mp_ptr * coeffs_f, slong limbs, slong sign); + const nn_ptr * coeffs_f, slong limbs, slong sign); slong _fmpz_vec_get_d_vec_2exp(double * appv, const fmpz * vec, slong len); diff --git a/src/fmpz_vec/dot.c b/src/fmpz_vec/dot.c index 51407d81e8..2488cca235 100644 --- a/src/fmpz_vec/dot.c +++ b/src/fmpz_vec/dot.c @@ -79,18 +79,18 @@ _fmpz_vec_dot_general_naive(fmpz_t res, const fmpz_t initial, do { \ if ((an) == 0) \ { \ - FLINT_SWAP(mp_ptr, s, b); \ + FLINT_SWAP(nn_ptr, s, b); \ (sn) = (bn); \ } \ else if ((an) >= (bn)) \ { \ - mp_limb_t __cy; \ + ulong __cy; \ (s)[(an)] = __cy = mpn_add((s), (a), (an), (b), (bn)); \ (sn) = (an) + (__cy != 0); \ } \ else \ { \ - mp_limb_t __cy; \ + ulong __cy; \ (s)[(bn)] = __cy = mpn_add((s), (b), (bn), (a), (an)); \ (sn) = (bn) + (__cy != 0); \ } \ @@ -99,7 +99,7 @@ _fmpz_vec_dot_general_naive(fmpz_t res, const fmpz_t initial, /* (s,sn) = (s,sn) + (a,an) * b. Allows sn == 0 but not an == 0. */ #define MPN_ADDMUL_1(s, sn, a, an, b) \ do { \ - mp_limb_t __cy; \ + ulong __cy; \ if ((sn) >= (an)) \ { \ FLINT_ASSERT((an) != 0); \ @@ -123,7 +123,7 @@ _fmpz_vec_dot_general_naive(fmpz_t res, const fmpz_t initial, FLINT_STATIC_NOINLINE -void _fmpz_set_mpn(fmpz_t res, mp_srcptr x, mp_size_t xn, int neg) +void _fmpz_set_mpn(fmpz_t res, nn_srcptr x, slong xn, int neg) { if (xn <= 1 && x[0] <= COEFF_MAX) { @@ -140,27 +140,27 @@ void _fmpz_vec_dot_general(fmpz_t res, const fmpz_t initial, int subtract, const fmpz * a, const fmpz * b, int reverse, slong len) { - mp_limb_t tmp1[INITIAL_ALLOC + 2]; - mp_limb_t tmp2[INITIAL_ALLOC + 2]; - mp_limb_t tmp3[INITIAL_ALLOC + 2]; - mp_size_t alloc = INITIAL_ALLOC; - mp_size_t new_alloc; + ulong tmp1[INITIAL_ALLOC + 2]; + ulong tmp2[INITIAL_ALLOC + 2]; + ulong tmp3[INITIAL_ALLOC + 2]; + slong alloc = INITIAL_ALLOC; + slong new_alloc; /* We maintain separate sums for small terms, large positive terms, and large negative terms, the idea being to have fewer adjustments in the main loop in exchange for some added complexity combining things in the end. 
Should profile alternative strategies. */ - mp_limb_t s0 = 0, s1 = 0, s2 = 0; - mp_ptr neg = tmp1; - mp_ptr pos = tmp2; - mp_size_t posn = 0, negn = 0; + ulong s0 = 0, s1 = 0, s2 = 0; + nn_ptr neg = tmp1; + nn_ptr pos = tmp2; + slong posn = 0, negn = 0; /* Temporary space for products. */ - mp_ptr t = tmp3; - mp_size_t tn; + nn_ptr t = tmp3; + slong tn; - mp_ptr tmp_heap = NULL; + nn_ptr tmp_heap = NULL; slong i; @@ -199,9 +199,9 @@ _fmpz_vec_dot_general(fmpz_t res, const fmpz_t initial, int subtract, if (initial != NULL) { fmpz ca; - mp_limb_t atmp; - mp_srcptr ap; - mp_size_t an; + ulong atmp; + nn_srcptr ap; + slong an; int aneg; ca = *initial; @@ -222,7 +222,7 @@ _fmpz_vec_dot_general(fmpz_t res, const fmpz_t initial, int subtract, { new_alloc = an + 4; - tmp_heap = flint_malloc(3 * (new_alloc + 2) * sizeof(mp_limb_t)); + tmp_heap = flint_malloc(3 * (new_alloc + 2) * sizeof(ulong)); t = tmp_heap; pos = t + (new_alloc + 2); @@ -247,10 +247,10 @@ _fmpz_vec_dot_general(fmpz_t res, const fmpz_t initial, int subtract, for (i = 0; i < len; i++) { fmpz ca, cb; - mp_limb_t atmp, btmp; - mp_srcptr ap, bp; - mp_size_t an, bn; - mp_limb_t cy; + ulong atmp, btmp; + nn_srcptr ap, bp; + slong an, bn; + ulong cy; int aneg, bneg; ca = a[i]; @@ -263,7 +263,7 @@ _fmpz_vec_dot_general(fmpz_t res, const fmpz_t initial, int subtract, if (!COEFF_IS_MPZ(ca) && !COEFF_IS_MPZ(cb)) { - mp_limb_t hi, lo; + ulong hi, lo; smul_ppmm(hi, lo, ca, cb); add_sssaaaaaa(s2, s1, s0, s2, s1, s0, FLINT_SIGN_EXT(hi), hi, lo); continue; @@ -275,11 +275,11 @@ _fmpz_vec_dot_general(fmpz_t res, const fmpz_t initial, int subtract, if (tn > alloc) { - mp_ptr p1, p2, p3; + nn_ptr p1, p2, p3; new_alloc = FLINT_MAX(3 * alloc / 2, tn + 4); - p1 = flint_malloc(3 * (new_alloc + 2) * sizeof(mp_limb_t)); + p1 = flint_malloc(3 * (new_alloc + 2) * sizeof(ulong)); p2 = p1 + (new_alloc + 2); p3 = p2 + (new_alloc + 2); @@ -289,7 +289,7 @@ _fmpz_vec_dot_general(fmpz_t res, const fmpz_t initial, int subtract, pos = p2; neg = p3; - FLINT_SWAP(mp_ptr, tmp_heap, p1); + FLINT_SWAP(nn_ptr, tmp_heap, p1); if (p1 != NULL) flint_free(p1); @@ -299,13 +299,13 @@ _fmpz_vec_dot_general(fmpz_t res, const fmpz_t initial, int subtract, if (an < bn) { - FLINT_SWAP(mp_srcptr, ap, bp); - FLINT_SWAP(mp_size_t, an, bn); + FLINT_SWAP(nn_srcptr, ap, bp); + FLINT_SWAP(slong, an, bn); } if (bn == 1) { - mp_limb_t b0 = bp[0]; + ulong b0 = bp[0]; if (aneg ^ bneg) MPN_ADDMUL_1(neg, negn, ap, an, b0); diff --git a/src/fmpz_vec/get_fft.c b/src/fmpz_vec/get_fft.c index cf60c1bed5..eafe0ba3f2 100644 --- a/src/fmpz_vec/get_fft.c +++ b/src/fmpz_vec/get_fft.c @@ -15,11 +15,11 @@ #include "fmpz.h" #include "fmpz_vec.h" -static void _fmpz_vec_get_fft_coeff(mp_limb_t ** coeffs_f, +static void _fmpz_vec_get_fft_coeff(ulong ** coeffs_f, const fmpz * coeffs_m, slong l, slong i) { slong size_f = l + 1; - mp_limb_t * coeff; + ulong * coeff; slong size_j, c; int signed_c; c = coeffs_m[i]; @@ -33,10 +33,10 @@ static void _fmpz_vec_get_fft_coeff(mp_limb_t ** coeffs_f, { signed_c = 1; c = -c; - coeff = (mp_limb_t *) &c; + coeff = (ulong *) &c; } else - coeff = (mp_limb_t *) coeffs_m + i; + coeff = (ulong *) coeffs_m + i; } else /* coeff is an mpz_t */ { @@ -64,7 +64,7 @@ static void _fmpz_vec_get_fft_coeff(mp_limb_t ** coeffs_f, typedef struct { - mp_limb_t ** coeffs_f; + ulong ** coeffs_f; const fmpz * coeffs_m; slong limbs; } @@ -76,7 +76,7 @@ worker(slong i, work_t * work) _fmpz_vec_get_fft_coeff(work->coeffs_f, work->coeffs_m, work->limbs, i); } -void _fmpz_vec_get_fft(mp_limb_t ** 
coeffs_f, +void _fmpz_vec_get_fft(ulong ** coeffs_f, const fmpz * coeffs_m, slong limbs, slong length) { work_t work; diff --git a/src/fmpz_vec/get_nmod_vec.c b/src/fmpz_vec/get_nmod_vec.c index 50d00e8dfb..ca1ca2c968 100644 --- a/src/fmpz_vec/get_nmod_vec.c +++ b/src/fmpz_vec/get_nmod_vec.c @@ -16,7 +16,7 @@ #include "nmod_poly.h" void -_fmpz_vec_get_nmod_vec(mp_ptr res, const fmpz * poly, slong len, nmod_t mod) +_fmpz_vec_get_nmod_vec(nn_ptr res, const fmpz * poly, slong len, nmod_t mod) { slong i; diff --git a/src/fmpz_vec/height_index.c b/src/fmpz_vec/height_index.c index cdc91dc6b3..642ab044d3 100644 --- a/src/fmpz_vec/height_index.c +++ b/src/fmpz_vec/height_index.c @@ -23,7 +23,7 @@ _fmpz_vec_height_index(const fmpz * vec, slong len) else { fmpz c; - mp_srcptr max_d; + nn_srcptr max_d; slong max_mpz_limbs, i, max_i, max_coeff, mpz_limbs; max_coeff = 0; diff --git a/src/fmpz_vec/max_bits.c b/src/fmpz_vec/max_bits.c index f7454a4d1c..0e014043f4 100644 --- a/src/fmpz_vec/max_bits.c +++ b/src/fmpz_vec/max_bits.c @@ -17,8 +17,8 @@ slong _fmpz_vec_max_bits(const fmpz * vec, slong len) { slong i, sign, max_limbs; - mp_limb_t max_limb; - mp_size_t limbs; + ulong max_limb; + slong limbs; sign = 1; max_limb = 0; diff --git a/src/fmpz_vec/max_limbs.c b/src/fmpz_vec/max_limbs.c index ca83fe70d1..2fed677d6a 100644 --- a/src/fmpz_vec/max_limbs.c +++ b/src/fmpz_vec/max_limbs.c @@ -13,11 +13,11 @@ #include "fmpz.h" #include "fmpz_vec.h" -mp_size_t +slong _fmpz_vec_max_limbs(const fmpz * vec, slong len) { slong i; - mp_size_t limbs, max_limbs = 0; + slong limbs, max_limbs = 0; for (i = 0; i < len; i++) { diff --git a/src/fmpz_vec/set_fft.c b/src/fmpz_vec/set_fft.c index 1152b7c4a8..dec51d37c7 100644 --- a/src/fmpz_vec/set_fft.c +++ b/src/fmpz_vec/set_fft.c @@ -16,17 +16,17 @@ #include "fmpz_vec.h" static void _fmpz_vec_set_fft_coeff(fmpz * coeffs_m, slong i, - const mp_ptr * coeffs_f, slong limbs, slong sign) + const nn_ptr * coeffs_f, slong limbs, slong sign) { slong size; - mp_limb_t * data; + ulong * data; mpz_ptr mcoeffs_m; coeffs_m += i; if (sign) { - mp_limb_t halflimb = UWORD(1) << (FLINT_BITS - 1); + ulong halflimb = UWORD(1) << (FLINT_BITS - 1); { mcoeffs_m = _fmpz_promote(coeffs_m); @@ -68,7 +68,7 @@ static void _fmpz_vec_set_fft_coeff(fmpz * coeffs_m, slong i, typedef struct { fmpz * coeffs_m; - const mp_ptr * coeffs_f; + const nn_ptr * coeffs_f; slong limbs; int sign; } @@ -81,7 +81,7 @@ worker(slong i, work_t * work) } void _fmpz_vec_set_fft(fmpz * coeffs_m, slong length, - const mp_ptr * coeffs_f, slong limbs, slong sign) + const nn_ptr * coeffs_f, slong limbs, slong sign) { work_t work; slong max_threads; diff --git a/src/fmpz_vec/set_nmod_vec.c b/src/fmpz_vec/set_nmod_vec.c index 270fea0613..b4ecb86dd5 100644 --- a/src/fmpz_vec/set_nmod_vec.c +++ b/src/fmpz_vec/set_nmod_vec.c @@ -16,7 +16,7 @@ #include "nmod_poly.h" void -_fmpz_vec_set_nmod_vec(fmpz * res, mp_srcptr poly, slong len, nmod_t mod) +_fmpz_vec_set_nmod_vec(fmpz * res, nn_srcptr poly, slong len, nmod_t mod) { slong i; diff --git a/src/fmpz_vec/test/main.c b/src/fmpz_vec/test/main.c index cc2b0a010f..2d96f8555a 100644 --- a/src/fmpz_vec/test/main.c +++ b/src/fmpz_vec/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/fmpz_vec/test/t-get_set_fft.c b/src/fmpz_vec/test/t-get_set_fft.c index eccb361367..e2de3e3305 100644 --- a/src/fmpz_vec/test/t-get_set_fft.c +++ b/src/fmpz_vec/test/t-get_set_fft.c @@ -25,15 +25,15 @@ TEST_FUNCTION_START(fmpz_vec_get_set_fft, state) fmpz * a, * b; flint_bitcnt_t bits; slong len, limbs; - mp_limb_t ** ii, * ptr; + ulong ** ii, * ptr; slong i, bt; bits = n_randint(state, 300) + 1; len = n_randint(state, 300) + 1; limbs = 2*((bits - 1)/FLINT_BITS + 1); - ii = flint_malloc((len + len*(limbs + 1))*sizeof(mp_limb_t)); - ptr = (mp_limb_t *) ii + len; + ii = flint_malloc((len + len*(limbs + 1))*sizeof(ulong)); + ptr = (ulong *) ii + len; for (i = 0; i < len; i++, ptr += (limbs + 1)) ii[i] = ptr; @@ -71,15 +71,15 @@ TEST_FUNCTION_START(fmpz_vec_get_set_fft, state) fmpz * a, * b; flint_bitcnt_t bits; slong len, limbs; - mp_limb_t ** ii, * ptr; + ulong ** ii, * ptr; slong i, bt; bits = n_randint(state, 300) + 1; len = n_randint(state, 300) + 1; limbs = 2*((bits - 1)/FLINT_BITS + 1); - ii = flint_malloc((len + len*(limbs + 1))*sizeof(mp_limb_t)); - ptr = (mp_limb_t *) ii + len; + ii = flint_malloc((len + len*(limbs + 1))*sizeof(ulong)); + ptr = (ulong *) ii + len; for (i = 0; i < len; i++, ptr += (limbs + 1)) ii[i] = ptr; diff --git a/src/fmpz_vec/test/t-get_set_nmod_vec.c b/src/fmpz_vec/test/t-get_set_nmod_vec.c index 3fb3fa85f6..b2cf09b8bf 100644 --- a/src/fmpz_vec/test/t-get_set_nmod_vec.c +++ b/src/fmpz_vec/test/t-get_set_nmod_vec.c @@ -24,13 +24,13 @@ TEST_FUNCTION_START(fmpz_vec_get_set_nmod_vec, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { fmpz *a, *b; - mp_ptr c; + nn_ptr c; nmod_t mod; slong i; - mp_limb_t t; + ulong t; slong len = n_randint(state, 100); - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); a = _fmpz_vec_init(len); b = _fmpz_vec_init(len); diff --git a/src/fmpz_vec/test/t-max_limbs.c b/src/fmpz_vec/test/t-max_limbs.c index d0dfa7c72a..093ff64d66 100644 --- a/src/fmpz_vec/test/t-max_limbs.c +++ b/src/fmpz_vec/test/t-max_limbs.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(fmpz_vec_max_limbs, state) { fmpz *a; slong len, bits; - mp_size_t limbs, limbs2; + slong limbs, limbs2; len = n_randint(state, 100); diff --git a/src/fmpzi.h b/src/fmpzi.h index ca066bf55d..f8415f8cd4 100644 --- a/src/fmpzi.h +++ b/src/fmpzi.h @@ -92,7 +92,7 @@ fmpzi_set_si_si(fmpzi_t res, slong a, slong b) } FMPZI_INLINE void -fmpzi_randtest(fmpzi_t res, flint_rand_t state, mp_bitcnt_t bits) +fmpzi_randtest(fmpzi_t res, flint_rand_t state, flint_bitcnt_t bits) { fmpz_randtest(fmpzi_realref(res), state, bits); fmpz_randtest(fmpzi_imagref(res), state, bits); diff --git a/src/fmpzi/test/main.c b/src/fmpzi/test/main.c index ad5d8cf141..de0e9cc7c5 100644 --- a/src/fmpzi/test/main.c +++ b/src/fmpzi/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add_sub.c" diff --git a/src/fq/test/main.c b/src/fq/test/main.c index b924a8db23..7b9b555acd 100644 --- a/src/fq/test/main.c +++ b/src/fq/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/fq_default/ctx.c b/src/fq_default/ctx.c index 8cffa5175d..778bb3df15 100644 --- a/src/fq_default/ctx.c +++ b/src/fq_default/ctx.c @@ -71,7 +71,7 @@ void fq_default_ctx_init_modulus_type(fq_default_ctx_t ctx, } else if (type == FQ_DEFAULT_NMOD || (type == 0 && d == 1 && fmpz_abs_fits_ui(p))) { - mp_limb_t c0, c1, a; + ulong c0, c1, a; nmod_t mod; nmod_init(&mod, fmpz_get_ui(p)); @@ -126,7 +126,7 @@ void fq_default_ctx_init_modulus_nmod_type(fq_default_ctx_t ctx, } else if (type == FQ_DEFAULT_NMOD || (type == 0 && d == 1)) { - mp_limb_t c0, c1, a; + ulong c0, c1, a; nmod_t mod; nmod_init(&mod, p); @@ -142,7 +142,7 @@ void fq_default_ctx_init_modulus_nmod_type(fq_default_ctx_t ctx, else if (type == FQ_DEFAULT_FMPZ_MOD || (type == 0 && d == 1)) { fmpz_t pp; - mp_limb_t c0, c1, a; + ulong c0, c1, a; c0 = modulus->coeffs[0]; c1 = modulus->coeffs[1]; diff --git a/src/fq_default/test/main.c b/src/fq_default/test/main.c index a459b07fb3..16ec6ae972 100644 --- a/src/fq_default/test/main.c +++ b/src/fq_default/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-ctx_init.c" diff --git a/src/fq_default_mat/test/main.c b/src/fq_default_mat/test/main.c index ce0a1b0bb5..a7d21aee26 100644 --- a/src/fq_default_mat/test/main.c +++ b/src/fq_default_mat/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-init.c" diff --git a/src/fq_default_poly/test/main.c b/src/fq_default_poly/test/main.c index c77a89ea35..1d711b383c 100644 --- a/src/fq_default_poly/test/main.c +++ b/src/fq_default_poly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-init.c" diff --git a/src/fq_default_poly_factor/test/main.c b/src/fq_default_poly_factor/test/main.c index 3e77de79e8..28f31e42ef 100644 --- a/src/fq_default_poly_factor/test/main.c +++ b/src/fq_default_poly_factor/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-init.c" diff --git a/src/fq_embed/test/main.c b/src/fq_embed/test/main.c index 9761ca58af..db16993ed3 100644 --- a/src/fq_embed/test/main.c +++ b/src/fq_embed/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-composition_matrix.c" diff --git a/src/fq_embed_templates/matrices.c b/src/fq_embed_templates/matrices.c index 3ef7d86b38..a00249b75f 100644 --- a/src/fq_embed_templates/matrices.c +++ b/src/fq_embed_templates/matrices.c @@ -116,9 +116,9 @@ int __fmpz_mod_inv_degree(fmpz_t invd, slong d, const fmpz_t p) } static inline -int __nmod_inv_degree(fmpz_t invd, slong d, mp_limb_t p) +int __nmod_inv_degree(fmpz_t invd, slong d, ulong p) { - mp_limb_t ud = d % p; + ulong ud = d % p; if (!ud) return 0; ud = n_invmod(ud, p); diff --git a/src/fq_mat/test/main.c b/src/fq_mat/test/main.c index 357b77d853..9b3ba2d59b 100644 --- a/src/fq_mat/test/main.c +++ b/src/fq_mat/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add_sub.c" diff --git a/src/fq_nmod.h b/src/fq_nmod.h index 14ea7e2f7f..8335be0730 100644 --- a/src/fq_nmod.h +++ b/src/fq_nmod.h @@ -74,9 +74,9 @@ void fq_nmod_init2(fq_nmod_t rop, const fq_nmod_ctx_t ctx); void fq_nmod_clear(fq_nmod_t rop, const fq_nmod_ctx_t FLINT_UNUSED(ctx)); -void _fq_nmod_sparse_reduce(mp_limb_t *R, slong lenR, const fq_nmod_ctx_t ctx); -void _fq_nmod_dense_reduce(mp_limb_t* R, slong lenR, const fq_nmod_ctx_t ctx); -void _fq_nmod_reduce(mp_limb_t* R, slong lenR, const fq_nmod_ctx_t ctx); +void _fq_nmod_sparse_reduce(ulong *R, slong lenR, const fq_nmod_ctx_t ctx); +void _fq_nmod_dense_reduce(ulong* R, slong lenR, const fq_nmod_ctx_t ctx); +void _fq_nmod_reduce(ulong* R, slong lenR, const fq_nmod_ctx_t ctx); void fq_nmod_reduce(fq_nmod_t rop, const fq_nmod_ctx_t ctx); /* Basic arithmetic **********************************************************/ @@ -107,7 +107,7 @@ void fq_nmod_sqr(fq_nmod_t rop, const fq_nmod_t op, const fq_nmod_ctx_t ctx); void fq_nmod_inv(fq_nmod_t rop, const fq_nmod_t op1, const fq_nmod_ctx_t ctx); -void _fq_nmod_pow(mp_limb_t *rop, const mp_limb_t *op, +void _fq_nmod_pow(ulong *rop, const ulong *op, slong len, const fmpz_t e, const fq_nmod_ctx_t ctx); void fq_nmod_pow(fq_nmod_t rop, const fq_nmod_t op1, @@ -177,17 +177,17 @@ char * fq_nmod_get_str_pretty(const fq_nmod_t op, const fq_nmod_ctx_t ctx); /* Special functions *********************************************************/ -void _fq_nmod_trace(fmpz_t rop, const mp_limb_t *op, slong len, +void _fq_nmod_trace(fmpz_t rop, const ulong *op, slong len, const fq_nmod_ctx_t ctx); void fq_nmod_trace(fmpz_t rop, const fq_nmod_t op, const fq_nmod_ctx_t ctx); -void _fq_nmod_frobenius(mp_limb_t *rop, const mp_limb_t *op, slong len, slong e, +void _fq_nmod_frobenius(ulong *rop, const ulong *op, slong len, slong e, const fq_nmod_ctx_t ctx); void fq_nmod_frobenius(fq_nmod_t rop, const fq_nmod_t op, slong e, const fq_nmod_ctx_t ctx); -void _fq_nmod_norm(fmpz_t rop, const mp_limb_t *op, slong len, +void _fq_nmod_norm(fmpz_t rop, const ulong *op, slong len, const fq_nmod_ctx_t ctx); void fq_nmod_norm(fmpz_t rop, const fq_nmod_t op, const fq_nmod_ctx_t ctx); diff --git a/src/fq_nmod/assignments.c b/src/fq_nmod/assignments.c index 7b9a4b597d..9e79c1176e 100644 --- a/src/fq_nmod/assignments.c +++ b/src/fq_nmod/assignments.c @@ -23,7 +23,7 @@ void fq_nmod_set(fq_nmod_t rop, const fq_nmod_t op, const fq_nmod_ctx_t FLINT_UN void fq_nmod_set_si(fq_nmod_t rop, const slong x, const fq_nmod_ctx_t ctx) { - mp_limb_t rx = x < 0 ? -x : x; + ulong rx = x < 0 ? 
-x : x; rx = n_mod2_preinv(rx, ctx->mod.n, ctx->mod.ninv); if (x < 0) rx = ctx->mod.n - rx; diff --git a/src/fq_nmod/ctx_init.c b/src/fq_nmod/ctx_init.c index 583a4b5543..06cccbe9b1 100644 --- a/src/fq_nmod/ctx_init.c +++ b/src/fq_nmod/ctx_init.c @@ -90,7 +90,7 @@ fq_nmod_ctx_init_randtest(fq_nmod_ctx_t ctx, flint_rand_t state, int type) if (n_randint(state, 2)) { nmod_poly_t modulus; - mp_limb_t x; + ulong x; nmod_poly_init(modulus, ctx->mod.n); nmod_poly_set(modulus, ctx->modulus); diff --git a/src/fq_nmod/ctx_init_modulus.c b/src/fq_nmod/ctx_init_modulus.c index 9a2f77c249..b9edbfe802 100644 --- a/src/fq_nmod/ctx_init_modulus.c +++ b/src/fq_nmod/ctx_init_modulus.c @@ -21,7 +21,7 @@ fq_nmod_ctx_init_modulus(fq_nmod_ctx_t ctx, const nmod_poly_t modulus, const cha { slong nz; int i, j; - mp_limb_t inv; + ulong inv; ctx->mod.n = modulus->mod.n; ctx->mod.ninv = modulus->mod.ninv; @@ -39,7 +39,7 @@ fq_nmod_ctx_init_modulus(fq_nmod_ctx_t ctx, const nmod_poly_t modulus, const cha ctx->len = nz; ctx->a = _nmod_vec_init(ctx->len); - ctx->j = flint_malloc(ctx->len * sizeof(mp_limb_t)); + ctx->j = flint_malloc(ctx->len * sizeof(ulong)); inv = n_invmod(modulus->coeffs[modulus->length - 1], ctx->mod.n); diff --git a/src/fq_nmod/frobenius.c b/src/fq_nmod/frobenius.c index 0f72b0bb48..1dc60a7f14 100644 --- a/src/fq_nmod/frobenius.c +++ b/src/fq_nmod/frobenius.c @@ -20,7 +20,7 @@ raised to the e-th power, assuming that neither op nor e are zero. */ -void _fq_nmod_frobenius(mp_limb_t *rop, const mp_limb_t *op, slong len, slong e, +void _fq_nmod_frobenius(ulong *rop, const ulong *op, slong len, slong e, const fq_nmod_ctx_t ctx) { const slong d = ctx->j[ctx->len - 1]; @@ -59,7 +59,7 @@ void fq_nmod_frobenius(fq_nmod_t rop, const fq_nmod_t op, slong e, const fq_nmod } else { - mp_limb_t *t; + ulong *t; if (rop == op) { diff --git a/src/fq_nmod/inv.c b/src/fq_nmod/inv.c index eb6ae7550c..c61beff802 100644 --- a/src/fq_nmod/inv.c +++ b/src/fq_nmod/inv.c @@ -16,7 +16,7 @@ #include "nmod_poly.h" #include "fq_nmod.h" -void _fq_nmod_inv(mp_limb_t *rop, const mp_limb_t *op, slong len, +void _fq_nmod_inv(ulong *rop, const ulong *op, slong len, const fq_nmod_ctx_t ctx) { const slong d = fq_nmod_ctx_degree(ctx); @@ -41,7 +41,7 @@ void fq_nmod_inv(fq_nmod_t rop, const fq_nmod_t op, const fq_nmod_ctx_t ctx) else { const slong d = fq_nmod_ctx_degree(ctx); - mp_limb_t *t; + ulong *t; if (rop == op) { diff --git a/src/fq_nmod/norm.c b/src/fq_nmod/norm.c index 23348c44dd..1865674e9f 100644 --- a/src/fq_nmod/norm.c +++ b/src/fq_nmod/norm.c @@ -20,12 +20,12 @@ This computes the norm on $\mathbf{F}_q$. 
*/
-void _fq_nmod_norm(fmpz_t rop2, const mp_limb_t *op, slong len,
+void _fq_nmod_norm(fmpz_t rop2, const ulong *op, slong len,
const fq_nmod_ctx_t ctx)
{
const slong d = fq_nmod_ctx_degree(ctx);
- mp_limb_t rop;
+ ulong rop;
if (d == 1)
{
@@ -49,7 +49,7 @@ void _fq_nmod_norm(fmpz_t rop2, const mp_limb_t *op, slong len,
*/
if (ctx->modulus->coeffs[d] != WORD(1))
{
- mp_limb_t f;
+ ulong f;
f = n_powmod2_ui_preinv(ctx->modulus->coeffs[d], len - 1, ctx->mod.n, ctx->mod.ninv);
f = n_invmod(f, ctx->mod.n);
rop = n_mulmod2_preinv(f, rop, ctx->mod.n, ctx->mod.ninv);
diff --git a/src/fq_nmod/pow.c b/src/fq_nmod/pow.c
index a092e8be68..1f4062c8a1 100644
--- a/src/fq_nmod/pow.c
+++ b/src/fq_nmod/pow.c
@@ -16,7 +16,7 @@
#include "fmpz.h"
#include "fq_nmod.h"
-void _fq_nmod_pow(mp_limb_t *rop, const mp_limb_t *op, slong len, const fmpz_t e,
+void _fq_nmod_pow(ulong *rop, const ulong *op, slong len, const fmpz_t e,
const fq_nmod_ctx_t ctx)
{
const slong d = fq_nmod_ctx_degree(ctx);
@@ -34,8 +34,8 @@ void _fq_nmod_pow(mp_limb_t *rop, const mp_limb_t *op, slong len, const fmpz_t e
else
{
ulong bit;
- mp_limb_t *v = _nmod_vec_init(2 * d - 1);
- mp_limb_t *R, *S, *T;
+ ulong *v = _nmod_vec_init(2 * d - 1);
+ ulong *R, *S, *T;
_nmod_vec_zero(v, 2 * d - 1);
_nmod_vec_zero(rop, 2 * d - 1);
@@ -133,7 +133,7 @@ void fq_nmod_pow(fq_nmod_t rop, const fq_nmod_t op, const fmpz_t e, const fq_nmo
else
{
const slong d = fq_nmod_ctx_degree(ctx);
- mp_limb_t *t;
+ ulong *t;
if (rop == op)
{
diff --git a/src/fq_nmod/reduce.c b/src/fq_nmod/reduce.c
index 7952c8a84f..f9b6b74c51 100644
--- a/src/fq_nmod/reduce.c
+++ b/src/fq_nmod/reduce.c
@@ -14,7 +14,7 @@
#include "nmod_poly.h"
#include "fq_nmod.h"
-void _fq_nmod_sparse_reduce(mp_limb_t *R, slong lenR, const fq_nmod_ctx_t ctx)
+void _fq_nmod_sparse_reduce(ulong *R, slong lenR, const fq_nmod_ctx_t ctx)
{
slong i, k;
const slong d = ctx->j[ctx->len - 1];
@@ -34,9 +34,9 @@ void _fq_nmod_sparse_reduce(mp_limb_t *R, slong lenR, const fq_nmod_ctx_t ctx)
}
}
-void _fq_nmod_dense_reduce(mp_limb_t* R, slong lenR, const fq_nmod_ctx_t ctx)
+void _fq_nmod_dense_reduce(ulong* R, slong lenR, const fq_nmod_ctx_t ctx)
{
- mp_limb_t *q, *r;
+ ulong *q, *r;
if (lenR < ctx->modulus->length)
{
@@ -58,7 +58,7 @@ void _fq_nmod_dense_reduce(mp_limb_t* R, slong lenR, const fq_nmod_ctx_t ctx)
}
-void _fq_nmod_reduce(mp_limb_t* R, slong lenR, const fq_nmod_ctx_t ctx)
+void _fq_nmod_reduce(ulong* R, slong lenR, const fq_nmod_ctx_t ctx)
{
if (ctx->sparse_modulus)
_fq_nmod_sparse_reduce(R, lenR, ctx);
diff --git a/src/fq_nmod/test/main.c b/src/fq_nmod/test/main.c
index e8612d98e3..5268dfc20a 100644
--- a/src/fq_nmod/test/main.c
+++ b/src/fq_nmod/test/main.c
@@ -9,9 +9,6 @@ (at your option) any later version. See
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/fq_nmod/trace.c b/src/fq_nmod/trace.c index e87548ea32..8ce0c0f9aa 100644 --- a/src/fq_nmod/trace.c +++ b/src/fq_nmod/trace.c @@ -15,13 +15,13 @@ #include "fmpz.h" #include "fq_nmod.h" -void _fq_nmod_trace(fmpz_t rop2, const mp_limb_t *op, slong len, +void _fq_nmod_trace(fmpz_t rop2, const ulong *op, slong len, const fq_nmod_ctx_t ctx) { const slong d = fq_nmod_ctx_degree(ctx); slong i, l; - mp_limb_t *t, rop; + ulong *t, rop; t = _nmod_vec_init(d); _nmod_vec_zero(t, d); diff --git a/src/fq_nmod_embed/mul_matrix.c b/src/fq_nmod_embed/mul_matrix.c index 249cefda64..e741e14f55 100644 --- a/src/fq_nmod_embed/mul_matrix.c +++ b/src/fq_nmod_embed/mul_matrix.c @@ -20,7 +20,7 @@ void fq_nmod_embed_mul_matrix(nmod_mat_t matrix, slong i, j, len = fq_nmod_ctx_degree(ctx); const nmod_poly_struct *modulus = ctx->modulus; const nmod_t mod = modulus->mod; - mp_limb_t lead; + ulong lead; /* This is usually 1, unless the context is non-monic */ lead = nmod_inv(modulus->coeffs[len], mod); diff --git a/src/fq_nmod_embed/test/main.c b/src/fq_nmod_embed/test/main.c index 8a878da164..660dbf9536 100644 --- a/src/fq_nmod_embed/test/main.c +++ b/src/fq_nmod_embed/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-composition_matrix.c" diff --git a/src/fq_nmod_mat/test/main.c b/src/fq_nmod_mat/test/main.c index 02c235712c..941fd74ad1 100644 --- a/src/fq_nmod_mat/test/main.c +++ b/src/fq_nmod_mat/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add_sub.c" diff --git a/src/fq_nmod_mpoly.h b/src/fq_nmod_mpoly.h index 9e76deb15e..9fd30fc815 100644 --- a/src/fq_nmod_mpoly.h +++ b/src/fq_nmod_mpoly.h @@ -123,20 +123,20 @@ void bad_fq_nmod_embed_sm_to_lg(fq_nmod_t out, void bad_fq_nmod_embed_lg_to_sm(fq_nmod_poly_t out, const fq_nmod_t in, const bad_fq_nmod_embed_t emb); -void bad_n_fq_embed_sm_to_lg(mp_limb_t * out_, const n_poly_t in_, +void bad_n_fq_embed_sm_to_lg(ulong * out_, const n_poly_t in_, const bad_fq_nmod_embed_t emb); void bad_fq_nmod_embed_n_fq_sm_to_fq_nmod_lg(fq_nmod_t out, const n_poly_t in_, const bad_fq_nmod_embed_t emb); -void bad_n_fq_embed_lg_to_sm(n_poly_t out_, const mp_limb_t * in_, +void bad_n_fq_embed_lg_to_sm(n_poly_t out_, const ulong * in_, const bad_fq_nmod_embed_t emb); void bad_fq_nmod_embed_fq_nmod_lg_to_n_fq_sm(n_poly_t out_, const fq_nmod_t in, const bad_fq_nmod_embed_t emb); -void bad_n_fq_embed_sm_elem_to_lg(mp_limb_t * out, - const mp_limb_t * in, const bad_fq_nmod_embed_t emb); +void bad_n_fq_embed_sm_elem_to_lg(ulong * out, + const ulong * in, const bad_fq_nmod_embed_t emb); void bad_fq_nmod_embed_sm_elem_to_lg(fq_nmod_t out, const fq_nmod_t in, const bad_fq_nmod_embed_t emb); @@ -147,7 +147,7 @@ typedef struct bad_fq_nmod_mpoly_embed_chooser slong m; /* degree of the extension F_q / F_p */ slong n; /* degree of the extension F_q^n / F_q */ slong k; /* index of current in embed */ - mp_limb_t p; + ulong p; } bad_fq_nmod_mpoly_embed_chooser_struct; typedef bad_fq_nmod_mpoly_embed_chooser_struct bad_fq_nmod_mpoly_embed_chooser_t[1]; @@ -174,7 +174,7 @@ bad_fq_nmod_embed_struct * bad_fq_nmod_mpoly_embed_chooser_next( /* Context object 
************************************************************/ void fq_nmod_mpoly_ctx_init_deg(fq_nmod_mpoly_ctx_t ctx, slong nvars, - const ordering_t ord, mp_limb_t p, slong deg); + const ordering_t ord, ulong p, slong deg); void fq_nmod_mpoly_ctx_init(fq_nmod_mpoly_ctx_t ctx, slong nvars, const ordering_t ord, const fq_nmod_ctx_t fqctx); @@ -241,7 +241,7 @@ void fq_nmod_mpoly_fit_length_reset_bits(fq_nmod_mpoly_t A, FQ_NMOD_MPOLY_INLINE void _fq_nmod_mpoly_fit_length( - mp_limb_t ** coeffs, + ulong ** coeffs, slong * coeffs_alloc, slong d, ulong ** exps, @@ -252,14 +252,14 @@ void _fq_nmod_mpoly_fit_length( if (d*length > *coeffs_alloc) { *coeffs_alloc = FLINT_MAX(d*length, *coeffs_alloc*2); - *coeffs = (mp_limb_t *) flint_realloc(*coeffs, - *coeffs_alloc*sizeof(mp_limb_t)); + *coeffs = (ulong *) flint_realloc(*coeffs, + *coeffs_alloc*sizeof(ulong)); } if (N*length > *exps_alloc) { *exps_alloc = FLINT_MAX(N*length, *exps_alloc*2); - *exps = (mp_limb_t *) flint_realloc(*exps, *exps_alloc*sizeof(ulong)); + *exps = (ulong *) flint_realloc(*exps, *exps_alloc*sizeof(ulong)); } } @@ -322,7 +322,7 @@ void fq_nmod_mpoly_swap(fq_nmod_mpoly_t A, fq_nmod_mpoly_t B, /* Constants *****************************************************************/ FQ_NMOD_MPOLY_INLINE -mp_limb_t * fq_nmod_mpoly_get_nonzero_n_fq(const fq_nmod_mpoly_t A, +ulong * fq_nmod_mpoly_get_nonzero_n_fq(const fq_nmod_mpoly_t A, const fq_nmod_mpoly_ctx_t FLINT_UNUSED(ctx)) { FLINT_ASSERT(A->length == 1); @@ -341,7 +341,7 @@ void fq_nmod_mpoly_set_fq_nmod(fq_nmod_mpoly_t A, const fq_nmod_t c, const fq_nmod_mpoly_ctx_t ctx); void fq_nmod_mpoly_set_n_fq(fq_nmod_mpoly_t A, - const mp_limb_t * c, const fq_nmod_mpoly_ctx_t ctx); + const ulong * c, const fq_nmod_mpoly_ctx_t ctx); void fq_nmod_mpoly_set_ui(fq_nmod_mpoly_t A, ulong c, const fq_nmod_mpoly_ctx_t ctx); @@ -425,7 +425,7 @@ void fq_nmod_mpoly_get_coeff_vars_ui(fq_nmod_mpoly_t C, const fq_nmod_mpoly_t A, const slong * vars, const ulong * exps, slong length, const fq_nmod_mpoly_ctx_t ctx); -FQ_NMOD_MPOLY_INLINE mp_limb_t * _fq_nmod_mpoly_leadcoeff( +FQ_NMOD_MPOLY_INLINE ulong * _fq_nmod_mpoly_leadcoeff( const fq_nmod_mpoly_t A, const fq_nmod_mpoly_ctx_t FLINT_UNUSED(ctx)) { FLINT_ASSERT(A->length > 0); @@ -557,9 +557,9 @@ void fq_nmod_mpoly_randtest_bits(fq_nmod_mpoly_t A, flint_rand_t state, /* Addition/Subtraction ******************************************************/ slong _fq_nmod_mpoly_add( - mp_limb_t * coeff1, ulong * exp1, - mp_limb_t * coeff2, const ulong * exp2, slong len2, - mp_limb_t * coeff3, const ulong * exp3, slong len3, + ulong * coeff1, ulong * exp1, + ulong * coeff2, const ulong * exp2, slong len2, + ulong * coeff3, const ulong * exp3, slong len3, slong N, const ulong * cmpmask, const fq_nmod_ctx_t fqctx); void fq_nmod_mpoly_add_fq_nmod(fq_nmod_mpoly_t A, @@ -567,7 +567,7 @@ void fq_nmod_mpoly_add_fq_nmod(fq_nmod_mpoly_t A, const fq_nmod_mpoly_ctx_t ctx); void fq_nmod_mpoly_add_n_fq(fq_nmod_mpoly_t A, - const fq_nmod_mpoly_t B, const mp_limb_t * c, + const fq_nmod_mpoly_t B, const ulong * c, const fq_nmod_mpoly_ctx_t ctx); void fq_nmod_mpoly_sub_fq_nmod(fq_nmod_mpoly_t A, @@ -592,7 +592,7 @@ void fq_nmod_mpoly_scalar_mul_fq_nmod(fq_nmod_mpoly_t A, const fq_nmod_mpoly_t B, const fq_nmod_t c, const fq_nmod_mpoly_ctx_t ctx); void fq_nmod_mpoly_scalar_mul_n_fq(fq_nmod_mpoly_t A, - const fq_nmod_mpoly_t B, const mp_limb_t * c, const fq_nmod_mpoly_ctx_t ctx); + const fq_nmod_mpoly_t B, const ulong * c, const fq_nmod_mpoly_ctx_t ctx); void 
fq_nmod_mpoly_make_monic(fq_nmod_mpoly_t A, const fq_nmod_mpoly_t B, const fq_nmod_mpoly_ctx_t ctx); @@ -614,7 +614,7 @@ void fq_nmod_mpoly_evaluate_one_fq_nmod(fq_nmod_mpoly_t A, const fq_nmod_mpoly_ctx_t ctx); void _fq_nmod_mpoly_eval_all_fq_nmod(fq_nmod_t ev, - const mp_limb_t * Acoeffs, const ulong * Aexps, slong Alen, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, flint_bitcnt_t Abits, fq_nmod_struct * const * alphas, const mpoly_ctx_t mctx, const fq_nmod_ctx_t fqctx); @@ -657,10 +657,10 @@ void fq_nmod_mpoly_mul_johnson(fq_nmod_mpoly_t poly1, void _fq_nmod_mpoly_mul_johnson( fq_nmod_mpoly_t A, - const mp_limb_t * Bcoeffs, + const ulong * Bcoeffs, const ulong * Bexps, slong Blen, - const mp_limb_t * Ccoeffs, + const ulong * Ccoeffs, const ulong * Cexps, slong Clen, flint_bitcnt_t bits, @@ -726,8 +726,8 @@ void fq_nmod_mpoly_divrem_ideal_monagan_pearce( slong len, const fq_nmod_mpoly_ctx_t ctx); int _fq_nmod_mpoly_divides_monagan_pearce(fq_nmod_mpoly_t A, - const mp_limb_t * coeff2, const ulong * exp2, slong len2, - const mp_limb_t * coeff3, const ulong * exp3, slong len3, + const ulong * coeff2, const ulong * exp2, slong len2, + const ulong * coeff3, const ulong * exp3, slong len3, flint_bitcnt_t bits, slong N, const ulong * cmpmask, const fq_nmod_ctx_t fqctx); @@ -1069,7 +1069,7 @@ int fq_nmod_mpolyu_gcdm_zippel(fq_nmod_mpolyu_t G, fq_nmod_mpolyu_t Abar, fq_nmod_mpolyu_t Bbar, fq_nmod_mpolyu_t A, fq_nmod_mpolyu_t B, fq_nmod_mpoly_ctx_t ctx, flint_rand_t randstate); -FQ_NMOD_MPOLY_INLINE mp_limb_t * fq_nmod_mpolyu_leadcoeff( +FQ_NMOD_MPOLY_INLINE ulong * fq_nmod_mpolyu_leadcoeff( const fq_nmod_mpolyu_t A, const fq_nmod_mpoly_ctx_t ctx) { FLINT_ASSERT(A->length > 0); @@ -1106,7 +1106,7 @@ void fq_nmod_mpolyn_fit_bits(fq_nmod_mpolyn_t A, slong bits, void fq_nmod_mpolyn_set(fq_nmod_mpolyn_t A, const fq_nmod_mpolyn_t B, const fq_nmod_mpoly_ctx_t ctx); -FQ_NMOD_MPOLY_INLINE mp_limb_t * fq_nmod_mpolyn_leadcoeff( +FQ_NMOD_MPOLY_INLINE ulong * fq_nmod_mpolyn_leadcoeff( fq_nmod_mpolyn_t A, const fq_nmod_mpoly_ctx_t ctx) { /* slong d = fq_nmod_ctx_degree(ctx->fqctx); */ @@ -1350,7 +1350,7 @@ int nmod_mpolyun_interp_crt_lg_mpolyu(slong * lastdeg, int nmod_mpolyn_interp_mcrt_lg_mpoly(slong * lastdeg_, nmod_mpolyn_t H, const nmod_mpoly_ctx_t smctx, const n_poly_t m, - const mp_limb_t * inv_m_eval,fq_nmod_mpoly_t A, + const ulong * inv_m_eval,fq_nmod_mpoly_t A, const fq_nmod_mpoly_ctx_t lgctx); int nmod_mpolyun_interp_mcrt_lg_mpolyu(slong * lastdeg, diff --git a/src/fq_nmod_mpoly/add.c b/src/fq_nmod_mpoly/add.c index a11ef1b334..609a85a5bf 100644 --- a/src/fq_nmod_mpoly/add.c +++ b/src/fq_nmod_mpoly/add.c @@ -15,9 +15,9 @@ #include "fq_nmod_mpoly.h" slong _fq_nmod_mpoly_add( - mp_limb_t * Acoeffs, ulong * Aexps, - mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, - mp_limb_t * Ccoeffs, const ulong * Cexps, slong Clen, + ulong * Acoeffs, ulong * Aexps, + ulong * Bcoeffs, const ulong * Bexps, slong Blen, + ulong * Ccoeffs, const ulong * Cexps, slong Clen, slong N, const ulong * cmpmask, const fq_nmod_ctx_t fqctx) { slong d = fq_nmod_ctx_degree(fqctx); diff --git a/src/fq_nmod_mpoly/add_fq_nmod.c b/src/fq_nmod_mpoly/add_fq_nmod.c index bfdc565756..437cf7df3e 100644 --- a/src/fq_nmod_mpoly/add_fq_nmod.c +++ b/src/fq_nmod_mpoly/add_fq_nmod.c @@ -17,7 +17,7 @@ void fq_nmod_mpoly_add_n_fq( fq_nmod_mpoly_t A, const fq_nmod_mpoly_t B, - const mp_limb_t * c, + const ulong * c, const fq_nmod_mpoly_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx->fqctx); diff --git a/src/fq_nmod_mpoly/cmp.c 
b/src/fq_nmod_mpoly/cmp.c index 97ea958449..09acc5f449 100644 --- a/src/fq_nmod_mpoly/cmp.c +++ b/src/fq_nmod_mpoly/cmp.c @@ -21,8 +21,8 @@ int fq_nmod_mpoly_cmp( slong d = fq_nmod_ctx_degree(ctx->fqctx); slong i; slong length = A->length; - const mp_limb_t * Acoeffs = A->coeffs; - const mp_limb_t * Bcoeffs = B->coeffs; + const ulong * Acoeffs = A->coeffs; + const ulong * Bcoeffs = B->coeffs; int cmp; if (A->length != B->length) diff --git a/src/fq_nmod_mpoly/compose_fq_nmod_mpoly_horner.c b/src/fq_nmod_mpoly/compose_fq_nmod_mpoly_horner.c index b57a080155..9a80e289f9 100644 --- a/src/fq_nmod_mpoly/compose_fq_nmod_mpoly_horner.c +++ b/src/fq_nmod_mpoly/compose_fq_nmod_mpoly_horner.c @@ -91,7 +91,7 @@ int fq_nmod_mpoly_compose_fq_nmod_mpoly_horner(fq_nmod_mpoly_t A, ulong * counts; slong Blen = B->length; slong * Blist; - const mp_limb_t * Bcoeffs = B->coeffs; + const ulong * Bcoeffs = B->coeffs; ulong * Bexp = B->exps; flint_bitcnt_t Bbits = B->bits; slong BN = mpoly_words_per_exp(Bbits, ctxB->minfo); diff --git a/src/fq_nmod_mpoly/compose_fq_nmod_poly.c b/src/fq_nmod_mpoly/compose_fq_nmod_poly.c index 6a447a738a..2e2a467439 100644 --- a/src/fq_nmod_mpoly/compose_fq_nmod_poly.c +++ b/src/fq_nmod_mpoly/compose_fq_nmod_poly.c @@ -26,7 +26,7 @@ int _fq_nmod_mpoly_compose_fq_nmod_poly_sp(fq_nmod_poly_t A, const fq_nmod_mpoly slong i, k, N, nvars = ctx->minfo->nvars; slong entries, k_len, shift, off; slong Blen = B->length; - const mp_limb_t * Bcoeff = B->coeffs; + const ulong * Bcoeff = B->coeffs; ulong * Bexp = B->exps; slong * degrees; slong * offs; @@ -127,7 +127,7 @@ int _fq_nmod_mpoly_compose_fq_nmod_poly_mp(fq_nmod_poly_t A, const fq_nmod_mpoly slong i, k, N, nvars = ctx->minfo->nvars; slong entries, k_len, off; slong Blen = B->length; - const mp_limb_t * Bcoeff = B->coeffs; + const ulong * Bcoeff = B->coeffs; ulong * Bexp = B->exps; fmpz * degrees; slong * offs; diff --git a/src/fq_nmod_mpoly/compose_mat.c b/src/fq_nmod_mpoly/compose_mat.c index 5a57930ff8..e06daab3a4 100644 --- a/src/fq_nmod_mpoly/compose_mat.c +++ b/src/fq_nmod_mpoly/compose_mat.c @@ -33,7 +33,7 @@ void _fq_nmod_mpoly_compose_mat( flint_bitcnt_t Bbits = B->bits; slong BN = mpoly_words_per_exp(Bbits, ctxB->minfo); const ulong * Bexp = B->exps; - const mp_limb_t * Bcoeffs = B->coeffs; + const ulong * Bcoeffs = B->coeffs; slong AN; FLINT_ASSERT(A != B); diff --git a/src/fq_nmod_mpoly/ctx_init.c b/src/fq_nmod_mpoly/ctx_init.c index cee945fcb9..29717719fa 100644 --- a/src/fq_nmod_mpoly/ctx_init.c +++ b/src/fq_nmod_mpoly/ctx_init.c @@ -14,7 +14,7 @@ #include "fq_nmod_mpoly.h" void fq_nmod_mpoly_ctx_init_deg(fq_nmod_mpoly_ctx_t ctx, slong nvars, - const ordering_t ord, mp_limb_t p, slong deg) + const ordering_t ord, ulong p, slong deg) { mpoly_ctx_init(ctx->minfo, nvars, ord); fq_nmod_ctx_init_ui(ctx->fqctx, p, deg, "#"); diff --git a/src/fq_nmod_mpoly/derivative.c b/src/fq_nmod_mpoly/derivative.c index f5c3f64f0f..b74a556104 100644 --- a/src/fq_nmod_mpoly/derivative.c +++ b/src/fq_nmod_mpoly/derivative.c @@ -16,9 +16,9 @@ #include "fq_nmod_mpoly.h" static slong _fq_nmod_mpoly_derivative( - mp_limb_t * Acoeff, + ulong * Acoeff, ulong * Aexp, - const mp_limb_t * Bcoeff, + const ulong * Bcoeff, const ulong * Bexp, slong Blen, flint_bitcnt_t bits, @@ -52,9 +52,9 @@ static slong _fq_nmod_mpoly_derivative( static slong _fq_nmod_mpoly_derivative_mp( - mp_limb_t * Acoeff, + ulong * Acoeff, ulong * Aexp, - const mp_limb_t * Bcoeff, + const ulong * Bcoeff, const ulong * Bexp, slong Blen, flint_bitcnt_t bits, diff --git 
a/src/fq_nmod_mpoly/div_monagan_pearce.c b/src/fq_nmod_mpoly/div_monagan_pearce.c index 19bc79de53..c25c83f72e 100644 --- a/src/fq_nmod_mpoly/div_monagan_pearce.c +++ b/src/fq_nmod_mpoly/div_monagan_pearce.c @@ -16,8 +16,8 @@ static int _fq_nmod_mpoly_div_monagan_pearce( fq_nmod_mpoly_t Q, - const mp_limb_t * Acoeffs, const ulong * Aexps, slong Alen, - const mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, + const ulong * Bcoeffs, const ulong * Bexps, slong Blen, flint_bitcnt_t bits, slong N, const ulong * cmpmask, @@ -31,7 +31,7 @@ static int _fq_nmod_mpoly_div_monagan_pearce( mpoly_heap_t * chain; slong * store, * store_base; mpoly_heap_t * x; - mp_limb_t * Qcoeffs = Q->coeffs; + ulong * Qcoeffs = Q->coeffs; ulong * Qexps = Q->exps; slong Qlen; ulong * exp, * exps; @@ -40,13 +40,13 @@ static int _fq_nmod_mpoly_div_monagan_pearce( ulong mask; slong * hind; int lt_divides; - mp_limb_t * lc_minus_inv, * pp; + ulong * lc_minus_inv, * pp; TMP_INIT; TMP_START; - pp = (mp_limb_t *) TMP_ALLOC(d*sizeof(mp_limb_t)); - lc_minus_inv = (mp_limb_t *) TMP_ALLOC(d*sizeof(mp_limb_t)); + pp = (ulong *) TMP_ALLOC(d*sizeof(ulong)); + lc_minus_inv = (ulong *) TMP_ALLOC(d*sizeof(ulong)); /* alloc array of heap nodes which can be chained together */ next_loc = Blen + 4; /* something bigger than heap can ever be */ diff --git a/src/fq_nmod_mpoly/divides_monagan_pearce.c b/src/fq_nmod_mpoly/divides_monagan_pearce.c index 21b7229a48..62492d6916 100644 --- a/src/fq_nmod_mpoly/divides_monagan_pearce.c +++ b/src/fq_nmod_mpoly/divides_monagan_pearce.c @@ -18,8 +18,8 @@ static int _fq_nmod_mpoly_divides_monagan_pearce1( fq_nmod_mpoly_t Q, - const mp_limb_t * Acoeffs, const ulong * Aexps, slong Alen, - const mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, + const ulong * Bcoeffs, const ulong * Bexps, slong Blen, slong bits, ulong cmpmask, const fq_nmod_ctx_t fqctx) @@ -32,18 +32,18 @@ static int _fq_nmod_mpoly_divides_monagan_pearce1( mpoly_heap_t * chain; slong * store, * store_base; mpoly_heap_t * x; - mp_limb_t * Qcoeffs = Q->coeffs; + ulong * Qcoeffs = Q->coeffs; ulong * Qexps = Q->exps; slong * hind; ulong mask, exp, maxexp = Aexps[Alen - 1]; - mp_limb_t * lc_minus_inv, * t; + ulong * lc_minus_inv, * t; int lazy_size = _n_fq_dot_lazy_size(Blen, fqctx); TMP_INIT; TMP_START; - t = (mp_limb_t *) TMP_ALLOC(6*d*sizeof(mp_limb_t)); - lc_minus_inv = (mp_limb_t *) TMP_ALLOC(d*sizeof(mp_limb_t)); + t = (ulong *) TMP_ALLOC(6*d*sizeof(ulong)); + lc_minus_inv = (ulong *) TMP_ALLOC(d*sizeof(ulong)); /* alloc array of heap nodes which can be chained together */ next_loc = Blen + 4; /* something bigger than heap can ever be */ @@ -244,8 +244,8 @@ case n: \ int _fq_nmod_mpoly_divides_monagan_pearce( fq_nmod_mpoly_t Q, - const mp_limb_t * Acoeffs, const ulong * Aexps, slong Alen, - const mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, + const ulong * Bcoeffs, const ulong * Bexps, slong Blen, flint_bitcnt_t bits, slong N, const ulong * cmpmask, @@ -259,13 +259,13 @@ int _fq_nmod_mpoly_divides_monagan_pearce( mpoly_heap_t * chain; slong * store, * store_base; mpoly_heap_t * x; - mp_limb_t * Qcoeffs = Q->coeffs; + ulong * Qcoeffs = Q->coeffs; ulong * Qexps = Q->exps; slong Qlen; ulong * exp, * exps; ulong ** exp_list; slong exp_next; - mp_limb_t * lc_minus_inv, * t; + ulong * lc_minus_inv, * t; int lazy_size = _n_fq_dot_lazy_size(Blen, fqctx); 
ulong mask; slong * hind; @@ -277,8 +277,8 @@ int _fq_nmod_mpoly_divides_monagan_pearce( TMP_START; - t = (mp_limb_t *) TMP_ALLOC(6*d*sizeof(mp_limb_t)); - lc_minus_inv = (mp_limb_t *) TMP_ALLOC(d*sizeof(mp_limb_t)); + t = (ulong *) TMP_ALLOC(6*d*sizeof(ulong)); + lc_minus_inv = (ulong *) TMP_ALLOC(d*sizeof(ulong)); next_loc = Blen + 4; /* something bigger than heap can ever be */ heap = (mpoly_heap_s *) TMP_ALLOC((Blen + 1)*sizeof(mpoly_heap_s)); diff --git a/src/fq_nmod_mpoly/divrem_ideal_monagan_pearce.c b/src/fq_nmod_mpoly/divrem_ideal_monagan_pearce.c index b150e4116f..d236cf9fa9 100644 --- a/src/fq_nmod_mpoly/divrem_ideal_monagan_pearce.c +++ b/src/fq_nmod_mpoly/divrem_ideal_monagan_pearce.c @@ -22,7 +22,7 @@ int _fq_nmod_mpoly_divrem_ideal_monagan_pearce( fq_nmod_mpoly_struct ** Q, fq_nmod_mpoly_t R, - const mp_limb_t * poly2, const ulong * exp2, slong len2, + const ulong * poly2, const ulong * exp2, slong len2, fq_nmod_mpoly_struct * const * poly3, ulong * const * exp3, slong len, slong N, flint_bitcnt_t bits, @@ -39,7 +39,7 @@ int _fq_nmod_mpoly_divrem_ideal_monagan_pearce( mpoly_nheap_t ** chains, * chains_ptr; slong ** hinds, * hinds_ptr; mpoly_nheap_t * x; - mp_limb_t * r_coeff = R->coeffs; + ulong * r_coeff = R->coeffs; ulong * r_exp = R->exps; slong r_len; ulong * exp, * exps, * texp; @@ -47,7 +47,7 @@ int _fq_nmod_mpoly_divrem_ideal_monagan_pearce( slong exp_next; ulong mask; slong * q_len, * s; - mp_limb_t * acc, * pp, * lc_minus_inv; + ulong * acc, * pp, * lc_minus_inv; TMP_INIT; TMP_START; @@ -107,9 +107,9 @@ int _fq_nmod_mpoly_divrem_ideal_monagan_pearce( /* precompute leading coeff info */ - pp = (mp_limb_t *) TMP_ALLOC(d*sizeof(mp_limb_t)); - acc = (mp_limb_t *) TMP_ALLOC(d*sizeof(mp_limb_t)); - lc_minus_inv = (mp_limb_t *) TMP_ALLOC(d*len*sizeof(mp_limb_t)); + pp = (ulong *) TMP_ALLOC(d*sizeof(ulong)); + acc = (ulong *) TMP_ALLOC(d*sizeof(ulong)); + lc_minus_inv = (ulong *) TMP_ALLOC(d*len*sizeof(ulong)); for (w = 0; w < len; w++) { n_fq_inv(lc_minus_inv + d*w, poly3[w]->coeffs + d*0, ctx->fqctx); diff --git a/src/fq_nmod_mpoly/divrem_monagan_pearce.c b/src/fq_nmod_mpoly/divrem_monagan_pearce.c index caa65d080a..3a368e9191 100644 --- a/src/fq_nmod_mpoly/divrem_monagan_pearce.c +++ b/src/fq_nmod_mpoly/divrem_monagan_pearce.c @@ -17,19 +17,19 @@ static int _fq_nmod_mpoly_divrem_monagan_pearce1_binomial( fq_nmod_mpoly_t Q, fq_nmod_mpoly_t R, - const mp_limb_t * Acoeffs, const ulong * Aexps, slong Alen, - const mp_limb_t * Bcoeffs, const ulong * Bexps, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, + const ulong * Bcoeffs, const ulong * Bexps, flint_bitcnt_t bits, ulong maskhi, const fq_nmod_ctx_t fqctx) { slong d = fq_nmod_ctx_degree(fqctx); - mp_limb_t * Qcoeffs = Q->coeffs; - mp_limb_t * Rcoeffs = R->coeffs; + ulong * Qcoeffs = Q->coeffs; + ulong * Rcoeffs = R->coeffs; ulong * Qexps = Q->exps; ulong * Rexps = R->exps; ulong lexp, mask = mpoly_overflow_mask_sp(bits); - mp_limb_t * tmp, * lc_inv, * mBcoeff1; + ulong * tmp, * lc_inv, * mBcoeff1; int lc_inv_is_one; slong Qlen = 0; slong Rlen = 0; @@ -39,8 +39,8 @@ static int _fq_nmod_mpoly_divrem_monagan_pearce1_binomial( TMP_START; - tmp = (mp_limb_t *) TMP_ALLOC(d*(2 + FLINT_MAX(N_FQ_MUL_ITCH, - N_FQ_INV_ITCH))*sizeof(mp_limb_t)); + tmp = (ulong *) TMP_ALLOC(d*(2 + FLINT_MAX(N_FQ_MUL_ITCH, + N_FQ_INV_ITCH))*sizeof(ulong)); lc_inv = tmp + d*FLINT_MAX(N_FQ_MUL_ITCH, N_FQ_INV_ITCH); mBcoeff1 = lc_inv + d; @@ -155,8 +155,8 @@ static int _fq_nmod_mpoly_divrem_monagan_pearce1_binomial( static int 
_fq_nmod_mpoly_divrem_monagan_pearce1( fq_nmod_mpoly_t Q, fq_nmod_mpoly_t R, - const mp_limb_t * Acoeffs, const ulong * Aexps, slong Alen, - const mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, + const ulong * Bcoeffs, const ulong * Bexps, slong Blen, flint_bitcnt_t bits, ulong maskhi, const fq_nmod_ctx_t ctx) @@ -168,21 +168,21 @@ static int _fq_nmod_mpoly_divrem_monagan_pearce1( mpoly_heap_t * chain; slong * store, * store_base; mpoly_heap_t * x; - mp_limb_t * Qcoeffs = Q->coeffs; - mp_limb_t * Rcoeffs = R->coeffs; + ulong * Qcoeffs = Q->coeffs; + ulong * Rcoeffs = R->coeffs; ulong * Qexps = Q->exps; ulong * Rexps = R->exps; slong * hind; ulong mask, exp; int lt_divides; - mp_limb_t * lc_minus_inv, * t; + ulong * lc_minus_inv, * t; int lazy_size = _n_fq_dot_lazy_size(Blen, ctx); TMP_INIT; TMP_START; - t = (mp_limb_t *) TMP_ALLOC(6*d*sizeof(mp_limb_t)); - lc_minus_inv = (mp_limb_t *) TMP_ALLOC(d*sizeof(mp_limb_t)); + t = (ulong *) TMP_ALLOC(6*d*sizeof(ulong)); + lc_minus_inv = (ulong *) TMP_ALLOC(d*sizeof(ulong)); /* alloc array of heap nodes which can be chained together */ next_loc = Blen + 4; /* something bigger than heap can ever be */ @@ -442,8 +442,8 @@ static int _fq_nmod_mpoly_divrem_monagan_pearce1( static int _fq_nmod_mpoly_divrem_monagan_pearce( fq_nmod_mpoly_t Q, fq_nmod_mpoly_t R, - const mp_limb_t * Acoeffs, const ulong * Aexps, slong Alen, - const mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, + const ulong * Bcoeffs, const ulong * Bexps, slong Blen, flint_bitcnt_t bits, slong N, const ulong * cmpmask, @@ -457,8 +457,8 @@ static int _fq_nmod_mpoly_divrem_monagan_pearce( mpoly_heap_t * chain; slong * store, * store_base; mpoly_heap_t * x; - mp_limb_t * Qcoeffs = Q->coeffs; - mp_limb_t * Rcoeffs = R->coeffs; + ulong * Qcoeffs = Q->coeffs; + ulong * Rcoeffs = R->coeffs; ulong * Qexps = Q->exps; ulong * Rexps = R->exps; slong Qlen; @@ -469,7 +469,7 @@ static int _fq_nmod_mpoly_divrem_monagan_pearce( ulong mask; slong * hind; int lt_divides; - mp_limb_t * lc_minus_inv, * pp; + ulong * lc_minus_inv, * pp; TMP_INIT; if (N == 1) @@ -484,8 +484,8 @@ static int _fq_nmod_mpoly_divrem_monagan_pearce( TMP_START; - pp = (mp_limb_t *) TMP_ALLOC(d*sizeof(mp_limb_t)); - lc_minus_inv = (mp_limb_t *) TMP_ALLOC(d*sizeof(mp_limb_t)); + pp = (ulong *) TMP_ALLOC(d*sizeof(ulong)); + lc_minus_inv = (ulong *) TMP_ALLOC(d*sizeof(ulong)); /* alloc array of heap nodes which can be chained together */ next_loc = Blen + 4; /* something bigger than heap can ever be */ diff --git a/src/fq_nmod_mpoly/evaluate_all.c b/src/fq_nmod_mpoly/evaluate_all.c index 5cf8a94171..a7a6d034c4 100644 --- a/src/fq_nmod_mpoly/evaluate_all.c +++ b/src/fq_nmod_mpoly/evaluate_all.c @@ -18,7 +18,7 @@ void _fq_nmod_mpoly_eval_all_fq_nmod( fq_nmod_t eval, - const mp_limb_t * Acoeffs, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, flint_bitcnt_t Abits, @@ -35,14 +35,14 @@ void _fq_nmod_mpoly_eval_all_fq_nmod( fmpz_t varexp_mp; slong * offsets, * shifts; n_poly_struct * caches; - mp_limb_t * t; + ulong * t; TMP_INIT; TMP_START; fmpz_init(varexp_mp); - t = (mp_limb_t *) TMP_ALLOC(d*sizeof(mp_limb_t)); + t = (ulong *) TMP_ALLOC(d*sizeof(ulong)); caches = (n_poly_struct *) TMP_ALLOC(3*nvars*sizeof(n_poly_struct)); offsets = (slong *) TMP_ALLOC(2*nvars*sizeof(slong)); shifts = offsets + nvars; diff --git a/src/fq_nmod_mpoly/evaluate_one.c b/src/fq_nmod_mpoly/evaluate_one.c index 
4e9abd80e6..18ae448f0b 100644 --- a/src/fq_nmod_mpoly/evaluate_one.c +++ b/src/fq_nmod_mpoly/evaluate_one.c @@ -27,11 +27,11 @@ void _fq_nmod_mpoly_evaluate_one_fq_nmod_sp( slong i, N, off, shift; ulong * cmpmask, * one; slong Blen = B->length; - const mp_limb_t * Bcoeffs = B->coeffs; + const ulong * Bcoeffs = B->coeffs; const ulong * Bexps = B->exps; flint_bitcnt_t bits = B->bits; slong Alen; - mp_limb_t * Acoeffs; + ulong * Acoeffs; ulong * Aexps; ulong mask, k; int need_sort = 0, cmp; @@ -115,11 +115,11 @@ static void _fq_nmod_mpoly_evaluate_one_fq_nmod_mp( slong i, N, off; ulong * cmpmask, * one, * tmp; slong Blen = B->length; - const mp_limb_t * Bcoeffs = B->coeffs; + const ulong * Bcoeffs = B->coeffs; const ulong * Bexps = B->exps; flint_bitcnt_t bits = B->bits; slong Alen; - mp_limb_t * Acoeffs; + ulong * Acoeffs; ulong * Aexps; fmpz_t k; int need_sort = 0, cmp; diff --git a/src/fq_nmod_mpoly/fit_length_fit_bits.c b/src/fq_nmod_mpoly/fit_length_fit_bits.c index 6d1d647efd..4de8916832 100644 --- a/src/fq_nmod_mpoly/fit_length_fit_bits.c +++ b/src/fq_nmod_mpoly/fit_length_fit_bits.c @@ -25,7 +25,7 @@ void fq_nmod_mpoly_fit_length_fit_bits( if (d*len > A->coeffs_alloc) { A->coeffs_alloc = FLINT_MAX(d*len, 2*A->coeffs_alloc); - A->coeffs = flint_realloc(A->coeffs, A->coeffs_alloc*sizeof(mp_limb_t)); + A->coeffs = flint_realloc(A->coeffs, A->coeffs_alloc*sizeof(ulong)); } if (bits > A->bits) diff --git a/src/fq_nmod_mpoly/fq_nmod_embed.c b/src/fq_nmod_mpoly/fq_nmod_embed.c index 63cbcbb03f..d53af70b1d 100644 --- a/src/fq_nmod_mpoly/fq_nmod_embed.c +++ b/src/fq_nmod_mpoly/fq_nmod_embed.c @@ -107,7 +107,7 @@ static void _set_matrices(bad_fq_nmod_embed_t cur) slong n = fq_nmod_ctx_degree(cur->lgctx); slong i; n_fq_poly_t phi_as_n_fq_poly, phi_pow, q; - mp_limb_t ** Mrows = cur->lg_to_sm_mat->rows; + ulong ** Mrows = cur->lg_to_sm_mat->rows; n_fq_poly_init(phi_as_n_fq_poly); n_fq_poly_init(phi_pow); @@ -159,8 +159,8 @@ void bad_fq_nmod_embed_array_init(bad_fq_nmod_embed_struct * emb, nmod_mat_t M, Msol; fq_nmod_t biggen; fmpz_t P; - mp_limb_t lc_inv; - mp_limb_t p = smallctx->modulus->mod.n; + ulong lc_inv; + ulong p = smallctx->modulus->mod.n; slong n, m = nmod_poly_degree(smallctx->modulus); /* n is the degree of the extension */ @@ -346,7 +346,7 @@ void bad_fq_nmod_embed_array_init(bad_fq_nmod_embed_struct * emb, /* just matrix-vector multiplication */ void bad_n_fq_embed_lg_to_sm( n_fq_poly_t out, /* poly over smctx */ - const mp_limb_t * in, /* element of lgctx */ + const ulong * in, /* element of lgctx */ const bad_fq_nmod_embed_t emb) { slong smd = fq_nmod_ctx_degree(emb->smctx); @@ -431,7 +431,7 @@ void bad_fq_nmod_embed_fq_nmod_lg_to_n_fq_sm( /* just matrix-vector multiplication */ void bad_n_fq_embed_sm_to_lg( - mp_limb_t * out, /* element of lgctx */ + ulong * out, /* element of lgctx */ const n_fq_poly_t in, /* poly over smctx */ const bad_fq_nmod_embed_t emb) { @@ -537,8 +537,8 @@ void bad_fq_nmod_embed_n_fq_sm_to_fq_nmod_lg( /**************** convert Fp[theta]/f(theta) to Fp[phi]/g(phi) ***************/ void bad_n_fq_embed_sm_elem_to_lg( - mp_limb_t * out, - const mp_limb_t * in, + ulong * out, + const ulong * in, const bad_fq_nmod_embed_t emb) { slong smd = fq_nmod_ctx_degree(emb->smctx); @@ -584,7 +584,7 @@ bad_fq_nmod_mpoly_embed_chooser_init(bad_fq_nmod_mpoly_embed_chooser_t embc, { nmod_poly_t ext_modulus; fq_nmod_ctx_t ext_fqctx; - mp_limb_t p = ctx->fqctx->modulus->mod.n; + ulong p = ctx->fqctx->modulus->mod.n; slong m = nmod_poly_degree(ctx->fqctx->modulus); slong n; @@ 
-630,7 +630,7 @@ bad_fq_nmod_mpoly_embed_chooser_next(bad_fq_nmod_mpoly_embed_chooser_t embc, { nmod_poly_t ext_modulus; fq_nmod_ctx_t ext_fqctx; - mp_limb_t p = embc->p; + ulong p = embc->p; slong m = embc->m; slong n = embc->n; diff --git a/src/fq_nmod_mpoly/gcd.c b/src/fq_nmod_mpoly/gcd.c index 392a91ec08..3c8c499b38 100644 --- a/src/fq_nmod_mpoly/gcd.c +++ b/src/fq_nmod_mpoly/gcd.c @@ -50,8 +50,8 @@ void fq_nmod_mpoly_evals( ulong varexp; slong total_degree, lo, hi; n_poly_struct * caches = FLINT_ARRAY_ALLOC(3*nvars, n_poly_struct); - mp_limb_t * t = FLINT_ARRAY_ALLOC(2*d, mp_limb_t); - mp_limb_t * meval = t + d; + ulong * t = FLINT_ARRAY_ALLOC(2*d, ulong); + ulong * meval = t + d; for (j = 0; j < nvars; j++) { @@ -77,7 +77,7 @@ void fq_nmod_mpoly_evals( total_degree = 0; for (i = 0; i < A->length; i++) { - mp_limb_t * s = A->coeffs + d*i; /* source */ + ulong * s = A->coeffs + d*i; /* source */ lo = hi = 0; for (j = 0; j < nvars; j++) @@ -158,8 +158,8 @@ void fq_nmod_mpoly_evals_lgprime( ulong varexp, lo, hi; slong total_degree; n_poly_struct * caches = FLINT_ARRAY_ALLOC(3*nvars, n_poly_struct); - mp_limb_t * t = FLINT_ARRAY_ALLOC(2*lgd, mp_limb_t); - mp_limb_t * meval = t + lgd; + ulong * t = FLINT_ARRAY_ALLOC(2*lgd, ulong); + ulong * meval = t + lgd; for (j = 0; j < nvars; j++) { @@ -635,7 +635,7 @@ static int _try_monomial_cofactors( slong NA, NG; slong nvars = ctx->minfo->nvars; fmpz * Abarexps, * Bbarexps, * Texps; - mp_limb_t * tmp, * t1, * t2, * a0, * b0; + ulong * tmp, * t1, * t2, * a0, * b0; fq_nmod_mpoly_t T; flint_bitcnt_t Gbits = FLINT_MIN(A->bits, B->bits); flint_bitcnt_t Abarbits = A->bits; @@ -650,8 +650,8 @@ static int _try_monomial_cofactors( TMP_START; - tmp = (mp_limb_t *) TMP_ALLOC(d*(4 + FLINT_MAX(N_FQ_MUL_ITCH, - N_FQ_INV_ITCH))*sizeof(mp_limb_t)); + tmp = (ulong *) TMP_ALLOC(d*(4 + FLINT_MAX(N_FQ_MUL_ITCH, + N_FQ_INV_ITCH))*sizeof(ulong)); t1 = tmp + d*FLINT_MAX(N_FQ_MUL_ITCH, N_FQ_INV_ITCH); t2 = t1 + d; a0 = t2 + d; diff --git a/src/fq_nmod_mpoly/gcd_hensel.c b/src/fq_nmod_mpoly/gcd_hensel.c index ee5ec62cd0..3e4faa442f 100644 --- a/src/fq_nmod_mpoly/gcd_hensel.c +++ b/src/fq_nmod_mpoly/gcd_hensel.c @@ -58,7 +58,7 @@ int fq_nmod_mpolyl_gcd_hensel_smprime( slong Adegx, Bdegx, gdegx; fq_nmod_mpoly_t t1, t2, g, abar, bbar, hbar; flint_rand_t state; - mp_limb_t * tmp, * q; + ulong * tmp, * q; FLINT_ASSERT(n > 0); FLINT_ASSERT(A->length > 0); @@ -70,7 +70,7 @@ int fq_nmod_mpolyl_gcd_hensel_smprime( flint_rand_init(state); - tmp = FLINT_ARRAY_ALLOC(d*(N_FQ_MUL_INV_ITCH + 1), mp_limb_t); + tmp = FLINT_ARRAY_ALLOC(d*(N_FQ_MUL_INV_ITCH + 1), ulong); q = tmp + d*N_FQ_MUL_INV_ITCH; fq_nmod_init(mu1, ctx->fqctx); diff --git a/src/fq_nmod_mpoly/gcd_zippel2.c b/src/fq_nmod_mpoly/gcd_zippel2.c index fbc8d34c38..8b5adc03ac 100644 --- a/src/fq_nmod_mpoly/gcd_zippel2.c +++ b/src/fq_nmod_mpoly/gcd_zippel2.c @@ -37,7 +37,7 @@ void _fq_nmod_mpoly_monomial_evals_cache( slong N = mpoly_words_per_exp_sp(Abits, ctx->minfo); slong * off, * shift; n_poly_struct * caches; - mp_limb_t * c; + ulong * c; slong num = stop - start; FLINT_ASSERT(Abits <= FLINT_BITS); @@ -102,7 +102,7 @@ void _fq_nmod_mpoly_monomial_evals2_cache( slong N = mpoly_words_per_exp_sp(Abits, ctx->minfo); slong * off, * shift; n_fq_poly_struct * caches; - mp_limb_t * c; + ulong * c; FLINT_ASSERT(Abits <= FLINT_BITS); FLINT_ASSERT(Alen > 0); @@ -209,7 +209,7 @@ void n_fq_bpoly_eval_step_sep( slong d = fq_nmod_ctx_degree(ctx); slong i, Ai; slong e0, e1; - mp_limb_t * c = FLINT_ARRAY_ALLOC(d, mp_limb_t); + ulong * c = 
FLINT_ARRAY_ALLOC(d, ulong); n_bpoly_zero(E); @@ -237,7 +237,7 @@ void n_fq_bpoly_eval_step_sep( } static void n_fq_poly_eval_step_sep( - mp_limb_t * res, + ulong * res, n_fq_poly_t cur, const n_fq_poly_t inc, const fq_nmod_mpoly_t A, @@ -259,7 +259,7 @@ static void n_fq_bpoly_evalp_step_sep( slong d = fq_nmod_ctx_degree(ctx); slong i, Ai; slong e0, e1; - mp_limb_t * c = FLINT_ARRAY_ALLOC(d, mp_limb_t); + ulong * c = FLINT_ARRAY_ALLOC(d, ulong); n_bpoly_zero(E); @@ -288,7 +288,7 @@ static void n_fq_bpoly_evalp_step_sep( static void n_fq_poly_evalp_step_sep( - mp_limb_t * res, + ulong * res, n_poly_t cur, const n_poly_t inc, const fq_nmod_mpoly_t A, @@ -321,7 +321,7 @@ static void fq_nmod_mpoly_monomial_evals2( static void fq_nmod_mpoly_monomial_evalsp2( n_polyun_t E, const fq_nmod_mpoly_t A, - const mp_limb_t * betas, + const ulong * betas, slong m, const fq_nmod_mpoly_ctx_t ctx) { @@ -332,7 +332,7 @@ static void fq_nmod_mpoly_monomial_evalsp2( static void fq_nmod_mpoly_monomial_evalsp( n_poly_t E, const fq_nmod_mpoly_t A, - const mp_limb_t * betas, + const ulong * betas, slong start, slong stop, const fq_nmod_mpoly_ctx_t ctx) @@ -375,7 +375,7 @@ int n_fq_polyun_zip_solve( if (A->length*d > A->coeffs_alloc) { slong new_alloc = FLINT_MAX(A->coeffs_alloc + A->coeffs_alloc/2, A->length*d); - A->coeffs = (mp_limb_t *) flint_realloc(A->coeffs, new_alloc*sizeof(mp_limb_t)); + A->coeffs = (ulong *) flint_realloc(A->coeffs, new_alloc*sizeof(ulong)); A->coeffs_alloc = new_alloc; } @@ -432,7 +432,7 @@ static int n_fq_polyun_zip_solvep( if (A->length*d > A->coeffs_alloc) { slong new_alloc = FLINT_MAX(A->coeffs_alloc + A->coeffs_alloc/2, A->length*d); - A->coeffs = FLINT_ARRAY_REALLOC(A->coeffs, new_alloc, mp_limb_t); + A->coeffs = FLINT_ARRAY_REALLOC(A->coeffs, new_alloc, ulong); A->coeffs_alloc = new_alloc; } @@ -574,13 +574,13 @@ int fq_nmod_mpolyl_gcd_zippel_smprime( slong nvars = ctx->minfo->nvars; flint_bitcnt_t bits = A->bits; fq_nmod_struct * alphas, * betas; - mp_limb_t * betasp; + ulong * betasp; flint_rand_t state; fq_nmod_mpoly_t cont; fq_nmod_mpoly_t T, G, Abar, Bbar; n_fq_polyun_t HG, HAbar, HBbar, MG, MAbar, MBbar, ZG, ZAbar, ZBbar; n_fq_bpoly_t Aev, Bev, Gev, Abarev, Bbarev; - const mp_limb_t * gammaev; + const ulong * gammaev; fq_nmod_mpolyn_t Tn, Gn, Abarn, Bbarn; slong lastdeg; slong cur_zip_image, req_zip_images, this_length; @@ -590,7 +590,7 @@ int fq_nmod_mpolyl_gcd_zippel_smprime( fq_nmod_mpoly_struct * Aevals, * Bevals; fq_nmod_mpoly_struct * gammaevals; n_poly_bpoly_stack_t St; - mp_limb_t * c; + ulong * c; fq_nmod_t start_alpha; ulong GdegboundXY, newdegXY, Abideg, Bbideg; slong degxAB, degyAB; @@ -631,7 +631,7 @@ int fq_nmod_mpolyl_gcd_zippel_smprime( FLINT_ASSERT(gammadegs[1] == 0); fq_nmod_init(start_alpha, ctx->fqctx); - c = FLINT_ARRAY_ALLOC(d, mp_limb_t); + c = FLINT_ARRAY_ALLOC(d, ulong); n_fq_polyun_init(HG); n_fq_polyun_init(HAbar); @@ -667,7 +667,7 @@ int fq_nmod_mpolyl_gcd_zippel_smprime( n_poly_stack_init(St->poly_stack); n_bpoly_stack_init(St->bpoly_stack); - betasp = FLINT_ARRAY_ALLOC(nvars, mp_limb_t); + betasp = FLINT_ARRAY_ALLOC(nvars, ulong); betas = FLINT_ARRAY_ALLOC(nvars, fq_nmod_struct); alphas = FLINT_ARRAY_ALLOC(nvars, fq_nmod_struct); for (i = 0; i < nvars; i++) @@ -1344,7 +1344,7 @@ static int newfq_nmod_mpolyn_interp_mcrt_lg_mpoly( fq_nmod_mpolyn_t H, const fq_nmod_mpoly_ctx_t ctx, const n_fq_poly_t m, - const mp_limb_t * inv_m_eval, + const ulong * inv_m_eval, fq_nmod_mpoly_t A, const fq_nmod_mpoly_ctx_t ectx, const bad_fq_nmod_embed_t emb) @@ 
-1355,7 +1355,7 @@ static int newfq_nmod_mpolyn_interp_mcrt_lg_mpoly( slong N = mpoly_words_per_exp(A->bits, ctx->minfo); #endif int changed = 0; - mp_limb_t * u, * v, * tmp; + ulong * u, * v, * tmp; n_fq_poly_struct * w, * u_sm; n_poly_stack_t St; @@ -1423,7 +1423,7 @@ int fq_nmod_mpolyl_gcd_zippel_lgprime( fq_nmod_mpoly_t T, G, Abar, Bbar; n_fq_polyun_t HG, HAbar, HBbar, MG, MAbar, MBbar, ZG, ZAbar, ZBbar; n_fq_bpoly_t Aev, Bev, Gev, Abarev, Bbarev; - const mp_limb_t * gammaev; + const ulong * gammaev; fq_nmod_mpolyn_t Tn, Gn, Abarn, Bbarn; slong lastdeg; slong cur_zip_image, req_zip_images, this_length; diff --git a/src/fq_nmod_mpoly/get_coeff_vars_ui.c b/src/fq_nmod_mpoly/get_coeff_vars_ui.c index e3373fcd70..74cc0aa93a 100644 --- a/src/fq_nmod_mpoly/get_coeff_vars_ui.c +++ b/src/fq_nmod_mpoly/get_coeff_vars_ui.c @@ -31,7 +31,7 @@ void fq_nmod_mpoly_get_coeff_vars_ui( ulong * uexp; ulong * tmask, * texp; slong nvars = ctx->minfo->nvars; - mp_limb_t * Ccoeffs; + ulong * Ccoeffs; ulong * Cexps; slong Clen; TMP_INIT; diff --git a/src/fq_nmod_mpoly/get_set_is_fq_nmod_poly.c b/src/fq_nmod_mpoly/get_set_is_fq_nmod_poly.c index 86a3c4a518..5cb77985b0 100644 --- a/src/fq_nmod_mpoly/get_set_is_fq_nmod_poly.c +++ b/src/fq_nmod_mpoly/get_set_is_fq_nmod_poly.c @@ -32,7 +32,7 @@ int fq_nmod_mpoly_get_fq_nmod_poly( { slong d = fq_nmod_ctx_degree(ctx->fqctx); slong Blen = B->length; - const mp_limb_t * Bcoeffs = B->coeffs; + const ulong * Bcoeffs = B->coeffs; const ulong * Bexps = B->exps; flint_bitcnt_t Bbits = B->bits; slong i, N = mpoly_words_per_exp(Bbits, ctx->minfo); diff --git a/src/fq_nmod_mpoly/get_str_pretty.c b/src/fq_nmod_mpoly/get_str_pretty.c index bfea85555e..f776f7f6a7 100644 --- a/src/fq_nmod_mpoly/get_str_pretty.c +++ b/src/fq_nmod_mpoly/get_str_pretty.c @@ -19,7 +19,7 @@ #define ALLOC_PER_VAR ((FLINT_BITS+4)/3) char * _fq_nmod_mpoly_get_str_pretty( - const mp_limb_t * coeff, + const ulong * coeff, const ulong * exp, slong len, const char ** x_in, diff --git a/src/fq_nmod_mpoly/init.c b/src/fq_nmod_mpoly/init.c index ccf13d5e80..3a96f1c77e 100644 --- a/src/fq_nmod_mpoly/init.c +++ b/src/fq_nmod_mpoly/init.c @@ -25,7 +25,7 @@ void fq_nmod_mpoly_init3( if (alloc > 0) { A->coeffs_alloc = d*alloc; - A->coeffs = FLINT_ARRAY_ALLOC(A->coeffs_alloc, mp_limb_t); + A->coeffs = FLINT_ARRAY_ALLOC(A->coeffs_alloc, ulong); A->exps_alloc = N*alloc; A->exps = FLINT_ARRAY_ALLOC(A->exps_alloc, ulong); } diff --git a/src/fq_nmod_mpoly/interp.c b/src/fq_nmod_mpoly/interp.c index 11f3f01c2c..682f0b61d6 100644 --- a/src/fq_nmod_mpoly/interp.c +++ b/src/fq_nmod_mpoly/interp.c @@ -29,12 +29,12 @@ static void _n_poly_mul_n_fq( n_poly_t a, const n_poly_t b, - const mp_limb_t * c, + const ulong * c, const fq_nmod_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx); n_poly_t C; - C->coeffs = (mp_limb_t *) c; + C->coeffs = (ulong *) c; C->length = d; C->alloc = d; _n_poly_normalise(C); @@ -344,9 +344,9 @@ int nmod_mpolyn_interp_crt_lg_bpoly( n_poly_struct * Fcoeffs = F->coeffs; ulong * Texps = T->exps; n_poly_struct * Tcoeffs = T->coeffs; - mp_limb_t * u = FLINT_ARRAY_ALLOC(3*lgd, mp_limb_t); - mp_limb_t * v = u + lgd; - mp_limb_t * inv_m_eval = v + lgd; + ulong * u = FLINT_ARRAY_ALLOC(3*lgd, ulong); + ulong * v = u + lgd; + ulong * inv_m_eval = v + lgd; ulong Fexpi, mask; mask = (-UWORD(1)) >> (FLINT_BITS - F->bits); @@ -950,7 +950,7 @@ static int nmod_mpolyn_interp_crt_lg_mpoly( slong Flen = F->length, Alen = A->length; ulong * Fexp = F->exps, * Aexp = A->exps; ulong * Texp; - mp_limb_t * Acoeff = A->coeffs; + 
ulong * Acoeff = A->coeffs; n_poly_struct * Fcoeff = F->coeffs; n_poly_struct * Tcoeff; fq_nmod_t at; @@ -1175,7 +1175,7 @@ int nmod_mpolyn_interp_mcrt_lg_mpoly( nmod_mpolyn_t H, const nmod_mpoly_ctx_t smctx, const n_poly_t m, - const mp_limb_t * inv_m_eval, + const ulong * inv_m_eval, fq_nmod_mpoly_t A, const fq_nmod_mpoly_ctx_t lgctx) { @@ -1185,7 +1185,7 @@ int nmod_mpolyn_interp_mcrt_lg_mpoly( slong N = mpoly_words_per_exp(A->bits, smctx->minfo); #endif int changed = 0; - mp_limb_t * u = FLINT_ARRAY_ALLOC(lgd, mp_limb_t); + ulong * u = FLINT_ARRAY_ALLOC(lgd, ulong); n_poly_t w; n_poly_init(w); @@ -1597,7 +1597,7 @@ int fq_nmod_mpolyn_interp_crt_sm_bpoly( n_fq_poly_struct * Fcoeffs = F->coeffs; ulong * Texps = T->exps; n_fq_poly_struct * Tcoeffs = T->coeffs; - mp_limb_t * v = FLINT_ARRAY_ALLOC(d, mp_limb_t); + ulong * v = FLINT_ARRAY_ALLOC(d, ulong); ulong Fexpi, mask; FLINT_ASSERT(fq_nmod_mpolyn_is_canonical(F, ctx)); @@ -1866,9 +1866,9 @@ int fq_nmod_mpolyn_interp_mcrt_sm_mpoly( #endif slong lastdeg = *lastdeg_; int changed = 0; - mp_limb_t * v = FLINT_ARRAY_ALLOC(d, mp_limb_t); + ulong * v = FLINT_ARRAY_ALLOC(d, ulong); slong i, Alen = A->length; - mp_limb_t * Acoeffs = A->coeffs; + ulong * Acoeffs = A->coeffs; n_fq_poly_struct * Fcoeffs = F->coeffs; FLINT_ASSERT(F->bits == A->bits); @@ -2392,9 +2392,9 @@ int fq_nmod_mpolyn_interp_crt_lg_bpoly( n_fq_poly_struct * Fcoeffs = F->coeffs; ulong * Texps = T->exps; n_fq_poly_struct * Tcoeffs = T->coeffs; - mp_limb_t * u = FLINT_ARRAY_ALLOC(3*lgd, mp_limb_t); - mp_limb_t * v = u + lgd; - mp_limb_t * inv_m_eval = v + lgd; + ulong * u = FLINT_ARRAY_ALLOC(3*lgd, ulong); + ulong * v = u + lgd; + ulong * inv_m_eval = v + lgd; n_fq_poly_t u_sm; ulong Fexpi, mask; @@ -2923,7 +2923,7 @@ void fq_nmod_mpolyn_interp_lift_sm_mpoly( slong i; slong N; n_fq_poly_struct * Acoeff; - mp_limb_t * Bcoeff; + ulong * Bcoeff; ulong * Aexp, * Bexp; slong Blen; @@ -2987,7 +2987,7 @@ int fq_nmod_mpolyn_interp_crt_sm_mpoly( slong Flen = F->length, Alen = A->length; ulong * Fexp = F->exps, * Aexp = A->exps; ulong * Texp; - mp_limb_t * Acoeff = A->coeffs; + ulong * Acoeff = A->coeffs; n_fq_poly_struct * Fcoeff = F->coeffs; n_fq_poly_struct * Tcoeff; fq_nmod_poly_t tp; @@ -3333,7 +3333,7 @@ int fq_nmod_mpolyn_interp_crt_lg_mpoly( slong Flen = F->length, Alen = A->length; ulong * Fexp = F->exps, * Aexp = A->exps; ulong * Texp; - mp_limb_t * Acoeff = A->coeffs; + ulong * Acoeff = A->coeffs; n_fq_poly_struct * Fcoeff = F->coeffs; n_fq_poly_struct * Tcoeff; fq_nmod_t at; diff --git a/src/fq_nmod_mpoly/make_monic.c b/src/fq_nmod_mpoly/make_monic.c index a0a21af036..597e0838f6 100644 --- a/src/fq_nmod_mpoly/make_monic.c +++ b/src/fq_nmod_mpoly/make_monic.c @@ -19,7 +19,7 @@ void fq_nmod_mpoly_make_monic( const fq_nmod_mpoly_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx->fqctx); - mp_limb_t * c; + ulong * c; TMP_INIT; if (B->length < 1) @@ -28,7 +28,7 @@ void fq_nmod_mpoly_make_monic( } TMP_START; - c = (mp_limb_t *) TMP_ALLOC((1 + N_FQ_INV_ITCH)*d*sizeof(mp_limb_t)); + c = (ulong *) TMP_ALLOC((1 + N_FQ_INV_ITCH)*d*sizeof(ulong)); _n_fq_inv(c, B->coeffs + d*0, ctx->fqctx, c + d); fq_nmod_mpoly_scalar_mul_n_fq(A, B, c, ctx); diff --git a/src/fq_nmod_mpoly/mpolyu.c b/src/fq_nmod_mpoly/mpolyu.c index 80d0fe970a..c46a59fa74 100644 --- a/src/fq_nmod_mpoly/mpolyu.c +++ b/src/fq_nmod_mpoly/mpolyu.c @@ -325,7 +325,7 @@ void fq_nmod_mpoly_from_mpolyu_perm_inflate( slong i, j, k, l; slong NA, NB; slong Alen; - mp_limb_t * Acoeff; + ulong * Acoeff; ulong * Aexp; ulong * uexps; ulong * 
Aexps; @@ -496,7 +496,7 @@ void fq_nmod_mpoly_from_mpolyuu_perm_inflate( /* only for 2 main vars */ slong i, j, k, l; slong NA, NB; slong Alen; - mp_limb_t * Acoeff; + ulong * Acoeff; ulong * Aexp; ulong * uexps; ulong * Aexps; @@ -764,14 +764,14 @@ void fq_nmod_mpolyu_divexact_mpoly_inplace( if (fq_nmod_mpoly_is_fq_nmod(c, ctx)) { slong d = fq_nmod_ctx_degree(ctx->fqctx); - mp_limb_t * inv; + ulong * inv; if (_n_fq_is_one(c->coeffs + d*0, d)) return; TMP_START; - inv = (mp_limb_t *) TMP_ALLOC(d*sizeof(mp_limb_t)); + inv = (ulong *) TMP_ALLOC(d*sizeof(ulong)); n_fq_inv(inv, c->coeffs + d*0, ctx->fqctx); diff --git a/src/fq_nmod_mpoly/mpolyu_divides.c b/src/fq_nmod_mpoly/mpolyu_divides.c index 810496512e..e635661815 100644 --- a/src/fq_nmod_mpoly/mpolyu_divides.c +++ b/src/fq_nmod_mpoly/mpolyu_divides.c @@ -16,9 +16,9 @@ /* A = D - B*C, coefficients of D are clobbered */ static void _fq_nmod_mpoly_mulsub(fq_nmod_mpoly_t A, - mp_limb_t * Dcoeff, const ulong * Dexp, slong Dlen, - const mp_limb_t * Bcoeffs, const ulong * Bexp, slong Blen, - const mp_limb_t * Ccoeffs, const ulong * Cexp, slong Clen, + ulong * Dcoeff, const ulong * Dexp, slong Dlen, + const ulong * Bcoeffs, const ulong * Bexp, slong Blen, + const ulong * Ccoeffs, const ulong * Cexp, slong Clen, flint_bitcnt_t bits, slong N, const ulong * cmpmask, const fq_nmod_ctx_t fqctx) { @@ -32,13 +32,13 @@ static void _fq_nmod_mpoly_mulsub(fq_nmod_mpoly_t A, mpoly_heap_t * x; slong Di; slong Alen; - mp_limb_t * Acoeffs = A->coeffs; + ulong * Acoeffs = A->coeffs; ulong * Aexps = A->exps; ulong * exp, * exps; ulong ** exp_list; slong exp_next; slong * hind; - mp_limb_t * t; + ulong * t; int lazy_size = _n_fq_dot_lazy_size(Blen, fqctx); TMP_INIT; @@ -61,7 +61,7 @@ static void _fq_nmod_mpoly_mulsub(fq_nmod_mpoly_t A, for (i = 0; i < Blen; i++) hind[i] = 1; - t = (mp_limb_t *) TMP_ALLOC(6*d*sizeof(mp_limb_t)); + t = (ulong *) TMP_ALLOC(6*d*sizeof(ulong)); /* start with no heap nodes and no exponent vectors in use */ exp_next = 0; diff --git a/src/fq_nmod_mpoly/mpolyu_gcdp_zippel.c b/src/fq_nmod_mpoly/mpolyu_gcdp_zippel.c index 15856b6394..ab0724960d 100644 --- a/src/fq_nmod_mpoly/mpolyu_gcdp_zippel.c +++ b/src/fq_nmod_mpoly/mpolyu_gcdp_zippel.c @@ -216,13 +216,13 @@ static int fq_nmod_mpolyu_evalfromsk( slong d = fq_nmod_ctx_degree(ctx->fqctx); slong i, j; int ret = 0; - mp_limb_t * pp, * acc; + ulong * pp, * acc; fq_nmod_t acct; FLINT_ASSERT(A->length == SK->length); - pp = FLINT_ARRAY_ALLOC(d, mp_limb_t); - acc = FLINT_ARRAY_ALLOC(d, mp_limb_t); + pp = FLINT_ARRAY_ALLOC(d, ulong); + acc = FLINT_ARRAY_ALLOC(d, ulong); fq_nmod_init(acct, ctx->fqctx); fq_nmod_poly_zero(e, ctx->fqctx); @@ -281,7 +281,7 @@ void fq_nmod_poly_product_roots(fq_nmod_poly_t P, fq_nmod_struct * r, for x */ -int fq_nmod_vandsolve(mp_limb_t * X, mp_limb_t * A, fq_nmod_struct * b, +int fq_nmod_vandsolve(ulong * X, ulong * A, fq_nmod_struct * b, slong n, const fq_nmod_ctx_t fqctx) { slong d = fq_nmod_ctx_degree(fqctx); diff --git a/src/fq_nmod_mpoly/mpolyun.c b/src/fq_nmod_mpoly/mpolyun.c index e4452f99c3..92b5b6f227 100644 --- a/src/fq_nmod_mpoly/mpolyun.c +++ b/src/fq_nmod_mpoly/mpolyun.c @@ -173,14 +173,14 @@ void fq_nmod_mpolyn_scalar_mul_fq_nmod( const fq_nmod_mpoly_ctx_t ctx) { slong i; - mp_limb_t * cc; + ulong * cc; FLINT_ASSERT(!fq_nmod_is_zero(c, ctx->fqctx)); if (fq_nmod_is_one(c, ctx->fqctx)) return; - cc = FLINT_ARRAY_ALLOC(fq_nmod_ctx_degree(ctx->fqctx), mp_limb_t); + cc = FLINT_ARRAY_ALLOC(fq_nmod_ctx_degree(ctx->fqctx), ulong); n_fq_set_fq_nmod(cc, c, 
ctx->fqctx); @@ -199,13 +199,13 @@ void fq_nmod_mpolyun_scalar_mul_fq_nmod( const fq_nmod_mpoly_ctx_t ctx) { slong i, j; - mp_limb_t * cc; + ulong * cc; FLINT_ASSERT(!fq_nmod_is_zero(c, ctx->fqctx)); if (fq_nmod_is_one(c, ctx->fqctx)) return; - cc = FLINT_ARRAY_ALLOC(fq_nmod_ctx_degree(ctx->fqctx), mp_limb_t); + cc = FLINT_ARRAY_ALLOC(fq_nmod_ctx_degree(ctx->fqctx), ulong); n_fq_set_fq_nmod(cc, c, ctx->fqctx); @@ -542,7 +542,7 @@ void fq_nmod_mpoly_from_mpolyn_perm_inflate( slong i, h, k, l; slong NA, NB; slong Alen; - mp_limb_t * Acoeff; + ulong * Acoeff; ulong * Aexp; ulong * Bexps; ulong * Aexps, * tAexp, * tAgexp; diff --git a/src/fq_nmod_mpoly/mul_johnson.c b/src/fq_nmod_mpoly/mul_johnson.c index 79280fd67c..590ba702c9 100644 --- a/src/fq_nmod_mpoly/mul_johnson.c +++ b/src/fq_nmod_mpoly/mul_johnson.c @@ -18,8 +18,8 @@ void _fq_nmod_mpoly_mul_johnson1( fq_nmod_mpoly_t A, - const mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, - const mp_limb_t * Ccoeffs, const ulong * Cexps, slong Clen, + const ulong * Bcoeffs, const ulong * Bexps, slong Blen, + const ulong * Ccoeffs, const ulong * Cexps, slong Clen, ulong maskhi, const fq_nmod_ctx_t ctx) { @@ -33,9 +33,9 @@ void _fq_nmod_mpoly_mul_johnson1( mpoly_heap_t * x; slong * hind; ulong exp; - mp_limb_t * t; + ulong * t; int lazy_size = _n_fq_dot_lazy_size(Blen, ctx); - mp_limb_t * Acoeffs = A->coeffs; + ulong * Acoeffs = A->coeffs; ulong * Aexps = A->exps; slong Acoeffs_alloc = A->coeffs_alloc; slong Aexps_alloc = A->exps_alloc; @@ -49,7 +49,7 @@ void _fq_nmod_mpoly_mul_johnson1( chain = (mpoly_heap_t *) TMP_ALLOC(Blen*sizeof(mpoly_heap_t)); store = store_base = (slong *) TMP_ALLOC(2*Blen*sizeof(slong)); hind = (slong *) TMP_ALLOC(Blen*sizeof(slong)); - t = (mp_limb_t *) TMP_ALLOC(6*d*sizeof(mp_limb_t)); + t = (ulong *) TMP_ALLOC(6*d*sizeof(ulong)); for (i = 0; i < Blen; i++) hind[i] = 1; @@ -183,8 +183,8 @@ void _fq_nmod_mpoly_mul_johnson1( void _fq_nmod_mpoly_mul_johnson( fq_nmod_mpoly_t A, - const mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, - const mp_limb_t * Ccoeffs, const ulong * Cexps, slong Clen, + const ulong * Bcoeffs, const ulong * Bexps, slong Blen, + const ulong * Ccoeffs, const ulong * Cexps, slong Clen, flint_bitcnt_t bits, slong N, const ulong * cmpmask, @@ -202,9 +202,9 @@ void _fq_nmod_mpoly_mul_johnson( ulong ** exp_list; slong exp_next; slong * hind; - mp_limb_t * t; + ulong * t; int lazy_size = _n_fq_dot_lazy_size(Blen, ctx); - mp_limb_t * Acoeffs = A->coeffs; + ulong * Acoeffs = A->coeffs; ulong * Aexps = A->exps; slong Alen; TMP_INIT; @@ -229,7 +229,7 @@ void _fq_nmod_mpoly_mul_johnson( exps = (ulong *) TMP_ALLOC(Blen*N*sizeof(ulong)); exp_list = (ulong **) TMP_ALLOC(Blen*sizeof(ulong *)); hind = (slong *) TMP_ALLOC(Blen*sizeof(slong)); - t = (mp_limb_t *) TMP_ALLOC(6*d*sizeof(mp_limb_t)); + t = (ulong *) TMP_ALLOC(6*d*sizeof(ulong)); for (i = 0; i < Blen; i++) { diff --git a/src/fq_nmod_mpoly/profile/p-gcd.c b/src/fq_nmod_mpoly/profile/p-gcd.c index 91438af23e..370bdf262d 100644 --- a/src/fq_nmod_mpoly/profile/p-gcd.c +++ b/src/fq_nmod_mpoly/profile/p-gcd.c @@ -127,7 +127,7 @@ int main(int argc, char *argv[]) { slong i; const char * vars[] = {"x", "y", "z", "t" ,"u", "v", "w", "s", "p"}; - mp_limb_t p = UWORD(4611686018427388073); + ulong p = UWORD(4611686018427388073); print_banner(); for (i = 50; i < 100; i += 4) diff --git a/src/fq_nmod_mpoly/quadratic_root.c b/src/fq_nmod_mpoly/quadratic_root.c index 54c1f16d85..f6ae8c9da7 100644 --- a/src/fq_nmod_mpoly/quadratic_root.c +++ 
b/src/fq_nmod_mpoly/quadratic_root.c @@ -17,22 +17,22 @@ /* solve z^2+z=c */ static int _quadratic_root_const( - mp_limb_t * z, - const mp_limb_t * c, + ulong * z, + const ulong * c, const fq_nmod_ctx_t fqctx) { slong i, d = fq_nmod_ctx_degree(fqctx); - mp_limb_t * t, * p, * u, * cp, * ut, * up, * ct; + ulong * t, * p, * u, * cp, * ut, * up, * ct; int success; TMP_INIT; #if FLINT_WANT_ASSERT - mp_limb_t * c_org = FLINT_ARRAY_ALLOC(d, mp_limb_t); + ulong * c_org = FLINT_ARRAY_ALLOC(d, ulong); _n_fq_set(c_org, c, d); #endif TMP_START; i = FLINT_MAX(N_FQ_REDUCE_ITCH, N_FQ_MUL_INV_ITCH); - t = (mp_limb_t *) TMP_ALLOC((i + 7)*d*sizeof(mp_limb_t)); + t = (ulong *) TMP_ALLOC((i + 7)*d*sizeof(ulong)); p = t + d*i; u = p + d*2; ut = u + d; @@ -94,8 +94,8 @@ static int _quadratic_root_const( */ static int _fq_nmod_mpoly_quadratic_root_heap( fq_nmod_mpoly_t Q, - const mp_limb_t * Acoeffs, const ulong * Aexps, slong Alen, - const mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, + const ulong * Bcoeffs, const ulong * Bexps, slong Blen, slong bits, slong N, const ulong * cmpmask, @@ -109,13 +109,13 @@ static int _fq_nmod_mpoly_quadratic_root_heap( mpoly_heap_t * chain; slong * store, * store_base; mpoly_heap_t * x; - mp_limb_t * Qcoeffs = Q->coeffs; + ulong * Qcoeffs = Q->coeffs; ulong * Qexps = Q->exps; ulong * exp, * exps; ulong ** exp_list; slong exp_next; ulong mask; - mp_limb_t * t, * c, * lcAinv; + ulong * t, * c, * lcAinv; int mcmp; TMP_INIT; @@ -124,7 +124,7 @@ static int _fq_nmod_mpoly_quadratic_root_heap( TMP_START; - t = (mp_limb_t *) TMP_ALLOC(8*d*sizeof(mp_limb_t)); + t = (ulong *) TMP_ALLOC(8*d*sizeof(ulong)); c = t + 6*d; lcAinv = c + d; _n_fq_inv(lcAinv, Acoeffs + d*0, fqctx, t); @@ -187,7 +187,7 @@ static int _fq_nmod_mpoly_quadratic_root_heap( } else { - const mp_limb_t * s = (x->i == -UWORD(2)) ? + const ulong * s = (x->i == -UWORD(2)) ? 
Qcoeffs + d*x->j : Acoeffs + d*x->i; FLINT_ASSERT(x->j < Qlen); FLINT_ASSERT(x->i == -UWORD(2) || x->i < Alen); @@ -456,7 +456,7 @@ int fq_nmod_mpoly_quadratic_root( if (ctx->fqctx->mod.n != 2) { - mp_limb_t mhalf = (ctx->fqctx->mod.n - 1)/2; + ulong mhalf = (ctx->fqctx->mod.n - 1)/2; fq_nmod_mpoly_t t1, t2; fq_nmod_t c; diff --git a/src/fq_nmod_mpoly/realloc.c b/src/fq_nmod_mpoly/realloc.c index 5948a259c1..eef0708352 100644 --- a/src/fq_nmod_mpoly/realloc.c +++ b/src/fq_nmod_mpoly/realloc.c @@ -32,5 +32,5 @@ void fq_nmod_mpoly_realloc( A->exps = (ulong *) flint_realloc(A->exps, A->exps_alloc*sizeof(ulong)); A->coeffs_alloc = d*alloc; - A->coeffs = (mp_limb_t *) flint_realloc(A->coeffs, A->coeffs_alloc*sizeof(ulong)); + A->coeffs = (ulong *) flint_realloc(A->coeffs, A->coeffs_alloc*sizeof(ulong)); } diff --git a/src/fq_nmod_mpoly/scalar_addmul_fq_nmod.c b/src/fq_nmod_mpoly/scalar_addmul_fq_nmod.c index baa28a1028..5d2c69484f 100644 --- a/src/fq_nmod_mpoly/scalar_addmul_fq_nmod.c +++ b/src/fq_nmod_mpoly/scalar_addmul_fq_nmod.c @@ -15,22 +15,22 @@ #include "fq_nmod_mpoly.h" static slong _fq_nmod_mpoly_scalar_addmul_n_fq( - mp_limb_t * Acoeffs, ulong * Aexps, - mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, - mp_limb_t * Ccoeffs, const ulong * Cexps, slong Clen, - const mp_limb_t * f, + ulong * Acoeffs, ulong * Aexps, + ulong * Bcoeffs, const ulong * Bexps, slong Blen, + ulong * Ccoeffs, const ulong * Cexps, slong Clen, + const ulong * f, slong N, const ulong * cmpmask, const fq_nmod_ctx_t fqctx) { slong d = fq_nmod_ctx_degree(fqctx); slong i = 0, j = 0, k = 0; - mp_limb_t * tmp; + ulong * tmp; TMP_INIT; TMP_START; - tmp = (mp_limb_t *) TMP_ALLOC(d*N_FQ_MUL_ITCH*sizeof(mp_limb_t)); + tmp = (ulong *) TMP_ALLOC(d*N_FQ_MUL_ITCH*sizeof(ulong)); while (i < Blen && j < Clen) { @@ -94,7 +94,7 @@ void fq_nmod_mpoly_scalar_addmul_fq_nmod( slong N = mpoly_words_per_exp(Abits, ctx->minfo); ulong * cmpmask; int freeBexps = 0, freeCexps = 0; - mp_limb_t * f; + ulong * f; TMP_INIT; if (fq_nmod_mpoly_is_zero(B, ctx)) @@ -111,7 +111,7 @@ void fq_nmod_mpoly_scalar_addmul_fq_nmod( TMP_START; cmpmask = (ulong*) TMP_ALLOC(N*sizeof(ulong)); mpoly_get_cmpmask(cmpmask, N, Abits, ctx->minfo); - f = (mp_limb_t *) TMP_ALLOC(d*sizeof(mp_limb_t)); + f = (ulong *) TMP_ALLOC(d*sizeof(ulong)); n_fq_set_fq_nmod(f, e, ctx->fqctx); if (Abits != B->bits) diff --git a/src/fq_nmod_mpoly/scalar_mul_fq_nmod.c b/src/fq_nmod_mpoly/scalar_mul_fq_nmod.c index b6af66bbce..ccae33bbdc 100644 --- a/src/fq_nmod_mpoly/scalar_mul_fq_nmod.c +++ b/src/fq_nmod_mpoly/scalar_mul_fq_nmod.c @@ -17,12 +17,12 @@ void fq_nmod_mpoly_scalar_mul_n_fq( fq_nmod_mpoly_t A, const fq_nmod_mpoly_t B, - const mp_limb_t * c, + const ulong * c, const fq_nmod_mpoly_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx->fqctx); slong i; - mp_limb_t * t; + ulong * t; TMP_INIT; if (_n_fq_is_zero(c, d)) @@ -53,7 +53,7 @@ void fq_nmod_mpoly_scalar_mul_n_fq( TMP_START; - t = (mp_limb_t *) TMP_ALLOC(d*N_FQ_MUL_ITCH*sizeof(mp_limb_t)); + t = (ulong *) TMP_ALLOC(d*N_FQ_MUL_ITCH*sizeof(ulong)); for (i = 0; i < B->length; i++) _n_fq_mul(A->coeffs + d*i, B->coeffs + d*i, c, ctx->fqctx, t); @@ -70,7 +70,7 @@ void fq_nmod_mpoly_scalar_mul_fq_nmod( { slong d = fq_nmod_ctx_degree(ctx->fqctx); slong i; - mp_limb_t * t; + ulong * t; TMP_INIT; if (fq_nmod_is_zero(c, ctx->fqctx)) @@ -102,7 +102,7 @@ void fq_nmod_mpoly_scalar_mul_fq_nmod( TMP_START; - t = (mp_limb_t *) TMP_ALLOC(d*(1 + N_FQ_MUL_ITCH)*sizeof(mp_limb_t)); + t = (ulong *) TMP_ALLOC(d*(1 + N_FQ_MUL_ITCH)*sizeof(ulong)); 
n_fq_set_fq_nmod(t, c, ctx->fqctx); for (i = 0; i < B->length; i++) diff --git a/src/fq_nmod_mpoly/set_fq_nmod.c b/src/fq_nmod_mpoly/set_fq_nmod.c index ae6eee5f5d..f0219c0557 100644 --- a/src/fq_nmod_mpoly/set_fq_nmod.c +++ b/src/fq_nmod_mpoly/set_fq_nmod.c @@ -16,7 +16,7 @@ void fq_nmod_mpoly_set_n_fq( fq_nmod_mpoly_t A, - const mp_limb_t * c, + const ulong * c, const fq_nmod_mpoly_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx->fqctx); diff --git a/src/fq_nmod_mpoly/sqrt_heap.c b/src/fq_nmod_mpoly/sqrt_heap.c index c9b92cd1e2..7cf4a8b91e 100644 --- a/src/fq_nmod_mpoly/sqrt_heap.c +++ b/src/fq_nmod_mpoly/sqrt_heap.c @@ -25,7 +25,7 @@ static int _is_proved_not_square( int count, flint_rand_t state, - const mp_limb_t * Acoeffs, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, flint_bitcnt_t Abits, @@ -86,7 +86,7 @@ static int _is_proved_not_square( return success; } -static int n_fq_sqrt(mp_limb_t * q, const mp_limb_t * a, const fq_nmod_ctx_t ctx) +static int n_fq_sqrt(ulong * q, const ulong * a, const fq_nmod_ctx_t ctx) { int res; fq_nmod_t t; @@ -101,7 +101,7 @@ static int n_fq_sqrt(mp_limb_t * q, const mp_limb_t * a, const fq_nmod_ctx_t ctx static int _fq_nmod_mpoly_sqrt_heap( fq_nmod_mpoly_t Q, - const mp_limb_t * Acoeffs, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, flint_bitcnt_t bits, @@ -120,14 +120,14 @@ static int _fq_nmod_mpoly_sqrt_heap( mpoly_heap_t ** chain; slong * store, * store_base; mpoly_heap_t * x; - mp_limb_t * Qcoeffs = Q->coeffs; + ulong * Qcoeffs = Q->coeffs; ulong * Qexps = Q->exps; ulong * exp, * exp3; ulong * exps[64]; ulong ** exp_list; slong exp_next; ulong mask; - mp_limb_t * t, * t2, * lc_inv; + ulong * t, * t2, * lc_inv; int lt_divides, halves; flint_rand_t heuristic_state; int heuristic_count = 0; @@ -135,7 +135,7 @@ static int _fq_nmod_mpoly_sqrt_heap( TMP_START; - t = (mp_limb_t *) TMP_ALLOC(13*d*sizeof(mp_limb_t)); + t = (ulong *) TMP_ALLOC(13*d*sizeof(ulong)); t2 = t + 6*d; lc_inv = t2 + 6*d; @@ -259,7 +259,7 @@ static int _fq_nmod_mpoly_sqrt_heap( exp_list[--exp_next] = heap[1].exp; x = _mpoly_heap_pop(heap, &heap_len, N, cmpmask); do { - mp_limb_t * dest; + ulong * dest; *store++ = x->i; *store++ = x->j; dest = (x->i != x->j) ? t2 : t; @@ -398,11 +398,11 @@ int fq_nmod_mpoly_sqrt_heap(fq_nmod_mpoly_t Q, const fq_nmod_mpoly_t A, { slong d = fq_nmod_ctx_degree(ctx->fqctx); flint_bitcnt_t bits = A->bits; - mp_limb_t * Aexps = A->exps; + ulong * Aexps = A->exps; slong Alen = A->length; slong i, j, N = mpoly_words_per_exp(bits, ctx->minfo); ulong mask = (bits <= FLINT_BITS) ? 
mpoly_overflow_mask_sp(bits) : 0; - mp_limb_t * t; + ulong * t; if (Q != A) fq_nmod_mpoly_fit_length_reset_bits(Q, Alen, bits, ctx); @@ -418,7 +418,7 @@ int fq_nmod_mpoly_sqrt_heap(fq_nmod_mpoly_t Q, const fq_nmod_mpoly_t A, } } - t = FLINT_ARRAY_ALLOC(N_FQ_MUL_ITCH*d, mp_limb_t); + t = FLINT_ARRAY_ALLOC(N_FQ_MUL_ITCH*d, ulong); for (i = 0; i < Alen; i++) { diff --git a/src/fq_nmod_mpoly/sub.c b/src/fq_nmod_mpoly/sub.c index 565dba8b15..84aeb640c4 100644 --- a/src/fq_nmod_mpoly/sub.c +++ b/src/fq_nmod_mpoly/sub.c @@ -15,9 +15,9 @@ #include "fq_nmod_mpoly.h" slong _fq_nmod_mpoly_sub( - mp_limb_t * Acoeffs, ulong * Aexps, - mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, - mp_limb_t * Ccoeffs, const ulong * Cexps, slong Clen, + ulong * Acoeffs, ulong * Aexps, + ulong * Bcoeffs, const ulong * Bexps, slong Blen, + ulong * Ccoeffs, const ulong * Cexps, slong Clen, slong N, const ulong * cmpmask, const fq_nmod_ctx_t fqctx) { slong d = fq_nmod_ctx_degree(fqctx); diff --git a/src/fq_nmod_mpoly/test/main.c b/src/fq_nmod_mpoly/test/main.c index b4c4d4d021..7daf7a4d92 100644 --- a/src/fq_nmod_mpoly/test/main.c +++ b/src/fq_nmod_mpoly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add_sub.c" diff --git a/src/fq_nmod_mpoly/test/t-gcd.c b/src/fq_nmod_mpoly/test/t-gcd.c index a662174896..bbc35a617a 100644 --- a/src/fq_nmod_mpoly/test/t-gcd.c +++ b/src/fq_nmod_mpoly/test/t-gcd.c @@ -230,7 +230,7 @@ TEST_FUNCTION_START(fq_nmod_mpoly_gcd, state) fq_nmod_mpoly_ctx_t ctx; fq_nmod_mpoly_t a, b, g, t1, t2; slong len, len1, len2; - mp_limb_t exp_bound, exp_bound1, exp_bound2; + ulong exp_bound, exp_bound1, exp_bound2; fq_nmod_mpoly_ctx_init_rand(ctx, state, 10, FLINT_BITS, 5); fq_nmod_init(c, ctx->fqctx); @@ -323,7 +323,7 @@ TEST_FUNCTION_START(fq_nmod_mpoly_gcd, state) { fq_nmod_mpoly_ctx_t ctx; fq_nmod_mpoly_t a, b, g, t; - mp_limb_t rlimb; + ulong rlimb; flint_bitcnt_t newbits; slong len, len1, len2; slong degbound; @@ -520,7 +520,7 @@ TEST_FUNCTION_START(fq_nmod_mpoly_gcd, state) { fq_nmod_mpoly_ctx_t ctx; fq_nmod_mpoly_t a, b, g, t; - mp_limb_t rlimb; + ulong rlimb; flint_bitcnt_t newbits; slong len1, len2, len3, len4; ulong degbounds1[4]; diff --git a/src/fq_nmod_mpoly/test/t-gcd_cofactors.c b/src/fq_nmod_mpoly/test/t-gcd_cofactors.c index 32c423705c..ee752836bc 100644 --- a/src/fq_nmod_mpoly/test/t-gcd_cofactors.c +++ b/src/fq_nmod_mpoly/test/t-gcd_cofactors.c @@ -367,7 +367,7 @@ TEST_FUNCTION_START(fq_nmod_mpoly_gcd_cofactors, state) fq_nmod_mpoly_ctx_t ctx; fq_nmod_mpoly_t a, b, g, abar, bbar, t1, t2; slong len, len1, len2; - mp_limb_t exp_bound, exp_bound1, exp_bound2; + ulong exp_bound, exp_bound1, exp_bound2; fq_nmod_mpoly_ctx_init_rand(ctx, state, 10, FLINT_BITS, 5); @@ -469,7 +469,7 @@ TEST_FUNCTION_START(fq_nmod_mpoly_gcd_cofactors, state) { fq_nmod_mpoly_ctx_t ctx; fq_nmod_mpoly_t a, b, g, abar, bbar, t; - mp_limb_t rlimb; + ulong rlimb; flint_bitcnt_t newbits; slong len, len1, len2; slong degbound; @@ -677,7 +677,7 @@ TEST_FUNCTION_START(fq_nmod_mpoly_gcd_cofactors, state) { fq_nmod_mpoly_ctx_t ctx; fq_nmod_mpoly_t a, b, g, abar, bbar, t; - mp_limb_t rlimb; + ulong rlimb; flint_bitcnt_t newbits; slong len1, len2, len3, len4; ulong degbounds1[4]; diff --git a/src/fq_nmod_mpoly/test/t-univar_resultant.c b/src/fq_nmod_mpoly/test/t-univar_resultant.c index 75f0ba90b7..258e181416 100644 --- a/src/fq_nmod_mpoly/test/t-univar_resultant.c +++ 
b/src/fq_nmod_mpoly/test/t-univar_resultant.c @@ -137,7 +137,7 @@ TEST_FUNCTION_START(fq_nmod_mpoly_univar_resultant, state) { fq_nmod_mpoly_ctx_t ctx; fq_nmod_mpoly_t f, g, t; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); diff --git a/src/fq_nmod_mpoly/to_from_fq_nmod_poly.c b/src/fq_nmod_mpoly/to_from_fq_nmod_poly.c index 5d928e34ee..ce9fc5dc3f 100644 --- a/src/fq_nmod_mpoly/to_from_fq_nmod_poly.c +++ b/src/fq_nmod_mpoly/to_from_fq_nmod_poly.c @@ -31,7 +31,7 @@ void _fq_nmod_mpoly_to_fq_nmod_poly_deflate( ulong mask; slong i, shift, off, N; slong len = B->length; - mp_limb_t * coeff = B->coeffs; + ulong * coeff = B->coeffs; ulong * exp = B->exps; ulong var_shift, var_stride; flint_bitcnt_t bits = B->bits; diff --git a/src/fq_nmod_mpoly/univar.c b/src/fq_nmod_mpoly/univar.c index 80d3007fd2..6c651996fb 100644 --- a/src/fq_nmod_mpoly/univar.c +++ b/src/fq_nmod_mpoly/univar.c @@ -236,7 +236,7 @@ void fq_nmod_mpoly_to_univar(fq_nmod_mpoly_univar_t A, const fq_nmod_mpoly_t B, slong N = mpoly_words_per_exp(bits, ctx->minfo); slong shift, off; slong Blen = B->length; - const mp_limb_t * Bcoeff = B->coeffs; + const ulong * Bcoeff = B->coeffs; const ulong * Bexp = B->exps; slong i; int its_new; diff --git a/src/fq_nmod_mpoly_factor.h b/src/fq_nmod_mpoly_factor.h index 51ff504495..9dc779cbc4 100644 --- a/src/fq_nmod_mpoly_factor.h +++ b/src/fq_nmod_mpoly_factor.h @@ -373,7 +373,7 @@ int fq_nmod_mpoly_factor_irred_lgprime_zippel( /*****************************************************************************/ void fq_nmod_mpoly_compression_do(fq_nmod_mpoly_t L, - const fq_nmod_mpoly_ctx_t Lctx, mp_limb_t * Acoeffs, slong Alen, + const fq_nmod_mpoly_ctx_t Lctx, ulong * Acoeffs, slong Alen, mpoly_compression_t M); void fq_nmod_mpoly_compression_undo(fq_nmod_mpoly_t A, @@ -498,7 +498,7 @@ int fq_nmod_mpoly_factor_zippel(fq_nmod_mpoly_factor_t f, int _fq_nmod_mpoly_eval_rest_n_fq_poly(n_poly_struct * E, slong * starts, slong * ends, slong * stops, ulong * es, - const mp_limb_t * Acoeffs, const ulong * Aexps, slong Alen, slong var, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, slong var, const n_fq_poly_struct * alphas, const slong * offsets, const slong * shifts, slong N, ulong mask, slong nvars, const fq_nmod_ctx_t ctx); diff --git a/src/fq_nmod_mpoly_factor/compression.c b/src/fq_nmod_mpoly_factor/compression.c index a09e52dc8e..fed0239fd0 100644 --- a/src/fq_nmod_mpoly_factor/compression.c +++ b/src/fq_nmod_mpoly_factor/compression.c @@ -17,7 +17,7 @@ void fq_nmod_mpoly_compression_do( fq_nmod_mpoly_t L, const fq_nmod_mpoly_ctx_t Lctx, - mp_limb_t * Acoeffs, + ulong * Acoeffs, slong Alen, mpoly_compression_t M) { diff --git a/src/fq_nmod_mpoly_factor/eval.c b/src/fq_nmod_mpoly_factor/eval.c index 31d77b8e68..cf513a92b8 100644 --- a/src/fq_nmod_mpoly_factor/eval.c +++ b/src/fq_nmod_mpoly_factor/eval.c @@ -24,7 +24,7 @@ int _fq_nmod_mpoly_eval_rest_n_fq_poly( slong * ends, slong * stops, ulong * es, - const mp_limb_t * Acoeffs, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, slong var, diff --git a/src/fq_nmod_mpoly_factor/irred_smprime_zippel.c b/src/fq_nmod_mpoly_factor/irred_smprime_zippel.c index b7e6426e15..058a9aedd1 100644 --- a/src/fq_nmod_mpoly_factor/irred_smprime_zippel.c +++ b/src/fq_nmod_mpoly_factor/irred_smprime_zippel.c @@ -84,11 +84,11 @@ static void fq_nmod_mpoly_set_eval_helper3( ulong y, x, z; slong yoff, xoff, zoff, * off; slong yshift, xshift, zshift, * shift; - mp_limb_t * p; + 
ulong * p; flint_bitcnt_t bits = A->bits; slong Alen = A->length; const ulong * Aexps = A->exps; - const mp_limb_t * Acoeffs = A->coeffs; + const ulong * Acoeffs = A->coeffs; slong N = mpoly_words_per_exp(bits, ctx->minfo); ulong mask = (-UWORD(1)) >> (FLINT_BITS - bits); ulong * ind; @@ -194,11 +194,11 @@ static void fq_nmod_mpoly_set_evalp_helper3( ulong y, x, z; slong yoff, xoff, zoff, * off; slong yshift, xshift, zshift, * shift; - mp_limb_t * p; + ulong * p; flint_bitcnt_t bits = A->bits; slong Alen = A->length; const ulong * Aexps = A->exps; - const mp_limb_t * Acoeffs = A->coeffs; + const ulong * Acoeffs = A->coeffs; slong N = mpoly_words_per_exp(bits, ctx->minfo); ulong mask = (-UWORD(1)) >> (FLINT_BITS - bits); ulong * ind; @@ -311,13 +311,13 @@ static slong fq_nmod_mpoly_set_eval_helper_and_zip_form3( slong i, j, k, n; slong * off, * shift; ulong y, x, z; - mp_limb_t * p; + ulong * p; fq_nmod_mpoly_struct * Hc; slong old_len, zip_length = 0; flint_bitcnt_t bits = B->bits; slong Blen = B->length; const ulong * Bexps = B->exps; - const mp_limb_t * Bcoeffs = B->coeffs; + const ulong * Bcoeffs = B->coeffs; slong N = mpoly_words_per_exp(bits, ctx->minfo); ulong mask = (-UWORD(1)) >> (FLINT_BITS - bits); ulong * ind; @@ -468,13 +468,13 @@ static slong fq_nmod_mpoly_set_evalp_helper_and_zip_form3( slong i, j, k, n; slong * off, * shift; ulong y, x, z; - mp_limb_t * p; + ulong * p; fq_nmod_mpoly_struct * Hc; slong old_len, zip_length = 0; flint_bitcnt_t bits = B->bits; slong Blen = B->length; const ulong * Bexps = B->exps; - const mp_limb_t * Bcoeffs = B->coeffs; + const ulong * Bcoeffs = B->coeffs; slong N = mpoly_words_per_exp(bits, ctx->minfo); ulong mask = (-UWORD(1)) >> (FLINT_BITS - bits); ulong * ind; @@ -618,7 +618,7 @@ static void fq_nmod_polyu_eval_step( { slong d = fq_nmod_ctx_degree(ctx); slong Ai, Ei, n; - mp_limb_t * p; + ulong * p; n_polyu_fit_length(E, d*A->length); @@ -646,7 +646,7 @@ static void fq_nmod_polyu_evalp_step( { slong d = fq_nmod_ctx_degree(ctx); slong Ai, Ei, n; - mp_limb_t * p; + ulong * p; n_polyu_fit_length(E, d*A->length); @@ -833,7 +833,7 @@ static int fq_nmod_mpoly_from_zip( slong zvar = 1; ulong x, y, z; flint_bitcnt_t bits = B->bits; - mp_limb_t * Bcoeffs; + ulong * Bcoeffs; ulong * Bexps; slong N = mpoly_words_per_exp_sp(bits, ctx->minfo); ulong mask = (-UWORD(1)) >> (FLINT_BITS - bits); @@ -932,7 +932,7 @@ static int fq_nmod_mpoly_from_zipp( slong zvar = 1; ulong x, y, z; flint_bitcnt_t bits = B->bits; - mp_limb_t * Bcoeffs; + ulong * Bcoeffs; ulong * Bexps; slong N = mpoly_words_per_exp_sp(bits, ctx->minfo); ulong mask = (-UWORD(1)) >> (FLINT_BITS - bits); diff --git a/src/fq_nmod_mpoly_factor/n_bpoly_fq.c b/src/fq_nmod_mpoly_factor/n_bpoly_fq.c index ce726efcac..10297ab215 100644 --- a/src/fq_nmod_mpoly_factor/n_bpoly_fq.c +++ b/src/fq_nmod_mpoly_factor/n_bpoly_fq.c @@ -348,8 +348,8 @@ void n_fq_bpoly_make_primitive( { slong d = fq_nmod_ctx_degree(ctx); n_poly_struct * Alead = A->coeffs + Alen - 1; - mp_limb_t * c, * c_ = Alead->coeffs + d*(Alead->length - 1); - c = FLINT_ARRAY_ALLOC(d, mp_limb_t); + ulong * c, * c_ = Alead->coeffs + d*(Alead->length - 1); + c = FLINT_ARRAY_ALLOC(d, ulong); if (!_n_fq_is_one(c_, d)) { n_fq_poly_scalar_mul_n_fq(g, g, c_, ctx); diff --git a/src/fq_nmod_mpoly_factor/n_bpoly_fq_factor_lgprime.c b/src/fq_nmod_mpoly_factor/n_bpoly_fq_factor_lgprime.c index ec89ff572e..733c6e34f6 100644 --- a/src/fq_nmod_mpoly_factor/n_bpoly_fq_factor_lgprime.c +++ b/src/fq_nmod_mpoly_factor/n_bpoly_fq_factor_lgprime.c @@ -191,11 
+191,11 @@ static void _lattice( n_bpoly_struct * ld; nmod_mat_t M, T1, T2; int nlimbs; - mp_limb_t * trow; + ulong * trow; slong lift_order = lift_alpha_pow->length - 1; nlimbs = _nmod_vec_dot_bound_limbs(r, ctx->mod); - trow = (mp_limb_t *) flint_malloc(r*sizeof(mp_limb_t)); + trow = (ulong *) flint_malloc(r*sizeof(ulong)); n_bpoly_init(Q); n_bpoly_init(R); n_bpoly_init(dg); diff --git a/src/fq_nmod_mpoly_factor/n_bpoly_fq_factor_smprime.c b/src/fq_nmod_mpoly_factor/n_bpoly_fq_factor_smprime.c index 2676a7e184..a4cc35e13b 100644 --- a/src/fq_nmod_mpoly_factor/n_bpoly_fq_factor_smprime.c +++ b/src/fq_nmod_mpoly_factor/n_bpoly_fq_factor_smprime.c @@ -957,10 +957,10 @@ static void _lattice( n_fq_bpoly_struct * ld; nmod_mat_t M, T1, T2; int nlimbs; - mp_limb_t * trow; + ulong * trow; nlimbs = _nmod_vec_dot_bound_limbs(r, ctx->mod); - trow = (mp_limb_t *) flint_malloc(r*sizeof(mp_limb_t)); + trow = (ulong *) flint_malloc(r*sizeof(ulong)); n_fq_bpoly_init(Q); n_fq_bpoly_init(R); n_fq_bpoly_init(dg); diff --git a/src/fq_nmod_mpoly_factor/n_bpoly_hlift.c b/src/fq_nmod_mpoly_factor/n_bpoly_hlift.c index bab9cd2bdd..1c6e82e42f 100644 --- a/src/fq_nmod_mpoly_factor/n_bpoly_hlift.c +++ b/src/fq_nmod_mpoly_factor/n_bpoly_hlift.c @@ -38,7 +38,7 @@ int n_fq_bpoly_hlift2_cubic( slong i, j; n_fq_poly_struct * c, * s, * t, * u, * v, * g, * ce; n_fq_bpoly_struct * B0e, * B1e; - mp_limb_t * alpha; + ulong * alpha; FLINT_ASSERT(n_fq_bpoly_is_canonical(A, ctx)); FLINT_ASSERT(n_fq_bpoly_is_canonical(B0, ctx)); @@ -60,7 +60,7 @@ int n_fq_bpoly_hlift2_cubic( B0e = n_bpoly_stack_take_top(St->bpoly_stack); B1e = n_bpoly_stack_take_top(St->bpoly_stack); - alpha = FLINT_ARRAY_ALLOC(d, mp_limb_t); + alpha = FLINT_ARRAY_ALLOC(d, ulong); n_fq_set_fq_nmod(alpha, alpha_, ctx); n_fq_bpoly_taylor_shift_gen0_n_fq(A, alpha, ctx); @@ -231,7 +231,7 @@ int n_fq_bpoly_hlift2( int success; slong i, j; n_fq_poly_struct * c, * s, * t, * u, * v, * g; - mp_limb_t * alpha; + ulong * alpha; FLINT_ASSERT(n_fq_bpoly_is_canonical(A, ctx)); FLINT_ASSERT(n_fq_bpoly_is_canonical(B0, ctx)); @@ -248,7 +248,7 @@ int n_fq_bpoly_hlift2( v = n_poly_stack_take_top(St->poly_stack); g = n_poly_stack_take_top(St->poly_stack); - alpha = FLINT_ARRAY_ALLOC(d, mp_limb_t); + alpha = FLINT_ARRAY_ALLOC(d, ulong); n_fq_set_fq_nmod(alpha, alpha_, ctx); n_fq_bpoly_taylor_shift_gen0_n_fq(A, alpha, ctx); diff --git a/src/fq_nmod_mpoly_factor/polyu3_hlift.c b/src/fq_nmod_mpoly_factor/polyu3_hlift.c index e9a36420c1..4223441952 100644 --- a/src/fq_nmod_mpoly_factor/polyu3_hlift.c +++ b/src/fq_nmod_mpoly_factor/polyu3_hlift.c @@ -142,7 +142,7 @@ void n_fq_poly_fill_power( n_fq_poly_t alphapow, slong e, const fq_nmod_ctx_t ctx, - mp_limb_t * tmp) + ulong * tmp) { if (e + 1 > alphapow->length) { @@ -170,12 +170,12 @@ void fq_nmod_polyu3_interp_reduce_bpoly( slong d = fq_nmod_ctx_degree(ctx); slong i; slong cur0, cur1, e0, e1, e2; - mp_limb_t * tmp, * t; + ulong * tmp, * t; TMP_INIT; TMP_START; - tmp = (mp_limb_t *) TMP_ALLOC(d*(1 + N_FQ_MUL_ITCH)*sizeof(mp_limb_t)); + tmp = (ulong *) TMP_ALLOC(d*(1 + N_FQ_MUL_ITCH)*sizeof(ulong)); t = tmp + d*N_FQ_MUL_ITCH; n_bpoly_zero(Ap); @@ -293,7 +293,7 @@ int fq_nmod_polyu3n_interp_crt_sm_bpoly( slong Fi; const n_poly_struct * Acoeffs = A->coeffs; slong Ai, ai; - mp_limb_t * v = FLINT_ARRAY_ALLOC(d, mp_limb_t); + ulong * v = FLINT_ARRAY_ALLOC(d, ulong); FLINT_ASSERT(n_fq_bpoly_is_canonical(A, ctx)); FLINT_ASSERT(n_polyun_fq_is_canonical(F, ctx)); @@ -436,7 +436,7 @@ int n_fq_polyu3_hlift( slong * BBdegZ; slong AdegY, AdegX, 
AdegZ; slong bad_primes_left; - mp_limb_t * c = FLINT_ARRAY_ALLOC(d, mp_limb_t); + ulong * c = FLINT_ARRAY_ALLOC(d, ulong); nmod_eval_interp_t E; fq_nmod_init(alpha, ctx); diff --git a/src/fq_nmod_mpoly_factor/profile/p-factor.c b/src/fq_nmod_mpoly_factor/profile/p-factor.c index 9b872ec489..8e626cd759 100644 --- a/src/fq_nmod_mpoly_factor/profile/p-factor.c +++ b/src/fq_nmod_mpoly_factor/profile/p-factor.c @@ -59,7 +59,7 @@ flint_printf("---------------------------\n"); int main(int argc, char *argv[]) { slong i, time, total_time = 0; - mp_limb_t p = UWORD(4611686018427388073); + ulong p = UWORD(4611686018427388073); flint_printf("starting dense\n"); { diff --git a/src/fq_nmod_mpoly_factor/test/main.c b/src/fq_nmod_mpoly_factor/test/main.c index ea5ae23672..3be370e674 100644 --- a/src/fq_nmod_mpoly_factor/test/main.c +++ b/src/fq_nmod_mpoly_factor/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-factor.c" diff --git a/src/fq_nmod_poly/mul_univariate.c b/src/fq_nmod_poly/mul_univariate.c index d1d0ea9d06..ef02057459 100644 --- a/src/fq_nmod_poly/mul_univariate.c +++ b/src/fq_nmod_poly/mul_univariate.c @@ -34,9 +34,9 @@ _fq_nmod_poly_mul_univariate_no_pad (fq_nmod_struct * rop, slong i; slong len; - mp_ptr cop1, cop2, crop; + nn_ptr cop1, cop2, crop; - cop1 = (mp_limb_t *) flint_malloc(clen1*sizeof(mp_limb_t)); + cop1 = (ulong *) flint_malloc(clen1*sizeof(ulong)); for (i = 0; i < len1 - 1; i++) { flint_mpn_copyi(cop1 + pfqlen*i, (op1 + i)->coeffs, (op1 + i)->length); @@ -48,7 +48,7 @@ _fq_nmod_poly_mul_univariate_no_pad (fq_nmod_struct * rop, if (op2 != op1) { - cop2 = (mp_limb_t *) flint_malloc(clen2*sizeof(mp_limb_t)); + cop2 = (ulong *) flint_malloc(clen2*sizeof(ulong)); for (i = 0; i < len2 - 1; i++) { flint_mpn_copyi(cop2 + pfqlen*i, (op2 + i)->coeffs,(op2 + i)->length); @@ -63,7 +63,7 @@ _fq_nmod_poly_mul_univariate_no_pad (fq_nmod_struct * rop, cop2 = cop1; } - crop = (mp_limb_t *) flint_malloc(crlen*sizeof(mp_limb_t)); + crop = (ulong *) flint_malloc(crlen*sizeof(ulong)); if (clen1 >= clen2) _nmod_poly_mul(crop, cop1, clen1, cop2, clen2, mod); diff --git a/src/fq_nmod_poly/mullow_univariate.c b/src/fq_nmod_poly/mullow_univariate.c index 87c3f41d35..90dd9b9baf 100644 --- a/src/fq_nmod_poly/mullow_univariate.c +++ b/src/fq_nmod_poly/mullow_univariate.c @@ -31,7 +31,7 @@ _fq_nmod_poly_mullow_univariate (fq_nmod_struct * rop, slong i; slong len; - mp_ptr cop1, cop2, crop; + nn_ptr cop1, cop2, crop; if (!len1 || !len2) { @@ -39,7 +39,7 @@ _fq_nmod_poly_mullow_univariate (fq_nmod_struct * rop, return; } - cop1 = (mp_limb_t *) flint_malloc(clen1*sizeof(mp_limb_t)); + cop1 = (ulong *) flint_malloc(clen1*sizeof(ulong)); for (i = 0; i < len1; i++) { flint_mpn_copyi(cop1 + pfqlen*i, (op1 + i)->coeffs, (op1 + i)->length); @@ -48,7 +48,7 @@ _fq_nmod_poly_mullow_univariate (fq_nmod_struct * rop, if (op2 != op1) { - cop2 = (mp_limb_t *) flint_malloc(clen2*sizeof(mp_limb_t)); + cop2 = (ulong *) flint_malloc(clen2*sizeof(ulong)); for (i = 0; i < len2; i++) { flint_mpn_copyi(cop2 + pfqlen*i, (op2 + i)->coeffs,(op2 + i)->length); @@ -60,7 +60,7 @@ _fq_nmod_poly_mullow_univariate (fq_nmod_struct * rop, cop2 = cop1; } - crop = (mp_limb_t *) flint_malloc(cmlen*sizeof(mp_limb_t)); + crop = (ulong *) flint_malloc(cmlen*sizeof(ulong)); if (clen1 >= clen2) _nmod_poly_mullow(crop, cop1, clen1, cop2, clen2, cmlen, mod); else diff --git a/src/fq_nmod_poly/test/main.c 
b/src/fq_nmod_poly/test/main.c index 0b0f87bbe5..5518e47a4d 100644 --- a/src/fq_nmod_poly/test/main.c +++ b/src/fq_nmod_poly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/fq_nmod_poly_factor/test/main.c b/src/fq_nmod_poly_factor/test/main.c index 3809a65dca..19e65d17d8 100644 --- a/src/fq_nmod_poly_factor/test/main.c +++ b/src/fq_nmod_poly_factor/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-factor_berlekamp.c" diff --git a/src/fq_nmod_types.h b/src/fq_nmod_types.h index 8039befa92..a58a493048 100644 --- a/src/fq_nmod_types.h +++ b/src/fq_nmod_types.h @@ -28,7 +28,7 @@ typedef struct int sparse_modulus; int is_conway; /* whether field was generated using Flint Conway table (assures primitivity */ - mp_limb_t *a; + ulong *a; slong *j; slong len; @@ -74,11 +74,11 @@ fq_nmod_poly_factor_struct; typedef fq_nmod_poly_factor_struct fq_nmod_poly_factor_t[1]; typedef struct { - mp_limb_t * coeffs; + ulong * coeffs; ulong * exps; slong length; flint_bitcnt_t bits; /* number of bits per exponent */ - slong coeffs_alloc; /* abs size in mp_limb_t units */ + slong coeffs_alloc; /* abs size in ulong units */ slong exps_alloc; /* abs size in ulong units */ } fq_nmod_mpoly_struct; diff --git a/src/fq_nmod_vec/test/main.c b/src/fq_nmod_vec/test/main.c index 98e3888bf1..5bf155cad0 100644 --- a/src/fq_nmod_vec/test/main.c +++ b/src/fq_nmod_vec/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/fq_poly/test/main.c b/src/fq_poly/test/main.c index 0d539073cb..14d27a7087 100644 --- a/src/fq_poly/test/main.c +++ b/src/fq_poly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/fq_poly_factor/test/main.c b/src/fq_poly_factor/test/main.c index d59172d00f..8d318faa62 100644 --- a/src/fq_poly_factor/test/main.c +++ b/src/fq_poly_factor/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-factor_berlekamp.c" diff --git a/src/fq_vec/test/main.c b/src/fq_vec/test/main.c index 9ece17dc02..ffadab303d 100644 --- a/src/fq_vec/test/main.c +++ b/src/fq_vec/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/fq_zech.h b/src/fq_zech.h index 334da870d7..c60ba12607 100644 --- a/src/fq_zech.h +++ b/src/fq_zech.h @@ -99,7 +99,7 @@ fq_zech_clear(fq_zech_t FLINT_UNUSED(rop), const fq_zech_ctx_t FLINT_UNUSED(ctx) FQ_ZECH_INLINE void fq_zech_reduce(fq_zech_t rop, const fq_zech_ctx_t ctx) { - mp_limb_t order = fq_zech_ctx_order_ui(ctx); + ulong order = fq_zech_ctx_order_ui(ctx); if (rop->value >= order) { rop->value -= order; diff --git a/src/fq_zech/add.c b/src/fq_zech/add.c index a487f21a2b..0d03544a51 100644 --- a/src/fq_zech/add.c +++ b/src/fq_zech/add.c @@ -16,7 +16,7 @@ void fq_zech_add(fq_zech_t rop, const fq_zech_t op1, const fq_zech_t op2, const fq_zech_ctx_t ctx) { - mp_limb_t index, c; + ulong index, c; if (op1->value == ctx->qm1) { rop->value = op2->value; diff --git a/src/fq_zech/ctx.c b/src/fq_zech/ctx.c index ee9b88aaeb..4bfcfc63b1 100644 --- a/src/fq_zech/ctx.c +++ b/src/fq_zech/ctx.c @@ -18,7 +18,7 @@ #include "fq_zech.h" static ulong -_nmod_poly_evaluate_ui(mp_srcptr poly, slong len, ulong xd) +_nmod_poly_evaluate_ui(nn_srcptr poly, slong len, ulong xd) { slong ix; ulong res; @@ -144,7 +144,7 @@ fq_zech_ctx_init_fq_nmod_ctx_check(fq_zech_ctx_t ctx, fq_nmod_ctx_t ctx2) fq_nmod_t r, gen; ulong result; ulong j, nz; - mp_ptr n_reverse_table; + nn_ptr n_reverse_table; ctx->fq_nmod_ctx = ctx2; ctx->owns_fq_nmod_ctx = 0; diff --git a/src/fq_zech/get_fq_nmod.c b/src/fq_zech/get_fq_nmod.c index 40f1b9db73..6bea914c92 100644 --- a/src/fq_zech/get_fq_nmod.c +++ b/src/fq_zech/get_fq_nmod.c @@ -18,7 +18,7 @@ void fq_zech_get_fq_nmod(fq_nmod_t rop, const fq_zech_t op, const fq_zech_ctx_t ctx) { slong i; - mp_limb_t q, r; + ulong q, r; nmod_poly_fit_length(rop, fq_zech_ctx_degree(ctx)); nmod_poly_zero(rop); diff --git a/src/fq_zech/get_nmod_poly.c b/src/fq_zech/get_nmod_poly.c index 72013b4404..4b0b074b47 100644 --- a/src/fq_zech/get_nmod_poly.c +++ b/src/fq_zech/get_nmod_poly.c @@ -18,7 +18,7 @@ fq_zech_get_nmod_poly(nmod_poly_t rop, const fq_zech_t op, const fq_zech_ctx_t ctx) { slong i; - mp_limb_t q, r; + ulong q, r; rop->mod = ctx->fq_nmod_ctx->modulus->mod; diff --git a/src/fq_zech/mul.c b/src/fq_zech/mul.c index 597f0f34cc..63e89f162b 100644 --- a/src/fq_zech/mul.c +++ b/src/fq_zech/mul.c @@ -29,7 +29,7 @@ void fq_zech_mul_fmpz(fq_zech_t rop, const fq_zech_t op, const fmpz_t x, const fq_zech_ctx_t ctx) { - mp_limb_t ux; + ulong ux; fmpz_t y; fmpz_init(y); @@ -46,7 +46,7 @@ void fq_zech_mul_si(fq_zech_t rop, const fq_zech_t op, slong x, const fq_zech_ctx_t ctx) { - mp_limb_t y; + ulong y; if (x == 0 || fq_zech_is_zero(op, ctx)) { fq_zech_zero(rop, ctx); @@ -66,10 +66,10 @@ fq_zech_mul_si(fq_zech_t rop, const fq_zech_t op, slong x, } void -fq_zech_mul_ui(fq_zech_t rop, const fq_zech_t op, mp_limb_t x, +fq_zech_mul_ui(fq_zech_t rop, const fq_zech_t op, ulong x, const fq_zech_ctx_t ctx) { - mp_limb_t b; + ulong b; if (x == 0 || fq_zech_is_zero(op, ctx)) { diff --git a/src/fq_zech/pth_root.c b/src/fq_zech/pth_root.c index 0cb691e7af..0fde8a2ae5 100644 --- a/src/fq_zech/pth_root.c +++ b/src/fq_zech/pth_root.c @@ -16,7 +16,7 @@ void fq_zech_pth_root(fq_zech_t rop, const fq_zech_t op1, const fq_zech_ctx_t ctx) { slong i, d; - mp_limb_t e; + ulong e; double qm1inv; if (fq_zech_is_zero(op1, ctx) || fq_zech_is_one(op1, ctx)) diff --git a/src/fq_zech/set_fmpz.c b/src/fq_zech/set_fmpz.c index 007968107f..56b9390578 100644 --- a/src/fq_zech/set_fmpz.c +++ 
b/src/fq_zech/set_fmpz.c @@ -16,7 +16,7 @@ void fq_zech_set_fmpz(fq_zech_t rop, const fmpz_t x, const fq_zech_ctx_t ctx) { /* TODO: Clean this up */ - mp_limb_t ux; + ulong ux; fmpz_t y; fmpz_init(y); diff --git a/src/fq_zech/set_nmod_poly.c b/src/fq_zech/set_nmod_poly.c index b14c6be703..24ac5653c5 100644 --- a/src/fq_zech/set_nmod_poly.c +++ b/src/fq_zech/set_nmod_poly.c @@ -14,8 +14,8 @@ void fq_zech_set_nmod_poly(fq_zech_t a, const nmod_poly_t b, const fq_zech_ctx_t ctx) { ulong blen = b->length; - const mp_limb_t * bcoeffs = b->coeffs; - mp_limb_t qm1 = ctx->qm1; + const ulong * bcoeffs = b->coeffs; + ulong qm1 = ctx->qm1; ulong i; fq_zech_t t; fq_zech_zero(a, ctx); diff --git a/src/fq_zech/sub.c b/src/fq_zech/sub.c index 82393e1daf..fb5f342475 100644 --- a/src/fq_zech/sub.c +++ b/src/fq_zech/sub.c @@ -16,7 +16,7 @@ void fq_zech_sub(fq_zech_t rop, const fq_zech_t op1, const fq_zech_t op2, const fq_zech_ctx_t ctx) { - mp_limb_t index, c; + ulong index, c; if (op2->value == ctx->qm1) { rop->value = op1->value; diff --git a/src/fq_zech/test/main.c b/src/fq_zech/test/main.c index a99eb240ed..08fa5016a7 100644 --- a/src/fq_zech/test/main.c +++ b/src/fq_zech/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/fq_zech/test/t-mul_ui.c b/src/fq_zech/test/t-mul_ui.c index 6712738366..3c0ff316e0 100644 --- a/src/fq_zech/test/t-mul_ui.c +++ b/src/fq_zech/test/t-mul_ui.c @@ -27,7 +27,7 @@ TEST_FUNCTION_START(fq_zech_mul_ui, state) for (jx = 0; jx < 10; jx++) { - mp_limb_t x; + ulong x; fq_nmod_t aa, bb; fq_zech_t a, b, c; @@ -74,7 +74,7 @@ TEST_FUNCTION_START(fq_zech_mul_ui, state) for (jx = 0; jx < 10; jx++) { - mp_limb_t x; + ulong x; fq_nmod_t aa, bb; fq_zech_t a, b; diff --git a/src/fq_zech/trace.c b/src/fq_zech/trace.c index e70f62dc2b..0bbe41291b 100644 --- a/src/fq_zech/trace.c +++ b/src/fq_zech/trace.c @@ -16,7 +16,7 @@ void fq_zech_trace(fmpz_t rop, const fq_zech_t op, const fq_zech_ctx_t ctx) { - mp_limb_t p_i, trace; + ulong p_i, trace; fq_zech_t t, op_p_i; double qm1inv; if (fq_zech_is_zero(op, ctx)) diff --git a/src/fq_zech_embed/test/main.c b/src/fq_zech_embed/test/main.c index b4a28d9cff..a2e021c186 100644 --- a/src/fq_zech_embed/test/main.c +++ b/src/fq_zech_embed/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-composition_matrix.c" diff --git a/src/fq_zech_mat/test/main.c b/src/fq_zech_mat/test/main.c index 17fff1f6d3..9c0488bcc7 100644 --- a/src/fq_zech_mat/test/main.c +++ b/src/fq_zech_mat/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-add_sub.c" diff --git a/src/fq_zech_mpoly.h b/src/fq_zech_mpoly.h index f72b0ef147..db8ff7c21a 100644 --- a/src/fq_zech_mpoly.h +++ b/src/fq_zech_mpoly.h @@ -139,7 +139,7 @@ typedef fq_zech_mpoly_geobucket_struct fq_zech_mpoly_geobucket_t[1]; /* Context object ************************************************************/ void fq_zech_mpoly_ctx_init_deg(fq_zech_mpoly_ctx_t ctx, slong nvars, - const ordering_t ord, mp_limb_t p, slong deg); + const ordering_t ord, ulong p, slong deg); void fq_zech_mpoly_ctx_clear(fq_zech_mpoly_ctx_t ctx); diff --git a/src/fq_zech_mpoly/ctx.c b/src/fq_zech_mpoly/ctx.c index deba4b892e..f72908daae 100644 --- a/src/fq_zech_mpoly/ctx.c +++ b/src/fq_zech_mpoly/ctx.c @@ -14,7 +14,7 @@ #include "fq_zech_mpoly.h" void fq_zech_mpoly_ctx_init_deg(fq_zech_mpoly_ctx_t ctx, slong nvars, - const ordering_t ord, mp_limb_t p, slong deg) + const ordering_t ord, ulong p, slong deg) { mpoly_ctx_init(ctx->minfo, nvars, ord); fq_zech_ctx_init_ui(ctx->fqctx, p, deg, "#"); diff --git a/src/fq_zech_mpoly/derivative.c b/src/fq_zech_mpoly/derivative.c index 58a0c759a5..f4ba112d94 100644 --- a/src/fq_zech_mpoly/derivative.c +++ b/src/fq_zech_mpoly/derivative.c @@ -26,7 +26,7 @@ slong _fq_zech_mpoly_derivative(fq_zech_struct * Acoeff, ulong * Aexp, Alen = 0; for (i = 0; i < Blen; i++) { - mp_limb_t cr; + ulong cr; ulong mask = (-UWORD(1)) >> (FLINT_BITS - bits); ulong c = (Bexp[N*i + offset] >> shift) & mask; if (c == 0) @@ -56,7 +56,7 @@ slong _fq_zech_mpoly_derivative_mp(fq_zech_struct * Acoeff, ulong * Aexp, Alen = 0; for (i = 0; i < Blen; i++) { - mp_limb_t cr; + ulong cr; fmpz_set_ui_array(c, Bexp + N*i + offset, bits/FLINT_BITS); if (fmpz_is_zero(c)) continue; diff --git a/src/fq_zech_mpoly_factor/bpoly_factor_smprime.c b/src/fq_zech_mpoly_factor/bpoly_factor_smprime.c index 6562fd213a..84851961cd 100644 --- a/src/fq_zech_mpoly_factor/bpoly_factor_smprime.c +++ b/src/fq_zech_mpoly_factor/bpoly_factor_smprime.c @@ -497,10 +497,10 @@ static void _lattice( fq_zech_bpoly_struct * ld; nmod_mat_t M, T1, T2; int nlimbs; - mp_limb_t * trow; + ulong * trow; nlimbs = _nmod_vec_dot_bound_limbs(r, fq_zech_ctx_mod(ctx)); - trow = (mp_limb_t *) flint_malloc(r*sizeof(mp_limb_t)); + trow = (ulong *) flint_malloc(r*sizeof(ulong)); fq_zech_bpoly_init(Q, ctx); fq_zech_bpoly_init(R, ctx); fq_zech_bpoly_init(dg, ctx); diff --git a/src/fq_zech_poly/test/main.c b/src/fq_zech_poly/test/main.c index 179eb81b10..e22b1d375c 100644 --- a/src/fq_zech_poly/test/main.c +++ b/src/fq_zech_poly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/fq_zech_poly_factor/test/main.c b/src/fq_zech_poly_factor/test/main.c index 47ebee4491..23e5c19906 100644 --- a/src/fq_zech_poly_factor/test/main.c +++ b/src/fq_zech_poly_factor/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-factor_berlekamp.c" diff --git a/src/fq_zech_types.h b/src/fq_zech_types.h index c66e858156..d63dcbea8e 100644 --- a/src/fq_zech_types.h +++ b/src/fq_zech_types.h @@ -20,7 +20,7 @@ extern "C" { typedef struct { - mp_limb_t value; + ulong value; } fq_zech_struct; diff --git a/src/fq_zech_vec/test/main.c b/src/fq_zech_vec/test/main.c index 7efa01d8fc..c396a61039 100644 --- a/src/fq_zech_vec/test/main.c +++ b/src/fq_zech_vec/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/generic_files/io.c b/src/generic_files/io.c index fc592b643c..09b2fb7318 100644 --- a/src/generic_files/io.c +++ b/src/generic_files/io.c @@ -324,9 +324,9 @@ int flint_vfprintf(FILE * fs, const char * ip, va_list vlist) iplen = strlen(ip); TMP_START; -#if defined(_LONG_LONG_LIMB) +#if FLINT_LONG_LONG /* - If mp_limb_t is long long, then + If ulong is long long, then `%(format args...)w' -> `%(format args...)ll'. @@ -381,7 +381,7 @@ int flint_vfprintf(FILE * fs, const char * ip, va_list vlist) memcpy(opcur, ip, sizeof(char) * cpsz); ip = ipcur; -#if defined(_LONG_LONG_LIMB) +#if FLINT_LONG_LONG opcur += cpsz + 1; opcur[-1] = opcur[-2]; opcur[-2] = 'l'; diff --git a/src/generic_files/profiler.c b/src/generic_files/profiler.c index 86b11b3499..b8930dee11 100644 --- a/src/generic_files/profiler.c +++ b/src/generic_files/profiler.c @@ -16,8 +16,8 @@ #include #include "profiler.h" -#if (defined( _MSC_VER ) || (GMP_LIMB_BITS == 64 && defined (__amd64__)) || \ - (GMP_LIMB_BITS == 32 && (defined (__i386__) || \ +#if (defined( _MSC_VER ) || (FLINT_BITS == 64 && defined (__amd64__)) || \ + (FLINT_BITS == 32 && (defined (__i386__) || \ defined (__i486__) || defined(__amd64__)))) /* diff --git a/src/gmpcompat-longlong.h.in b/src/gmpcompat-longlong.h.in index a93c149301..2c905a8d5c 100644 --- a/src/gmpcompat-longlong.h.in +++ b/src/gmpcompat-longlong.h.in @@ -12,6 +12,7 @@ #ifndef GMP_COMPAT_H #define GMP_COMPAT_H +#include #include "flint.h" #define FLINT_MPZ_REALLOC(z, len) \ diff --git a/src/gmpcompat.h.in b/src/gmpcompat.h.in index 816e93cd80..3ed3980e3a 100644 --- a/src/gmpcompat.h.in +++ b/src/gmpcompat.h.in @@ -12,6 +12,7 @@ #ifndef GMP_COMPAT_H #define GMP_COMPAT_H +#include #include "flint.h" #define FLINT_MPZ_REALLOC(z, len) \ diff --git a/src/gr/fmpz_mod.c b/src/gr/fmpz_mod.c index 7f1e031dca..7b1e482bca 100644 --- a/src/gr/fmpz_mod.c +++ b/src/gr/fmpz_mod.c @@ -605,7 +605,7 @@ _gr_fmpz_mod_poly_div_series(fmpz * Q, const fmpz * A, slong lenA, const fmpz * return _gr_poly_div_series_newton(Q, A, lenA, B, lenB, len, cutoff, ctx); } -int _gr_fmpz_mod_poly_gcd(mp_ptr G, slong * lenG, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, gr_ctx_t ctx) +int _gr_fmpz_mod_poly_gcd(nn_ptr G, slong * lenG, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, gr_ctx_t ctx) { if (FLINT_MIN(lenA, lenB) < FMPZ_MOD_POLY_GCD_CUTOFF) return _gr_poly_gcd_euclidean(G, lenG, A, lenA, B, lenB, ctx); diff --git a/src/gr/fq_nmod.c b/src/gr/fq_nmod.c index 2fda75c1a8..14c4fcfd27 100644 --- a/src/gr/fq_nmod.c +++ b/src/gr/fq_nmod.c @@ -373,7 +373,7 @@ int __gr_fq_nmod_vec_dot(fq_nmod_struct * res, const fq_nmod_struct * initial, int subtract, const fq_nmod_struct * vec1, const fq_nmod_struct * vec2, slong len, gr_ctx_t ctx) { slong i; - mp_ptr s, t; + nn_ptr s, t; slong slen, tlen, 
len1, len2; slong plen; nmod_t mod; @@ -389,7 +389,7 @@ __gr_fq_nmod_vec_dot(fq_nmod_struct * res, const fq_nmod_struct * initial, int s plen = FQ_CTX(ctx)->modulus->length; - t = GR_TMP_ALLOC((4 * plen) * sizeof(mp_limb_t)); + t = GR_TMP_ALLOC((4 * plen) * sizeof(ulong)); s = t + 2 * plen; mod = FQ_CTX(ctx)->mod; @@ -458,7 +458,7 @@ __gr_fq_nmod_vec_dot(fq_nmod_struct * res, const fq_nmod_struct * initial, int s _nmod_vec_set(res->coeffs, s, slen); _nmod_poly_set_length(res, slen); - GR_TMP_FREE(t, (4 * plen) * sizeof(mp_limb_t)); + GR_TMP_FREE(t, (4 * plen) * sizeof(ulong)); return GR_SUCCESS; } @@ -468,7 +468,7 @@ int __gr_fq_nmod_vec_dot_rev(fq_nmod_struct * res, const fq_nmod_struct * initial, int subtract, const fq_nmod_struct * vec1, const fq_nmod_struct * vec2, slong len, gr_ctx_t ctx) { slong i; - mp_ptr s, t; + nn_ptr s, t; slong slen, tlen, len1, len2; slong plen; nmod_t mod; @@ -484,7 +484,7 @@ __gr_fq_nmod_vec_dot_rev(fq_nmod_struct * res, const fq_nmod_struct * initial, i plen = FQ_CTX(ctx)->modulus->length; - t = GR_TMP_ALLOC((4 * plen) * sizeof(mp_limb_t)); + t = GR_TMP_ALLOC((4 * plen) * sizeof(ulong)); s = t + 2 * plen; mod = FQ_CTX(ctx)->mod; @@ -553,7 +553,7 @@ __gr_fq_nmod_vec_dot_rev(fq_nmod_struct * res, const fq_nmod_struct * initial, i _nmod_vec_set(res->coeffs, s, slen); _nmod_poly_set_length(res, slen); - GR_TMP_FREE(t, (4 * plen) * sizeof(mp_limb_t)); + GR_TMP_FREE(t, (4 * plen) * sizeof(ulong)); return GR_SUCCESS; } diff --git a/src/gr/nmod.c b/src/gr/nmod.c index f778170687..e803aae684 100644 --- a/src/gr/nmod.c +++ b/src/gr/nmod.c @@ -647,8 +647,8 @@ _gr_nmod_vec_sub(ulong * res, const ulong * vec1, const ulong * vec2, slong len, } -static inline void _nmod_vec_scalar_mul_nmod_fullword_inline(mp_ptr res, mp_srcptr vec, - slong len, mp_limb_t c, nmod_t mod) +static inline void _nmod_vec_scalar_mul_nmod_fullword_inline(nn_ptr res, nn_srcptr vec, + slong len, ulong c, nmod_t mod) { slong i; @@ -656,8 +656,8 @@ static inline void _nmod_vec_scalar_mul_nmod_fullword_inline(mp_ptr res, mp_srcp NMOD_MUL_FULLWORD(res[i], vec[i], c, mod); } -static inline void _nmod_vec_scalar_mul_nmod_generic_inline(mp_ptr res, mp_srcptr vec, - slong len, mp_limb_t c, nmod_t mod) +static inline void _nmod_vec_scalar_mul_nmod_generic_inline(nn_ptr res, nn_srcptr vec, + slong len, ulong c, nmod_t mod) { slong i; @@ -665,8 +665,8 @@ static inline void _nmod_vec_scalar_mul_nmod_generic_inline(mp_ptr res, mp_srcpt NMOD_MUL_PRENORM(res[i], vec[i], c << mod.norm, mod); } -static inline void _nmod_vec_scalar_mul_nmod_inline(mp_ptr res, mp_srcptr vec, - slong len, mp_limb_t c, nmod_t mod) +static inline void _nmod_vec_scalar_mul_nmod_inline(nn_ptr res, nn_srcptr vec, + slong len, ulong c, nmod_t mod) { if (NMOD_BITS(mod) == FLINT_BITS) _nmod_vec_scalar_mul_nmod_fullword_inline(res, vec, len, c, mod); @@ -1040,14 +1040,14 @@ _gr_nmod_poly_mullow(ulong * res, /* fixme: duplicates _nmod_poly_divrem for error handling */ int -_gr_nmod_poly_divrem(mp_ptr Q, mp_ptr R, mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, gr_ctx_t ctx) +_gr_nmod_poly_divrem(nn_ptr Q, nn_ptr R, nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, gr_ctx_t ctx) { if (lenA <= 20 || lenB <= 8 || lenA - lenB <= 6 || (NMOD_BITS(NMOD_CTX(ctx)) <= 61 && lenA <= 40) || (NMOD_BITS(NMOD_CTX(ctx)) <= 29 && lenA <= 70)) { - mp_limb_t invB; + ulong invB; int status; status = _gr_nmod_inv(&invB, &B[lenB - 1], ctx); @@ -1072,7 +1072,7 @@ _gr_nmod_poly_divrem(mp_ptr Q, mp_ptr R, mp_srcptr A, slong lenA, } int 
-_gr_nmod_poly_divexact(mp_ptr Q, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, gr_ctx_t ctx) +_gr_nmod_poly_divexact(nn_ptr Q, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, gr_ctx_t ctx) { slong lenQ = lenA - lenB + 1; @@ -1106,13 +1106,13 @@ static const short inv_series_cutoff_tab[64] = {38, 36, 38, 36, 41, 48, 49, 54, #endif -void _nmod_poly_inv_series_basecase_preinv1(mp_ptr Qinv, mp_srcptr Q, slong Qlen, slong n, mp_limb_t q, nmod_t mod); +void _nmod_poly_inv_series_basecase_preinv1(nn_ptr Qinv, nn_srcptr Q, slong Qlen, slong n, ulong q, nmod_t mod); int _gr_nmod_poly_inv_series_basecase(ulong * res, const ulong * f, slong flen, slong n, gr_ctx_t ctx) { - mp_limb_t q; + ulong q; q = f[0]; if (q != 1) @@ -1142,13 +1142,13 @@ _gr_nmod_poly_inv_series(ulong * res, } -void _nmod_poly_div_series_basecase_preinv1(mp_ptr Qinv, mp_srcptr P, slong Plen, mp_srcptr Q, slong Qlen, slong n, mp_limb_t q, nmod_t mod); +void _nmod_poly_div_series_basecase_preinv1(nn_ptr Qinv, nn_srcptr P, slong Plen, nn_srcptr Q, slong Qlen, slong n, ulong q, nmod_t mod); int _gr_nmod_poly_div_series_basecase(ulong * res, const ulong * f, slong flen, const ulong * g, slong glen, slong n, gr_ctx_t ctx) { - mp_limb_t q; + ulong q; q = g[0]; if (q != 1) @@ -1389,7 +1389,7 @@ _gr_nmod_mat_mul(gr_mat_t res, const gr_mat_t x, const gr_mat_t y, gr_ctx_t ctx) nmod_mat_struct *XX, *YY; R->entries = res->entries; - R->rows = (mp_ptr *) res->rows; + R->rows = (nn_ptr *) res->rows; R->r = res->r; R->c = res->c; R->mod = NMOD_CTX(ctx); @@ -1401,7 +1401,7 @@ _gr_nmod_mat_mul(gr_mat_t res, const gr_mat_t x, const gr_mat_t y, gr_ctx_t ctx) else { X->entries = x->entries; - X->rows = (mp_ptr *) x->rows; + X->rows = (nn_ptr *) x->rows; X->r = x->r; X->c = x->c; X->mod = NMOD_CTX(ctx); @@ -1419,7 +1419,7 @@ _gr_nmod_mat_mul(gr_mat_t res, const gr_mat_t x, const gr_mat_t y, gr_ctx_t ctx) else { Y->entries = y->entries; - Y->rows = (mp_ptr *) y->rows; + Y->rows = (nn_ptr *) y->rows; Y->r = y->r; Y->c = y->c; Y->mod = NMOD_CTX(ctx); diff --git a/src/gr/test/main.c b/src/gr/test/main.c index a418b2113d..7628198d49 100644 --- a/src/gr/test/main.c +++ b/src/gr/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - #include "mpoly.h" #include "fmpz_mpoly_q.h" #include "nf.h" diff --git a/src/gr_generic/generic_pow.c b/src/gr_generic/generic_pow.c index d76033f48e..d2158962f4 100644 --- a/src/gr_generic/generic_pow.c +++ b/src/gr_generic/generic_pow.c @@ -39,14 +39,14 @@ sliding_select_k(ulong bits) /* todo: avoid swaps (or perform pointer swaps) */ /* note: supports aliasing */ static int -_gr_pow_mpn_sliding(gr_ptr f, gr_srcptr g, mp_srcptr exp, mp_size_t en, gr_ctx_t ctx) +_gr_pow_mpn_sliding(gr_ptr f, gr_srcptr g, nn_srcptr exp, slong en, gr_ctx_t ctx) { slong h, k, value; slong i, j, alloc; gr_ptr temp; fmpz * g_powers; slong sz = ctx->sizeof_elem; - mp_bitcnt_t ebits; + flint_bitcnt_t ebits; int status = GR_SUCCESS; ebits = (en - 1) * FLINT_BITS + FLINT_BIT_COUNT(exp[en - 1]); diff --git a/src/gr_generic/test/main.c b/src/gr_generic/test/main.c index 30f748f5c3..0275809281 100644 --- a/src/gr_generic/test/main.c +++ b/src/gr_generic/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - #include "fmpz_poly.h" #include "fmpz_mpoly.h" #include "gr_generic.h" diff --git a/src/gr_mat/test/main.c b/src/gr_mat/test/main.c index 8a2a8af6ed..b4e197be07 100644 --- a/src/gr_mat/test/main.c +++ b/src/gr_mat/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-adjugate.c" diff --git a/src/gr_mpoly.h b/src/gr_mpoly.h index 2563e5e758..aaeda95bcd 100644 --- a/src/gr_mpoly.h +++ b/src/gr_mpoly.h @@ -33,7 +33,7 @@ typedef struct ulong * exps; slong length; flint_bitcnt_t bits; /* number of bits per exponent */ - slong coeffs_alloc; /* abs size in mp_limb_t units */ + slong coeffs_alloc; /* abs size in ulong units */ slong exps_alloc; /* abs size in ulong units */ } gr_mpoly_struct; diff --git a/src/gr_mpoly/test/main.c b/src/gr_mpoly/test/main.c index 824def1426..0aac4d51c5 100644 --- a/src/gr_mpoly/test/main.c +++ b/src/gr_mpoly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add_sub.c" diff --git a/src/gr_poly/compose_series_divconquer.c b/src/gr_poly/compose_series_divconquer.c index aa3b47a0d8..f132a6dc6d 100644 --- a/src/gr_poly/compose_series_divconquer.c +++ b/src/gr_poly/compose_series_divconquer.c @@ -50,7 +50,7 @@ _gr_poly_compose_series_divconquer(gr_ptr res, gr_srcptr poly1, slong len1, alloc += hlen[i]; GR_TMP_INIT_VEC(v, alloc + 2 * powlen, ctx); - h = (gr_ptr *) flint_malloc(((len1 + 1) / 2) * sizeof(mp_ptr)); + h = (gr_ptr *) flint_malloc(((len1 + 1) / 2) * sizeof(nn_ptr)); h[0] = v; for (i = 0; i < (len1 - 1) / 2; i++) { diff --git a/src/gr_poly/test/main.c b/src/gr_poly/test/main.c index 82549659da..bf5880e24e 100644 --- a/src/gr_poly/test/main.c +++ b/src/gr_poly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-atan_series.c" diff --git a/src/gr_poly/tune/cutoffs.c b/src/gr_poly/tune/cutoffs.c index 5fc2c84742..885e3185e4 100644 --- a/src/gr_poly/tune/cutoffs.c +++ b/src/gr_poly/tune/cutoffs.c @@ -31,7 +31,7 @@ slong next_powhalf2(slong n) return WORD(1) << FLINT_BIT_COUNT(n); } -void _nmod_poly_mul_mid_default_mpn_ctx(mp_ptr res, slong zl, slong zh, mp_srcptr a, slong an, mp_srcptr b, slong bn, nmod_t mod); +void _nmod_poly_mul_mid_default_mpn_ctx(nn_ptr res, slong zl, slong zh, nn_srcptr a, slong an, nn_srcptr b, slong bn, nmod_t mod); #define TIMEIT_END_REPEAT3(__timer, __reps, __min_time) \ } \ diff --git a/src/gr_special/fac.c b/src/gr_special/fac.c index bb82ff3389..d342d8a360 100644 --- a/src/gr_special/fac.c +++ b/src/gr_special/fac.c @@ -23,7 +23,7 @@ #define FAC_TAB_SIZE 13 #endif -static const mp_limb_t fac_tab[] = +static const ulong fac_tab[] = { UWORD(1), UWORD(1), UWORD(2), UWORD(6), UWORD(24), UWORD(120), UWORD(720), UWORD(5040), UWORD(40320), UWORD(362880), UWORD(3628800), UWORD(39916800), UWORD(479001600), diff --git a/src/gr_special/fib.c b/src/gr_special/fib.c index adf55cf876..1de4ed81de 100644 --- a/src/gr_special/fib.c +++ b/src/gr_special/fib.c @@ -70,8 +70,8 @@ gr_generic_fib2_fmpz(gr_ptr v, gr_ptr u, const fmpz_t n, gr_ctx_t ctx) int status = GR_SUCCESS; gr_ptr t; slong real_prec = 0; - mp_srcptr np; - mp_limb_t ntmp; + nn_srcptr np; + ulong ntmp; if (fmpz_sgn(n) < 0) { diff --git a/src/gr_special/test/main.c b/src/gr_special/test/main.c index 0f9fa4717a..1805fd696f 100644 --- a/src/gr_special/test/main.c +++ b/src/gr_special/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-chebyshev.c" diff --git a/src/gr_vec/test/main.c b/src/gr_vec/test/main.c index 7dafad0e3a..8f62badc72 100644 --- a/src/gr_vec/test/main.c +++ b/src/gr_vec/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
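Regarding the retyped fac_tab in src/gr_special/fac.c above: the table can only hold factorials that fit in a single ulong, which is why the visible FAC_TAB_SIZE of 13 plausibly corresponds to 32-bit limbs (12! = 479001600 is the last factorial below 2^32; with 64-bit limbs the table can run up to 20!). A small self-contained check of those bounds, as an editor's illustration rather than patch code:

/* Why a per-word factorial table stops at 12! on 32-bit limbs and
   20! on 64-bit limbs. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t f = 1;
    int last32 = 0;                       /* largest n with n! <= 2^32 - 1 */

    for (int n = 1; n <= 20; n++)
    {
        f *= (uint64_t) n;                /* no overflow: 20! < 2^64 */
        if (f <= UINT32_MAX)
            last32 = n;
    }

    printf("largest factorial in 32 bits: %d!\n", last32);   /* 12! */
    printf("20! = %llu\n", (unsigned long long) f);          /* 2432902008176640000 */
    return 0;
}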
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-product.c" diff --git a/src/interfaces/test/t-NTL-interface.cpp b/src/interfaces/test/t-NTL-interface.cpp index 0a658e1687..7b1910509e 100644 --- a/src/interfaces/test/t-NTL-interface.cpp +++ b/src/interfaces/test/t-NTL-interface.cpp @@ -21,7 +21,7 @@ NTL_CLIENT TEST_FUNCTION_START(ZZ_to_fmpz, state) { int i, result; - mp_bitcnt_t bits, randbits; + flint_bitcnt_t bits, randbits; fmpz_t int1, int2; ZZ z; @@ -60,7 +60,7 @@ TEST_FUNCTION_START(ZZX_to_fmpz_poly, state) { fmpz_poly_t f_poly1, f_poly2; slong length; - mp_bitcnt_t bits; + flint_bitcnt_t bits; int i, result; /* Check aliasing of a and c */ diff --git a/src/long_extras.h b/src/long_extras.h index 424e70dfbe..c916714399 100644 --- a/src/long_extras.h +++ b/src/long_extras.h @@ -59,11 +59,11 @@ int z_mat22_det_is_negative(slong m11, slong m12, slong m21, slong m22) /* Randomisation ************************************************************/ -mp_limb_signed_t z_randtest(flint_rand_t state); +slong z_randtest(flint_rand_t state); -mp_limb_signed_t z_randtest_not_zero(flint_rand_t state); +slong z_randtest_not_zero(flint_rand_t state); -mp_limb_signed_t z_randint(flint_rand_t state, mp_limb_t limit); +slong z_randint(flint_rand_t state, ulong limit); /*****************************************************************************/ diff --git a/src/long_extras/randint.c b/src/long_extras/randint.c index 97208bad19..95121c603a 100644 --- a/src/long_extras/randint.c +++ b/src/long_extras/randint.c @@ -16,9 +16,9 @@ #include "ulong_extras.h" #include "long_extras.h" -mp_limb_signed_t z_randint(flint_rand_t state, mp_limb_t limit) +slong z_randint(flint_rand_t state, ulong limit) { - mp_limb_signed_t z; + slong z; if ((limit == UWORD(0)) || (limit > WORD_MAX)) limit = WORD_MAX; diff --git a/src/long_extras/randtest.c b/src/long_extras/randtest.c index 2511c80ade..6aa99f44ea 100644 --- a/src/long_extras/randtest.c +++ b/src/long_extras/randtest.c @@ -16,10 +16,10 @@ #include "ulong_extras.h" #include "long_extras.h" -mp_limb_signed_t z_randtest(flint_rand_t state) +slong z_randtest(flint_rand_t state) { - mp_limb_t m; - mp_limb_signed_t z; + ulong m; + slong z; m = n_randlimb(state); @@ -47,9 +47,9 @@ mp_limb_signed_t z_randtest(flint_rand_t state) return z; } -mp_limb_signed_t z_randtest_not_zero(flint_rand_t state) +slong z_randtest_not_zero(flint_rand_t state) { - mp_limb_signed_t z; + slong z; while ((z = z_randtest(state)) == 0) ; return z; diff --git a/src/long_extras/test/main.c b/src/long_extras/test/main.c index eccf1a35b7..7ef2b1343b 100644 --- a/src/long_extras/test/main.c +++ b/src/long_extras/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
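On the z_randint signature change above (the limit parameter is now a plain ulong while the return type is slong): the existing clamp, if ((limit == UWORD(0)) || (limit > WORD_MAX)) limit = WORD_MAX;, is what keeps the sampled value representable as a signed word. A minimal sketch of that clamp, as an editor's illustration with 64-bit fixed-width types standing in for ulong/slong and INT64_MAX standing in for WORD_MAX:

/* Clamp an unsigned limit so a value below it always fits in a signed word. */
#include <stdint.h>
#include <stdio.h>

static int64_t clamp_limit(uint64_t limit)
{
    if (limit == 0 || limit > (uint64_t) INT64_MAX)
        limit = (uint64_t) INT64_MAX;
    return (int64_t) limit;
}

int main(void)
{
    printf("%lld\n", (long long) clamp_limit(UINT64_MAX)); /* 9223372036854775807 */
    printf("%lld\n", (long long) clamp_limit(1000));       /* 1000 */
    return 0;
}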
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-kronecker.c" diff --git a/src/longlong.h b/src/longlong.h index 7d7bc623c5..f6a8520dd7 100644 --- a/src/longlong.h +++ b/src/longlong.h @@ -36,18 +36,18 @@ extern "C" { #if defined(__GNUC__) /* Trailing and leading zeros */ -# ifndef _LONG_LONG_LIMB -# define flint_clz __builtin_clzl -# define flint_ctz __builtin_ctzl -# else +# if FLINT_LONG_LONG # define flint_clz __builtin_clzll # define flint_ctz __builtin_ctzll +# else +# define flint_clz __builtin_clzl +# define flint_ctz __builtin_ctzl # endif /* Byte swap */ # define _FLINT_CAT_(X,Y) X##Y # define _FLINT_CAT(X,Y) _FLINT_CAT_(X,Y) -# define byte_swap(x) do { (x) = _FLINT_CAT(__builtin_bswap, GMP_LIMB_BITS)(x); } while (0) +# define byte_swap(x) do { (x) = _FLINT_CAT(__builtin_bswap, FLINT_BITS)(x); } while (0) /* Addition, subtraction and multiplication */ # if defined(__clang__) @@ -84,7 +84,7 @@ FLINT_DLL extern const unsigned char __flint_clz_tab[128]; # define flint_clz flint_clz static inline int flint_clz(ulong x) { - mp_limb_t a, xr = x; + ulong a, xr = x; const unsigned int bits4 = FLINT_BITS / 4; if (FLINT_BITS == 32) a = xr < ((ulong) 1 << 2 * bits4) @@ -141,18 +141,18 @@ static inline int flint_ctz(ulong x) # define add_sssaaaaaa(s2, s1, s0, a2, a1, a0, b2, b1, b0) \ do { \ - mp_limb_t __t1, __t2; \ - add_ssaaaa(__t1, s0, (mp_limb_t) 0, a0, (mp_limb_t) 0, b0); \ - add_ssaaaa(__t2, s1, (mp_limb_t) 0, a1, (mp_limb_t) 0, b1); \ + ulong __t1, __t2; \ + add_ssaaaa(__t1, s0, (ulong) 0, a0, (ulong) 0, b0); \ + add_ssaaaa(__t2, s1, (ulong) 0, a1, (ulong) 0, b1); \ add_ssaaaa(s2, s1, (a2) + (b2), s1, __t2, __t1); \ } while (0) # define add_ssssaaaaaaaa(s3, s2, s1, s0, a3, a2, a1, a0, b3, b2, b1, b0) \ do { \ - mp_limb_t __u2; \ - add_sssaaaaaa(__u2, s1, s0, (mp_limb_t) 0, a1, a0, (mp_limb_t) 0, b1, b0); \ + ulong __u2; \ + add_sssaaaaaa(__u2, s1, s0, (ulong) 0, a1, a0, (ulong) 0, b1, b0); \ add_ssaaaa(s3, s2, a3, a2, b3, b2); \ - add_ssaaaa(s3, s2, s3, s2, (mp_limb_t) 0, __u2); \ + add_ssaaaa(s3, s2, s3, s2, (ulong) 0, __u2); \ } while (0) # define sub_ddmmss(s1, s0, a1, a0, b1, b0) \ @@ -164,34 +164,29 @@ static inline int flint_ctz(ulong x) # define sub_dddmmmsss(d2, d1, d0, m2, m1, m0, s2, s1, s0) \ do { \ - mp_limb_t __t1, __t2; \ - sub_ddmmss(__t1, d0, (mp_limb_t) 0, m0, (mp_limb_t) 0, s0); \ - sub_ddmmss(__t2, d1, (mp_limb_t) 0, m1, (mp_limb_t) 0, s1); \ + ulong __t1, __t2; \ + sub_ddmmss(__t1, d0, (ulong) 0, m0, (ulong) 0, s0); \ + sub_ddmmss(__t2, d1, (ulong) 0, m1, (ulong) 0, s1); \ sub_ddmmss(d2, d1, (m2) - (s2), d1, -__t2, -__t1); \ } while (0) #endif #if !defined(MPN_INCR_U) -# define MPN_INCR_U MPN_INCR_U -# define MPN_DECR_U MPN_DECR_U -FLINT_FORCE_INLINE void MPN_INCR_U(mp_ptr ptr, mp_size_t size, mp_limb_t incr) -{ # if FLINT_WANT_ASSERT - mp_limb_t cy = mpn_add_1(ptr, ptr, size, incr); - FLINT_ASSERT(cy == 0); -# else - mpn_add_1(ptr, ptr, size, incr); -# endif -} -FLINT_FORCE_INLINE void MPN_DECR_U(mp_ptr ptr, mp_size_t size, mp_limb_t incr) -{ -# if FLINT_WANT_ASSERT - mp_limb_t cy = mpn_sub_1(ptr, ptr, size, incr); - FLINT_ASSERT(cy == 0); +# define MPN_INCR_U(ptr, size, incr) \ + do { \ + ulong __cy = mpn_add_1(ptr, ptr, size, incr); \ + FLINT_ASSERT(__cy == 0); \ + } while (0) +# define MPN_DECR_U(ptr, size, incr) \ + do { \ + ulong __cy = mpn_sub_1(ptr, ptr, size, incr); \ + FLINT_ASSERT(__cy == 0); \ + } while (0) # else - mpn_sub_1(ptr, ptr, size, incr); +# define MPN_INCR_U(ptr, size, incr) 
mpn_add_1(ptr, ptr, size, incr) +# define MPN_DECR_U(ptr, size, incr) mpn_sub_1(ptr, ptr, size, incr) # endif -} #endif /* Multiplication */ @@ -267,7 +262,7 @@ FLINT_FORCE_INLINE void MPN_DECR_U(mp_ptr ptr, mp_size_t size, mp_limb_t incr) if (__norm) \ { \ udiv_qrnnd_int((q), (r), ((n1) << __norm) + ((n0) >> (FLINT_BITS - __norm)), (n0) << __norm, (d) << __norm); \ - (r) = ((mp_limb_t) (r) >> __norm); \ + (r) = ((ulong) (r) >> __norm); \ } \ else \ udiv_qrnnd_int((q), (r), (n1), (n0), (d)); \ @@ -277,8 +272,8 @@ FLINT_FORCE_INLINE void MPN_DECR_U(mp_ptr ptr, mp_size_t size, mp_limb_t incr) # define sdiv_qrnnd(q, r, n1, n0, d) \ do { \ - mp_limb_t __n1, __n0, __d; \ - mp_limb_t __q, __r; \ + ulong __n1, __n0, __d; \ + ulong __q, __r; \ unsigned int __sgn_n = 0, __sgn_d = 0; \ if ((n1) & __highbit) \ { \ diff --git a/src/longlong_asm_clang.h b/src/longlong_asm_clang.h index 5c29604fb4..f18beb7461 100644 --- a/src/longlong_asm_clang.h +++ b/src/longlong_asm_clang.h @@ -20,7 +20,7 @@ # define _STOR_SLONG long long int #endif -#if defined(_LONG_LONG_LIMB) +#if FLINT_LONG_LONG # define _FLINT_ADC __builtin_addcll # define _FLINT_SBB __builtin_subcll #else diff --git a/src/longlong_asm_gcc.h b/src/longlong_asm_gcc.h index 0d10b19b9a..1d12322084 100644 --- a/src/longlong_asm_gcc.h +++ b/src/longlong_asm_gcc.h @@ -43,9 +43,9 @@ see https://www.gnu.org/licenses/. */ #define LONGLONG_ASM_H /* Machine specific operations */ -#if defined (__amd64__) || (GMP_LIMB_BITS == 32 && (defined (__i386__) || defined (__i486__))) +#if defined (__amd64__) || (FLINT_BITS == 32 && (defined (__i386__) || defined (__i486__))) -# if GMP_LIMB_BITS == 64 && defined (__amd64__) +# if FLINT_BITS == 64 && defined (__amd64__) # define _ASM_ADD "addq" # define _ASM_ADC "adcq" # define _ASM_SUB "subq" @@ -125,7 +125,7 @@ see https://www.gnu.org/licenses/. */ : "=a" (w0), "=d" (w1) \ : "%0" ((ulong)(u)), "rm" ((ulong)(v))) -#elif (GMP_LIMB_BITS == 64 && defined(__aarch64__)) || (GMP_LIMB_BITS == 32 && defined(__arm__)) +#elif (FLINT_BITS == 64 && defined(__aarch64__)) || (FLINT_BITS == 32 && defined(__arm__)) # define add_ssaaaa(s1, s0, a1, a0, b1, b0) \ __asm__("adds %1,%3,%5\n" \ diff --git a/src/longlong_asm_gnu.h b/src/longlong_asm_gnu.h index 19419ce8d2..2eddff0e4d 100644 --- a/src/longlong_asm_gnu.h +++ b/src/longlong_asm_gnu.h @@ -65,7 +65,7 @@ see https://www.gnu.org/licenses/. */ #if !FLINT_WANT_ASSERT && defined(__amd64__) # define MPN_IORD_U(ptr, incr, aors) \ do { \ - mp_ptr __ptr_dummy; \ + nn_ptr __ptr_dummy; \ if (__builtin_constant_p(incr) && (incr) == 0) \ { \ } \ @@ -77,7 +77,7 @@ see https://www.gnu.org/licenses/. */ "\tlea\t%c2(%0), %0\n" \ "\tjc\t" ASM_L(top) \ : "=r" (__ptr_dummy) \ - : "0" (ptr), "n" (sizeof(mp_limb_t)) \ + : "0" (ptr), "n" (sizeof(ulong)) \ : "memory"); \ } \ else \ @@ -92,23 +92,23 @@ see https://www.gnu.org/licenses/. 
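A note on the MPN_INCR_U / MPN_DECR_U rewrite above: both macros assume the increment cannot carry out of the last limb, and the FLINT_WANT_ASSERT variants make that explicit by asserting that mpn_add_1 / mpn_sub_1 return a zero carry. The sketch below is an editor's illustration, not patch code; it models mpn_add_1 with a plain loop over unsigned long long limbs so the contract can be seen without GMP:

/* Add `incr` to a little-endian limb array; the caller guarantees the
   carry does not propagate past p[n-1], which the assert checks. */
#include <assert.h>
#include <stdio.h>

typedef unsigned long long limb_t;

static limb_t add_1(limb_t * p, long n, limb_t incr)   /* returns carry out */
{
    for (long i = 0; i < n && incr != 0; i++)
    {
        limb_t s = p[i] + incr;
        incr = (s < incr);   /* carry into the next limb */
        p[i] = s;
    }
    return incr;
}

static void incr_u(limb_t * p, long n, limb_t incr)
{
    limb_t cy = add_1(p, n, incr);
    assert(cy == 0);         /* no overflow past the last limb */
}

int main(void)
{
    limb_t x[2] = { ~0ULL, 7 };  /* 7 * 2^64 + (2^64 - 1) */
    incr_u(x, 2, 1);             /* carries into the top limb: now 8 * 2^64 */
    printf("%llu %llu\n", x[0], x[1]);   /* prints: 0 8 */
    return 0;
}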
*/ ASM_L(done) ":\n" \ : "=r" (__ptr_dummy) \ : "0" (ptr), \ - "re" ((mp_limb_t) (incr)), "n" (sizeof(mp_limb_t)) \ + "re" ((ulong) (incr)), "n" (sizeof(ulong)) \ : "memory"); \ } \ } while (0) -# if GMP_LIMB_BITS == 32 +# if FLINT_BITS == 32 # define MPN_INCR_U(ptr, size, incr) MPN_IORD_U(ptr, incr, "addl") # define MPN_DECR_U(ptr, size, incr) MPN_IORD_U(ptr, incr, "subl") # else # define MPN_INCR_U(ptr, size, incr) MPN_IORD_U(ptr, incr, "addq") # define MPN_DECR_U(ptr, size, incr) MPN_IORD_U(ptr, incr, "subq") # endif -#elif !FLINT_WANT_ASSERT && (GMP_LIMB_BITS == 64 && defined(__aarch64__)) +#elif !FLINT_WANT_ASSERT && (FLINT_BITS == 64 && defined(__aarch64__)) # define MPN_IORD_U(ptr, incr, aors, cond, anticond) \ do { \ - mp_ptr __ptr_dummy; \ - mp_limb_t __reg_dummy; \ + nn_ptr __ptr_dummy; \ + ulong __reg_dummy; \ if (__builtin_constant_p (incr) && (incr) == 0) \ { \ } \ @@ -138,7 +138,7 @@ see https://www.gnu.org/licenses/. */ "\tb." cond "\t" ASM_L(top) "\n" \ ASM_L(done) ":\n" \ : "=r" (__ptr_dummy), "=&r" (__reg_dummy) \ - : "0" (ptr), "rI" ((mp_limb_t) (incr)) \ + : "0" (ptr), "rI" ((ulong) (incr)) \ : "memory"); \ } \ } while (0) diff --git a/src/longlong_div_gnu.h b/src/longlong_div_gnu.h index 9e4b7637b0..e39e34da0b 100644 --- a/src/longlong_div_gnu.h +++ b/src/longlong_div_gnu.h @@ -12,9 +12,9 @@ #ifndef LONGLONG_DIV_H #define LONGLONG_DIV_H -#if defined (__amd64__) || (GMP_LIMB_BITS == 32 && (defined (__i386__) || defined (__i486__))) +#if defined (__amd64__) || (FLINT_BITS == 32 && (defined (__i386__) || defined (__i486__))) -# if GMP_LIMB_BITS == 64 && defined (__amd64__) +# if FLINT_BITS == 64 && defined (__amd64__) # define _FLINT_ASM_DIV "divq" # define _FLINT_ASM_IDIV "idivq" # else diff --git a/src/mag.h b/src/mag.h index 0122941051..28234a2908 100644 --- a/src/mag.h +++ b/src/mag.h @@ -25,9 +25,9 @@ extern "C" { #endif -#define LIMB_ONE ((mp_limb_t) 1) -#define LIMB_ONES (-(mp_limb_t) 1) -#define LIMB_TOP (((mp_limb_t) 1) << (FLINT_BITS - 1)) +#define LIMB_ONE ((ulong) 1) +#define LIMB_ONES (-(ulong) 1) +#define LIMB_TOP (((ulong) 1) << (FLINT_BITS - 1)) #define MASK_LIMB(n, c) ((n) & (LIMB_ONES << (c))) #define MAG_MAX_LAGOM_EXP (COEFF_MAX / 4) @@ -118,10 +118,10 @@ _fmpz_sub2_fast(fmpz_t z, const fmpz_t x, const fmpz_t y, slong c) #define MAG_ONE_HALF (UWORD(1) << (MAG_BITS - 1)) -static inline mp_limb_t -__mag_fixmul32(mp_limb_t x, mp_limb_t y) +static inline ulong +__mag_fixmul32(ulong x, ulong y) { - mp_limb_t u, v; + ulong u, v; umul_ppmm(u, v, x, y); return (u << (32 - MAG_BITS)) | (v >> MAG_BITS); } @@ -145,7 +145,7 @@ __mag_fixmul32(mp_limb_t x, mp_limb_t y) #define MAG_ADJUST_ONE_TOO_LARGE(x) \ do { \ - mp_limb_t __t = MAG_MAN(x) >> MAG_BITS; \ + ulong __t = MAG_MAN(x) >> MAG_BITS; \ MAG_MAN(x) = (MAG_MAN(x) >> __t) + (__t & MAG_MAN(x)); \ if (__t) \ fmpz_add_ui(MAG_EXPREF(x), MAG_EXPREF(x), __t); \ @@ -153,14 +153,14 @@ __mag_fixmul32(mp_limb_t x, mp_limb_t y) #define MAG_FAST_ADJUST_ONE_TOO_LARGE(x) \ do { \ - mp_limb_t __t = MAG_MAN(x) >> MAG_BITS; \ + ulong __t = MAG_MAN(x) >> MAG_BITS; \ MAG_MAN(x) = (MAG_MAN(x) >> __t) + (__t & MAG_MAN(x)); \ MAG_EXP(x) += __t; \ } while (0) #define MAG_ADJUST_ONE_TOO_SMALL(x) \ do { \ - mp_limb_t __t = !(MAG_MAN(x) >> (MAG_BITS - 1)); \ + ulong __t = !(MAG_MAN(x) >> (MAG_BITS - 1)); \ MAG_MAN(x) = (MAG_MAN(x) << __t); \ if (__t) \ fmpz_sub_ui(MAG_EXPREF(x), MAG_EXPREF(x), __t); \ @@ -168,7 +168,7 @@ __mag_fixmul32(mp_limb_t x, mp_limb_t y) #define MAG_FAST_ADJUST_ONE_TOO_SMALL(x) \ do { \ - mp_limb_t __t = 
!(MAG_MAN(x) >> (MAG_BITS - 1)); \ + ulong __t = !(MAG_MAN(x) >> (MAG_BITS - 1)); \ MAG_MAN(x) = (MAG_MAN(x) << __t); \ MAG_EXP(x) -= __t; \ } while (0) @@ -446,9 +446,9 @@ mag_fast_add_2exp_si(mag_t z, const mag_t x, slong e) int __cexp; \ double __x; \ int __fix; \ - mp_limb_t __man; \ + ulong __man; \ __x = frexp((x), &__cexp); \ - __man = (mp_limb_t)(__x * (double)(LIMB_ONE << MAG_BITS)) + 1; \ + __man = (ulong)(__x * (double)(LIMB_ONE << MAG_BITS)) + 1; \ __fix = __man >> (MAG_BITS); \ __man = (__man >> __fix) + __fix; \ (man) = __man; \ @@ -461,9 +461,9 @@ mag_fast_add_2exp_si(mag_t z, const mag_t x, slong e) int __cexp; \ double __x; \ int __fix; \ - mp_limb_t __man; \ + ulong __man; \ __x = frexp((x), &__cexp); \ - __man = (mp_limb_t)(__x * (double)(LIMB_ONE << MAG_BITS)) - 1; \ + __man = (ulong)(__x * (double)(LIMB_ONE << MAG_BITS)) - 1; \ __fix = __man < MAG_ONE_HALF; \ __man = (__man << __fix); \ (man) = __man; \ diff --git a/src/mag/div.c b/src/mag/div.c index 54dfda3115..e0bbb62082 100644 --- a/src/mag/div.c +++ b/src/mag/div.c @@ -23,13 +23,13 @@ mag_div(mag_t z, const mag_t x, const mag_t y) } else { - mp_limb_t q; + ulong q; slong fix; #if FLINT_BITS == 64 q = (MAG_MAN(x) << MAG_BITS) / MAG_MAN(y) + 1; #else - mp_limb_t hi, lo, r; + ulong hi, lo, r; lo = MAG_MAN(x) << MAG_BITS; hi = MAG_MAN(x) >> (FLINT_BITS - MAG_BITS); udiv_qrnnd(q, r, hi, lo, MAG_MAN(y)); diff --git a/src/mag/div_lower.c b/src/mag/div_lower.c index aeb8caae5e..f46daf47c8 100644 --- a/src/mag/div_lower.c +++ b/src/mag/div_lower.c @@ -23,13 +23,13 @@ mag_div_lower(mag_t z, const mag_t x, const mag_t y) } else { - mp_limb_t q; + ulong q; slong fix; #if FLINT_BITS == 64 q = (MAG_MAN(x) << MAG_BITS) / MAG_MAN(y); #else - mp_limb_t hi, lo, r; + ulong hi, lo, r; lo = MAG_MAN(x) << MAG_BITS; hi = MAG_MAN(x) >> (FLINT_BITS - MAG_BITS); udiv_qrnnd(q, r, hi, lo, MAG_MAN(y)); diff --git a/src/mag/pow_fmpz.c b/src/mag/pow_fmpz.c index 87701d1f85..19de88968f 100644 --- a/src/mag/pow_fmpz.c +++ b/src/mag/pow_fmpz.c @@ -30,7 +30,7 @@ mag_pow_fmpz(mag_t z, const mag_t x, const fmpz_t e) else { mag_t y; - mp_srcptr elimbs; + nn_srcptr elimbs; slong i, bits; mag_init_set(y, x); @@ -69,7 +69,7 @@ mag_pow_fmpz_lower(mag_t z, const mag_t x, const fmpz_t e) else { mag_t y; - mp_srcptr elimbs; + nn_srcptr elimbs; slong i, bits; mag_init_set(y, x); diff --git a/src/mag/set_fmpz_2exp_fmpz.c b/src/mag/set_fmpz_2exp_fmpz.c index b998b888d6..3d2e68b399 100644 --- a/src/mag/set_fmpz_2exp_fmpz.c +++ b/src/mag/set_fmpz_2exp_fmpz.c @@ -20,7 +20,7 @@ mag_set_fmpz_2exp_fmpz(mag_t z, const fmpz_t man, const fmpz_t exp) } else { - mp_limb_t m; + ulong m; slong cexp; m = fmpz_abs_ubound_ui_2exp(&cexp, man, MAG_BITS); @@ -38,7 +38,7 @@ mag_set_fmpz_2exp_fmpz_lower(mag_t z, const fmpz_t man, const fmpz_t exp) } else { - mp_limb_t m; + ulong m; slong cexp; m = fmpz_abs_lbound_ui_2exp(&cexp, man, MAG_BITS); diff --git a/src/mag/set_ui.c b/src/mag/set_ui.c index 229bf9ceab..61132de3e0 100644 --- a/src/mag/set_ui.c +++ b/src/mag/set_ui.c @@ -24,7 +24,7 @@ mag_set_ui(mag_t z, ulong x) else { slong bits; - mp_limb_t overflow; + ulong overflow; bits = flint_clz(x); bits = FLINT_BITS - bits; diff --git a/src/mag/set_ui_2exp_si.c b/src/mag/set_ui_2exp_si.c index 0ec23c3265..5bc6b27e8c 100644 --- a/src/mag/set_ui_2exp_si.c +++ b/src/mag/set_ui_2exp_si.c @@ -24,7 +24,7 @@ mag_set_ui_2exp_si(mag_t z, ulong x, slong e) else { slong bits; - mp_limb_t overflow; + ulong overflow; bits = flint_clz(x); bits = FLINT_BITS - bits; diff --git a/src/mag/sub_lower.c 
b/src/mag/sub_lower.c index d67bc848b9..3ce519fe54 100644 --- a/src/mag/sub_lower.c +++ b/src/mag/sub_lower.c @@ -53,7 +53,7 @@ mag_sub_lower(mag_t z, const mag_t x, const mag_t y) { if (shift <= MAG_BITS) { - mp_limb_t c = MAG_MAN(x) - (MAG_MAN(y) >> shift) - 1; + ulong c = MAG_MAN(x) - (MAG_MAN(y) >> shift) - 1; /* too much cancellation -- compute precisely */ if (c < (UWORD(1) << (MAG_BITS - 4))) diff --git a/src/mag/test/main.c b/src/mag/test/main.c index 76e46077fc..4549b01557 100644 --- a/src/mag/test/main.c +++ b/src/mag/test/main.c @@ -9,8 +9,6 @@ (at your option) any later version. See . */ -#include -#include #include /* Include functions *********************************************************/ diff --git a/src/mag/test/t-d_log_lower_bound.c b/src/mag/test/t-d_log_lower_bound.c index 80f93c5b31..27c8bd4d32 100644 --- a/src/mag/test/t-d_log_lower_bound.c +++ b/src/mag/test/t-d_log_lower_bound.c @@ -26,7 +26,7 @@ double d_randtest2(flint_rand_t state) { - mp_limb_t m1, m2; + ulong m1, m2; double t; if (FLINT_BITS == 64) diff --git a/src/mag/test/t-d_log_upper_bound.c b/src/mag/test/t-d_log_upper_bound.c index afc5aeb404..bf01748f5d 100644 --- a/src/mag/test/t-d_log_upper_bound.c +++ b/src/mag/test/t-d_log_upper_bound.c @@ -26,7 +26,7 @@ double d_randtest2(flint_rand_t state) { - mp_limb_t m1, m2; + ulong m1, m2; double t; if (FLINT_BITS == 64) diff --git a/src/mag/test/t-set_d.c b/src/mag/test/t-set_d.c index 2549a712e0..f112ccc270 100644 --- a/src/mag/test/t-set_d.c +++ b/src/mag/test/t-set_d.c @@ -26,7 +26,7 @@ double d_randtest2(flint_rand_t state) { - mp_limb_t m1, m2; + ulong m1, m2; double t; if (FLINT_BITS == 64) diff --git a/src/mag/test/t-set_d_2exp_fmpz.c b/src/mag/test/t-set_d_2exp_fmpz.c index 9855bad71a..ff402e8e1c 100644 --- a/src/mag/test/t-set_d_2exp_fmpz.c +++ b/src/mag/test/t-set_d_2exp_fmpz.c @@ -26,7 +26,7 @@ double d_randtest2(flint_rand_t state) { - mp_limb_t m1, m2; + ulong m1, m2; double t; if (FLINT_BITS == 64) diff --git a/src/mpf-impl.h b/src/mpf-impl.h index bd9726638a..9f0c822be7 100644 --- a/src/mpf-impl.h +++ b/src/mpf-impl.h @@ -16,6 +16,7 @@ #ifndef FLINT_MPF_H #define FLINT_MPF_H +#include #include "flint.h" #ifdef __cplusplus diff --git a/src/mpfr_mat/test/main.c b/src/mpfr_mat/test/main.c index 15ce166c07..d2349a3247 100644 --- a/src/mpfr_mat/test/main.c +++ b/src/mpfr_mat/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-entry.c" diff --git a/src/mpfr_vec/test/main.c b/src/mpfr_vec/test/main.c index 87b416ab82..29d2dd55bb 100644 --- a/src/mpfr_vec/test/main.c +++ b/src/mpfr_vec/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-init_clear.c" diff --git a/src/mpn_extras.h b/src/mpn_extras.h index adef8c0701..0393e85043 100644 --- a/src/mpn_extras.h +++ b/src/mpn_extras.h @@ -21,6 +21,7 @@ #define MPN_EXTRAS_INLINE static inline #endif +#include #include "flint.h" #ifdef __cplusplus diff --git a/src/mpn_extras/get_d.c b/src/mpn_extras/get_d.c index 0632106627..7d86bd0383 100644 --- a/src/mpn_extras/get_d.c +++ b/src/mpn_extras/get_d.c @@ -86,7 +86,7 @@ MA 02110-1301, USA. */ handling routines, and gets the sign wrong. We don't use such a limb to double cast, neither in the IEEE or generic code. 
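Back on the mag.h changes above: the MAG_FAST_ADJUST_ONE_TOO_LARGE macro normalises a MAG_BITS-bit fixed-point mantissa that may have become exactly one bit too wide, peeling the overflow bit into __t and using it both to shift the mantissa back down (rounding up, so the magnitude stays an upper bound) and to bump the exponent. A standalone sketch of that trick, as an editor's illustration; MAG_BITS is 30 in FLINT, but the constant here is only for demonstration:

/* Branchless fix-up of a fixed-point mantissa that overflowed by one bit,
   mirroring MAG_FAST_ADJUST_ONE_TOO_LARGE. */
#include <assert.h>
#include <stdio.h>

#define MAG_BITS 30

static void adjust_one_too_large(unsigned long long * man, long * exp)
{
    unsigned long long t = *man >> MAG_BITS;      /* 1 iff the mantissa has MAG_BITS + 1 bits */
    *man = (*man >> t) + (t & *man);              /* shift down and round up */
    *exp += (long) t;
}

int main(void)
{
    unsigned long long man = (1ULL << MAG_BITS) + 1;  /* one bit too wide, odd */
    long exp = 0;
    adjust_one_too_large(&man, &exp);
    assert(man <= (1ULL << MAG_BITS) && exp == 1);
    printf("%llu %ld\n", man, exp);                   /* prints: 536870913 1 */
    return 0;
}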
*/ -#include "flint.h" +#include "mpn_extras.h" /* assumes unsigned int is at least 32 bits */ #if defined (FLINT_BIG_ENDIAN) && FLINT_BIG_ENDIAN == 1 diff --git a/src/mpn_extras/test/main.c b/src/mpn_extras/test/main.c index 999280ff88..ec6c895ced 100644 --- a/src/mpn_extras/test/main.c +++ b/src/mpn_extras/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-2add_n_inplace.c" diff --git a/src/mpn_mod.h b/src/mpn_mod.h index aef162c318..302d635a5e 100644 --- a/src/mpn_mod.h +++ b/src/mpn_mod.h @@ -41,10 +41,10 @@ extern "C" { typedef struct { - mp_size_t nlimbs; - mp_limb_t d[MPN_MOD_MAX_LIMBS]; - mp_limb_t dinv[MPN_MOD_MAX_LIMBS]; - mp_limb_t dnormed[MPN_MOD_MAX_LIMBS]; + slong nlimbs; + ulong d[MPN_MOD_MAX_LIMBS]; + ulong dinv[MPN_MOD_MAX_LIMBS]; + ulong dnormed[MPN_MOD_MAX_LIMBS]; flint_bitcnt_t norm; truth_t is_prime; } @@ -69,7 +69,7 @@ mpn_mod_ctx_set_is_field(gr_ctx_t ctx, truth_t is_field) /* Basic operations and arithmetic */ int gr_ctx_init_mpn_mod(gr_ctx_t ctx, const fmpz_t n); -int _gr_ctx_init_mpn_mod(gr_ctx_t ctx, mp_srcptr n, mp_size_t nlimbs); +int _gr_ctx_init_mpn_mod(gr_ctx_t ctx, nn_srcptr n, slong nlimbs); void gr_ctx_init_mpn_mod_randtest(gr_ctx_t ctx, flint_rand_t state); int mpn_mod_ctx_write(gr_stream_t out, gr_ctx_t ctx); @@ -82,66 +82,66 @@ mpn_mod_ctx_is_field(gr_ctx_t ctx) } MPN_MOD_INLINE void -mpn_mod_init(mp_ptr x, gr_ctx_t ctx) +mpn_mod_init(nn_ptr x, gr_ctx_t ctx) { flint_mpn_zero(x, MPN_MOD_CTX_NLIMBS(ctx)); } MPN_MOD_INLINE void -mpn_mod_clear(mp_ptr FLINT_UNUSED(x), gr_ctx_t FLINT_UNUSED(ctx)) +mpn_mod_clear(nn_ptr FLINT_UNUSED(x), gr_ctx_t FLINT_UNUSED(ctx)) { } MPN_MOD_INLINE void -mpn_mod_swap(mp_ptr x, mp_ptr y, gr_ctx_t ctx) +mpn_mod_swap(nn_ptr x, nn_ptr y, gr_ctx_t ctx) { slong i = 0, n = MPN_MOD_CTX_NLIMBS(ctx); for (i = 0; i < n; i++) - FLINT_SWAP(mp_limb_t, x[i], y[i]); + FLINT_SWAP(ulong, x[i], y[i]); } MPN_MOD_INLINE int -mpn_mod_set(mp_ptr res, mp_srcptr x, gr_ctx_t ctx) +mpn_mod_set(nn_ptr res, nn_srcptr x, gr_ctx_t ctx) { flint_mpn_copyi(res, x, MPN_MOD_CTX_NLIMBS(ctx)); return GR_SUCCESS; } MPN_MOD_INLINE int -mpn_mod_zero(mp_ptr res, gr_ctx_t ctx) +mpn_mod_zero(nn_ptr res, gr_ctx_t ctx) { flint_mpn_zero(res, MPN_MOD_CTX_NLIMBS(ctx)); return GR_SUCCESS; } MPN_MOD_INLINE -int mpn_mod_one(mp_ptr res, gr_ctx_t ctx) +int mpn_mod_one(nn_ptr res, gr_ctx_t ctx) { res[0] = 1; flint_mpn_zero(res + 1, MPN_MOD_CTX_NLIMBS(ctx) - 1); return GR_SUCCESS; } -int mpn_mod_set_ui(mp_ptr res, ulong x, gr_ctx_t ctx); -int mpn_mod_set_si(mp_ptr res, slong x, gr_ctx_t ctx); -int mpn_mod_neg_one(mp_ptr res, gr_ctx_t ctx); +int mpn_mod_set_ui(nn_ptr res, ulong x, gr_ctx_t ctx); +int mpn_mod_set_si(nn_ptr res, slong x, gr_ctx_t ctx); +int mpn_mod_neg_one(nn_ptr res, gr_ctx_t ctx); -int mpn_mod_set_mpn(mp_ptr res, mp_srcptr x, mp_size_t xn, gr_ctx_t ctx); -int mpn_mod_set_fmpz(mp_ptr res, const fmpz_t x, gr_ctx_t ctx); -int mpn_mod_set_other(mp_ptr res, gr_ptr v, gr_ctx_t v_ctx, gr_ctx_t ctx); -int mpn_mod_randtest(mp_ptr res, flint_rand_t state, gr_ctx_t ctx); -int mpn_mod_write(gr_stream_t out, mp_srcptr x, gr_ctx_t ctx); +int mpn_mod_set_mpn(nn_ptr res, nn_srcptr x, slong xn, gr_ctx_t ctx); +int mpn_mod_set_fmpz(nn_ptr res, const fmpz_t x, gr_ctx_t ctx); +int mpn_mod_set_other(nn_ptr res, gr_ptr v, gr_ctx_t v_ctx, gr_ctx_t ctx); +int mpn_mod_randtest(nn_ptr res, flint_rand_t state, gr_ctx_t ctx); +int mpn_mod_write(gr_stream_t 
out, nn_srcptr x, gr_ctx_t ctx); -int mpn_mod_get_fmpz(fmpz_t res, mp_srcptr x, gr_ctx_t ctx); +int mpn_mod_get_fmpz(fmpz_t res, nn_srcptr x, gr_ctx_t ctx); MPN_MOD_INLINE truth_t -mpn_mod_is_zero(mp_srcptr x, gr_ctx_t ctx) +mpn_mod_is_zero(nn_srcptr x, gr_ctx_t ctx) { return flint_mpn_zero_p(x, MPN_MOD_CTX_NLIMBS(ctx)) ? T_TRUE : T_FALSE; } MPN_MOD_INLINE truth_t -mpn_mod_is_one(mp_srcptr x, gr_ctx_t ctx) +mpn_mod_is_one(nn_srcptr x, gr_ctx_t ctx) { return (x[0] == 1 && flint_mpn_zero_p(x + 1, MPN_MOD_CTX_NLIMBS(ctx) - 1)) ? T_TRUE : T_FALSE; } @@ -149,59 +149,59 @@ mpn_mod_is_one(mp_srcptr x, gr_ctx_t ctx) truth_t mpn_mod_is_neg_one(gr_srcptr x, gr_ctx_t ctx); MPN_MOD_INLINE truth_t -mpn_mod_equal(mp_srcptr x, mp_srcptr y, gr_ctx_t ctx) +mpn_mod_equal(nn_srcptr x, nn_srcptr y, gr_ctx_t ctx) { return flint_mpn_equal_p(x, y, MPN_MOD_CTX_NLIMBS(ctx)) ? T_TRUE : T_FALSE; } -int mpn_mod_neg(mp_ptr res, mp_srcptr x, gr_ctx_t ctx); -int mpn_mod_add(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx); -int mpn_mod_sub(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx); -int mpn_mod_add_ui(mp_ptr res, mp_srcptr x, ulong y, gr_ctx_t ctx); -int mpn_mod_sub_ui(mp_ptr res, mp_srcptr x, ulong y, gr_ctx_t ctx); -int mpn_mod_add_si(mp_ptr res, mp_srcptr x, slong y, gr_ctx_t ctx); -int mpn_mod_sub_si(mp_ptr res, mp_srcptr x, slong y, gr_ctx_t ctx); -int mpn_mod_add_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx); -int mpn_mod_sub_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx); - -int mpn_mod_mul(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx); - -int mpn_mod_mul_ui(mp_ptr res, mp_srcptr x, ulong y, gr_ctx_t ctx); -int mpn_mod_mul_si(mp_ptr res, mp_srcptr x, slong y, gr_ctx_t ctx); -int mpn_mod_mul_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx); -int mpn_mod_addmul(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx); -int mpn_mod_addmul_ui(mp_ptr res, mp_srcptr x, ulong y, gr_ctx_t ctx); -int mpn_mod_addmul_si(mp_ptr res, mp_srcptr x, slong y, gr_ctx_t ctx); -int mpn_mod_addmul_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx); -int mpn_mod_submul(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx); -int mpn_mod_submul_ui(mp_ptr res, mp_srcptr x, ulong y, gr_ctx_t ctx); -int mpn_mod_submul_si(mp_ptr res, mp_srcptr x, slong y, gr_ctx_t ctx); -int mpn_mod_submul_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx); +int mpn_mod_neg(nn_ptr res, nn_srcptr x, gr_ctx_t ctx); +int mpn_mod_add(nn_ptr res, nn_srcptr x, nn_srcptr y, gr_ctx_t ctx); +int mpn_mod_sub(nn_ptr res, nn_srcptr x, nn_srcptr y, gr_ctx_t ctx); +int mpn_mod_add_ui(nn_ptr res, nn_srcptr x, ulong y, gr_ctx_t ctx); +int mpn_mod_sub_ui(nn_ptr res, nn_srcptr x, ulong y, gr_ctx_t ctx); +int mpn_mod_add_si(nn_ptr res, nn_srcptr x, slong y, gr_ctx_t ctx); +int mpn_mod_sub_si(nn_ptr res, nn_srcptr x, slong y, gr_ctx_t ctx); +int mpn_mod_add_fmpz(nn_ptr res, nn_srcptr x, const fmpz_t y, gr_ctx_t ctx); +int mpn_mod_sub_fmpz(nn_ptr res, nn_srcptr x, const fmpz_t y, gr_ctx_t ctx); + +int mpn_mod_mul(nn_ptr res, nn_srcptr x, nn_srcptr y, gr_ctx_t ctx); + +int mpn_mod_mul_ui(nn_ptr res, nn_srcptr x, ulong y, gr_ctx_t ctx); +int mpn_mod_mul_si(nn_ptr res, nn_srcptr x, slong y, gr_ctx_t ctx); +int mpn_mod_mul_fmpz(nn_ptr res, nn_srcptr x, const fmpz_t y, gr_ctx_t ctx); +int mpn_mod_addmul(nn_ptr res, nn_srcptr x, nn_srcptr y, gr_ctx_t ctx); +int mpn_mod_addmul_ui(nn_ptr res, nn_srcptr x, ulong y, gr_ctx_t ctx); +int mpn_mod_addmul_si(nn_ptr res, nn_srcptr x, slong y, gr_ctx_t ctx); +int 
mpn_mod_addmul_fmpz(nn_ptr res, nn_srcptr x, const fmpz_t y, gr_ctx_t ctx); +int mpn_mod_submul(nn_ptr res, nn_srcptr x, nn_srcptr y, gr_ctx_t ctx); +int mpn_mod_submul_ui(nn_ptr res, nn_srcptr x, ulong y, gr_ctx_t ctx); +int mpn_mod_submul_si(nn_ptr res, nn_srcptr x, slong y, gr_ctx_t ctx); +int mpn_mod_submul_fmpz(nn_ptr res, nn_srcptr x, const fmpz_t y, gr_ctx_t ctx); MPN_MOD_INLINE int -mpn_mod_sqr(mp_ptr res, mp_srcptr x, gr_ctx_t ctx) +mpn_mod_sqr(nn_ptr res, nn_srcptr x, gr_ctx_t ctx) { return mpn_mod_mul(res, x, x, ctx); } -int mpn_mod_inv(mp_ptr res, mp_srcptr x, gr_ctx_t ctx); -int mpn_mod_div(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx); +int mpn_mod_inv(nn_ptr res, nn_srcptr x, gr_ctx_t ctx); +int mpn_mod_div(nn_ptr res, nn_srcptr x, nn_srcptr y, gr_ctx_t ctx); /* Vector functions */ -int _mpn_mod_vec_zero(mp_ptr res, slong len, gr_ctx_t ctx); -int _mpn_mod_vec_clear(mp_ptr FLINT_UNUSED(res), slong FLINT_UNUSED(len), gr_ctx_t FLINT_UNUSED(ctx)); -int _mpn_mod_vec_set(mp_ptr res, mp_srcptr x, slong len, gr_ctx_t ctx); -void _mpn_mod_vec_swap(mp_ptr vec1, mp_ptr vec2, slong len, gr_ctx_t ctx); -int _mpn_mod_vec_neg(mp_ptr res, mp_srcptr x, slong len, gr_ctx_t ctx); -int _mpn_mod_vec_add(mp_ptr res, mp_srcptr x, mp_srcptr y, slong len, gr_ctx_t ctx); -int _mpn_mod_vec_sub(mp_ptr res, mp_srcptr x, mp_srcptr y, slong len, gr_ctx_t ctx); -int _mpn_mod_vec_mul(mp_ptr res, mp_srcptr x, mp_srcptr y, slong len, gr_ctx_t ctx); -int _mpn_mod_vec_mul_scalar(mp_ptr res, mp_srcptr x, slong len, mp_srcptr y, gr_ctx_t ctx); -int _mpn_mod_scalar_mul_vec(mp_ptr res, mp_srcptr y, mp_srcptr x, slong len, gr_ctx_t ctx); -int _mpn_mod_vec_addmul_scalar(mp_ptr res, mp_srcptr x, slong len, mp_srcptr y, gr_ctx_t ctx); -int _mpn_mod_vec_dot(mp_ptr res, mp_srcptr initial, int subtract, mp_srcptr vec1, mp_srcptr vec2, slong len, gr_ctx_t ctx); -int _mpn_mod_vec_dot_rev(mp_ptr res, mp_srcptr initial, int subtract, mp_srcptr vec1, mp_srcptr vec2, slong len, gr_ctx_t ctx); +int _mpn_mod_vec_zero(nn_ptr res, slong len, gr_ctx_t ctx); +int _mpn_mod_vec_clear(nn_ptr FLINT_UNUSED(res), slong FLINT_UNUSED(len), gr_ctx_t FLINT_UNUSED(ctx)); +int _mpn_mod_vec_set(nn_ptr res, nn_srcptr x, slong len, gr_ctx_t ctx); +void _mpn_mod_vec_swap(nn_ptr vec1, nn_ptr vec2, slong len, gr_ctx_t ctx); +int _mpn_mod_vec_neg(nn_ptr res, nn_srcptr x, slong len, gr_ctx_t ctx); +int _mpn_mod_vec_add(nn_ptr res, nn_srcptr x, nn_srcptr y, slong len, gr_ctx_t ctx); +int _mpn_mod_vec_sub(nn_ptr res, nn_srcptr x, nn_srcptr y, slong len, gr_ctx_t ctx); +int _mpn_mod_vec_mul(nn_ptr res, nn_srcptr x, nn_srcptr y, slong len, gr_ctx_t ctx); +int _mpn_mod_vec_mul_scalar(nn_ptr res, nn_srcptr x, slong len, nn_srcptr y, gr_ctx_t ctx); +int _mpn_mod_scalar_mul_vec(nn_ptr res, nn_srcptr y, nn_srcptr x, slong len, gr_ctx_t ctx); +int _mpn_mod_vec_addmul_scalar(nn_ptr res, nn_srcptr x, slong len, nn_srcptr y, gr_ctx_t ctx); +int _mpn_mod_vec_dot(nn_ptr res, nn_srcptr initial, int subtract, nn_srcptr vec1, nn_srcptr vec2, slong len, gr_ctx_t ctx); +int _mpn_mod_vec_dot_rev(nn_ptr res, nn_srcptr initial, int subtract, nn_srcptr vec1, nn_srcptr vec2, slong len, gr_ctx_t ctx); /* Matrix algorithms */ @@ -213,26 +213,26 @@ int mpn_mod_mat_nonsingular_solve_tril(gr_mat_t X, const gr_mat_t L, const gr_ma int mpn_mod_mat_nonsingular_solve_triu(gr_mat_t X, const gr_mat_t U, const gr_mat_t B, int unit, gr_ctx_t ctx); int mpn_mod_mat_lu_classical_delayed(slong * res_rank, slong * P, gr_mat_t A, const gr_mat_t A_in, int rank_check, gr_ctx_t ctx); int 
mpn_mod_mat_lu(slong * rank, slong * P, gr_mat_t LU, const gr_mat_t A, int rank_check, gr_ctx_t ctx); -int mpn_mod_mat_det(mp_ptr res, const gr_mat_t A, gr_ctx_t ctx); +int mpn_mod_mat_det(nn_ptr res, const gr_mat_t A, gr_ctx_t ctx); /* Polynomial algorithms */ -int _mpn_mod_poly_mullow_classical(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, gr_ctx_t ctx); -int _mpn_mod_poly_mullow_karatsuba(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, slong cutoff, gr_ctx_t ctx); -int _mpn_mod_poly_mullow_KS(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, gr_ctx_t ctx); -int _mpn_mod_poly_mullow_fft_small(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, gr_ctx_t ctx); -int _mpn_mod_poly_mullow(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, gr_ctx_t ctx); +int _mpn_mod_poly_mullow_classical(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong len, gr_ctx_t ctx); +int _mpn_mod_poly_mullow_karatsuba(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong len, slong cutoff, gr_ctx_t ctx); +int _mpn_mod_poly_mullow_KS(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong len, gr_ctx_t ctx); +int _mpn_mod_poly_mullow_fft_small(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong len, gr_ctx_t ctx); +int _mpn_mod_poly_mullow(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong len, gr_ctx_t ctx); -int _mpn_mod_poly_inv_series(mp_ptr Q, mp_srcptr B, slong lenB, slong len, gr_ctx_t ctx); -int _mpn_mod_poly_div_series(mp_ptr Q, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, slong len, gr_ctx_t ctx); +int _mpn_mod_poly_inv_series(nn_ptr Q, nn_srcptr B, slong lenB, slong len, gr_ctx_t ctx); +int _mpn_mod_poly_div_series(nn_ptr Q, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, slong len, gr_ctx_t ctx); -int _mpn_mod_poly_divrem_basecase_preinv1(mp_ptr Q, mp_ptr R, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, mp_srcptr invL, gr_ctx_t ctx); -int _mpn_mod_poly_divrem_basecase(mp_ptr Q, mp_ptr R, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, gr_ctx_t ctx); -int _mpn_mod_poly_divrem(mp_ptr Q, mp_ptr R, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, gr_ctx_t ctx); -int _mpn_mod_poly_div(mp_ptr Q, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, gr_ctx_t ctx); +int _mpn_mod_poly_divrem_basecase_preinv1(nn_ptr Q, nn_ptr R, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nn_srcptr invL, gr_ctx_t ctx); +int _mpn_mod_poly_divrem_basecase(nn_ptr Q, nn_ptr R, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, gr_ctx_t ctx); +int _mpn_mod_poly_divrem(nn_ptr Q, nn_ptr R, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, gr_ctx_t ctx); +int _mpn_mod_poly_div(nn_ptr Q, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, gr_ctx_t ctx); -int _mpn_mod_poly_gcd(mp_ptr G, slong * lenG, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, gr_ctx_t ctx); -int _mpn_mod_poly_xgcd(slong * lenG, mp_ptr G, mp_ptr S, mp_ptr T, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, gr_ctx_t ctx); +int _mpn_mod_poly_gcd(nn_ptr G, slong * lenG, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, gr_ctx_t ctx); +int _mpn_mod_poly_xgcd(slong * lenG, nn_ptr G, nn_ptr S, nn_ptr T, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, gr_ctx_t ctx); #ifdef __cplusplus } diff --git a/src/mpn_mod/ctx.c b/src/mpn_mod/ctx.c index 6eb7e06568..e63ba2a2fd 100644 --- 
a/src/mpn_mod/ctx.c +++ b/src/mpn_mod/ctx.c @@ -138,14 +138,14 @@ gr_method_tab_input _mpn_mod_methods_input[] = #endif int -_gr_ctx_init_mpn_mod(gr_ctx_t ctx, mp_srcptr n, mp_size_t nlimbs) +_gr_ctx_init_mpn_mod(gr_ctx_t ctx, nn_srcptr n, slong nlimbs) { - mp_bitcnt_t norm; + flint_bitcnt_t norm; if (nlimbs < MPN_MOD_MIN_LIMBS || nlimbs > MPN_MOD_MAX_LIMBS || n[nlimbs - 1] == 0) return GR_UNABLE; ctx->which_ring = GR_CTX_MPN_MOD; - ctx->sizeof_elem = nlimbs * sizeof(mp_limb_t); + ctx->sizeof_elem = nlimbs * sizeof(ulong); GR_CTX_DATA_AS_PTR(ctx) = flint_malloc(sizeof(_mpn_mod_ctx_struct)); diff --git a/src/mpn_mod/mat_det.c b/src/mpn_mod/mat_det.c index b354b1967e..de2a657d1c 100644 --- a/src/mpn_mod/mat_det.c +++ b/src/mpn_mod/mat_det.c @@ -12,7 +12,7 @@ #include "mpn_mod.h" int -mpn_mod_mat_det(mp_ptr res, const gr_mat_t A, gr_ctx_t ctx) +mpn_mod_mat_det(nn_ptr res, const gr_mat_t A, gr_ctx_t ctx) { slong n = A->r; diff --git a/src/mpn_mod/mat_lu_classical_delayed.c b/src/mpn_mod/mat_lu_classical_delayed.c index b76769fc15..eeb132b249 100644 --- a/src/mpn_mod/mat_lu_classical_delayed.c +++ b/src/mpn_mod/mat_lu_classical_delayed.c @@ -21,15 +21,15 @@ int mpn_mod_mat_lu_classical_delayed(slong * res_rank, slong * P, gr_mat_t A, const gr_mat_t A_in, int rank_check, gr_ctx_t ctx) { - mp_limb_t d[MPN_MOD_MAX_LIMBS]; - mp_limb_t e[MPN_MOD_MAX_LIMBS]; - mp_limb_t f[MPN_MOD_MAX_LIMBS]; - mp_ptr * a; - mp_ptr tmprow; + ulong d[MPN_MOD_MAX_LIMBS]; + ulong e[MPN_MOD_MAX_LIMBS]; + ulong f[MPN_MOD_MAX_LIMBS]; + nn_ptr * a; + nn_ptr tmprow; slong n = MPN_MOD_CTX_NLIMBS(ctx); slong i, j, nrows, ncols, rank, row, col, pivot_row, tmp_index; int status = GR_SUCCESS; - mp_ptr tmp_ptr, b; + nn_ptr tnn_ptr, b; TMP_INIT; nrows = A->r; @@ -41,7 +41,7 @@ mpn_mod_mat_lu_classical_delayed(slong * res_rank, slong * P, gr_mat_t A, const return GR_SUCCESS; } - a = (mp_ptr *) A->rows; + a = (nn_ptr *) A->rows; if (A != A_in) { @@ -55,7 +55,7 @@ mpn_mod_mat_lu_classical_delayed(slong * res_rank, slong * P, gr_mat_t A, const P[i] = i; TMP_START; - b = TMP_ALLOC((2 * n + 1) * sizeof(mp_limb_t) * (nrows + 1) * ncols); + b = TMP_ALLOC((2 * n + 1) * sizeof(ulong) * (nrows + 1) * ncols); tmprow = b + (2 * n + 1) * (nrows * ncols); #define UNREDUCED(ii, jj) (b + (2 * n + 1) * ((ii) * ncols + (jj))) @@ -107,9 +107,9 @@ mpn_mod_mat_lu_classical_delayed(slong * res_rank, slong * P, gr_mat_t A, const /* swap rows */ if (pivot_row != row) { - tmp_ptr = a[pivot_row]; + tnn_ptr = a[pivot_row]; a[pivot_row] = a[row]; - a[row] = tmp_ptr; + a[row] = tnn_ptr; tmp_index = P[pivot_row]; P[pivot_row] = P[row]; @@ -148,7 +148,7 @@ mpn_mod_mat_lu_classical_delayed(slong * res_rank, slong * P, gr_mat_t A, const { for (j = col + 1; j < ncols; j++) { - mp_limb_t t[4]; + ulong t[4]; FLINT_MPN_MUL_2X2(t[3], t[2], t[1], t[0], REDUCED(row, j)[1], REDUCED(row, j)[0], f[1], f[0]); add_sssssaaaaaaaaaa(UNREDUCED(i, j)[4], UNREDUCED(i, j)[3], UNREDUCED(i, j)[2], UNREDUCED(i, j)[1], UNREDUCED(i, j)[0], UNREDUCED(i, j)[4], UNREDUCED(i, j)[3], UNREDUCED(i, j)[2], UNREDUCED(i, j)[1], UNREDUCED(i, j)[0], diff --git a/src/mpn_mod/mat_mul_multi_mod.c b/src/mpn_mod/mat_mul_multi_mod.c index 0a00062d3b..f64c6a4ec3 100644 --- a/src/mpn_mod/mat_mul_multi_mod.c +++ b/src/mpn_mod/mat_mul_multi_mod.c @@ -29,40 +29,40 @@ typedef struct { slong Bstoprow; slong Cstartrow; slong Cstoprow; - mp_ptr * Arows; - mp_ptr * Brows; - mp_ptr * Crows; + nn_ptr * Arows; + nn_ptr * Brows; + nn_ptr * Crows; nmod_mat_t * mod_A; nmod_mat_t * mod_B; nmod_mat_t * mod_C; slong 
num_primes; - mp_ptr primes; + nn_ptr primes; gr_ctx_struct * ctx; } _worker_arg; -FLINT_FORCE_INLINE mp_limb_t -nmod_set_mpn_2(mp_srcptr ad, nmod_t mod) +FLINT_FORCE_INLINE ulong +nmod_set_mpn_2(nn_srcptr ad, nmod_t mod) { - mp_limb_t r = 0; + ulong r = 0; NMOD_RED2(r, r, ad[1], mod); NMOD_RED2(r, r, ad[0], mod); return r; } -FLINT_FORCE_INLINE mp_limb_t -nmod_set_mpn_3(mp_srcptr ad, nmod_t mod) +FLINT_FORCE_INLINE ulong +nmod_set_mpn_3(nn_srcptr ad, nmod_t mod) { - mp_limb_t r = 0; + ulong r = 0; NMOD_RED2(r, r, ad[2], mod); NMOD_RED2(r, r, ad[1], mod); NMOD_RED2(r, r, ad[0], mod); return r; } -FLINT_FORCE_INLINE mp_limb_t -nmod_set_mpn_4(mp_srcptr ad, nmod_t mod) +FLINT_FORCE_INLINE ulong +nmod_set_mpn_4(nn_srcptr ad, nmod_t mod) { - mp_limb_t r = 0; + ulong r = 0; NMOD_RED2(r, r, ad[3], mod); NMOD_RED2(r, r, ad[2], mod); NMOD_RED2(r, r, ad[1], mod); @@ -71,8 +71,8 @@ nmod_set_mpn_4(mp_srcptr ad, nmod_t mod) } /* todo: precomputed inverse */ -FLINT_FORCE_INLINE mp_limb_t -nmod_set_mpn(mp_srcptr ad, mp_size_t an, nmod_t mod) +FLINT_FORCE_INLINE ulong +nmod_set_mpn(nn_srcptr ad, slong an, nmod_t mod) { return mpn_mod_1(ad, an, mod.n); } @@ -87,15 +87,15 @@ static void _mod_worker(void * varg) slong Astoprow = arg->Astoprow; slong Bstartrow = arg->Bstartrow; slong Bstoprow = arg->Bstoprow; - mp_ptr * Arows = arg->Arows; - mp_ptr * Brows = arg->Brows; + nn_ptr * Arows = arg->Arows; + nn_ptr * Brows = arg->Brows; nmod_mat_t * mod_A = arg->mod_A; nmod_mat_t * mod_B = arg->mod_B; slong num_primes = arg->num_primes; slong nlimbs = MPN_MOD_CTX_NLIMBS(arg->ctx); - mp_limb_t first_prime = UWORD(1) << (FLINT_BITS - 1); + ulong first_prime = UWORD(1) << (FLINT_BITS - 1); if (nlimbs == 2 && arg->primes[0] == first_prime) { @@ -144,9 +144,9 @@ static void _crt_worker(void * varg) slong n = arg->n; slong Cstartrow = arg->Cstartrow; slong Cstoprow = arg->Cstoprow; - mp_ptr * Crows = arg->Crows; + nn_ptr * Crows = arg->Crows; nmod_mat_t * mod_C = arg->mod_C; - mp_limb_t * primes = arg->primes; + ulong * primes = arg->primes; slong num_primes = arg->num_primes; gr_ctx_struct * ctx = arg->ctx; slong nlimbs = MPN_MOD_CTX_NLIMBS(ctx); @@ -156,11 +156,11 @@ static void _crt_worker(void * varg) for small entries */ { - mp_ptr M, Ns, T, U; - mp_size_t Msize, Nsize; - mp_limb_t cy, ri; + nn_ptr M, Ns, T, U; + slong Msize, Nsize; + ulong cy, ri; - M = FLINT_ARRAY_ALLOC(num_primes + 1, mp_limb_t); + M = FLINT_ARRAY_ALLOC(num_primes + 1, ulong); M[0] = primes[0]; Msize = 1; @@ -176,9 +176,9 @@ static void _crt_worker(void * varg) do not require an extra limb. 
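On nmod_set_mpn_2 in the multi-modular matrix multiplication code above: starting from r = 0, the two NMOD_RED2 steps fold first the high limb and then the low limb, so the result is simply ad[1]*2^FLINT_BITS + ad[0] reduced modulo mod.n. The reference computation below is an editor's illustration; it assumes 64-bit limbs and leans on the unsigned __int128 extension (GCC/Clang) rather than FLINT's precomputed-inverse reduction, so it is a reference for the result, not for the performance:

/* Reduce a two-limb value ad[1]*2^64 + ad[0] modulo a single-word n. */
#include <assert.h>
#include <stdio.h>

typedef unsigned long long limb_t;

static limb_t reduce_2limb(const limb_t ad[2], limb_t n)
{
    limb_t hi = ad[1] % n;                         /* fold the top limb first */
    unsigned __int128 t = ((unsigned __int128) hi << 64) | ad[0];
    return (limb_t) (t % n);                       /* then fold the low limb */
}

int main(void)
{
    limb_t ad[2] = { 123456789ULL, 42ULL };        /* 42 * 2^64 + 123456789 */
    limb_t n = 1000003ULL;
    limb_t r = reduce_2limb(ad, n);
    assert(r < n);
    printf("%llu\n", r);
    return 0;
}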
*/ Nsize = Msize + 2; - Ns = FLINT_ARRAY_ALLOC(Nsize*num_primes, mp_limb_t); - T = FLINT_ARRAY_ALLOC(Nsize, mp_limb_t); - U = FLINT_ARRAY_ALLOC(Nsize, mp_limb_t); + Ns = FLINT_ARRAY_ALLOC(Nsize*num_primes, ulong); + T = FLINT_ARRAY_ALLOC(Nsize, ulong); + U = FLINT_ARRAY_ALLOC(Nsize, ulong); for (i = 0; i < num_primes; i++) { @@ -252,9 +252,9 @@ int mpn_mod_mat_mul_multi_mod(gr_mat_t C, const gr_mat_t A, const gr_mat_t B, gr mainarg.ctx = ctx; - mainarg.Arows = (mp_ptr *) A->rows; - mainarg.Brows = (mp_ptr *) B->rows; - mainarg.Crows = (mp_ptr *) C->rows; + mainarg.Arows = (nn_ptr *) A->rows; + mainarg.Brows = (nn_ptr *) B->rows; + mainarg.Crows = (nn_ptr *) C->rows; /* TUNING */ primes_bits = NMOD_MAT_OPTIMAL_MODULUS_BITS; @@ -272,7 +272,7 @@ int mpn_mod_mat_mul_multi_mod(gr_mat_t C, const gr_mat_t A, const gr_mat_t B, gr } /* Initialize */ - mainarg.primes = FLINT_ARRAY_ALLOC(mainarg.num_primes, mp_limb_t); + mainarg.primes = FLINT_ARRAY_ALLOC(mainarg.num_primes, ulong); mainarg.primes[0] = first_prime; if (mainarg.num_primes > 1) { diff --git a/src/mpn_mod/mat_mul_waksman.c b/src/mpn_mod/mat_mul_waksman.c index 9fd9fcae0a..abfd6c2860 100644 --- a/src/mpn_mod/mat_mul_waksman.c +++ b/src/mpn_mod/mat_mul_waksman.c @@ -16,7 +16,7 @@ /* compute c += (a1 + b1) * (a2 + b2) */ /* val0, val1, val2 are scratch space */ FLINT_FORCE_INLINE void -addmul_addadd(mp_ptr val0, mp_ptr val1, mp_ptr val2, mp_ptr c, mp_srcptr a1, mp_srcptr b1, mp_srcptr a2, mp_srcptr b2, mp_size_t nlimbs, int add_can_overflow_nlimbs) +addmul_addadd(nn_ptr val0, nn_ptr val1, nn_ptr val2, nn_ptr c, nn_srcptr a1, nn_srcptr b1, nn_srcptr a2, nn_srcptr b2, slong nlimbs, int add_can_overflow_nlimbs) { if (!add_can_overflow_nlimbs) { @@ -39,7 +39,7 @@ addmul_addadd(mp_ptr val0, mp_ptr val1, mp_ptr val2, mp_ptr c, mp_srcptr a1, mp_ /* compute c += (a1 - b1) * (a2 - b2) */ /* val0, val1, val2 are scratch space */ FLINT_FORCE_INLINE void -addmul_subsub(mp_ptr val0, mp_ptr val1, mp_ptr val2, mp_ptr c, mp_srcptr a1, mp_srcptr b1, mp_srcptr a2, mp_srcptr b2, mp_size_t nlimbs) +addmul_subsub(nn_ptr val0, nn_ptr val1, nn_ptr val2, nn_ptr c, nn_srcptr a1, nn_srcptr b1, nn_srcptr a2, nn_srcptr b2, slong nlimbs) { int neg; neg = flint_mpn_signed_sub_n(val1, a1, b1, nlimbs); @@ -69,17 +69,17 @@ int mpn_mod_mat_mul_waksman(gr_mat_t C, const gr_mat_t A, const gr_mat_t B, gr_c slong i, l, j, k; - mp_ptr Ctmp = flint_calloc(slimbs * ((m * p) + (p + m) + 5), sizeof(mp_limb_t)); + nn_ptr Ctmp = flint_calloc(slimbs * ((m * p) + (p + m) + 5), sizeof(ulong)); /* Ctmp itself has m * p entries */ - mp_ptr Crow = Ctmp + slimbs * (m * p); /* Crow has p entries */ - mp_ptr Ccol = Crow + slimbs * p; /* Ccol has m entries */ - mp_ptr val0 = Ccol + slimbs * m; /* val0 has room for 2 sums */ - mp_ptr val1 = val0 + 2 * slimbs; /* val1 has room for 1 sum */ - mp_ptr val2 = val1 + slimbs; /* val2 has room for 1 sum */ - mp_ptr crow = val2 + slimbs; /* crow has room for 1 sum */ + nn_ptr Crow = Ctmp + slimbs * (m * p); /* Crow has p entries */ + nn_ptr Ccol = Crow + slimbs * p; /* Ccol has m entries */ + nn_ptr val0 = Ccol + slimbs * m; /* val0 has room for 2 sums */ + nn_ptr val1 = val0 + 2 * slimbs; /* val1 has room for 1 sum */ + nn_ptr val2 = val1 + slimbs; /* val2 has room for 1 sum */ + nn_ptr crow = val2 + slimbs; /* crow has room for 1 sum */ -#define A_ENTRY(ii, jj) (((mp_srcptr) A->rows[ii]) + (jj) * nlimbs) -#define B_ENTRY(ii, jj) (((mp_srcptr) B->rows[ii]) + (jj) * nlimbs) +#define A_ENTRY(ii, jj) (((nn_srcptr) A->rows[ii]) + (jj) * nlimbs) +#define 
B_ENTRY(ii, jj) (((nn_srcptr) B->rows[ii]) + (jj) * nlimbs) #define C_ENTRY(ii, jj) (Ctmp + ((ii) * p + (jj)) * slimbs) #define Crow_ENTRY(ii) (Crow + (ii) * slimbs) @@ -154,8 +154,8 @@ int mpn_mod_mat_mul_waksman(gr_mat_t C, const gr_mat_t A, const gr_mat_t B, gr_c { for (k = 0; k < p; k++) { - mp_size_t d; - mp_ptr Cptr = ((mp_ptr) C->rows[i]) + k * nlimbs; + slong d; + nn_ptr Cptr = ((nn_ptr) C->rows[i]) + k * nlimbs; /* As currently implemented, there is no wraparound arithmetic. Were that the case, we would need something like diff --git a/src/mpn_mod/poly.c b/src/mpn_mod/poly.c index 3b47092e09..ab832e1ffa 100644 --- a/src/mpn_mod/poly.c +++ b/src/mpn_mod/poly.c @@ -19,7 +19,7 @@ static const short div_series_cutoff_tab[] = {231, 306, 321, 370, 166, 182, 220, static const short divrem_cutoff_tab[] = {166, 139, 139, 139, 69, 75, 89, 139, 127, 111, 111, 127, 116, 111, 106, 101, 97, 93, 106, 106, 85, 81, 78, 101, 54, 75, 85, 81, 63, 60, 58, 75, 52, 58, 58, 66, 52, 58, 52, 56, 54, 54, 46, 54, 52, 50, 46, 50, 39, 46, 48, 46, 46, 38, 44, 44, 40, 40, 42, 44, }; int -_mpn_mod_poly_inv_series(mp_ptr Q, mp_srcptr B, slong lenB, slong len, gr_ctx_t ctx) +_mpn_mod_poly_inv_series(nn_ptr Q, nn_srcptr B, slong lenB, slong len, gr_ctx_t ctx) { slong tab_i, cutoff, bits; @@ -39,7 +39,7 @@ _mpn_mod_poly_inv_series(mp_ptr Q, mp_srcptr B, slong lenB, slong len, gr_ctx_t } int -_mpn_mod_poly_div_series(mp_ptr Q, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, slong len, gr_ctx_t ctx) +_mpn_mod_poly_div_series(nn_ptr Q, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, slong len, gr_ctx_t ctx) { slong tab_i, cutoff, bits; @@ -60,7 +60,7 @@ _mpn_mod_poly_div_series(mp_ptr Q, mp_srcptr A, slong lenA, mp_srcptr B, slong l } int -_mpn_mod_poly_div(mp_ptr Q, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, gr_ctx_t ctx) +_mpn_mod_poly_div(nn_ptr Q, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, gr_ctx_t ctx) { slong tab_i, cutoff, bits; @@ -77,7 +77,7 @@ _mpn_mod_poly_div(mp_ptr Q, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, gr /* note: we don't define _mpn_mod_poly_divexact because the default algorithm is currently fine */ /* todo: check unbalanced tuning */ -int _mpn_mod_poly_divrem(mp_ptr Q, mp_ptr R, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, gr_ctx_t ctx) +int _mpn_mod_poly_divrem(nn_ptr Q, nn_ptr R, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, gr_ctx_t ctx) { slong tab_i, cutoff, bits; @@ -91,7 +91,7 @@ int _mpn_mod_poly_divrem(mp_ptr Q, mp_ptr R, mp_srcptr A, slong lenA, mp_srcptr return _gr_poly_divrem_newton(Q, R, A, lenA, B, lenB, ctx); } -int _mpn_mod_poly_gcd(mp_ptr G, slong * lenG, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, gr_ctx_t ctx) +int _mpn_mod_poly_gcd(nn_ptr G, slong * lenG, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, gr_ctx_t ctx) { slong cutoff = 240; @@ -101,7 +101,7 @@ int _mpn_mod_poly_gcd(mp_ptr G, slong * lenG, mp_srcptr A, slong lenA, mp_srcptr return _gr_poly_gcd_hgcd(G, lenG, A, lenA, B, lenB, cutoff / 3, cutoff, ctx); } -int _mpn_mod_poly_xgcd(slong * lenG, mp_ptr G, mp_ptr S, mp_ptr T, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, gr_ctx_t ctx) +int _mpn_mod_poly_xgcd(slong * lenG, nn_ptr G, nn_ptr S, nn_ptr T, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, gr_ctx_t ctx) { slong cutoff = 240; diff --git a/src/mpn_mod/poly_divrem_basecase.c b/src/mpn_mod/poly_divrem_basecase.c index ce4fcf1346..ae4ae12643 100644 --- a/src/mpn_mod/poly_divrem_basecase.c +++ b/src/mpn_mod/poly_divrem_basecase.c @@ -22,7 +22,7 @@ #endif static void 
-mpn_mod_set_mpn2(mp_ptr res, mp_srcptr s, mp_size_t l, gr_ctx_t ctx) +mpn_mod_set_mpn2(nn_ptr res, nn_srcptr s, slong l, gr_ctx_t ctx) { MPN_NORM(s, l); mpn_mod_set_mpn(res, s, l, ctx); @@ -31,7 +31,7 @@ mpn_mod_set_mpn2(mp_ptr res, mp_srcptr s, mp_size_t l, gr_ctx_t ctx) #define FLINT_MPN_MUL_3_2X2(R2, R1, R0, a1, a0, b1, b0) \ do \ { \ - mp_limb_t __tmp2, __tmp1; \ + ulong __tmp2, __tmp1; \ umul_ppmm(R1, R0, a0, b0); \ (R2) = (a1) * (b1); \ umul_ppmm(__tmp2, __tmp1, a0, b1); \ @@ -41,13 +41,13 @@ mpn_mod_set_mpn2(mp_ptr res, mp_srcptr s, mp_size_t l, gr_ctx_t ctx) } \ while (0) \ -static int _mpn_mod_poly_divrem_q0_preinv1(mp_ptr Q, mp_ptr R, - mp_srcptr A, mp_srcptr B, slong lenA, mp_srcptr invL, gr_ctx_t ctx) +static int _mpn_mod_poly_divrem_q0_preinv1(nn_ptr Q, nn_ptr R, + nn_srcptr A, nn_srcptr B, slong lenA, nn_srcptr invL, gr_ctx_t ctx) { /* special case for lenA == 1 omitted since this is dealt with in the calling function */ - mp_size_t nlimbs = MPN_MOD_CTX_NLIMBS(ctx); + slong nlimbs = MPN_MOD_CTX_NLIMBS(ctx); int monic = mpn_mod_is_one(invL, ctx) == T_TRUE; @@ -58,7 +58,7 @@ static int _mpn_mod_poly_divrem_q0_preinv1(mp_ptr Q, mp_ptr R, if (R == A) { - mp_limb_t t[MPN_MOD_MAX_LIMBS]; + ulong t[MPN_MOD_MAX_LIMBS]; mpn_mod_neg(t, Q, ctx); _mpn_mod_vec_addmul_scalar(R, B, lenA - 1, t, ctx); @@ -72,16 +72,16 @@ static int _mpn_mod_poly_divrem_q0_preinv1(mp_ptr Q, mp_ptr R, return GR_SUCCESS; } -static int _mpn_mod_poly_divrem_q1_preinv1(mp_ptr Q, mp_ptr R, - mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, - mp_srcptr invL, gr_ctx_t ctx) +static int _mpn_mod_poly_divrem_q1_preinv1(nn_ptr Q, nn_ptr R, + nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, + nn_srcptr invL, gr_ctx_t ctx) { - mp_limb_t q0[MPN_MOD_MAX_LIMBS]; - mp_limb_t q1[MPN_MOD_MAX_LIMBS]; - mp_limb_t t[2 * MPN_MOD_MAX_LIMBS + 1]; - mp_limb_t u[2 * MPN_MOD_MAX_LIMBS]; + ulong q0[MPN_MOD_MAX_LIMBS]; + ulong q1[MPN_MOD_MAX_LIMBS]; + ulong t[2 * MPN_MOD_MAX_LIMBS + 1]; + ulong u[2 * MPN_MOD_MAX_LIMBS]; slong i; - mp_size_t nlimbs = MPN_MOD_CTX_NLIMBS(ctx); + slong nlimbs = MPN_MOD_CTX_NLIMBS(ctx); int monic = mpn_mod_is_one(invL, ctx) == T_TRUE; /* special case for lenB == 1 omitted since this is dealt with @@ -117,9 +117,9 @@ static int _mpn_mod_poly_divrem_q1_preinv1(mp_ptr Q, mp_ptr R, { for (i = 1; i < lenB - 1; i++) { - mp_srcptr B1ptr = B + (i - 1) * nlimbs; - mp_srcptr Bptr = B + i * nlimbs; - mp_srcptr Aptr = A + i * nlimbs; + nn_srcptr B1ptr = B + (i - 1) * nlimbs; + nn_srcptr Bptr = B + i * nlimbs; + nn_srcptr Aptr = A + i * nlimbs; FLINT_MPN_MUL_3_2X2(t[2], t[1], t[0], q1[1], q1[0], B1ptr[1], B1ptr[0]); add_sssaaaaaa(t[2], t[1], t[0], t[2], t[1], t[0], 0, Aptr[1], Aptr[0]); @@ -132,9 +132,9 @@ static int _mpn_mod_poly_divrem_q1_preinv1(mp_ptr Q, mp_ptr R, { for (i = 1; i < lenB - 1; i++) { - mp_srcptr B1ptr = B + (i - 1) * nlimbs; - mp_srcptr Bptr = B + i * nlimbs; - mp_srcptr Aptr = A + i * nlimbs; + nn_srcptr B1ptr = B + (i - 1) * nlimbs; + nn_srcptr Bptr = B + i * nlimbs; + nn_srcptr Aptr = A + i * nlimbs; FLINT_MPN_MUL_2X2(t[3], t[2], t[1], t[0], q1[1], q1[0], B1ptr[1], B1ptr[0]); add_ssssaaaaaaaa(t[3], t[2], t[1], t[0], t[3], t[2], t[1], t[0], 0, 0, Aptr[1], Aptr[0]); @@ -152,7 +152,7 @@ static int _mpn_mod_poly_divrem_q1_preinv1(mp_ptr Q, mp_ptr R, flint_mpn_mul_n(t, q1, B + (i - 1) * nlimbs, nlimbs); flint_mpn_mul_n(u, q0, B + i * nlimbs, nlimbs); t[2 * nlimbs] = mpn_add_n(t, t, u, 2 * nlimbs); - mp_limb_t cy = mpn_add_n(t, t, A + i * nlimbs, nlimbs); + ulong cy = mpn_add_n(t, t, A + i * nlimbs, 
nlimbs); mpn_add_1(t + nlimbs, t + nlimbs, nlimbs + 1, cy); mpn_mod_set_mpn2(R + i * nlimbs, t, 2 * nlimbs + 1, ctx); } @@ -162,18 +162,18 @@ static int _mpn_mod_poly_divrem_q1_preinv1(mp_ptr Q, mp_ptr R, } int -_mpn_mod_poly_divrem_basecase_preinv1(mp_ptr Q, mp_ptr R, - mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, - mp_srcptr invL, +_mpn_mod_poly_divrem_basecase_preinv1(nn_ptr Q, nn_ptr R, + nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, + nn_srcptr invL, gr_ctx_t ctx) { slong iR, i, j; slong bits; - mp_limb_t r[MPN_MOD_MAX_LIMBS]; - mp_limb_t c[MPN_MOD_MAX_LIMBS]; - mp_limb_t t[2 * MPN_MOD_MAX_LIMBS]; - mp_size_t slimbs, nlimbs; - mp_ptr W; + ulong r[MPN_MOD_MAX_LIMBS]; + ulong c[MPN_MOD_MAX_LIMBS]; + ulong t[2 * MPN_MOD_MAX_LIMBS]; + slong slimbs, nlimbs; + nn_ptr W; int monic; TMP_INIT; @@ -205,10 +205,10 @@ _mpn_mod_poly_divrem_basecase_preinv1(mp_ptr Q, mp_ptr R, FLINT_ASSERT((slimbs == 2 * nlimbs) || (slimbs == 2 * nlimbs + 1) || (slimbs == 2 * nlimbs - 1)); TMP_START; - W = TMP_ALLOC(lenA * slimbs * sizeof(mp_limb_t)); + W = TMP_ALLOC(lenA * slimbs * sizeof(ulong)); - mp_ptr R3 = W; - mp_ptr ptrQ = Q - nlimbs * (lenB - 1); + nn_ptr R3 = W; + nn_ptr ptrQ = Q - nlimbs * (lenB - 1); for (i = 0; i < lenA; i++) { @@ -247,14 +247,14 @@ _mpn_mod_poly_divrem_basecase_preinv1(mp_ptr Q, mp_ptr R, #if defined(add_sssssaaaaaaaaaa) if (nlimbs == 2) { - mp_limb_t t[4]; + ulong t[4]; if (slimbs == 5) { for (j = 0; j < lenB - 1; j++) { - mp_ptr a = R3 + slimbs * (iR - lenB + 1 + j); - mp_srcptr b = B + j * nlimbs; + nn_ptr a = R3 + slimbs * (iR - lenB + 1 + j); + nn_srcptr b = B + j * nlimbs; FLINT_MPN_MUL_2X2(t[3], t[2], t[1], t[0], b[1], b[0], c[1], c[0]); add_sssssaaaaaaaaaa(a[4], a[3], a[2], a[1], a[0], a[4], a[3], a[2], a[1], a[0], 0, t[3], t[2], t[1], t[0]); } @@ -263,8 +263,8 @@ _mpn_mod_poly_divrem_basecase_preinv1(mp_ptr Q, mp_ptr R, { for (j = 0; j < lenB - 1; j++) { - mp_ptr a = R3 + slimbs * (iR - lenB + 1 + j); - mp_srcptr b = B + j * nlimbs; + nn_ptr a = R3 + slimbs * (iR - lenB + 1 + j); + nn_srcptr b = B + j * nlimbs; FLINT_MPN_MUL_2X2(t[3], t[2], t[1], t[0], b[1], b[0], c[1], c[0]); add_ssssaaaaaaaa(a[3], a[2], a[1], a[0], a[3], a[2], a[1], a[0], t[3], t[2], t[1], t[0]); } @@ -273,8 +273,8 @@ _mpn_mod_poly_divrem_basecase_preinv1(mp_ptr Q, mp_ptr R, { for (j = 0; j < lenB - 1; j++) { - mp_ptr a = R3 + slimbs * (iR - lenB + 1 + j); - mp_srcptr b = B + j * nlimbs; + nn_ptr a = R3 + slimbs * (iR - lenB + 1 + j); + nn_srcptr b = B + j * nlimbs; FLINT_MPN_MUL_3_2X2(t[2], t[1], t[0], b[1], b[0], c[1], c[0]); add_sssaaaaaa(a[2], a[1], a[0], a[2], a[1], a[0], t[2], t[1], t[0]); } @@ -315,10 +315,10 @@ _mpn_mod_poly_divrem_basecase_preinv1(mp_ptr Q, mp_ptr R, } int -_mpn_mod_poly_divrem_basecase(mp_ptr Q, mp_ptr R, mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, gr_ctx_t ctx) +_mpn_mod_poly_divrem_basecase(nn_ptr Q, nn_ptr R, nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, gr_ctx_t ctx) { - mp_limb_t invB[MPN_MOD_MAX_LIMBS]; + ulong invB[MPN_MOD_MAX_LIMBS]; int status; status = mpn_mod_inv(invB, B + (lenB - 1) * MPN_MOD_CTX_NLIMBS(ctx), ctx); diff --git a/src/mpn_mod/poly_mullow.c b/src/mpn_mod/poly_mullow.c index 7130139696..bfc9765063 100644 --- a/src/mpn_mod/poly_mullow.c +++ b/src/mpn_mod/poly_mullow.c @@ -335,7 +335,7 @@ static const uint8_t sqrlow_cutoffs[][2] = { }; int -_mpn_mod_poly_mullow(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) +_mpn_mod_poly_mullow(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, 
slong len2, slong len, gr_ctx_t ctx) { slong n; slong bits, cutoff_karatsuba, cutoff_fft_KS, tab_i; diff --git a/src/mpn_mod/poly_mullow_KS.c b/src/mpn_mod/poly_mullow_KS.c index 09c9e74989..06516e53ee 100644 --- a/src/mpn_mod/poly_mullow_KS.c +++ b/src/mpn_mod/poly_mullow_KS.c @@ -15,7 +15,7 @@ /* assumes that we can write one (zeroed) limb too much */ /* assumes bits >= FLINT_BITS */ static void -_mpn_mod_poly_bit_pack(mp_ptr res, mp_srcptr x, slong len, mp_bitcnt_t bits, mp_size_t nlimbs) +_mpn_mod_poly_bit_pack(nn_ptr res, nn_srcptr x, slong len, flint_bitcnt_t bits, slong nlimbs) { slong i, l, shift; @@ -32,12 +32,12 @@ _mpn_mod_poly_bit_pack(mp_ptr res, mp_srcptr x, slong len, mp_bitcnt_t bits, mp_ } static void -_mpn_mod_poly_bit_unpack(mp_ptr res, mp_srcptr x, slong len, mp_bitcnt_t bits, mp_size_t nlimbs, gr_ctx_t ctx) +_mpn_mod_poly_bit_unpack(nn_ptr res, nn_srcptr x, slong len, flint_bitcnt_t bits, slong nlimbs, gr_ctx_t ctx) { slong i, i1, i2, l1, shift, l2; - mp_limb_t t[2 * MPN_MOD_MAX_LIMBS + 3]; - mp_limb_t mask; - mp_size_t blimbs, tn; + ulong t[2 * MPN_MOD_MAX_LIMBS + 3]; + ulong mask; + slong blimbs, tn; blimbs = (bits + FLINT_BITS - 1) / FLINT_BITS; @@ -72,10 +72,10 @@ _mpn_mod_poly_bit_unpack(mp_ptr res, mp_srcptr x, slong len, mp_bitcnt_t bits, m } int -_mpn_mod_poly_mullow_KS(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) +_mpn_mod_poly_mullow_KS(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) { slong bits, nbits, nlimbs, limbs1, limbs2; - mp_ptr arr1, arr2, arr; + nn_ptr arr1, arr2, arr; int squaring; len1 = FLINT_MIN(len1, len); @@ -94,14 +94,14 @@ _mpn_mod_poly_mullow_KS(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2 FLINT_ASSERT(limbs1 >= (bits * (len1 - 1) / FLINT_BITS + nlimbs + 1)); FLINT_ASSERT(limbs2 >= (bits * (len2 - 1) / FLINT_BITS + nlimbs + 1)); - arr1 = flint_calloc(squaring ? limbs1 : limbs1 + limbs2, sizeof(mp_limb_t)); + arr1 = flint_calloc(squaring ? limbs1 : limbs1 + limbs2, sizeof(ulong)); arr2 = squaring ? 
arr1 : arr1 + limbs1; _mpn_mod_poly_bit_pack(arr1, poly1, len1, bits, nlimbs); if (!squaring) _mpn_mod_poly_bit_pack(arr2, poly2, len2, bits, nlimbs); - arr = flint_malloc((limbs1 + limbs2) * sizeof(mp_limb_t)); + arr = flint_malloc((limbs1 + limbs2) * sizeof(ulong)); if (squaring) flint_mpn_sqr(arr, arr1, limbs1); diff --git a/src/mpn_mod/poly_mullow_classical.c b/src/mpn_mod/poly_mullow_classical.c index 76d8ed7f69..87ae7c19c7 100644 --- a/src/mpn_mod/poly_mullow_classical.c +++ b/src/mpn_mod/poly_mullow_classical.c @@ -12,28 +12,28 @@ #include "mpn_mod.h" /* currently defined in poly_mullow_karatsuba.c */ -void _mpn_dot_rev_2x2_3(mp_ptr s, mp_srcptr a, mp_srcptr b, slong len); -void _mpn_dot_rev_2x2_4(mp_ptr s, mp_srcptr a, mp_srcptr b, slong len); -void _mpn_dot_rev_2x2_5(mp_ptr s, mp_srcptr a, mp_srcptr b, slong len); -void _mpn_dot_rev_3x3_5(mp_ptr s, mp_srcptr a, mp_srcptr b, slong len); -void _mpn_dot_rev_nxn_2n(mp_ptr res, mp_srcptr a, mp_srcptr b, slong len, mp_size_t nlimbs); -void _mpn_dot_rev_nxn_2nm1(mp_ptr res, mp_srcptr a, mp_srcptr b, slong len, mp_size_t nlimbs); -void _mpn_dot_rev_nxn_2np1(mp_ptr res, mp_srcptr a, mp_srcptr b, slong len, mp_size_t nlimbs); +void _mpn_dot_rev_2x2_3(nn_ptr s, nn_srcptr a, nn_srcptr b, slong len); +void _mpn_dot_rev_2x2_4(nn_ptr s, nn_srcptr a, nn_srcptr b, slong len); +void _mpn_dot_rev_2x2_5(nn_ptr s, nn_srcptr a, nn_srcptr b, slong len); +void _mpn_dot_rev_3x3_5(nn_ptr s, nn_srcptr a, nn_srcptr b, slong len); +void _mpn_dot_rev_nxn_2n(nn_ptr res, nn_srcptr a, nn_srcptr b, slong len, slong nlimbs); +void _mpn_dot_rev_nxn_2nm1(nn_ptr res, nn_srcptr a, nn_srcptr b, slong len, slong nlimbs); +void _mpn_dot_rev_nxn_2np1(nn_ptr res, nn_srcptr a, nn_srcptr b, slong len, slong nlimbs); static void -mpn_mod_set_mpn2(mp_ptr res, mp_srcptr s, mp_size_t l, gr_ctx_t ctx) +mpn_mod_set_mpn2(nn_ptr res, nn_srcptr s, slong l, gr_ctx_t ctx) { MPN_NORM(s, l); mpn_mod_set_mpn(res, s, l, ctx); } int -_mpn_mod_poly_mullow_classical(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) +_mpn_mod_poly_mullow_classical(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) { slong i, top1, top2; - mp_size_t nlimbs, slimbs; - mp_bitcnt_t sbits; - mp_limb_t s[2 * MPN_MOD_MAX_LIMBS + 1]; + slong nlimbs, slimbs; + flint_bitcnt_t sbits; + ulong s[2 * MPN_MOD_MAX_LIMBS + 1]; int squaring; if (len == 1) @@ -70,7 +70,7 @@ _mpn_mod_poly_mullow_classical(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcpt if (squaring) { - mp_limb_t t[2 * MPN_MOD_MAX_LIMBS]; + ulong t[2 * MPN_MOD_MAX_LIMBS]; slong start, stop; if (slimbs == 2 * nlimbs - 1) diff --git a/src/mpn_mod/poly_mullow_fft_small.c b/src/mpn_mod/poly_mullow_fft_small.c index 3405769b9c..050408fa36 100644 --- a/src/mpn_mod/poly_mullow_fft_small.c +++ b/src/mpn_mod/poly_mullow_fft_small.c @@ -23,19 +23,19 @@ #include "crt_helpers.h" #include "fft_small.h" -static mp_limb_t -nmod_set_mpn_2(mp_srcptr ad, nmod_t mod) +static ulong +nmod_set_mpn_2(nn_srcptr ad, nmod_t mod) { - mp_limb_t r = 0; + ulong r = 0; NMOD_RED2(r, r, ad[1], mod); NMOD_RED2(r, r, ad[0], mod); return r; } -static mp_limb_t -nmod_set_mpn_3(mp_srcptr ad, nmod_t mod) +static ulong +nmod_set_mpn_3(nn_srcptr ad, nmod_t mod) { - mp_limb_t r = 0; + ulong r = 0; NMOD_RED2(r, r, ad[2], mod); NMOD_RED2(r, r, ad[1], mod); NMOD_RED2(r, r, ad[0], mod); @@ -43,16 +43,16 @@ nmod_set_mpn_3(mp_srcptr ad, nmod_t mod) } /* todo: precomputed inverse */ -static mp_limb_t 
-nmod_set_mpn(mp_srcptr ad, mp_size_t an, nmod_t mod) +static ulong +nmod_set_mpn(nn_srcptr ad, slong an, nmod_t mod) { return mpn_mod_1(ad, an, mod.n); } static void _mod( double* abuf, ulong atrunc, - mp_srcptr a, ulong an, - mp_size_t nlimbs, + nn_srcptr a, ulong an, + slong nlimbs, const sd_fft_ctx_struct* fft) { double* aI; @@ -123,9 +123,9 @@ static void _mod( #define DEFINE_IT(NP, N, M) \ static void CAT(_crt, NP)( \ - mp_ptr z, ulong zl, ulong zi_start, ulong zi_stop, \ + nn_ptr z, ulong zl, ulong zi_start, ulong zi_stop, \ sd_fft_ctx_struct* Rffts, double* d, ulong dstride, \ - crt_data_struct* Rcrts, mp_size_t nlimbs, gr_ctx_t ctx) \ + crt_data_struct* Rcrts, slong nlimbs, gr_ctx_t ctx) \ { \ ulong np = NP; \ ulong n = N; \ @@ -188,9 +188,9 @@ DEFINE_IT(8, 7, 6) /* 400 bits */ /* 50 bits (unused) */ static void _crt_1( - mp_ptr FLINT_UNUSED(z), ulong FLINT_UNUSED(zl), ulong FLINT_UNUSED(zi_start), ulong FLINT_UNUSED(zi_stop), + nn_ptr FLINT_UNUSED(z), ulong FLINT_UNUSED(zl), ulong FLINT_UNUSED(zi_start), ulong FLINT_UNUSED(zi_stop), sd_fft_ctx_struct* FLINT_UNUSED(Rffts), double* FLINT_UNUSED(d), ulong FLINT_UNUSED(dstride), - crt_data_struct* FLINT_UNUSED(Rcrts), mp_size_t FLINT_UNUSED(nlimbs), gr_ctx_t FLINT_UNUSED(ctx)) + crt_data_struct* FLINT_UNUSED(Rcrts), slong FLINT_UNUSED(nlimbs), gr_ctx_t FLINT_UNUSED(ctx)) { flint_abort(); } @@ -207,11 +207,11 @@ typedef struct { ulong atrunc; ulong btrunc; ulong ztrunc; - mp_srcptr a; + nn_srcptr a; ulong an; - mp_srcptr b; + nn_srcptr b; ulong bn; - mp_size_t nlimbs; + slong nlimbs; gr_ctx_struct * mpn_mod_ctx; sd_fft_ctx_struct* ffts; crt_data_struct* crts; @@ -292,7 +292,7 @@ static void s1worker_func(void* varg) } typedef struct { - mp_ptr z; + nn_ptr z; ulong zl; ulong start_zi; ulong stop_zi; @@ -302,12 +302,12 @@ typedef struct { sd_fft_ctx_struct* ffts; crt_data_struct* crts; nmod_t mod; - mp_size_t nlimbs; + slong nlimbs; gr_ctx_struct * mpn_mod_ctx; void (*f)( - mp_ptr z, ulong zl, ulong zi_start, ulong zi_stop, + nn_ptr z, ulong zl, ulong zi_start, ulong zi_stop, sd_fft_ctx_struct* Rffts, double* d, ulong dstride, - crt_data_struct* Rcrts, mp_size_t nlimbs, gr_ctx_t ctx); + crt_data_struct* Rcrts, slong nlimbs, gr_ctx_t ctx); } s2worker_struct; static void s2worker_func(void* varg) @@ -318,9 +318,9 @@ static void s2worker_func(void* varg) X->stride, X->crts + X->offset, X->nlimbs, X->mpn_mod_ctx); } -int _mpn_mod_poly_mulmid_fft_small_internal(mp_ptr z, ulong zl, ulong zh, - mp_srcptr a, ulong an, - mp_srcptr b, ulong bn, +int _mpn_mod_poly_mulmid_fft_small_internal(nn_ptr z, ulong zl, ulong zh, + nn_srcptr a, ulong an, + nn_srcptr b, ulong bn, mpn_ctx_t R, gr_ctx_t ctx) { ulong modbits; @@ -332,8 +332,8 @@ int _mpn_mod_poly_mulmid_fft_small_internal(mp_ptr z, ulong zl, ulong zh, int squaring; slong bits1, bits2; int sign = 0; - mp_size_t nlimbs = MPN_MOD_CTX_NLIMBS(ctx); - mp_bitcnt_t nbits = MPN_MOD_CTX_MODULUS_BITS(ctx); + slong nlimbs = MPN_MOD_CTX_NLIMBS(ctx); + flint_bitcnt_t nbits = MPN_MOD_CTX_MODULUS_BITS(ctx); FLINT_ASSERT(an > 0); FLINT_ASSERT(bn > 0); @@ -500,7 +500,7 @@ int _mpn_mod_poly_mulmid_fft_small_internal(mp_ptr z, ulong zl, ulong zh, } int -_mpn_mod_poly_mullow_fft_small(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) +_mpn_mod_poly_mullow_fft_small(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) { if (len1 >= len2) return _mpn_mod_poly_mulmid_fft_small_internal(res, 0, len, poly1, len1, poly2, len2, 
get_default_mpn_ctx(), ctx); @@ -511,7 +511,7 @@ _mpn_mod_poly_mullow_fft_small(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcpt #else /* FLINT_HAVE_FFT_SMALL */ int -_mpn_mod_poly_mullow_fft_small(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) +_mpn_mod_poly_mullow_fft_small(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) { return GR_UNABLE; } diff --git a/src/mpn_mod/poly_mullow_karatsuba.c b/src/mpn_mod/poly_mullow_karatsuba.c index 58f1f759a3..afbe1388a6 100644 --- a/src/mpn_mod/poly_mullow_karatsuba.c +++ b/src/mpn_mod/poly_mullow_karatsuba.c @@ -11,13 +11,13 @@ #include "mpn_mod.h" -void _mpn_dot_rev_2x2_3(mp_ptr s, mp_srcptr a, mp_srcptr b, slong len) +void _mpn_dot_rev_2x2_3(nn_ptr s, nn_srcptr a, nn_srcptr b, slong len) { - mp_limb_t A0, A1, B0, B1; - mp_limb_t p2, p1, p0; - mp_limb_t s2, s1, s0; - mp_limb_t u2, u1; - mp_limb_t v2; + ulong A0, A1, B0, B1; + ulong p2, p1, p0; + ulong s2, s1, s0; + ulong u2, u1; + ulong v2; slong k; s2 = s1 = s0 = 0; @@ -50,13 +50,13 @@ void _mpn_dot_rev_2x2_3(mp_ptr s, mp_srcptr a, mp_srcptr b, slong len) s[2] = s2; } -void _mpn_dot_rev_2x2_4(mp_ptr s, mp_srcptr a, mp_srcptr b, slong len) +void _mpn_dot_rev_2x2_4(nn_ptr s, nn_srcptr a, nn_srcptr b, slong len) { - mp_limb_t A0, A1, B0, B1; - mp_limb_t p3, p2, p1, p0; - mp_limb_t s3, s2, s1, s0; - mp_limb_t u3, u2, u1; - mp_limb_t v3, v2; + ulong A0, A1, B0, B1; + ulong p3, p2, p1, p0; + ulong s3, s2, s1, s0; + ulong u3, u2, u1; + ulong v3, v2; slong k; s3 = s2 = s1 = s0 = 0; @@ -90,13 +90,13 @@ void _mpn_dot_rev_2x2_4(mp_ptr s, mp_srcptr a, mp_srcptr b, slong len) s[3] = s3; } -void _mpn_dot_rev_2x2_5(mp_ptr s, mp_srcptr a, mp_srcptr b, slong len) +void _mpn_dot_rev_2x2_5(nn_ptr s, nn_srcptr a, nn_srcptr b, slong len) { - mp_limb_t A0, A1, B0, B1; - mp_limb_t p3, p2, p1, p0; - mp_limb_t s4, s3, s2, s1, s0; - mp_limb_t u2, u1; - mp_limb_t v3, v2; + ulong A0, A1, B0, B1; + ulong p3, p2, p1, p0; + ulong s4, s3, s2, s1, s0; + ulong u2, u1; + ulong v3, v2; slong k; s4 = s3 = s2 = s1 = s0 = 0; @@ -131,13 +131,13 @@ void _mpn_dot_rev_2x2_5(mp_ptr s, mp_srcptr a, mp_srcptr b, slong len) s[4] = s4; } -void _mpn_dot_rev_3x3_5(mp_ptr s, mp_srcptr a, mp_srcptr b, slong len) +void _mpn_dot_rev_3x3_5(nn_ptr s, nn_srcptr a, nn_srcptr b, slong len) { - mp_limb_t A0, A1, A2, B0, B1, B2; - mp_limb_t p4, p3, p2, p1, p0; - mp_limb_t s4, s3, s2, s1, s0; - mp_limb_t u2, u1; - mp_limb_t v3, v2; + ulong A0, A1, A2, B0, B1, B2; + ulong p4, p3, p2, p1, p0; + ulong s4, s3, s2, s1, s0; + ulong u2, u1; + ulong v3, v2; slong k; s4 = s3 = s2 = s1 = s0 = 0; @@ -184,10 +184,10 @@ void _mpn_dot_rev_3x3_5(mp_ptr s, mp_srcptr a, mp_srcptr b, slong len) } void -_mpn_dot_rev_nxn_2n(mp_ptr res, mp_srcptr a, mp_srcptr b, slong len, mp_size_t nlimbs) +_mpn_dot_rev_nxn_2n(nn_ptr res, nn_srcptr a, nn_srcptr b, slong len, slong nlimbs) { - mp_limb_t t[2 * MPN_MOD_MAX_LIMBS + 3]; - mp_size_t slimbs = 2 * nlimbs; + ulong t[2 * MPN_MOD_MAX_LIMBS + 3]; + slong slimbs = 2 * nlimbs; slong j; flint_mpn_mul_n(res, a, b + (len - 1) * nlimbs, nlimbs); @@ -200,10 +200,10 @@ _mpn_dot_rev_nxn_2n(mp_ptr res, mp_srcptr a, mp_srcptr b, slong len, mp_size_t n } void -_mpn_dot_rev_nxn_2nm1(mp_ptr res, mp_srcptr a, mp_srcptr b, slong len, mp_size_t nlimbs) +_mpn_dot_rev_nxn_2nm1(nn_ptr res, nn_srcptr a, nn_srcptr b, slong len, slong nlimbs) { - mp_limb_t t[2 * MPN_MOD_MAX_LIMBS + 3]; - mp_size_t slimbs = 2 * nlimbs - 1; + ulong t[2 * MPN_MOD_MAX_LIMBS + 3]; + slong 
slimbs = 2 * nlimbs - 1; slong j; flint_mpn_mul_n(t, a, b + (len - 1) * nlimbs, nlimbs); @@ -217,10 +217,10 @@ _mpn_dot_rev_nxn_2nm1(mp_ptr res, mp_srcptr a, mp_srcptr b, slong len, mp_size_t } void -_mpn_dot_rev_nxn_2np1(mp_ptr res, mp_srcptr a, mp_srcptr b, slong len, mp_size_t nlimbs) +_mpn_dot_rev_nxn_2np1(nn_ptr res, nn_srcptr a, nn_srcptr b, slong len, slong nlimbs) { - mp_limb_t t[2 * MPN_MOD_MAX_LIMBS + 3]; - mp_size_t slimbs = 2 * nlimbs + 1; + ulong t[2 * MPN_MOD_MAX_LIMBS + 3]; + slong slimbs = 2 * nlimbs + 1; slong j; flint_mpn_mul_n(res, a, b + (len - 1) * nlimbs, nlimbs); @@ -238,7 +238,7 @@ _mpn_dot_rev_nxn_2np1(mp_ptr res, mp_srcptr a, mp_srcptr b, slong len, mp_size_t to the product of {poly1,len1*nlimbs} and {poly2,len2*nlimbs} viewed as polynomials with length-nlimbs coefficients. */ static void -_mpn_poly_mullow_classical(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, mp_size_t nlimbs, mp_size_t slimbs) +_mpn_poly_mullow_classical(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong len, slong nlimbs, slong slimbs) { slong i, top1, top2; @@ -313,11 +313,11 @@ _mpn_poly_mullow_classical(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr po } static void -_mpn_poly_sqrlow_classical(mp_ptr res, mp_srcptr poly1, slong len1, slong len, mp_size_t nlimbs, mp_size_t slimbs) +_mpn_poly_sqrlow_classical(nn_ptr res, nn_srcptr poly1, slong len1, slong len, slong nlimbs, slong slimbs) { slong i, start, stop; - mp_limb_t t[2 * MPN_MOD_MAX_LIMBS + 3]; - mp_ptr rp; + ulong t[2 * MPN_MOD_MAX_LIMBS + 3]; + nn_ptr rp; FLINT_ASSERT((slimbs == 2 * nlimbs) || (slimbs == 2 * nlimbs + 1) || (slimbs == 2 * nlimbs - 1)); @@ -410,7 +410,7 @@ _mpn_poly_sqrlow_classical(mp_ptr res, mp_srcptr poly1, slong len1, slong len, m } FLINT_FORCE_INLINE void -_mpn_poly_add_n(mp_ptr res, mp_srcptr f, slong flen, mp_srcptr g, slong glen, mp_size_t nlimbs) +_mpn_poly_add_n(nn_ptr res, nn_srcptr f, slong flen, nn_srcptr g, slong glen, slong nlimbs) { slong m = FLINT_MIN(flen, glen); @@ -424,11 +424,11 @@ _mpn_poly_add_n(mp_ptr res, mp_srcptr f, slong flen, mp_srcptr g, slong glen, mp /* inputs have nlimbs, output has nlimbs+1 */ FLINT_FORCE_INLINE void -_mpn_poly_add_n_carry(mp_ptr res, mp_srcptr f, slong flen, mp_srcptr g, slong glen, mp_size_t nlimbs) +_mpn_poly_add_n_carry(nn_ptr res, nn_srcptr f, slong flen, nn_srcptr g, slong glen, slong nlimbs) { slong m = FLINT_MIN(flen, glen); slong i; - mp_size_t nlimbs2 = nlimbs + 1; + slong nlimbs2 = nlimbs + 1; if (nlimbs == 2) { @@ -485,11 +485,11 @@ _mpn_poly_add_n_carry(mp_ptr res, mp_srcptr f, slong flen, mp_srcptr g, slong gl of incrementing nlimbs. 
*/ static void -_mpn_poly_mul_karatsuba(mp_ptr res, mp_srcptr f, slong flen, mp_srcptr g, slong glen, mp_size_t nlimbs, mp_size_t slimbs, slong cutoff, int norm) +_mpn_poly_mul_karatsuba(nn_ptr res, nn_srcptr f, slong flen, nn_srcptr g, slong glen, slong nlimbs, slong slimbs, slong cutoff, int norm) { slong m, f1len, g1len, tlen, ulen, vlen, alloc; - mp_ptr t, u, v; - mp_srcptr f0, f1, g0, g1; + nn_ptr t, u, v; + nn_srcptr f0, f1, g0, g1; int squaring = (f == g) && (flen == glen); TMP_INIT; @@ -530,7 +530,7 @@ _mpn_poly_mul_karatsuba(mp_ptr res, mp_srcptr f, slong flen, mp_srcptr g, slong alloc = tlen * (nlimbs + 1) + ulen * (nlimbs + 1) + vlen * slimbs; TMP_START; - t = TMP_ALLOC(alloc * sizeof(mp_limb_t)); + t = TMP_ALLOC(alloc * sizeof(ulong)); u = t + tlen * (nlimbs + 1); v = u + ulen * (nlimbs + 1); @@ -582,12 +582,12 @@ _mpn_poly_mul_karatsuba(mp_ptr res, mp_srcptr f, slong flen, mp_srcptr g, slong } int -_mpn_mod_poly_mullow_karatsuba(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, slong cutoff, gr_ctx_t ctx) +_mpn_mod_poly_mullow_karatsuba(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong len, slong cutoff, gr_ctx_t ctx) { - mp_ptr t; + nn_ptr t; slong i, l; - mp_size_t nlimbs, slimbs; - mp_bitcnt_t sbits; + slong nlimbs, slimbs; + flint_bitcnt_t sbits; int norm; TMP_INIT; TMP_START; @@ -637,7 +637,7 @@ _mpn_mod_poly_mullow_karatsuba(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcpt sbits = 2 * sbits + 2 * FLINT_BIT_COUNT(FLINT_MIN(len1, len2)); slimbs = (sbits + FLINT_BITS - 1) / FLINT_BITS; - t = TMP_ALLOC(sizeof(mp_limb_t) * slimbs * (len1 + len2 - 1)); + t = TMP_ALLOC(sizeof(ulong) * slimbs * (len1 + len2 - 1)); _mpn_poly_mul_karatsuba(t, poly1, len1, poly2, len2, nlimbs, slimbs, cutoff, norm); diff --git a/src/mpn_mod/ring.c b/src/mpn_mod/ring.c index 60a419da94..10493d0a31 100644 --- a/src/mpn_mod/ring.c +++ b/src/mpn_mod/ring.c @@ -40,7 +40,7 @@ mpn_mod_ctx_clear(gr_ctx_t ctx) } int -mpn_mod_set_ui(mp_ptr res, ulong x, gr_ctx_t ctx) +mpn_mod_set_ui(nn_ptr res, ulong x, gr_ctx_t ctx) { FLINT_ASSERT(MPN_MOD_CTX_NLIMBS(ctx) >= 2); @@ -50,9 +50,9 @@ mpn_mod_set_ui(mp_ptr res, ulong x, gr_ctx_t ctx) } int -mpn_mod_set_si(mp_ptr res, slong x, gr_ctx_t ctx) +mpn_mod_set_si(nn_ptr res, slong x, gr_ctx_t ctx) { - mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); + slong n = MPN_MOD_CTX_NLIMBS(ctx); FLINT_ASSERT(n >= 2); if (x >= 0) @@ -69,7 +69,7 @@ mpn_mod_set_si(mp_ptr res, slong x, gr_ctx_t ctx) } int -mpn_mod_neg_one(mp_ptr res, gr_ctx_t ctx) +mpn_mod_neg_one(nn_ptr res, gr_ctx_t ctx) { return mpn_mod_set_si(res, -1, ctx); } @@ -77,11 +77,11 @@ mpn_mod_neg_one(mp_ptr res, gr_ctx_t ctx) /* fixme: flint_mpn_mod_preinvn is misdocumented */ int -mpn_mod_set_mpn(mp_ptr res, mp_srcptr x, mp_size_t xn, gr_ctx_t ctx) +mpn_mod_set_mpn(nn_ptr res, nn_srcptr x, slong xn, gr_ctx_t ctx) { - mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); - mp_bitcnt_t norm = MPN_MOD_CTX_NORM(ctx); - mp_size_t rn; + slong n = MPN_MOD_CTX_NLIMBS(ctx); + flint_bitcnt_t norm = MPN_MOD_CTX_NORM(ctx); + slong rn; if (xn < n || (xn == n && mpn_cmp(x, MPN_MOD_CTX_MODULUS(ctx), n) < 0)) { @@ -90,11 +90,11 @@ mpn_mod_set_mpn(mp_ptr res, mp_srcptr x, mp_size_t xn, gr_ctx_t ctx) } else { - mp_ptr r; + nn_ptr r; TMP_INIT; TMP_START; - r = TMP_ALLOC((xn + 1) * sizeof(mp_limb_t)); + r = TMP_ALLOC((xn + 1) * sizeof(ulong)); if (norm == 0) { @@ -144,13 +144,13 @@ mpn_mod_set_mpn(mp_ptr res, mp_srcptr x, mp_size_t xn, gr_ctx_t ctx) } int -mpn_mod_set_fmpz(mp_ptr res, const fmpz_t x, 
gr_ctx_t ctx) +mpn_mod_set_fmpz(nn_ptr res, const fmpz_t x, gr_ctx_t ctx) { if (!COEFF_IS_MPZ(*x)) mpn_mod_set_si(res, *x, ctx); else { - mp_size_t nd = FLINT_ABS(COEFF_TO_PTR(*x)->_mp_size); + slong nd = FLINT_ABS(COEFF_TO_PTR(*x)->_mp_size); int neg = COEFF_TO_PTR(*x)->_mp_size < 0; mpn_mod_set_mpn(res, COEFF_TO_PTR(*x)->_mp_d, nd, ctx); @@ -172,14 +172,14 @@ _gr_fmpz_mod_ctx_struct; #define FMPZ_MOD_CTX(ring_ctx) ((((_gr_fmpz_mod_ctx_struct *)(ring_ctx))->ctx)) int -mpn_mod_set_other(mp_ptr res, gr_ptr v, gr_ctx_t v_ctx, gr_ctx_t ctx) +mpn_mod_set_other(nn_ptr res, gr_ptr v, gr_ctx_t v_ctx, gr_ctx_t ctx) { if (v_ctx == ctx) return mpn_mod_set(res, v, ctx); if (v_ctx->which_ring == GR_CTX_MPN_MOD) { - mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); + slong n = MPN_MOD_CTX_NLIMBS(ctx); if (MPN_MOD_CTX_NLIMBS(v_ctx) == n && flint_mpn_equal_p(MPN_MOD_CTX_MODULUS(v_ctx), MPN_MOD_CTX_MODULUS(ctx), n)) @@ -191,11 +191,11 @@ mpn_mod_set_other(mp_ptr res, gr_ptr v, gr_ctx_t v_ctx, gr_ctx_t ctx) if (v_ctx->which_ring == GR_CTX_FMPZ_MOD) { - mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); + slong n = MPN_MOD_CTX_NLIMBS(ctx); if (fmpz_size(FMPZ_MOD_CTX(v_ctx)->n) == n) { - mp_srcptr vd = COEFF_TO_PTR(*(FMPZ_MOD_CTX(v_ctx)->n))->_mp_d; + nn_srcptr vd = COEFF_TO_PTR(*(FMPZ_MOD_CTX(v_ctx)->n))->_mp_d; if (flint_mpn_equal_p(vd, MPN_MOD_CTX_MODULUS(ctx), n)) return mpn_mod_set_fmpz(res, v, ctx); @@ -206,7 +206,7 @@ mpn_mod_set_other(mp_ptr res, gr_ptr v, gr_ctx_t v_ctx, gr_ctx_t ctx) } int -mpn_mod_randtest(mp_ptr res, flint_rand_t state, gr_ctx_t ctx) +mpn_mod_randtest(nn_ptr res, flint_rand_t state, gr_ctx_t ctx) { fmpz_t t; fmpz_init(t); @@ -218,14 +218,14 @@ mpn_mod_randtest(mp_ptr res, flint_rand_t state, gr_ctx_t ctx) } int -mpn_mod_write(gr_stream_t out, mp_srcptr x, gr_ctx_t ctx) +mpn_mod_write(gr_stream_t out, nn_srcptr x, gr_ctx_t ctx) { gr_stream_write_free(out, _flint_mpn_get_str(x, MPN_MOD_CTX_NLIMBS(ctx))); return GR_SUCCESS; } int -mpn_mod_get_fmpz(fmpz_t res, mp_srcptr x, gr_ctx_t ctx) +mpn_mod_get_fmpz(fmpz_t res, nn_srcptr x, gr_ctx_t ctx) { fmpz_set_ui_array(res, x, MPN_MOD_CTX_NLIMBS(ctx)); return GR_SUCCESS; @@ -235,13 +235,13 @@ mpn_mod_get_fmpz(fmpz_t res, mp_srcptr x, gr_ctx_t ctx) truth_t mpn_mod_is_neg_one(gr_srcptr x, gr_ctx_t ctx) { - mp_limb_t t[MPN_MOD_MAX_LIMBS]; + ulong t[MPN_MOD_MAX_LIMBS]; mpn_mod_neg_one(t, ctx); return flint_mpn_equal_p(x, t, MPN_MOD_CTX_NLIMBS(ctx)) ? 
T_TRUE : T_FALSE; } int -mpn_mod_neg(mp_ptr res, mp_srcptr x, gr_ctx_t ctx) +mpn_mod_neg(nn_ptr res, nn_srcptr x, gr_ctx_t ctx) { if (MPN_MOD_CTX_NLIMBS(ctx) == 2) flint_mpn_negmod_2(res, x, MPN_MOD_CTX_MODULUS(ctx)); @@ -251,7 +251,7 @@ mpn_mod_neg(mp_ptr res, mp_srcptr x, gr_ctx_t ctx) } int -mpn_mod_add(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx) +mpn_mod_add(nn_ptr res, nn_srcptr x, nn_srcptr y, gr_ctx_t ctx) { if (MPN_MOD_CTX_NLIMBS(ctx) == 2) flint_mpn_addmod_2(res, x, y, MPN_MOD_CTX_MODULUS(ctx)); @@ -261,7 +261,7 @@ mpn_mod_add(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx) } int -mpn_mod_sub(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx) +mpn_mod_sub(nn_ptr res, nn_srcptr x, nn_srcptr y, gr_ctx_t ctx) { if (MPN_MOD_CTX_NLIMBS(ctx) == 2) flint_mpn_submod_2(res, x, y, MPN_MOD_CTX_MODULUS(ctx)); @@ -271,11 +271,11 @@ mpn_mod_sub(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx) } int -mpn_mod_add_ui(mp_ptr res, mp_srcptr x, ulong y, gr_ctx_t ctx) +mpn_mod_add_ui(nn_ptr res, nn_srcptr x, ulong y, gr_ctx_t ctx) { if (MPN_MOD_CTX_NLIMBS(ctx) == 2) { - mp_limb_t t[2]; + ulong t[2]; t[0] = y; t[1] = 0; flint_mpn_addmod_2(res, x, t, MPN_MOD_CTX_MODULUS(ctx)); @@ -288,11 +288,11 @@ mpn_mod_add_ui(mp_ptr res, mp_srcptr x, ulong y, gr_ctx_t ctx) } int -mpn_mod_sub_ui(mp_ptr res, mp_srcptr x, ulong y, gr_ctx_t ctx) +mpn_mod_sub_ui(nn_ptr res, nn_srcptr x, ulong y, gr_ctx_t ctx) { if (MPN_MOD_CTX_NLIMBS(ctx) == 2) { - mp_limb_t t[2]; + ulong t[2]; t[0] = y; t[1] = 0; flint_mpn_submod_2(res, x, t, MPN_MOD_CTX_MODULUS(ctx)); @@ -305,7 +305,7 @@ mpn_mod_sub_ui(mp_ptr res, mp_srcptr x, ulong y, gr_ctx_t ctx) } int -mpn_mod_add_si(mp_ptr res, mp_srcptr x, slong y, gr_ctx_t ctx) +mpn_mod_add_si(nn_ptr res, nn_srcptr x, slong y, gr_ctx_t ctx) { if (y >= 0) mpn_mod_add_ui(res, x, (ulong) y, ctx); @@ -315,7 +315,7 @@ mpn_mod_add_si(mp_ptr res, mp_srcptr x, slong y, gr_ctx_t ctx) } int -mpn_mod_sub_si(mp_ptr res, mp_srcptr x, slong y, gr_ctx_t ctx) +mpn_mod_sub_si(nn_ptr res, nn_srcptr x, slong y, gr_ctx_t ctx) { if (y >= 0) mpn_mod_sub_ui(res, x, (ulong) y, ctx); @@ -325,7 +325,7 @@ mpn_mod_sub_si(mp_ptr res, mp_srcptr x, slong y, gr_ctx_t ctx) } int -mpn_mod_add_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx) +mpn_mod_add_fmpz(nn_ptr res, nn_srcptr x, const fmpz_t y, gr_ctx_t ctx) { if (!COEFF_IS_MPZ(*y)) { @@ -333,14 +333,14 @@ mpn_mod_add_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx) } else { - mp_limb_t t[MPN_MOD_MAX_LIMBS]; - mp_srcptr m = MPN_MOD_CTX_MODULUS(ctx); - mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); + ulong t[MPN_MOD_MAX_LIMBS]; + nn_srcptr m = MPN_MOD_CTX_MODULUS(ctx); + slong n = MPN_MOD_CTX_NLIMBS(ctx); mpz_ptr z = COEFF_TO_PTR(*y); - mp_size_t ssize = z->_mp_size; - mp_size_t zn = FLINT_ABS(ssize); - mp_srcptr zd = z->_mp_d; + slong ssize = z->_mp_size; + slong zn = FLINT_ABS(ssize); + nn_srcptr zd = z->_mp_d; if (zn < n || (zn == n && mpn_cmp(zd, m, n) < 0)) { @@ -363,7 +363,7 @@ mpn_mod_add_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx) } int -mpn_mod_sub_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx) +mpn_mod_sub_fmpz(nn_ptr res, nn_srcptr x, const fmpz_t y, gr_ctx_t ctx) { if (!COEFF_IS_MPZ(*y)) { @@ -371,14 +371,14 @@ mpn_mod_sub_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx) } else { - mp_limb_t t[MPN_MOD_MAX_LIMBS]; - mp_srcptr m = MPN_MOD_CTX_MODULUS(ctx); - mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); + ulong t[MPN_MOD_MAX_LIMBS]; + nn_srcptr m = MPN_MOD_CTX_MODULUS(ctx); + slong n = 
MPN_MOD_CTX_NLIMBS(ctx); mpz_ptr z = COEFF_TO_PTR(*y); - mp_size_t ssize = z->_mp_size; - mp_size_t zn = FLINT_ABS(ssize); - mp_srcptr zd = z->_mp_d; + slong ssize = z->_mp_size; + slong zn = FLINT_ABS(ssize); + nn_srcptr zd = z->_mp_d; if (zn < n || (zn == n && mpn_cmp(zd, m, n) < 0)) { @@ -403,9 +403,9 @@ mpn_mod_sub_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx) /* should mirror flint_mpn_mulmod_preinvn (backport any improvements) */ int -mpn_mod_mul(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx) +mpn_mod_mul(nn_ptr res, nn_srcptr x, nn_srcptr y, gr_ctx_t ctx) { - mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); + slong n = MPN_MOD_CTX_NLIMBS(ctx); if (n == 2) { @@ -413,11 +413,11 @@ mpn_mod_mul(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx) } else { - mp_limb_t t[5 * MPN_MOD_MAX_LIMBS]; - mp_bitcnt_t norm = MPN_MOD_CTX_NORM(ctx); - mp_ptr d = MPN_MOD_CTX_MODULUS_NORMED(ctx); - mp_ptr dinv = MPN_MOD_CTX_MODULUS_PREINV(ctx); - mp_limb_t cy; + ulong t[5 * MPN_MOD_MAX_LIMBS]; + flint_bitcnt_t norm = MPN_MOD_CTX_NORM(ctx); + nn_ptr d = MPN_MOD_CTX_MODULUS_NORMED(ctx); + nn_ptr dinv = MPN_MOD_CTX_MODULUS_PREINV(ctx); + ulong cy; if (norm) { @@ -465,11 +465,11 @@ mpn_mod_mul(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx) /* todo: check for 0? */ int -mpn_mod_mul_ui(mp_ptr res, mp_srcptr x, ulong y, gr_ctx_t ctx) +mpn_mod_mul_ui(nn_ptr res, nn_srcptr x, ulong y, gr_ctx_t ctx) { - mp_limb_t t[MPN_MOD_MAX_LIMBS + 1]; - mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); - mp_size_t tn; + ulong t[MPN_MOD_MAX_LIMBS + 1]; + slong n = MPN_MOD_CTX_NLIMBS(ctx); + slong tn; t[n] = mpn_mul_1(t, x, n, y); tn = n + (t[n] != 0); @@ -481,11 +481,11 @@ mpn_mod_mul_ui(mp_ptr res, mp_srcptr x, ulong y, gr_ctx_t ctx) #define UI_ABS_SI(x) (((slong)(x) < 0) ? (-(ulong)(x)) : ((ulong)(x))) int -mpn_mod_mul_si(mp_ptr res, mp_srcptr x, slong y, gr_ctx_t ctx) +mpn_mod_mul_si(nn_ptr res, nn_srcptr x, slong y, gr_ctx_t ctx) { - mp_limb_t t[MPN_MOD_MAX_LIMBS + 1]; - mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); - mp_size_t tn; + ulong t[MPN_MOD_MAX_LIMBS + 1]; + slong n = MPN_MOD_CTX_NLIMBS(ctx); + slong tn; t[n] = mpn_mul_1(t, x, n, UI_ABS_SI(y)); tn = n + (t[n] != 0); @@ -498,7 +498,7 @@ mpn_mod_mul_si(mp_ptr res, mp_srcptr x, slong y, gr_ctx_t ctx) /* todo: check for 0? 
*/ int -mpn_mod_mul_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx) +mpn_mod_mul_fmpz(nn_ptr res, nn_srcptr x, const fmpz_t y, gr_ctx_t ctx) { if (!COEFF_IS_MPZ(*y)) { @@ -506,13 +506,13 @@ mpn_mod_mul_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx) } else { - mp_limb_t t[2 * MPN_MOD_MAX_LIMBS], cy; - mp_size_t tn, n = MPN_MOD_CTX_NLIMBS(ctx); + ulong t[2 * MPN_MOD_MAX_LIMBS], cy; + slong tn, n = MPN_MOD_CTX_NLIMBS(ctx); mpz_ptr z = COEFF_TO_PTR(*y); - mp_size_t ssize = z->_mp_size; - mp_size_t zn = FLINT_ABS(ssize); - mp_srcptr zd = z->_mp_d; + slong ssize = z->_mp_size; + slong zn = FLINT_ABS(ssize); + nn_srcptr zd = z->_mp_d; if (zn <= n) { @@ -535,86 +535,86 @@ mpn_mod_mul_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx) } int -mpn_mod_addmul(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx) +mpn_mod_addmul(nn_ptr res, nn_srcptr x, nn_srcptr y, gr_ctx_t ctx) { - mp_limb_t t[MPN_MOD_MAX_LIMBS]; + ulong t[MPN_MOD_MAX_LIMBS]; mpn_mod_mul(t, x, y, ctx); mpn_mod_add(res, res, t, ctx); return GR_SUCCESS; } int -mpn_mod_addmul_ui(mp_ptr res, mp_srcptr x, ulong y, gr_ctx_t ctx) +mpn_mod_addmul_ui(nn_ptr res, nn_srcptr x, ulong y, gr_ctx_t ctx) { - mp_limb_t t[MPN_MOD_MAX_LIMBS]; + ulong t[MPN_MOD_MAX_LIMBS]; mpn_mod_mul_ui(t, x, y, ctx); mpn_mod_add(res, res, t, ctx); return GR_SUCCESS; } int -mpn_mod_addmul_si(mp_ptr res, mp_srcptr x, slong y, gr_ctx_t ctx) +mpn_mod_addmul_si(nn_ptr res, nn_srcptr x, slong y, gr_ctx_t ctx) { - mp_limb_t t[MPN_MOD_MAX_LIMBS]; + ulong t[MPN_MOD_MAX_LIMBS]; mpn_mod_mul_si(t, x, y, ctx); mpn_mod_add(res, res, t, ctx); return GR_SUCCESS; } int -mpn_mod_addmul_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx) +mpn_mod_addmul_fmpz(nn_ptr res, nn_srcptr x, const fmpz_t y, gr_ctx_t ctx) { - mp_limb_t t[MPN_MOD_MAX_LIMBS]; + ulong t[MPN_MOD_MAX_LIMBS]; mpn_mod_mul_fmpz(t, x, y, ctx); mpn_mod_add(res, res, t, ctx); return GR_SUCCESS; } int -mpn_mod_submul(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx) +mpn_mod_submul(nn_ptr res, nn_srcptr x, nn_srcptr y, gr_ctx_t ctx) { - mp_limb_t t[MPN_MOD_MAX_LIMBS]; + ulong t[MPN_MOD_MAX_LIMBS]; mpn_mod_mul(t, x, y, ctx); mpn_mod_sub(res, res, t, ctx); return GR_SUCCESS; } int -mpn_mod_submul_ui(mp_ptr res, mp_srcptr x, ulong y, gr_ctx_t ctx) +mpn_mod_submul_ui(nn_ptr res, nn_srcptr x, ulong y, gr_ctx_t ctx) { - mp_limb_t t[MPN_MOD_MAX_LIMBS]; + ulong t[MPN_MOD_MAX_LIMBS]; mpn_mod_mul_ui(t, x, y, ctx); mpn_mod_sub(res, res, t, ctx); return GR_SUCCESS; } int -mpn_mod_submul_si(mp_ptr res, mp_srcptr x, slong y, gr_ctx_t ctx) +mpn_mod_submul_si(nn_ptr res, nn_srcptr x, slong y, gr_ctx_t ctx) { - mp_limb_t t[MPN_MOD_MAX_LIMBS]; + ulong t[MPN_MOD_MAX_LIMBS]; mpn_mod_mul_si(t, x, y, ctx); mpn_mod_sub(res, res, t, ctx); return GR_SUCCESS; } int -mpn_mod_submul_fmpz(mp_ptr res, mp_srcptr x, const fmpz_t y, gr_ctx_t ctx) +mpn_mod_submul_fmpz(nn_ptr res, nn_srcptr x, const fmpz_t y, gr_ctx_t ctx) { - mp_limb_t t[MPN_MOD_MAX_LIMBS]; + ulong t[MPN_MOD_MAX_LIMBS]; mpn_mod_mul_fmpz(t, x, y, ctx); mpn_mod_sub(res, res, t, ctx); return GR_SUCCESS; } int -mpn_mod_inv(mp_ptr res, mp_srcptr x, gr_ctx_t ctx) +mpn_mod_inv(nn_ptr res, nn_srcptr x, gr_ctx_t ctx) { - mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); - mp_srcptr d = MPN_MOD_CTX_MODULUS(ctx); - mp_limb_t g[MPN_MOD_MAX_LIMBS]; - mp_limb_t s[MPN_MOD_MAX_LIMBS]; - mp_limb_t t[MPN_MOD_MAX_LIMBS]; - mp_limb_t u[MPN_MOD_MAX_LIMBS]; + slong n = MPN_MOD_CTX_NLIMBS(ctx); + nn_srcptr d = MPN_MOD_CTX_MODULUS(ctx); + ulong g[MPN_MOD_MAX_LIMBS]; + ulong 
s[MPN_MOD_MAX_LIMBS]; + ulong t[MPN_MOD_MAX_LIMBS]; + ulong u[MPN_MOD_MAX_LIMBS]; mp_size_t gsize, ssize; if (mpn_mod_is_one(x, ctx) == T_TRUE || mpn_mod_is_neg_one(x, ctx) == T_TRUE) @@ -623,6 +623,8 @@ mpn_mod_inv(mp_ptr res, mp_srcptr x, gr_ctx_t ctx) flint_mpn_copyi(t, x, n); flint_mpn_copyi(u, d, n); /* todo: does mpn_gcdext allow aliasing? */ + /* NOTE: ssize must be mp_size_t since it is strictly different from slong + * on Windows systems. */ gsize = mpn_gcdext(g, s, &ssize, t, n, u, n); if (gsize != 1 || g[0] != 1) @@ -639,10 +641,10 @@ mpn_mod_inv(mp_ptr res, mp_srcptr x, gr_ctx_t ctx) } int -mpn_mod_div(mp_ptr res, mp_srcptr x, mp_srcptr y, gr_ctx_t ctx) +mpn_mod_div(nn_ptr res, nn_srcptr x, nn_srcptr y, gr_ctx_t ctx) { int status; - mp_limb_t t[MPN_MOD_MAX_LIMBS]; + ulong t[MPN_MOD_MAX_LIMBS]; status = mpn_mod_inv(t, y, ctx); if (status == GR_SUCCESS) diff --git a/src/mpn_mod/test/main.c b/src/mpn_mod/test/main.c index 025f0fd5ad..977b0b39cb 100644 --- a/src/mpn_mod/test/main.c +++ b/src/mpn_mod/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-mpn_mod.c" diff --git a/src/mpn_mod/test/t-poly_mullow_karatsuba.c b/src/mpn_mod/test/t-poly_mullow_karatsuba.c index d9061437a5..3e59114f6e 100644 --- a/src/mpn_mod/test/t-poly_mullow_karatsuba.c +++ b/src/mpn_mod/test/t-poly_mullow_karatsuba.c @@ -14,13 +14,13 @@ #include "gr_poly.h" static int -_mpn_mod_poly_mullow_karatsuba_deep(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) +_mpn_mod_poly_mullow_karatsuba_deep(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) { return _mpn_mod_poly_mullow_karatsuba(res, poly1, len1, poly2, len2, len, 2, ctx); } static int -_mpn_mod_poly_mullow_karatsuba_shallow(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) +_mpn_mod_poly_mullow_karatsuba_shallow(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, slong len, gr_ctx_t ctx) { return _mpn_mod_poly_mullow_karatsuba(res, poly1, len1, poly2, len2, len, FLINT_MIN(len1, len2), ctx); } diff --git a/src/mpn_mod/vec.c b/src/mpn_mod/vec.c index 439cc4be48..b39730052e 100644 --- a/src/mpn_mod/vec.c +++ b/src/mpn_mod/vec.c @@ -13,45 +13,45 @@ #include "mpn_mod.h" int -_mpn_mod_vec_clear(mp_ptr FLINT_UNUSED(res), slong FLINT_UNUSED(len), gr_ctx_t FLINT_UNUSED(ctx)) +_mpn_mod_vec_clear(nn_ptr FLINT_UNUSED(res), slong FLINT_UNUSED(len), gr_ctx_t FLINT_UNUSED(ctx)) { return GR_SUCCESS; } int -_mpn_mod_vec_zero(mp_ptr res, slong len, gr_ctx_t ctx) +_mpn_mod_vec_zero(nn_ptr res, slong len, gr_ctx_t ctx) { flint_mpn_zero(res, len * MPN_MOD_CTX_NLIMBS(ctx)); return GR_SUCCESS; } int -_mpn_mod_vec_set(mp_ptr res, mp_srcptr x, slong len, gr_ctx_t ctx) +_mpn_mod_vec_set(nn_ptr res, nn_srcptr x, slong len, gr_ctx_t ctx) { flint_mpn_copyi(res, x, len * MPN_MOD_CTX_NLIMBS(ctx)); return GR_SUCCESS; } void -_mpn_mod_vec_swap(mp_ptr vec1, mp_ptr vec2, slong len, gr_ctx_t ctx) +_mpn_mod_vec_swap(nn_ptr vec1, nn_ptr vec2, slong len, gr_ctx_t ctx) { slong i; - mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); + slong n = MPN_MOD_CTX_NLIMBS(ctx); for (i = 0; i < len * n; i++) - FLINT_SWAP(mp_limb_t, vec1[i], vec2[i]); + FLINT_SWAP(ulong, vec1[i], vec2[i]); } int -_mpn_mod_vec_neg(mp_ptr res, mp_srcptr x, slong len, gr_ctx_t ctx) +_mpn_mod_vec_neg(nn_ptr res, nn_srcptr x, slong len, gr_ctx_t ctx) { - 
mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); - mp_srcptr d = MPN_MOD_CTX_MODULUS(ctx); + slong n = MPN_MOD_CTX_NLIMBS(ctx); + nn_srcptr d = MPN_MOD_CTX_MODULUS(ctx); slong i; if (n == 2) { /* Only read to registers once */ - mp_limb_t dd[2]; + ulong dd[2]; dd[0] = d[0]; dd[1] = d[1]; @@ -66,16 +66,16 @@ _mpn_mod_vec_neg(mp_ptr res, mp_srcptr x, slong len, gr_ctx_t ctx) } int -_mpn_mod_vec_add(mp_ptr res, mp_srcptr x, mp_srcptr y, slong len, gr_ctx_t ctx) +_mpn_mod_vec_add(nn_ptr res, nn_srcptr x, nn_srcptr y, slong len, gr_ctx_t ctx) { - mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); - mp_srcptr d = MPN_MOD_CTX_MODULUS(ctx); + slong n = MPN_MOD_CTX_NLIMBS(ctx); + nn_srcptr d = MPN_MOD_CTX_MODULUS(ctx); slong i; if (n == 2) { /* Only read to registers once */ - mp_limb_t dd[2]; + ulong dd[2]; dd[0] = d[0]; dd[1] = d[1]; @@ -94,16 +94,16 @@ _mpn_mod_vec_add(mp_ptr res, mp_srcptr x, mp_srcptr y, slong len, gr_ctx_t ctx) } int -_mpn_mod_vec_sub(mp_ptr res, mp_srcptr x, mp_srcptr y, slong len, gr_ctx_t ctx) +_mpn_mod_vec_sub(nn_ptr res, nn_srcptr x, nn_srcptr y, slong len, gr_ctx_t ctx) { - mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); - mp_srcptr d = MPN_MOD_CTX_MODULUS(ctx); + slong n = MPN_MOD_CTX_NLIMBS(ctx); + nn_srcptr d = MPN_MOD_CTX_MODULUS(ctx); slong i; if (n == 2) { /* Only read to registers once */ - mp_limb_t dd[2]; + ulong dd[2]; dd[0] = d[0]; dd[1] = d[1]; @@ -118,16 +118,16 @@ _mpn_mod_vec_sub(mp_ptr res, mp_srcptr x, mp_srcptr y, slong len, gr_ctx_t ctx) } int -_mpn_mod_vec_mul(mp_ptr res, mp_srcptr x, mp_srcptr y, slong len, gr_ctx_t ctx) +_mpn_mod_vec_mul(nn_ptr res, nn_srcptr x, nn_srcptr y, slong len, gr_ctx_t ctx) { - mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); + slong n = MPN_MOD_CTX_NLIMBS(ctx); slong i; if (n == 2) { - mp_bitcnt_t norm = MPN_MOD_CTX_NORM(ctx); - mp_srcptr dnormed = MPN_MOD_CTX_MODULUS_NORMED(ctx); - mp_srcptr dinv = MPN_MOD_CTX_MODULUS_PREINV(ctx); + flint_bitcnt_t norm = MPN_MOD_CTX_NORM(ctx); + nn_srcptr dnormed = MPN_MOD_CTX_MODULUS_NORMED(ctx); + nn_srcptr dinv = MPN_MOD_CTX_MODULUS_PREINV(ctx); for (i = 0; i < len; i++) flint_mpn_mulmod_preinvn_2(res + i * n, x + i * n, y + i * n, dnormed, dinv, norm); @@ -142,16 +142,16 @@ _mpn_mod_vec_mul(mp_ptr res, mp_srcptr x, mp_srcptr y, slong len, gr_ctx_t ctx) /* todo: worth it to check for special cases (0, 1)? */ /* todo: shoup multiplication */ int -_mpn_mod_vec_mul_scalar(mp_ptr res, mp_srcptr x, slong len, mp_srcptr y, gr_ctx_t ctx) +_mpn_mod_vec_mul_scalar(nn_ptr res, nn_srcptr x, slong len, nn_srcptr y, gr_ctx_t ctx) { - mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); + slong n = MPN_MOD_CTX_NLIMBS(ctx); slong i; if (n == 2) { - mp_bitcnt_t norm = MPN_MOD_CTX_NORM(ctx); - mp_srcptr dnormed = MPN_MOD_CTX_MODULUS_NORMED(ctx); - mp_srcptr dinv = MPN_MOD_CTX_MODULUS_PREINV(ctx); + flint_bitcnt_t norm = MPN_MOD_CTX_NORM(ctx); + nn_srcptr dnormed = MPN_MOD_CTX_MODULUS_NORMED(ctx); + nn_srcptr dinv = MPN_MOD_CTX_MODULUS_PREINV(ctx); for (i = 0; i < len; i++) flint_mpn_mulmod_preinvn_2(res + i * n, x + i * n, y, dnormed, dinv, norm); @@ -164,7 +164,7 @@ _mpn_mod_vec_mul_scalar(mp_ptr res, mp_srcptr x, slong len, mp_srcptr y, gr_ctx_ } int -_mpn_mod_scalar_mul_vec(mp_ptr res, mp_srcptr y, mp_srcptr x, slong len, gr_ctx_t ctx) +_mpn_mod_scalar_mul_vec(nn_ptr res, nn_srcptr y, nn_srcptr x, slong len, gr_ctx_t ctx) { return _mpn_mod_vec_mul_scalar(res, x, len, y, ctx); } @@ -172,18 +172,18 @@ _mpn_mod_scalar_mul_vec(mp_ptr res, mp_srcptr y, mp_srcptr x, slong len, gr_ctx_ /* todo: worth it to check for special cases (0, 1)? 
*/ /* todo: shoup multiplication */ int -_mpn_mod_vec_addmul_scalar(mp_ptr res, mp_srcptr x, slong len, mp_srcptr y, gr_ctx_t ctx) +_mpn_mod_vec_addmul_scalar(nn_ptr res, nn_srcptr x, slong len, nn_srcptr y, gr_ctx_t ctx) { - mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); + slong n = MPN_MOD_CTX_NLIMBS(ctx); slong i; if (n == 2) { - mp_limb_t t[2]; - mp_bitcnt_t norm = MPN_MOD_CTX_NORM(ctx); - mp_srcptr dnormed = MPN_MOD_CTX_MODULUS_NORMED(ctx); - mp_srcptr dinv = MPN_MOD_CTX_MODULUS_PREINV(ctx); - mp_srcptr d = MPN_MOD_CTX_MODULUS(ctx); + ulong t[2]; + flint_bitcnt_t norm = MPN_MOD_CTX_NORM(ctx); + nn_srcptr dnormed = MPN_MOD_CTX_MODULUS_NORMED(ctx); + nn_srcptr dinv = MPN_MOD_CTX_MODULUS_PREINV(ctx); + nn_srcptr d = MPN_MOD_CTX_MODULUS(ctx); for (i = 0; i < len; i++) { @@ -193,7 +193,7 @@ _mpn_mod_vec_addmul_scalar(mp_ptr res, mp_srcptr x, slong len, mp_srcptr y, gr_c } else { - mp_limb_t t[MPN_MOD_MAX_LIMBS]; + ulong t[MPN_MOD_MAX_LIMBS]; for (i = 0; i < len; i++) { @@ -208,12 +208,12 @@ _mpn_mod_vec_addmul_scalar(mp_ptr res, mp_srcptr x, slong len, mp_srcptr y, gr_c /* todo: optimize for length 1, 2 */ /* todo: optimize for when 2n rather than 2n+1 limbs suffice */ int -_mpn_mod_vec_dot(mp_ptr res, mp_srcptr initial, int subtract, mp_srcptr vec1, mp_srcptr vec2, slong len, gr_ctx_t ctx) +_mpn_mod_vec_dot(nn_ptr res, nn_srcptr initial, int subtract, nn_srcptr vec1, nn_srcptr vec2, slong len, gr_ctx_t ctx) { - mp_limb_t s[2 * MPN_MOD_MAX_LIMBS + 1]; - mp_limb_t t[2 * MPN_MOD_MAX_LIMBS]; - mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); - mp_size_t sn; + ulong s[2 * MPN_MOD_MAX_LIMBS + 1]; + ulong t[2 * MPN_MOD_MAX_LIMBS]; + slong n = MPN_MOD_CTX_NLIMBS(ctx); + slong sn; slong i; if (len <= 0) @@ -227,11 +227,11 @@ _mpn_mod_vec_dot(mp_ptr res, mp_srcptr initial, int subtract, mp_srcptr vec1, mp if (n == 2) { - mp_limb_t A0, A1, B0, B1; - mp_limb_t p3, p2, p1, p0; - mp_limb_t s4, s3, s2, s1, s0; - mp_limb_t u2, u1; - mp_limb_t v3, v2; + ulong A0, A1, B0, B1; + ulong p3, p2, p1, p0; + ulong s4, s3, s2, s1, s0; + ulong u2, u1; + ulong v3, v2; s4 = s3 = s2 = s1 = s0 = 0; u2 = u1 = 0; @@ -306,12 +306,12 @@ _mpn_mod_vec_dot(mp_ptr res, mp_srcptr initial, int subtract, mp_srcptr vec1, mp /* todo: optimize for length 1, 2 */ /* todo: optimize for when 2n rather than 2n+1 limbs suffice */ int -_mpn_mod_vec_dot_rev(mp_ptr res, mp_srcptr initial, int subtract, mp_srcptr vec1, mp_srcptr vec2, slong len, gr_ctx_t ctx) +_mpn_mod_vec_dot_rev(nn_ptr res, nn_srcptr initial, int subtract, nn_srcptr vec1, nn_srcptr vec2, slong len, gr_ctx_t ctx) { - mp_limb_t s[2 * MPN_MOD_MAX_LIMBS + 1]; - mp_limb_t t[2 * MPN_MOD_MAX_LIMBS]; - mp_size_t n = MPN_MOD_CTX_NLIMBS(ctx); - mp_size_t sn; + ulong s[2 * MPN_MOD_MAX_LIMBS + 1]; + ulong t[2 * MPN_MOD_MAX_LIMBS]; + slong n = MPN_MOD_CTX_NLIMBS(ctx); + slong sn; slong i; if (len <= 0) @@ -325,11 +325,11 @@ _mpn_mod_vec_dot_rev(mp_ptr res, mp_srcptr initial, int subtract, mp_srcptr vec1 if (n == 2) { - mp_limb_t A0, A1, B0, B1; - mp_limb_t p3, p2, p1, p0; - mp_limb_t s4, s3, s2, s1, s0; - mp_limb_t u2, u1; - mp_limb_t v3, v2; + ulong A0, A1, B0, B1; + ulong p3, p2, p1, p0; + ulong s4, s3, s2, s1, s0; + ulong u2, u1; + ulong v3, v2; s4 = s3 = s2 = s1 = s0 = 0; u2 = u1 = 0; diff --git a/src/mpoly.h b/src/mpoly.h index b4bb5d9aeb..45ac5175ef 100644 --- a/src/mpoly.h +++ b/src/mpoly.h @@ -20,6 +20,7 @@ #endif #include +#include #include "mpoly_types.h" #ifdef __cplusplus diff --git a/src/mpoly/test/main.c b/src/mpoly/test/main.c index fcc7b32d4b..0589297542 100644 --- 
a/src/mpoly/test/main.c +++ b/src/mpoly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-max_degrees_tight.c" diff --git a/src/mpoly/test_irreducible.c b/src/mpoly/test_irreducible.c index e996345f64..d2dbebff4e 100644 --- a/src/mpoly/test_irreducible.c +++ b/src/mpoly/test_irreducible.c @@ -635,7 +635,7 @@ static int convex_hull_is_indecomposable( static void z_rand_vec_primitive( slong * v, slong len, flint_rand_t state, - mp_limb_t bound) + ulong bound) { slong i, g; diff --git a/src/n_poly.h b/src/n_poly.h index be4bc09bbf..2a06829130 100644 --- a/src/n_poly.h +++ b/src/n_poly.h @@ -61,7 +61,7 @@ void n_poly_init2(n_poly_t A, slong alloc) A->alloc = alloc; A->coeffs = NULL; if (alloc > 0) - A->coeffs = (mp_limb_t *) flint_malloc(alloc*sizeof(mp_limb_t)); + A->coeffs = (ulong *) flint_malloc(alloc*sizeof(ulong)); } FLINT_FORCE_INLINE @@ -140,7 +140,7 @@ int n_poly_is_one(const n_poly_t A) } FLINT_FORCE_INLINE -mp_limb_t n_poly_lead(const n_poly_t A) +ulong n_poly_lead(const n_poly_t A) { FLINT_ASSERT(A->length > 0); return A->coeffs[A->length - 1]; @@ -155,7 +155,7 @@ void n_poly_one(n_poly_t A) } FLINT_FORCE_INLINE -void n_poly_set_ui(n_poly_t A, mp_limb_t c) +void n_poly_set_ui(n_poly_t A, ulong c) { n_poly_fit_length(A, 1); A->coeffs[0] = c; @@ -203,7 +203,7 @@ void n_poly_mod_make_monic(n_poly_t A, const n_poly_t B, nmod_t mod) } FLINT_FORCE_INLINE -void n_poly_mod_taylor_shift(n_poly_t g, mp_limb_t c, nmod_t mod) +void n_poly_mod_taylor_shift(n_poly_t g, ulong c, nmod_t mod) { _nmod_poly_taylor_shift(g->coeffs, c, g->length, mod); } @@ -282,7 +282,7 @@ void n_poly_truncate(n_poly_t poly, slong len) } FLINT_FORCE_INLINE -void _n_poly_mod_scalar_mul_nmod(n_poly_t A, const n_poly_t B, mp_limb_t c, +void _n_poly_mod_scalar_mul_nmod(n_poly_t A, const n_poly_t B, ulong c, nmod_t mod) { FLINT_ASSERT(B->length <= B->alloc); @@ -292,19 +292,19 @@ void _n_poly_mod_scalar_mul_nmod(n_poly_t A, const n_poly_t B, mp_limb_t c, } FLINT_FORCE_INLINE -void _n_poly_mod_scalar_mul_nmod_inplace(n_poly_t A, mp_limb_t c, nmod_t mod) +void _n_poly_mod_scalar_mul_nmod_inplace(n_poly_t A, ulong c, nmod_t mod) { _nmod_vec_scalar_mul_nmod(A->coeffs, A->coeffs, A->length, c, mod); } void n_poly_mod_scalar_mul_ui(n_poly_t A, const n_poly_t B, - mp_limb_t c, nmod_t ctx); + ulong c, nmod_t ctx); -mp_limb_t n_poly_mod_eval_step2(n_poly_t Acur, const n_poly_t Ainc, +ulong n_poly_mod_eval_step2(n_poly_t Acur, const n_poly_t Ainc, nmod_t mod); FLINT_FORCE_INLINE -mp_limb_t n_poly_mod_evaluate_nmod(const n_poly_t A, mp_limb_t c, nmod_t mod) +ulong n_poly_mod_evaluate_nmod(const n_poly_t A, ulong c, nmod_t mod) { return _nmod_poly_evaluate_nmod(A->coeffs, A->length, c, mod); } @@ -341,7 +341,7 @@ void n_poly_mod_sub(n_poly_t A, const n_poly_t B, const n_poly_t C, nmod_t mod) } FLINT_FORCE_INLINE -void n_poly_mod_product_roots_nmod_vec(n_poly_t A, mp_srcptr r, slong n, nmod_t mod) +void n_poly_mod_product_roots_nmod_vec(n_poly_t A, nn_srcptr r, slong n, nmod_t mod) { n_poly_fit_length(A, n + 1); A->length = n + 1; @@ -349,25 +349,25 @@ void n_poly_mod_product_roots_nmod_vec(n_poly_t A, mp_srcptr r, slong n, nmod_t } void n_poly_mod_shift_left_scalar_addmul(n_poly_t A, slong k, - mp_limb_t c, nmod_t mod); + ulong c, nmod_t mod); void n_poly_mod_addmul_linear(n_poly_t A, const n_poly_t B, - const n_poly_t C, mp_limb_t d1, mp_limb_t d0, nmod_t mod); + const n_poly_t C, ulong d1, ulong 
d0, nmod_t mod); void n_poly_mod_scalar_addmul_nmod(n_poly_t A, const n_poly_t B, - const n_poly_t C, mp_limb_t d0, nmod_t ctx); + const n_poly_t C, ulong d0, nmod_t ctx); -mp_limb_t _n_poly_eval_pow(n_poly_t P, n_poly_t alphapow, int nlimbs, +ulong _n_poly_eval_pow(n_poly_t P, n_poly_t alphapow, int nlimbs, nmod_t ctx); -mp_limb_t n_poly_mod_eval_pow(n_poly_t P, n_poly_t alphapow, +ulong n_poly_mod_eval_pow(n_poly_t P, n_poly_t alphapow, nmod_t ctx); -void n_poly_mod_eval2_pow(mp_limb_t * vp, mp_limb_t * vm, +void n_poly_mod_eval2_pow(ulong * vp, ulong * vm, const n_poly_t P, n_poly_t alphapow, nmod_t ctx); -mp_limb_t n_poly_mod_div_root(n_poly_t Q, - const n_poly_t A, mp_limb_t c, nmod_t ctx); +ulong n_poly_mod_div_root(n_poly_t Q, + const n_poly_t A, ulong c, nmod_t ctx); FLINT_FORCE_INLINE void _n_poly_mod_mul(n_poly_t A, const n_poly_t B, const n_poly_t C, nmod_t ctx) @@ -523,7 +523,7 @@ nmod_t fq_nmod_ctx_mod(const fq_nmod_ctx_t ctx) } FLINT_FORCE_INLINE -int _n_fq_is_zero(const mp_limb_t * a, slong d) +int _n_fq_is_zero(const ulong * a, slong d) { do { if (a[--d] != 0) @@ -533,7 +533,7 @@ int _n_fq_is_zero(const mp_limb_t * a, slong d) } FLINT_FORCE_INLINE -void _n_fq_zero(mp_limb_t * a, slong d) +void _n_fq_zero(ulong * a, slong d) { slong i; for (i = 0; i < d; i++) @@ -541,7 +541,7 @@ void _n_fq_zero(mp_limb_t * a, slong d) } FLINT_FORCE_INLINE -int _n_fq_is_one(const mp_limb_t * a, slong d) +int _n_fq_is_one(const ulong * a, slong d) { slong i; if (a[0] != 1) @@ -553,7 +553,7 @@ int _n_fq_is_one(const mp_limb_t * a, slong d) } FLINT_FORCE_INLINE -int _n_fq_is_ui(const mp_limb_t * a, slong d) +int _n_fq_is_ui(const ulong * a, slong d) { slong i; for (i = 1; i < d; i++) @@ -563,13 +563,13 @@ int _n_fq_is_ui(const mp_limb_t * a, slong d) } FLINT_FORCE_INLINE -int n_fq_is_one(const mp_limb_t * a, const fq_nmod_ctx_t ctx) +int n_fq_is_one(const ulong * a, const fq_nmod_ctx_t ctx) { return _n_fq_is_one(a, fq_nmod_ctx_degree(ctx)); } FLINT_FORCE_INLINE -void _n_fq_one(mp_limb_t * a, slong d) +void _n_fq_one(ulong * a, slong d) { slong i; a[0] = 1; @@ -578,7 +578,7 @@ void _n_fq_one(mp_limb_t * a, slong d) } FLINT_FORCE_INLINE -void _n_fq_set_nmod(mp_limb_t * a, mp_limb_t b, slong d) +void _n_fq_set_nmod(ulong * a, ulong b, slong d) { slong i; a[0] = b; @@ -587,13 +587,13 @@ void _n_fq_set_nmod(mp_limb_t * a, mp_limb_t b, slong d) } void n_fq_gen( - mp_limb_t * a, + ulong * a, const fq_nmod_ctx_t ctx); FLINT_FORCE_INLINE void _n_fq_set( - mp_limb_t * a, /* length d */ - const mp_limb_t * b, /* length d */ + ulong * a, /* length d */ + const ulong * b, /* length d */ slong d) { slong i = 0; @@ -605,21 +605,21 @@ void _n_fq_set( FLINT_FORCE_INLINE void _n_fq_swap( - mp_limb_t * a, /* length d */ - mp_limb_t * b, /* length d */ + ulong * a, /* length d */ + ulong * b, /* length d */ slong d) { slong i = 0; do { - FLINT_SWAP(mp_limb_t, a[i], b[i]); + FLINT_SWAP(ulong, a[i], b[i]); i++; } while (i < d); } FLINT_FORCE_INLINE int _n_fq_equal( - mp_limb_t * a, /* length d */ - const mp_limb_t * b, /* length d */ + ulong * a, /* length d */ + const ulong * b, /* length d */ slong d) { slong i = 0; @@ -631,60 +631,60 @@ int _n_fq_equal( } int n_fq_equal_fq_nmod( - const mp_limb_t * a, + const ulong * a, const fq_nmod_t b, const fq_nmod_ctx_t ctx); int n_fq_is_canonical( - const mp_limb_t * a, + const ulong * a, const fq_nmod_ctx_t ctx); void n_fq_randtest_not_zero( - mp_limb_t * a, + ulong * a, flint_rand_t state, const fq_nmod_ctx_t ctx); char * n_fq_get_str_pretty( - const mp_limb_t * a, + const 
ulong * a, const fq_nmod_ctx_t ctx); #ifdef FLINT_HAVE_FILE -int n_fq_fprint_pretty(FILE * file, const mp_limb_t * a, const fq_nmod_ctx_t ctx); +int n_fq_fprint_pretty(FILE * file, const ulong * a, const fq_nmod_ctx_t ctx); #endif -void n_fq_print_pretty(const mp_limb_t * a, const fq_nmod_ctx_t ctx); +void n_fq_print_pretty(const ulong * a, const fq_nmod_ctx_t ctx); void n_fq_get_fq_nmod( fq_nmod_t a, - const mp_limb_t * b, + const ulong * b, const fq_nmod_ctx_t ctx); void n_fq_set_fq_nmod( - mp_limb_t * a, + ulong * a, const fq_nmod_t b, const fq_nmod_ctx_t ctx); void n_fq_get_n_poly( n_poly_t a, - const mp_limb_t * b, + const ulong * b, const fq_nmod_ctx_t ctx); void _n_fq_set_n_poly( - mp_limb_t * a, - const mp_limb_t * bcoeffs, slong blen, + ulong * a, + const ulong * bcoeffs, slong blen, const fq_nmod_ctx_t ctx); void n_fq_add_si( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, slong c, const fq_nmod_ctx_t ctx); FLINT_FORCE_INLINE void n_fq_add( - mp_limb_t * a, /* length d */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length d */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ const fq_nmod_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx); @@ -693,22 +693,22 @@ void n_fq_add( } void n_fq_add_fq_nmod( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, const fq_nmod_t c, const fq_nmod_ctx_t ctx); void n_fq_sub_fq_nmod( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, const fq_nmod_t c, const fq_nmod_ctx_t ctx); FLINT_FORCE_INLINE void n_fq_sub( - mp_limb_t * a, /* length d */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length d */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ const fq_nmod_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx); @@ -718,9 +718,9 @@ void n_fq_sub( FLINT_FORCE_INLINE void _n_fq_add( - mp_limb_t * a, /* length d */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length d */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ slong d, nmod_t mod) { @@ -730,9 +730,9 @@ void _n_fq_add( FLINT_FORCE_INLINE void _n_fq_sub( - mp_limb_t * a, /* length d */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length d */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ slong d, nmod_t mod) { @@ -742,8 +742,8 @@ void _n_fq_sub( FLINT_FORCE_INLINE void _n_fq_neg( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, slong d, nmod_t mod) { @@ -752,39 +752,39 @@ void _n_fq_neg( } void _n_fq_mul_ui( - mp_limb_t * a, /* length d */ - const mp_limb_t * b, /* length d */ - mp_limb_t c, + ulong * a, /* length d */ + const ulong * b, /* length d */ + ulong c, slong d, nmod_t mod); void _n_fq_madd2( - mp_limb_t * a, /* length 2d-1 */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length 2d-1 */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ const fq_nmod_ctx_t ctx, - mp_limb_t * t); /* length 2d */ + ulong * t); /* length 2d */ void _n_fq_mul2( - mp_limb_t * t, /* length 2d-1 */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * t, /* length 2d-1 */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ const fq_nmod_ctx_t ctx); #define N_FQ_REDUCE_ITCH 2 void _n_fq_reduce( - mp_limb_t * a, - mp_limb_t * b, slong blen, + ulong * a, + 
ulong * b, slong blen, const fq_nmod_ctx_t ctx, - mp_limb_t * t); + ulong * t); /* same itch as reduce */ FLINT_FORCE_INLINE void _n_fq_reduce2( - mp_limb_t * a, /* length d */ - mp_limb_t * b, /* length 2d-1 */ + ulong * a, /* length d */ + ulong * b, /* length 2d-1 */ const fq_nmod_ctx_t ctx, - mp_limb_t * t) /* length 2d */ + ulong * t) /* length 2d */ { slong blen = 2*fq_nmod_ctx_degree(ctx) - 1; @@ -800,11 +800,11 @@ void _n_fq_reduce2( #define N_FQ_MUL_ITCH 4 FLINT_FORCE_INLINE void _n_fq_mul( - mp_limb_t * a, /* length d */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length d */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ const fq_nmod_ctx_t ctx, - mp_limb_t * t) /* length 4d */ + ulong * t) /* length 4d */ { slong d = fq_nmod_ctx_degree(ctx); _n_fq_mul2(t, b, c, ctx); @@ -813,12 +813,12 @@ void _n_fq_mul( FLINT_FORCE_INLINE void _n_fq_addmul( - mp_limb_t * a, /* length d */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ - const mp_limb_t * e, /* length d */ + ulong * a, /* length d */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ + const ulong * e, /* length d */ const fq_nmod_ctx_t ctx, - mp_limb_t * t) /* length 4d */ + ulong * t) /* length 4d */ { slong d = fq_nmod_ctx_degree(ctx); _n_fq_mul2(t, c, e, ctx); @@ -830,105 +830,105 @@ void _n_fq_addmul( int _n_fq_dot_lazy_size(slong len, const fq_nmod_ctx_t ctx); void _n_fq_reduce2_lazy1( - mp_limb_t * a, /* length 6d, 2d used */ + ulong * a, /* length 6d, 2d used */ slong d, nmod_t ctx); void _n_fq_madd2_lazy1( - mp_limb_t * a, /* length 6d, 2d used */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length 6d, 2d used */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ slong d); void _n_fq_mul2_lazy1( - mp_limb_t * a, /* length 6d, 2d used */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length 6d, 2d used */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ slong d); void _n_fq_reduce2_lazy2( - mp_limb_t * a, /* length 6d, 4d used */ + ulong * a, /* length 6d, 4d used */ slong d, nmod_t ctx); void _n_fq_madd2_lazy2( - mp_limb_t * a, /* length 6d, 4d used */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length 6d, 4d used */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ slong d); void _n_fq_mul2_lazy2( - mp_limb_t * a, /* length 6d, 4d used */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length 6d, 4d used */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ slong d); void _n_fq_reduce2_lazy3( - mp_limb_t * a, /* length 6d */ + ulong * a, /* length 6d */ slong d, nmod_t ctx); void _n_fq_madd2_lazy3( - mp_limb_t * a, /* length 6d */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length 6d */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ slong d); void _n_fq_mul2_lazy3( - mp_limb_t * a, /* length 6d */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length 6d */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ slong d); #define N_FQ_INV_ITCH 1 void _n_fq_inv( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, const fq_nmod_ctx_t ctx, - mp_limb_t * t); + ulong * t); #define N_FQ_MUL_INV_ITCH 
FLINT_MAX(N_FQ_MUL_ITCH, N_FQ_INV_ITCH) void _n_fq_pow_ui( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, ulong e, const fq_nmod_ctx_t ctx); void n_fq_pow_fmpz( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, const fmpz_t e, const fq_nmod_ctx_t ctx); void n_fq_mul( - mp_limb_t * a, - const mp_limb_t * b, - const mp_limb_t * c, + ulong * a, + const ulong * b, + const ulong * c, const fq_nmod_ctx_t ctx); void n_fq_mul_fq_nmod( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, const fq_nmod_t c, const fq_nmod_ctx_t ctx); void n_fq_addmul( - mp_limb_t * a, - const mp_limb_t * b, - const mp_limb_t * c, - const mp_limb_t * d, + ulong * a, + const ulong * b, + const ulong * c, + const ulong * d, const fq_nmod_ctx_t ctx); void n_fq_inv( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, const fq_nmod_ctx_t ctx); void n_fq_pow_ui( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, ulong e, const fq_nmod_ctx_t ctx); @@ -1016,7 +1016,7 @@ void n_fq_poly_make_monic( const fq_nmod_ctx_t ctx); void n_fq_poly_get_coeff_n_fq( - mp_limb_t * c, + ulong * c, const n_poly_t A, slong e, const fq_nmod_ctx_t ctx); @@ -1030,7 +1030,7 @@ void n_fq_poly_get_coeff_fq_nmod( void n_fq_poly_set_coeff_n_fq( n_poly_t A, slong j, - const mp_limb_t * c, + const ulong * c, const fq_nmod_ctx_t ctx); void n_fq_poly_set_coeff_fq_nmod( @@ -1042,7 +1042,7 @@ void n_fq_poly_set_coeff_fq_nmod( void n_fq_poly_scalar_mul_n_fq( n_poly_t A, const n_poly_t B, - const mp_limb_t * c, + const ulong * c, const fq_nmod_ctx_t ctx); void n_fq_poly_scalar_mul_ui( @@ -1055,13 +1055,13 @@ void n_fq_poly_scalar_addmul_n_fq( n_fq_poly_t A, const n_fq_poly_t B, const n_fq_poly_t C, - const mp_limb_t * d, + const ulong * d, const fq_nmod_ctx_t ctx); void n_fq_poly_shift_left_scalar_submul( n_poly_t A, slong k, - const mp_limb_t * c, + const ulong * c, const fq_nmod_ctx_t ctx); void n_fq_poly_evaluate_fq_nmod( @@ -1071,9 +1071,9 @@ void n_fq_poly_evaluate_fq_nmod( const fq_nmod_ctx_t ctx); void n_fq_poly_evaluate_n_fq( - mp_limb_t * e, + ulong * e, const n_poly_t A, - const mp_limb_t * c, + const ulong * c, const fq_nmod_ctx_t ctx); void n_fq_poly_get_fq_nmod_poly( @@ -1088,7 +1088,7 @@ void n_fq_poly_set_fq_nmod_poly( void n_fq_poly_set_n_fq( n_poly_t A, - const mp_limb_t * c, + const ulong * c, const fq_nmod_ctx_t ctx); void n_fq_poly_set_fq_nmod( @@ -1137,9 +1137,9 @@ void n_fq_poly_add_si( const fq_nmod_ctx_t ctx); void _n_fq_poly_mul_( - mp_limb_t * A, - const mp_limb_t * B, slong Blen, - const mp_limb_t * C, slong Clen, + ulong * A, + const ulong * B, slong Blen, + const ulong * C, slong Clen, const fq_nmod_ctx_t ctx, n_poly_stack_t St); @@ -1242,7 +1242,7 @@ void n_fq_poly_inv_series( const fq_nmod_ctx_t ctx); void n_fq_poly_eval_pow( - mp_limb_t * ev, + ulong * ev, const n_fq_poly_t A, n_fq_poly_t alphapow, const fq_nmod_ctx_t ctx); @@ -1311,16 +1311,16 @@ void n_bpoly_one(n_bpoly_t A); int n_bpoly_equal(const n_bpoly_t A, const n_bpoly_t B); -void n_bpoly_set_coeff(n_bpoly_t A, slong e0, slong e1, mp_limb_t c); +void n_bpoly_set_coeff(n_bpoly_t A, slong e0, slong e1, ulong c); void n_bpoly_set_coeff_nonzero(n_bpoly_t A, slong e0, slong e1, - mp_limb_t c); + ulong c); void n_bpoly_mod_derivative_gen0(n_bpoly_t A, const n_bpoly_t B, nmod_t ctx); FLINT_FORCE_INLINE -mp_limb_t n_bpoly_get_coeff(const n_bpoly_t A, slong e0, slong e1) +ulong n_bpoly_get_coeff(const n_bpoly_t A, slong e0, slong e1) { if (e0 >= A->length) return 0; @@ 
-1354,7 +1354,7 @@ ulong n_bpoly_bidegree(const n_bpoly_t A) return (x << (FLINT_BITS/2)) + y; } -void n_bpoly_scalar_mul_nmod(n_bpoly_t A, mp_limb_t c, nmod_t ctx); +void n_bpoly_scalar_mul_nmod(n_bpoly_t A, ulong c, nmod_t ctx); void n_bpoly_mod_content_last(n_poly_t g, const n_bpoly_t A, nmod_t ctx); @@ -1364,9 +1364,9 @@ void n_bpoly_mod_mul_last(n_bpoly_t A, const n_poly_t b, nmod_t ctx); void n_bpoly_mod_taylor_shift_gen1(n_bpoly_t A, const n_bpoly_t B, - mp_limb_t c, nmod_t ctx); + ulong c, nmod_t ctx); -void n_bpoly_mod_taylor_shift_gen0(n_bpoly_t A, mp_limb_t c, +void n_bpoly_mod_taylor_shift_gen0(n_bpoly_t A, ulong c, nmod_t ctx); void n_bpoly_mod_add(n_bpoly_t A, const n_bpoly_t B, @@ -1393,7 +1393,7 @@ void n_bpoly_mod_interp_reduce_2sm_poly(n_poly_t Ap, n_poly_t Am, const n_bpoly_t A, n_poly_t alphapow, nmod_t mod); void n_bpoly_mod_interp_lift_2sm_poly(slong * deg1, n_bpoly_t T, - const n_poly_t A, const n_poly_t B, mp_limb_t alpha, nmod_t mod); + const n_poly_t A, const n_poly_t B, ulong alpha, nmod_t mod); int n_bpoly_mod_interp_crt_2sm_poly(slong * deg1, n_bpoly_t F, n_bpoly_t T, n_poly_t A, n_poly_t B, const n_poly_t modulus, @@ -1415,7 +1415,7 @@ int n_fq_bpoly_equal( const fq_nmod_ctx_t ctx); void n_fq_bpoly_get_coeff_n_fq( - mp_limb_t * c, + ulong * c, const n_bpoly_t A, slong e0, slong e1, @@ -1425,7 +1425,7 @@ void n_fq_bpoly_set_coeff_n_fq( n_fq_bpoly_t A, slong e0, slong e1, - const mp_limb_t * c, + const ulong * c, const fq_nmod_ctx_t ctx); void n_fq_bpoly_get_coeff_fq_nmod( @@ -1457,7 +1457,7 @@ void n_fq_bpoly_derivative_gen0( void n_fq_bpoly_scalar_mul_n_fq( n_fq_bpoly_t A, - const mp_limb_t * c, + const ulong * c, const fq_nmod_ctx_t ctx); void n_fq_bpoly_taylor_shift_gen1_fq_nmod( @@ -1473,7 +1473,7 @@ void n_fq_bpoly_taylor_shift_gen0_fq_nmod( void n_fq_bpoly_taylor_shift_gen0_n_fq( n_fq_bpoly_t A, - const mp_limb_t * alpha, + const ulong * alpha, const fq_nmod_ctx_t ctx); int n_fq_bpoly_gcd_brown_smprime( @@ -1559,20 +1559,20 @@ void n_polyu3_degrees(slong * deg0, slong * deg1, slong * deg2, /*****************************************************************************/ -void nmod_pow_cache_start(mp_limb_t b, n_poly_t pos_direct, n_poly_t pos_bin, +void nmod_pow_cache_start(ulong b, n_poly_t pos_direct, n_poly_t pos_bin, n_poly_t neg_direct); -mp_limb_t nmod_pow_cache_mulpow_ui(mp_limb_t a, ulong e, n_poly_t pos_direct, +ulong nmod_pow_cache_mulpow_ui(ulong a, ulong e, n_poly_t pos_direct, n_poly_t pos_bin, n_poly_t neg_direct, nmod_t ctx); -mp_limb_t nmod_pow_cache_mulpow_neg_ui(mp_limb_t a, ulong e, n_poly_t pos_direct, +ulong nmod_pow_cache_mulpow_neg_ui(ulong a, ulong e, n_poly_t pos_direct, n_poly_t pos_bin, n_poly_t neg_direct, nmod_t ctx); -mp_limb_t nmod_pow_cache_mulpow_fmpz(mp_limb_t a, const fmpz_t e, n_poly_t pos_direct, +ulong nmod_pow_cache_mulpow_fmpz(ulong a, const fmpz_t e, n_poly_t pos_direct, n_poly_t pos_bin, n_poly_t neg_direct, nmod_t ctx); void n_fq_pow_cache_start_n_fq( - const mp_limb_t * b, + const ulong * b, n_poly_t pos_direct, n_poly_t pos_bin, n_poly_t neg_direct, @@ -1586,8 +1586,8 @@ void n_fq_pow_cache_start_fq_nmod( const fq_nmod_ctx_t ctx); void n_fq_pow_cache_mulpow_ui( - mp_limb_t * r, - const mp_limb_t * a, + ulong * r, + const ulong * a, ulong e, n_poly_t pos_direct, n_poly_t pos_bin, @@ -1595,8 +1595,8 @@ void n_fq_pow_cache_mulpow_ui( const fq_nmod_ctx_t ctx); void n_fq_pow_cache_mulpow_neg_ui( - mp_limb_t * r, - const mp_limb_t * a, + ulong * r, + const ulong * a, ulong e, n_poly_t pos_direct, n_poly_t pos_bin, @@ 
-1604,8 +1604,8 @@ void n_fq_pow_cache_mulpow_neg_ui( const fq_nmod_ctx_t ctx); void n_fq_pow_cache_mulpow_fmpz( - mp_limb_t * r, - const mp_limb_t * a, + ulong * r, + const ulong * a, const fmpz_t e, n_poly_t pos_direct, n_poly_t pos_bin, @@ -1757,7 +1757,7 @@ ulong n_polyu1n_bidegree(n_polyun_t A) /*****************************************************************************/ -void n_fq_poly_product_roots_n_fq(n_poly_t M, const mp_limb_t * H, +void n_fq_poly_product_roots_n_fq(n_poly_t M, const ulong * H, slong length, const fq_nmod_ctx_t ctx, n_poly_stack_t St); slong n_polyun_product_roots(n_polyun_t M, const n_polyun_t H, @@ -1766,33 +1766,33 @@ slong n_polyun_product_roots(n_polyun_t M, const n_polyun_t H, slong n_fq_polyun_product_roots(n_fq_polyun_t M, const n_fq_polyun_t H, const fq_nmod_ctx_t ctx, n_poly_stack_t St); -mp_limb_t _nmod_zip_eval_step(mp_limb_t * cur, - const mp_limb_t * inc, const mp_limb_t * coeffs, +ulong _nmod_zip_eval_step(ulong * cur, + const ulong * inc, const ulong * coeffs, slong length, nmod_t ctx); -void _n_fq_zip_eval_step(mp_limb_t * res, mp_limb_t * cur, - const mp_limb_t * inc, const mp_limb_t * coeffs, +void _n_fq_zip_eval_step(ulong * res, ulong * cur, + const ulong * inc, const ulong * coeffs, slong length, const fq_nmod_ctx_t ctx); -void _n_fqp_zip_eval_step(mp_limb_t * res, mp_limb_t * cur, - const mp_limb_t * inc, const mp_limb_t * coeffs, +void _n_fqp_zip_eval_step(ulong * res, ulong * cur, + const ulong * inc, const ulong * coeffs, slong length, slong d, nmod_t mod); -int _nmod_zip_vand_solve(mp_limb_t * coeffs, - const mp_limb_t * monomials, slong mlength, - const mp_limb_t * evals, slong elength, - const mp_limb_t * master, mp_limb_t * scratch, nmod_t ctx); +int _nmod_zip_vand_solve(ulong * coeffs, + const ulong * monomials, slong mlength, + const ulong * evals, slong elength, + const ulong * master, ulong * scratch, nmod_t ctx); -int _n_fq_zip_vand_solve(mp_limb_t * coeffs, - const mp_limb_t * monomials, slong mlength, - const mp_limb_t * evals, slong elength, - const mp_limb_t * master, mp_limb_t * scratch, +int _n_fq_zip_vand_solve(ulong * coeffs, + const ulong * monomials, slong mlength, + const ulong * evals, slong elength, + const ulong * master, ulong * scratch, const fq_nmod_ctx_t ctx); -int _n_fqp_zip_vand_solve(mp_limb_t * coeffs, - const mp_limb_t * monomials, slong mlength, - const mp_limb_t * evals, slong elength, - const mp_limb_t * master, mp_limb_t * scratch, +int _n_fqp_zip_vand_solve(ulong * coeffs, + const ulong * monomials, slong mlength, + const ulong * evals, slong elength, + const ulong * master, ulong * scratch, const fq_nmod_ctx_t ctx); /*****************************************************************************/ @@ -1804,7 +1804,7 @@ void n_poly_stack_clear(n_poly_stack_t S); n_poly_struct ** n_poly_stack_fit_request(n_poly_stack_t S, slong k); FLINT_FORCE_INLINE -mp_limb_t * n_poly_stack_vec_init(n_poly_stack_t S, slong len) +ulong * n_poly_stack_vec_init(n_poly_stack_t S, slong len) { n_poly_struct * poly_top; poly_top = n_poly_stack_fit_request(S, 1)[0]; diff --git a/src/n_poly/io.c b/src/n_poly/io.c index daee39a285..16fba0ac09 100644 --- a/src/n_poly/io.c +++ b/src/n_poly/io.c @@ -97,7 +97,7 @@ void n_bpoly_print_pretty( } char * n_fq_get_str_pretty( - const mp_limb_t * a, + const ulong * a, const fq_nmod_ctx_t ctx) { char * s; @@ -111,7 +111,7 @@ char * n_fq_get_str_pretty( int n_fq_fprint_pretty( FILE * file, - const mp_limb_t * a, + const ulong * a, const fq_nmod_ctx_t ctx) { slong d = 
fq_nmod_ctx_degree(ctx); @@ -144,7 +144,7 @@ int n_fq_fprint_pretty( return 1; } -void n_fq_print_pretty(const mp_limb_t * a, const fq_nmod_ctx_t ctx) { n_fq_fprint_pretty(stdout, a, ctx); } +void n_fq_print_pretty(const ulong * a, const fq_nmod_ctx_t ctx) { n_fq_fprint_pretty(stdout, a, ctx); } void n_poly_print_pretty(const n_poly_t A, const char * x) { diff --git a/src/n_poly/n_bpoly.c b/src/n_poly/n_bpoly.c index e8cdfc721e..c4c7a672ea 100644 --- a/src/n_poly/n_bpoly.c +++ b/src/n_poly/n_bpoly.c @@ -94,7 +94,7 @@ void _n_bpoly_set(n_bpoly_t A, const n_bpoly_t B) n_poly_set(A->coeffs + i, B->coeffs + i); } -void n_bpoly_set_coeff_nonzero(n_bpoly_t A, slong xi, slong yi, mp_limb_t c) +void n_bpoly_set_coeff_nonzero(n_bpoly_t A, slong xi, slong yi, ulong c) { slong i; @@ -112,7 +112,7 @@ void n_bpoly_set_coeff_nonzero(n_bpoly_t A, slong xi, slong yi, mp_limb_t c) FLINT_ASSERT(!n_poly_is_zero(A->coeffs + A->length - 1)); } -void n_bpoly_set_coeff(n_bpoly_t A, slong xi, slong yi, mp_limb_t c) +void n_bpoly_set_coeff(n_bpoly_t A, slong xi, slong yi, ulong c) { slong i; diff --git a/src/n_poly/n_bpoly_mod.c b/src/n_poly/n_bpoly_mod.c index db0261acc0..f0d0d1e98d 100644 --- a/src/n_poly/n_bpoly_mod.c +++ b/src/n_poly/n_bpoly_mod.c @@ -31,7 +31,7 @@ int n_bpoly_mod_is_canonical(const n_bpoly_t A, nmod_t mod) } -void n_bpoly_scalar_mul_nmod(n_bpoly_t A, mp_limb_t c, nmod_t ctx) +void n_bpoly_scalar_mul_nmod(n_bpoly_t A, ulong c, nmod_t ctx) { slong i; diff --git a/src/n_poly/n_bpoly_mod_gcd.c b/src/n_poly/n_bpoly_mod_gcd.c index edcffa8c4b..e3ca3f0e72 100644 --- a/src/n_poly/n_bpoly_mod_gcd.c +++ b/src/n_poly/n_bpoly_mod_gcd.c @@ -21,7 +21,7 @@ void n_bpoly_mod_interp_reduce_2sm_poly( { slong i, Alen = A->length; const n_poly_struct * Ac = A->coeffs; - mp_limb_t * Apc, * Amc; + ulong * Apc, * Amc; n_poly_fit_length(Ap, Alen); n_poly_fit_length(Am, Alen); @@ -43,20 +43,20 @@ void n_bpoly_mod_interp_lift_2sm_poly( n_bpoly_t T, const n_poly_t A, const n_poly_t B, - mp_limb_t alpha, + ulong alpha, nmod_t mod) { slong i; slong lastlength = 0; - const mp_limb_t * Acoeffs = A->coeffs; - const mp_limb_t * Bcoeffs = B->coeffs; + const ulong * Acoeffs = A->coeffs; + const ulong * Bcoeffs = B->coeffs; n_poly_struct * Tcoeffs; slong Alen = A->length; slong Blen = B->length; slong Tlen = FLINT_MAX(Alen, Blen); - mp_limb_t d0 = (1 + mod.n)/2; - mp_limb_t d1 = nmod_inv(nmod_add(alpha, alpha, mod), mod); - mp_limb_t Avalue, Bvalue, u, v; + ulong d0 = (1 + mod.n)/2; + ulong d1 = nmod_inv(nmod_add(alpha, alpha, mod), mod); + ulong Avalue, Bvalue, u, v; n_bpoly_fit_length(T, Tlen); @@ -107,11 +107,11 @@ int n_bpoly_mod_interp_crt_2sm_poly( slong Flen = F->length; slong Tlen = FLINT_MAX(FLINT_MAX(Alen, Blen), Flen); n_poly_struct * Tcoeffs, * Fcoeffs; - mp_limb_t * Acoeffs, * Bcoeffs; + ulong * Acoeffs, * Bcoeffs; n_poly_t zero; - mp_limb_t Avalue, Bvalue, FvalueA, FvalueB, u, v; + ulong Avalue, Bvalue, FvalueA, FvalueB, u, v; n_poly_struct * Fvalue; - mp_limb_t alpha = alphapow->coeffs[1]; + ulong alpha = alphapow->coeffs[1]; zero->alloc = 0; zero->length = 0; @@ -170,7 +170,7 @@ int n_bpoly_mod_gcd_brown_smprime( { int success; slong bound; - mp_limb_t alpha, temp, gammaevalp, gammaevalm; + ulong alpha, temp, gammaevalp, gammaevalm; n_poly_struct * Aevalp, * Bevalp, * Gevalp, * Abarevalp, * Bbarevalp; n_poly_struct * Aevalm, * Bevalm, * Gevalm, * Abarevalm, * Bbarevalm; n_bpoly_struct * T; diff --git a/src/n_poly/n_fq.c b/src/n_poly/n_fq.c index 43a05e938a..99a4a31592 100644 --- a/src/n_poly/n_fq.c +++ 
b/src/n_poly/n_fq.c @@ -9,20 +9,21 @@ (at your option) any later version. See <https://www.gnu.org/licenses/>. */ +#include #include "nmod.h" #include "fq_nmod.h" #include "n_poly.h" #define MAC(h, m, l, a, b) \ { \ - mp_limb_t p1, p0; \ + ulong p1, p0; \ umul_ppmm(p1, p0, a, b); \ add_sssaaaaaa(h, m, l, h, m, l, 0, p1, p0); \ } #define MAC3(h, m, l, a, b) \ { \ - mp_limb_t p1, p0; \ + ulong p1, p0; \ umul_ppmm(p1, p0, a, b); \ add_sssaaaaaa(h, m, l, h, m, l, 0, p1, p0); \ } @@ -30,13 +31,13 @@ #define MAC2(h, l, a, b) \ { \ - mp_limb_t p1, p0; \ + ulong p1, p0; \ umul_ppmm(p1, p0, a, b); \ add_ssaaaa(h, l, h, l, p1, p0); \ } void n_fq_randtest_not_zero( - mp_limb_t * a, + ulong * a, flint_rand_t state, const fq_nmod_ctx_t ctx) { @@ -50,7 +51,7 @@ void n_fq_randtest_not_zero( void n_fq_get_fq_nmod( fq_nmod_t a, - const mp_limb_t * b, + const ulong * b, const fq_nmod_ctx_t ctx) { slong i; @@ -66,7 +67,7 @@ void n_fq_get_fq_nmod( } void n_fq_set_fq_nmod( - mp_limb_t * a, + ulong * a, const fq_nmod_t b, const fq_nmod_ctx_t ctx) { @@ -80,7 +81,7 @@ void n_fq_set_fq_nmod( void n_fq_get_n_poly( n_poly_t a, - const mp_limb_t * b, + const ulong * b, const fq_nmod_ctx_t ctx) { slong i; @@ -96,8 +97,8 @@ void n_fq_get_n_poly( } void _n_fq_set_n_poly( - mp_limb_t * a, - const mp_limb_t * bcoeffs, slong blen, + ulong * a, + const ulong * bcoeffs, slong blen, const fq_nmod_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx); @@ -118,7 +119,7 @@ void _n_fq_set_n_poly( void n_fq_gen( - mp_limb_t * a, + ulong * a, const fq_nmod_ctx_t ctx) { slong i, d = fq_nmod_ctx_degree(ctx); @@ -137,8 +138,8 @@ void n_fq_gen( } void n_fq_add_si( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, slong c, const fq_nmod_ctx_t ctx) { @@ -164,7 +165,7 @@ void n_fq_add_si( } int n_fq_equal_fq_nmod( - const mp_limb_t * a, + const ulong * a, const fq_nmod_t b, const fq_nmod_ctx_t ctx) { @@ -172,7 +173,7 @@ int n_fq_equal_fq_nmod( FLINT_ASSERT(b->length <= d); for (i = 0; i < d; i++) { - mp_limb_t c = (i >= b->length) ? 0 : b->coeffs[i]; + ulong c = (i >= b->length) ?
0 : b->coeffs[i]; if (a[i] != c) return 0; } @@ -180,8 +181,8 @@ int n_fq_equal_fq_nmod( } void n_fq_add_fq_nmod( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, const fq_nmod_t c, const fq_nmod_ctx_t ctx) { @@ -201,8 +202,8 @@ void n_fq_add_fq_nmod( void n_fq_sub_fq_nmod( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, const fq_nmod_t c, const fq_nmod_ctx_t ctx) { @@ -222,11 +223,11 @@ void n_fq_sub_fq_nmod( void _n_fq_reduce( - mp_limb_t * a, - mp_limb_t * b, + ulong * a, + ulong * b, slong blen, const fq_nmod_ctx_t ctx, - mp_limb_t * t) /* length 2d */ + ulong * t) /* length 2d */ { slong i, j, k, deg = ctx->modulus->length - 1; slong d = ctx->j[ctx->len - 1]; @@ -269,13 +270,13 @@ void _n_fq_reduce( ctx->inv->coeffs, ctx->inv->length, ctx->mod); */ - mp_limb_t * Q = t; - mp_limb_t * R = a; - const mp_limb_t * A = b; + ulong * Q = t; + ulong * R = a; + const ulong * A = b; slong lenA = blen; - const mp_limb_t * B = ctx->modulus->coeffs; + const ulong * B = ctx->modulus->coeffs; slong lenB = deg + 1; - const mp_limb_t * Binv = ctx->inv->coeffs; + const ulong * Binv = ctx->inv->coeffs; slong lenBinv = ctx->inv->length; const slong lenQ = lenA - lenB + 1; @@ -293,7 +294,7 @@ void _n_fq_reduce( { for (i = 0; i < lenQ; i++) { - mp_limb_t t2 = 0, t1 = 0, t0 = 0; + ulong t2 = 0, t1 = 0, t0 = 0; j = FLINT_MAX(0, i - lenBinv + 1); umul_ppmm(t1, t0, A[lenA - 1 - j], Binv[i - j]); for (j++; j <= i; j++) @@ -303,7 +304,7 @@ void _n_fq_reduce( for (i = 0; i < deg; i++) { - mp_limb_t t2 = 0, t1 = 0, t0 = 0; + ulong t2 = 0, t1 = 0, t0 = 0; for (j = FLINT_MAX(0, i - lenQ + 1); j <= i; j++) MAC(t2, t1, t0, B[j], Q[i - j]); NMOD_RED3(t0, t2, t1, t0, ctx->mod); @@ -312,7 +313,7 @@ void _n_fq_reduce( } else { - mp_ptr Arev = t + d; + nn_ptr Arev = t + d; _nmod_poly_reverse(Arev, A + (lenA - lenQ), lenQ, lenQ); _nmod_poly_mullow(Q, Arev, lenQ, Binv, FLINT_MIN(lenQ, lenBinv), lenQ, ctx->mod); _nmod_poly_reverse(Q, Q, lenQ, lenQ); @@ -325,11 +326,11 @@ void _n_fq_reduce( } void _n_fq_madd2( - mp_limb_t * a, /* length 2d */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length 2d */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ const fq_nmod_ctx_t ctx, - mp_limb_t * t) /* length 2d */ + ulong * t) /* length 2d */ { slong d = ctx->modulus->length - 1; FLINT_ASSERT(d > 0); @@ -338,8 +339,8 @@ void _n_fq_madd2( slong i, j; for (i = 0; i + 1 < d; i++) { - mp_limb_t t2 = 0, t1 = 0, t0 = 0; - mp_limb_t s2 = 0, s1 = 0, s0 = 0; + ulong t2 = 0, t1 = 0, t0 = 0; + ulong s2 = 0, s1 = 0, s0 = 0; umul_ppmm(t1, t0, b[i], c[0]); umul_ppmm(s1, s0, b[d - 1], c[d - 1 - i]); @@ -356,7 +357,7 @@ void _n_fq_madd2( } { - mp_limb_t t2 = 0, t1 = 0, t0 = 0; + ulong t2 = 0, t1 = 0, t0 = 0; umul_ppmm(t1, t0, b[d - 1], c[0]); add_ssaaaa(t1, t0, t1, t0, 0, a[d - 1]); for (j = 1; j < d; j++) @@ -374,9 +375,9 @@ void _n_fq_madd2( } void _n_fq_mul_ui( - mp_limb_t * a, /* length d */ - const mp_limb_t * b, /* length d */ - mp_limb_t c, + ulong * a, /* length d */ + const ulong * b, /* length d */ + ulong c, slong d, nmod_t mod) { @@ -386,9 +387,9 @@ void _n_fq_mul_ui( } void _n_fq_mul2( - mp_limb_t * a, /* length 2d */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length 2d */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ const fq_nmod_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx); @@ -398,8 +399,8 @@ void _n_fq_mul2( slong i, j; for (i = 0; i + 1 < d; i++) { - 
mp_limb_t t2 = 0, t1 = 0, t0 = 0; - mp_limb_t s2 = 0, s1 = 0, s0 = 0; + ulong t2 = 0, t1 = 0, t0 = 0; + ulong s2 = 0, s1 = 0, s0 = 0; umul_ppmm(t1, t0, b[i], c[0]); umul_ppmm(s1, s0, b[d - 1], c[d - 1 - i]); for (j = 1; j <= i; j++) @@ -412,7 +413,7 @@ void _n_fq_mul2( } { - mp_limb_t t2 = 0, t1 = 0, t0 = 0; + ulong t2 = 0, t1 = 0, t0 = 0; umul_ppmm(t1, t0, b[d - 1], c[0]); for (j = 1; j < d; j++) { @@ -435,7 +436,7 @@ int _n_fq_dot_lazy_size( { ulong t[4]; slong d = fq_nmod_ctx_degree(ctx); - mp_limb_t p = ctx->mod.n; + ulong p = ctx->mod.n; if (d > 30 || p < 2 || len < 0) return 0; @@ -455,7 +456,7 @@ int _n_fq_dot_lazy_size( void _n_fq_reduce2_lazy1( - mp_limb_t * a, /* length 6d, 2d used */ + ulong * a, /* length 6d, 2d used */ slong d, nmod_t ctx) { @@ -465,17 +466,17 @@ void _n_fq_reduce2_lazy1( } void _n_fq_madd2_lazy1( - mp_limb_t * a, /* length 6d, 2d used */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length 6d, 2d used */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ slong d) { slong i, j; for (i = 0; i + 1 < d; i++) { - mp_limb_t t0 = 0; - mp_limb_t s0 = 0; + ulong t0 = 0; + ulong s0 = 0; t0 = a[i + 0]; s0 = a[(2*d - 2 - i) + 0]; t0 += b[i]*c[0]; @@ -490,7 +491,7 @@ void _n_fq_madd2_lazy1( } { - mp_limb_t t0 = 0; + ulong t0 = 0; t0 = a[(d - 1) + 0]; t0 += b[d - 1]*c[0]; for (j = 1; j < d; j++) @@ -503,17 +504,17 @@ void _n_fq_madd2_lazy1( void _n_fq_mul2_lazy1( - mp_limb_t * a, /* length 6d, 2d used */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length 6d, 2d used */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ slong d) { slong i,j; for (i = 0; i + 1 < d; i++) { - mp_limb_t t0 = 0; - mp_limb_t s0 = 0; + ulong t0 = 0; + ulong s0 = 0; t0 = b[i]*c[0]; s0 = b[d - 1]*c[d - 1 - i]; for (j = 1; j <= i; j++) @@ -526,7 +527,7 @@ void _n_fq_mul2_lazy1( } { - mp_limb_t t0 = 0; + ulong t0 = 0; t0 = b[d - 1]*c[0]; for (j = 1; j < d; j++) { @@ -538,7 +539,7 @@ void _n_fq_mul2_lazy1( void _n_fq_reduce2_lazy2( - mp_limb_t * a, /* length 6d, 4d used */ + ulong * a, /* length 6d, 4d used */ slong d, nmod_t ctx) { @@ -548,17 +549,17 @@ void _n_fq_reduce2_lazy2( } void _n_fq_madd2_lazy2( - mp_limb_t * a, /* length 6d, 4d used */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length 6d, 4d used */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ slong d) { slong i,j; for (i = 0; i + 1 < d; i++) { - mp_limb_t t1 = 0, t0 = 0; - mp_limb_t s1 = 0, s0 = 0; + ulong t1 = 0, t0 = 0; + ulong s1 = 0, s0 = 0; t0 = a[2*i + 0]; t1 = a[2*i + 1]; s0 = a[2*(2*d - 2 - i) + 0]; @@ -577,7 +578,7 @@ void _n_fq_madd2_lazy2( } { - mp_limb_t t1 = 0, t0 = 0; + ulong t1 = 0, t0 = 0; t0 = a[2*(d - 1) + 0]; t1 = a[2*(d - 1) + 1]; MAC2(t1, t0, b[d - 1], c[0]); @@ -592,17 +593,17 @@ void _n_fq_madd2_lazy2( void _n_fq_mul2_lazy2( - mp_limb_t * a, /* length 6d */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length 6d */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ slong d) { slong i,j; for (i = 0; i + 1 < d; i++) { - mp_limb_t t1 = 0, t0 = 0; - mp_limb_t s1 = 0, s0 = 0; + ulong t1 = 0, t0 = 0; + ulong s1 = 0, s0 = 0; umul_ppmm(t1, t0, b[i], c[0]); umul_ppmm(s1, s0, b[d - 1], c[d - 1 - i]); for (j = 1; j <= i; j++) @@ -617,7 +618,7 @@ void _n_fq_mul2_lazy2( } { - mp_limb_t t1 = 0, t0 = 0; + ulong t1 = 0, t0 = 0; umul_ppmm(t1, t0, b[d - 1], c[0]); for (j = 1; j 
< d; j++) { @@ -630,7 +631,7 @@ void _n_fq_mul2_lazy2( void _n_fq_reduce2_lazy3( - mp_limb_t * a, /* length 6d */ + ulong * a, /* length 6d */ slong d, nmod_t ctx) { @@ -640,17 +641,17 @@ void _n_fq_reduce2_lazy3( } void _n_fq_madd2_lazy3( - mp_limb_t * a, /* length 6d */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length 6d */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ slong d) { slong i,j; for (i = 0; i + 1 < d; i++) { - mp_limb_t t2 = 0, t1 = 0, t0 = 0; - mp_limb_t s2 = 0, s1 = 0, s0 = 0; + ulong t2 = 0, t1 = 0, t0 = 0; + ulong s2 = 0, s1 = 0, s0 = 0; t0 = a[3*i + 0]; t1 = a[3*i + 1]; t2 = a[3*i + 2]; @@ -673,7 +674,7 @@ void _n_fq_madd2_lazy3( } { - mp_limb_t t2 = 0, t1 = 0, t0 = 0; + ulong t2 = 0, t1 = 0, t0 = 0; t0 = a[3*(d - 1) + 0]; t1 = a[3*(d - 1) + 1]; t2 = a[3*(d - 1) + 2]; @@ -690,17 +691,17 @@ void _n_fq_madd2_lazy3( void _n_fq_mul2_lazy3( - mp_limb_t * a, /* length 6d */ - const mp_limb_t * b, /* length d */ - const mp_limb_t * c, /* length d */ + ulong * a, /* length 6d */ + const ulong * b, /* length d */ + const ulong * c, /* length d */ slong d) { slong i,j; for (i = 0; i + 1 < d; i++) { - mp_limb_t t2 = 0, t1 = 0, t0 = 0; - mp_limb_t s2 = 0, s1 = 0, s0 = 0; + ulong t2 = 0, t1 = 0, t0 = 0; + ulong s2 = 0, s1 = 0, s0 = 0; umul_ppmm(t1, t0, b[i], c[0]); umul_ppmm(s1, s0, b[d - 1], c[d - 1 - i]); for (j = 1; j <= i; j++) @@ -717,7 +718,7 @@ void _n_fq_mul2_lazy3( } { - mp_limb_t t2 = 0, t1 = 0, t0 = 0; + ulong t2 = 0, t1 = 0, t0 = 0; umul_ppmm(t1, t0, b[d - 1], c[0]); for (j = 1; j < d; j++) { @@ -732,10 +733,10 @@ void _n_fq_mul2_lazy3( /***************************************************************************/ void _n_fq_inv( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, const fq_nmod_ctx_t ctx, - mp_limb_t * t) /* length d */ + ulong * t) /* length d */ { slong d = ctx->modulus->length - 1; slong blen = d; @@ -768,9 +769,9 @@ void _n_fq_inv( } void n_fq_mul( - mp_limb_t * a, - const mp_limb_t * b, - const mp_limb_t * c, + ulong * a, + const ulong * b, + const ulong * c, const fq_nmod_ctx_t ctx) { fq_nmod_t A, B, C; @@ -787,21 +788,21 @@ void n_fq_mul( } void n_fq_addmul( - mp_limb_t * a, - const mp_limb_t * b, - const mp_limb_t * c, - const mp_limb_t * d, + ulong * a, + const ulong * b, + const ulong * c, + const ulong * d, const fq_nmod_ctx_t ctx) { - mp_limb_t * t = FLINT_ARRAY_ALLOC(fq_nmod_ctx_degree(ctx), mp_limb_t); + ulong * t = FLINT_ARRAY_ALLOC(fq_nmod_ctx_degree(ctx), ulong); n_fq_mul(t, c, d, ctx); n_fq_add(a, b, t, ctx); flint_free(t); } void n_fq_mul_fq_nmod( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, const fq_nmod_t C, const fq_nmod_ctx_t ctx) { @@ -816,8 +817,8 @@ void n_fq_mul_fq_nmod( } void n_fq_inv( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, const fq_nmod_ctx_t ctx) { fq_nmod_t A, B; @@ -831,8 +832,8 @@ void n_fq_inv( } void _n_fq_pow_ui( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, ulong e, const fq_nmod_ctx_t ctx) { @@ -847,8 +848,8 @@ void _n_fq_pow_ui( } void n_fq_pow_fmpz( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, const fmpz_t e, const fq_nmod_ctx_t ctx) { @@ -863,8 +864,8 @@ void n_fq_pow_fmpz( } void n_fq_pow_ui( - mp_limb_t * a, - const mp_limb_t * b, + ulong * a, + const ulong * b, ulong e, const fq_nmod_ctx_t ctx) { @@ -879,7 +880,7 @@ void n_fq_pow_ui( } int n_fq_is_canonical( - const mp_limb_t * a, + const ulong * a, const 
fq_nmod_ctx_t ctx) { slong i, d = fq_nmod_ctx_degree(ctx); diff --git a/src/n_poly/n_fq_bpoly.c b/src/n_poly/n_fq_bpoly.c index ba7a04d2c8..3f99caa88c 100644 --- a/src/n_poly/n_fq_bpoly.c +++ b/src/n_poly/n_fq_bpoly.c @@ -76,7 +76,7 @@ int n_fq_bpoly_equal( } void n_fq_bpoly_get_coeff_n_fq( - mp_limb_t * c, + ulong * c, const n_bpoly_t A, slong e0, slong e1, @@ -92,7 +92,7 @@ void n_fq_bpoly_set_coeff_n_fq( n_bpoly_t A, slong e0, slong e1, - const mp_limb_t * c, + const ulong * c, const fq_nmod_ctx_t ctx) { slong i; @@ -192,7 +192,7 @@ void n_fq_bpoly_derivative_gen0( void n_fq_bpoly_scalar_mul_n_fq( n_fq_bpoly_t A, - const mp_limb_t * c, + const ulong * c, const fq_nmod_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx); diff --git a/src/n_poly/n_fq_bpoly_gcd.c b/src/n_poly/n_fq_bpoly_gcd.c index 774d9bc415..f3543a721b 100644 --- a/src/n_poly/n_fq_bpoly_gcd.c +++ b/src/n_poly/n_fq_bpoly_gcd.c @@ -78,17 +78,17 @@ void n_fq_bpoly_mul_last(n_bpoly_t A, const n_poly_t b, const fq_nmod_ctx_t ctx) /*****************************************************************************/ void n_fq_poly_eval2p_pow( - mp_limb_t * vp, - mp_limb_t * vm, + ulong * vp, + ulong * vm, const n_fq_poly_t P, n_poly_t alphapow, slong d, nmod_t ctx) { - const mp_limb_t * Pcoeffs = P->coeffs; + const ulong * Pcoeffs = P->coeffs; slong Plen = P->length; - mp_limb_t * alpha_powers = alphapow->coeffs; - mp_limb_t p1, p0, a0, a1, a2, q1, q0, b0, b1, b2; + ulong * alpha_powers = alphapow->coeffs; + ulong p1, p0, a0, a1, a2, q1, q0, b0, b1, b2; slong i, k; FLINT_ASSERT(P->alloc >= d*Plen); @@ -147,7 +147,7 @@ void n_fq_bpoly_interp_reduce_2psm_poly( slong d = fq_nmod_ctx_degree(ctx); slong i, Alen = A->length; const n_poly_struct * Ac = A->coeffs; - mp_limb_t * Apc, * Amc; + ulong * Apc, * Amc; n_poly_fit_length(Ap, d*Alen); n_poly_fit_length(Am, d*Alen); @@ -171,24 +171,24 @@ void n_fq_bpoly_interp_lift_2psm_poly( n_fq_bpoly_t T, const n_fq_poly_t A, const n_fq_poly_t B, - mp_limb_t alpha, + ulong alpha, const fq_nmod_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx); nmod_t mod = fq_nmod_ctx_mod(ctx); slong i, j; slong lastlength = 0; - const mp_limb_t * Acoeffs = A->coeffs; - const mp_limb_t * Bcoeffs = B->coeffs; + const ulong * Acoeffs = A->coeffs; + const ulong * Bcoeffs = B->coeffs; n_fq_poly_struct * Tcoeffs; slong Alen = A->length; slong Blen = B->length; slong Tlen = FLINT_MAX(Alen, Blen); - mp_limb_t d0 = (1 + mod.n)/2; - mp_limb_t d1 = nmod_inv(nmod_add(alpha, alpha, mod), mod); - mp_limb_t * u, u1nonzero, u0nonzero; + ulong d0 = (1 + mod.n)/2; + ulong d1 = nmod_inv(nmod_add(alpha, alpha, mod), mod); + ulong * u, u1nonzero, u0nonzero; - u = FLINT_ARRAY_ALLOC(2*d, mp_limb_t); + u = FLINT_ARRAY_ALLOC(2*d, ulong); n_bpoly_fit_length(T, Tlen); @@ -269,15 +269,15 @@ void n_fq_bpoly_interp_lift_2psm_poly( void _n_fq_poly_addmul_plinear( n_fq_poly_t A, - mp_limb_t * Bcoeffs, slong Blen, + ulong * Bcoeffs, slong Blen, const n_poly_t C, - mp_limb_t * s, + ulong * s, slong d, nmod_t mod) { slong i, j; - mp_limb_t * Acoeffs; - mp_limb_t * Ccoeffs = C->coeffs; + ulong * Acoeffs; + ulong * Ccoeffs = C->coeffs; slong Clen = C->length; slong Alen = FLINT_MAX(Blen, Clen + 1); @@ -331,9 +331,9 @@ int n_fq_bpoly_interp_crt_2psm_poly( slong Flen = F->length; slong Tlen = FLINT_MAX(FLINT_MAX(Alen, Blen), Flen); n_fq_poly_struct * Tcoeffs, * Fcoeffs; - mp_limb_t * Acoeffs, * Bcoeffs; - mp_limb_t * u, unonzero; - mp_limb_t malpha = mod.n - alphapow->coeffs[1]; + ulong * Acoeffs, * Bcoeffs; + ulong * u, unonzero; + ulong malpha = mod.n - 
alphapow->coeffs[1]; n_bpoly_fit_length(T, Tlen); Tcoeffs = T->coeffs; @@ -341,7 +341,7 @@ int n_fq_bpoly_interp_crt_2psm_poly( Bcoeffs = B->coeffs; Fcoeffs = F->coeffs; - u = FLINT_ARRAY_ALLOC(2*d, mp_limb_t); + u = FLINT_ARRAY_ALLOC(2*d, ulong); for (i = 0; i < Tlen; i++) { @@ -359,8 +359,8 @@ int n_fq_bpoly_interp_crt_2psm_poly( unonzero = 0; for (j = 0; j < d; j++) { - mp_limb_t t1 = nmod_sub(u[d*1 + j], u[d*0 + j], mod); - mp_limb_t t0 = nmod_add(u[d*1 + j], u[d*0 + j], mod); + ulong t1 = nmod_sub(u[d*1 + j], u[d*0 + j], mod); + ulong t0 = nmod_add(u[d*1 + j], u[d*0 + j], mod); u[d*1 + j] = t1; unonzero |= u[d*1 + j]; u[d*0 + j] = nmod_mul(malpha, t0, mod); @@ -369,7 +369,7 @@ int n_fq_bpoly_interp_crt_2psm_poly( if (unonzero) { - mp_limb_t * Ficoeffs = i < Flen ? Fcoeffs[i].coeffs : NULL; + ulong * Ficoeffs = i < Flen ? Fcoeffs[i].coeffs : NULL; slong Filen = i < Flen ? Fcoeffs[i].length : 0; _n_fq_poly_addmul_plinear(Tcoeffs + i, Ficoeffs, Filen, modulus, u, d, mod); changed = 1; @@ -423,7 +423,7 @@ int n_fq_bpoly_gcd_brown_smprime2p( nmod_t mod = fq_nmod_ctx_mod(ctx); int success; slong bound; - mp_limb_t alpha, temp, * gammaevalp, * gammaevalm; + ulong alpha, temp, * gammaevalp, * gammaevalm; n_fq_poly_struct * Aevalp, * Bevalp, * Gevalp, * Abarevalp, * Bbarevalp; n_fq_poly_struct * Aevalm, * Bevalm, * Gevalm, * Abarevalm, * Bbarevalm; n_bpoly_struct * T; @@ -442,8 +442,8 @@ int n_fq_bpoly_gcd_brown_smprime2p( FLINT_ASSERT(A->length > 0); FLINT_ASSERT(B->length > 0); - gammaevalp = FLINT_ARRAY_ALLOC(d, mp_limb_t); - gammaevalm = FLINT_ARRAY_ALLOC(d, mp_limb_t); + gammaevalp = FLINT_ARRAY_ALLOC(d, ulong); + gammaevalm = FLINT_ARRAY_ALLOC(d, ulong); n_poly_stack_fit_request(Sp->poly_stack, 12); Aevalp = n_poly_stack_take_top(Sp->poly_stack); @@ -668,7 +668,7 @@ void n_fq_bpoly_interp_reduce_sm_poly( slong d = fq_nmod_ctx_degree(ctx); slong i, Alen = A->length; const n_fq_poly_struct * Ac = A->coeffs; - mp_limb_t * Ec; + ulong * Ec; n_poly_fit_length(E, d*Alen); Ec = E->coeffs; @@ -687,7 +687,7 @@ void n_fq_bpoly_interp_lift_sm_poly( { slong d = fq_nmod_ctx_degree(ctx); slong i; - const mp_limb_t * Acoeffs = A->coeffs; + const ulong * Acoeffs = A->coeffs; n_poly_struct * Tcoeffs; slong Alen = A->length; @@ -720,14 +720,14 @@ int n_fq_bpoly_interp_crt_sm_poly( slong Alen = A->length; slong Flen = F->length; n_fq_poly_struct * Tcoeffs, * Fcoeffs; - mp_limb_t * Acoeffs; - mp_limb_t * u, * v; + ulong * Acoeffs; + ulong * u, * v; FLINT_ASSERT(n_fq_bpoly_is_canonical(F, ctx)); FLINT_ASSERT(n_fq_poly_is_canonical(A, ctx)); - u = FLINT_ARRAY_ALLOC(d, mp_limb_t); - v = FLINT_ARRAY_ALLOC(d, mp_limb_t); + u = FLINT_ARRAY_ALLOC(d, ulong); + v = FLINT_ARRAY_ALLOC(d, ulong); n_fq_bpoly_fit_length(T, FLINT_MAX(Alen, Flen)); Tcoeffs = T->coeffs; @@ -802,7 +802,7 @@ int n_fq_bpoly_gcd_brown_smprime( int success; slong bound; fq_nmod_t alpha; - mp_limb_t * temp, * gammaeval; + ulong * temp, * gammaeval; n_poly_struct * Aeval, * Beval, * Geval, * Abareval, * Bbareval; n_bpoly_struct * T; slong deggamma, ldegG, ldegAbar, ldegBbar, ldegA, ldegB; @@ -873,8 +873,8 @@ int n_fq_bpoly_gcd_brown_smprime( FLINT_ASSERT(B->length > 0); fq_nmod_init(alpha, ctx); - temp = FLINT_ARRAY_ALLOC(d, mp_limb_t); - gammaeval = FLINT_ARRAY_ALLOC(d, mp_limb_t); + temp = FLINT_ARRAY_ALLOC(d, ulong); + gammaeval = FLINT_ARRAY_ALLOC(d, ulong); n_poly_stack_fit_request(Sp->poly_stack, 7); Aeval = n_poly_stack_take_top(Sp->poly_stack); diff --git a/src/n_poly/n_fq_bpoly_taylor_shift.c b/src/n_poly/n_fq_bpoly_taylor_shift.c index 
b61d4ec8b1..99f38f7cc6 100644 --- a/src/n_poly/n_fq_bpoly_taylor_shift.c +++ b/src/n_poly/n_fq_bpoly_taylor_shift.c @@ -13,14 +13,14 @@ #include "n_poly.h" static void _n_fq_poly_taylor_shift_horner_n_fq( - mp_limb_t * poly, - const mp_limb_t * c, + ulong * poly, + const ulong * c, slong n, const fq_nmod_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx); slong i, j; - mp_limb_t * p = FLINT_ARRAY_ALLOC(d, mp_limb_t); + ulong * p = FLINT_ARRAY_ALLOC(d, ulong); for (i = n - 2; i >= 0; i--) { @@ -43,7 +43,7 @@ void n_fq_bpoly_taylor_shift_gen1_fq_nmod( { slong d = fq_nmod_ctx_degree(ctx); slong i; - mp_limb_t * c = FLINT_ARRAY_ALLOC(d, mp_limb_t); + ulong * c = FLINT_ARRAY_ALLOC(d, ulong); n_fq_set_fq_nmod(c, c_, ctx); n_fq_bpoly_set(A, B, ctx); @@ -60,13 +60,13 @@ void n_fq_bpoly_taylor_shift_gen0_fq_nmod( { slong d = fq_nmod_ctx_degree(ctx); slong n, i, j; - mp_limb_t * c; + ulong * c; n_poly_t t; if (fq_nmod_is_zero(alpha, ctx)) return; - c = FLINT_ARRAY_ALLOC(d, mp_limb_t); + c = FLINT_ARRAY_ALLOC(d, ulong); n_fq_set_fq_nmod(c, alpha, ctx); n_poly_init(t); @@ -90,12 +90,12 @@ void n_fq_bpoly_taylor_shift_gen0_fq_nmod( void n_fq_bpoly_taylor_shift_gen0_n_fq( n_fq_bpoly_t A, - const mp_limb_t * alpha, + const ulong * alpha, const fq_nmod_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx); slong i, j, n = A->length; - mp_limb_t * tmp, * c, * alphainv; + ulong * tmp, * c, * alphainv; TMP_INIT; if (_n_fq_is_zero(alpha, d)) @@ -103,9 +103,9 @@ void n_fq_bpoly_taylor_shift_gen0_n_fq( TMP_START; - tmp = (mp_limb_t *) TMP_ALLOC(d*N_FQ_MUL_INV_ITCH*sizeof(mp_limb_t)); - c = TMP_ALLOC(d*sizeof(mp_limb_t)); - alphainv = TMP_ALLOC(d*sizeof(mp_limb_t)); + tmp = (ulong *) TMP_ALLOC(d*N_FQ_MUL_INV_ITCH*sizeof(ulong)); + c = TMP_ALLOC(d*sizeof(ulong)); + alphainv = TMP_ALLOC(d*sizeof(ulong)); _n_fq_one(c, d); for (i = 1; i < n; i++) @@ -113,7 +113,7 @@ void n_fq_bpoly_taylor_shift_gen0_n_fq( _n_fq_mul(c, c, alpha, ctx, tmp); if (!_n_fq_is_one(c, d)) { - mp_limb_t * Aic = A->coeffs[i].coeffs; + ulong * Aic = A->coeffs[i].coeffs; for (j = 0; j < A->coeffs[i].length; j++) _n_fq_mul(Aic + d*j, Aic + d*j, c, ctx, tmp); } @@ -134,7 +134,7 @@ void n_fq_bpoly_taylor_shift_gen0_n_fq( _n_fq_mul(c, c, alphainv, ctx, tmp); if (!_n_fq_is_one(c, d)) { - mp_limb_t * Aic = A->coeffs[i].coeffs; + ulong * Aic = A->coeffs[i].coeffs; for (j = 0; j < A->coeffs[i].length; j++) _n_fq_mul(Aic + d*j, Aic + d*j, c, ctx, tmp); } diff --git a/src/n_poly/n_fq_poly.c b/src/n_poly/n_fq_poly.c index 3429d0b178..d0652d781b 100644 --- a/src/n_poly/n_fq_poly.c +++ b/src/n_poly/n_fq_poly.c @@ -45,7 +45,7 @@ void n_fq_poly_init2( if (alloc > 0) { A->alloc = d*alloc; - A->coeffs = flint_malloc(A->alloc*sizeof(mp_limb_t)); + A->coeffs = flint_malloc(A->alloc*sizeof(ulong)); } else { @@ -95,7 +95,7 @@ int n_fq_poly_is_one(n_poly_t A, const fq_nmod_ctx_t ctx) void n_fq_poly_get_coeff_n_fq( - mp_limb_t * c, + ulong * c, const n_fq_poly_t A, slong e, const fq_nmod_ctx_t ctx) @@ -125,7 +125,7 @@ void n_fq_poly_get_coeff_fq_nmod( void n_fq_poly_set_coeff_n_fq( n_fq_poly_t A, slong j, - const mp_limb_t * c, + const ulong * c, const fq_nmod_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx); @@ -241,7 +241,7 @@ void n_fq_poly_set( void n_fq_poly_set_n_fq( n_poly_t A, - const mp_limb_t * c, + const ulong * c, const fq_nmod_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx); @@ -342,15 +342,15 @@ void n_fq_poly_evaluate_fq_nmod( void n_fq_poly_evaluate_n_fq( - mp_limb_t * e, + ulong * e, const n_poly_t A, - const mp_limb_t * c, + const ulong * c, const fq_nmod_ctx_t 
ctx) { slong d = fq_nmod_ctx_degree(ctx); slong i; - mp_limb_t * u = FLINT_ARRAY_ALLOC(d, mp_limb_t); - mp_limb_t * t = FLINT_ARRAY_ALLOC(d, mp_limb_t); + ulong * u = FLINT_ARRAY_ALLOC(d, ulong); + ulong * t = FLINT_ARRAY_ALLOC(d, ulong); _n_fq_zero(t, d); for (i = 0; i < A->length; i++) @@ -367,21 +367,21 @@ void n_fq_poly_evaluate_n_fq( } void n_fq_poly_eval_pow( - mp_limb_t * ev, + ulong * ev, const n_fq_poly_t P, n_fq_poly_t alphapow, const fq_nmod_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx); - const mp_limb_t * Pcoeffs = P->coeffs; + const ulong * Pcoeffs = P->coeffs; slong i, Plen = P->length; - mp_limb_t * alpha_powers = alphapow->coeffs; - mp_limb_t * t; + ulong * alpha_powers = alphapow->coeffs; + ulong * t; slong k; TMP_INIT; TMP_START; - t = TMP_ALLOC(d*FLINT_MAX(N_FQ_MUL_ITCH, N_FQ_LAZY_ITCH)*sizeof(mp_limb_t)); + t = TMP_ALLOC(d*FLINT_MAX(N_FQ_MUL_ITCH, N_FQ_LAZY_ITCH)*sizeof(ulong)); if (Plen > alphapow->length) { @@ -462,7 +462,7 @@ void n_fq_poly_get_fq_nmod_poly( void n_fq_poly_scalar_mul_n_fq( n_poly_t A, const n_poly_t B, - const mp_limb_t * c, + const ulong * c, const fq_nmod_ctx_t ctx) { slong i, d = fq_nmod_ctx_degree(ctx); @@ -480,7 +480,7 @@ void n_fq_poly_make_monic( { slong d = fq_nmod_ctx_degree(ctx); slong itch = FLINT_MAX(N_FQ_MUL_ITCH, N_FQ_INV_ITCH); - mp_limb_t * tmp, * inv; + ulong * tmp, * inv; slong i, Blen = B->length; if (Blen < 1) @@ -491,7 +491,7 @@ void n_fq_poly_make_monic( n_poly_fit_length(A, d*Blen); - tmp = FLINT_ARRAY_ALLOC(d*(itch + 1), mp_limb_t); + tmp = FLINT_ARRAY_ALLOC(d*(itch + 1), ulong); inv = tmp + d*itch; _n_fq_inv(inv, B->coeffs + d*(Blen - 1), ctx, tmp); @@ -511,17 +511,17 @@ void n_fq_poly_scalar_addmul_n_fq( n_fq_poly_t A, const n_fq_poly_t B, const n_fq_poly_t C, - const mp_limb_t * s, + const ulong * s, const fq_nmod_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx); slong i; - mp_limb_t * Acoeffs; - mp_limb_t * Bcoeffs; - mp_limb_t * Ccoeffs; + ulong * Acoeffs; + ulong * Bcoeffs; + ulong * Ccoeffs; slong Blen = B->length; slong Clen = C->length; - mp_limb_t * t; + ulong * t; TMP_INIT; n_poly_fit_length(A, d*FLINT_MAX(Blen, Clen)); @@ -530,7 +530,7 @@ void n_fq_poly_scalar_addmul_n_fq( Ccoeffs = C->coeffs; TMP_START; - t = TMP_ALLOC(d*N_FQ_MUL_ITCH*sizeof(mp_limb_t)); + t = TMP_ALLOC(d*N_FQ_MUL_ITCH*sizeof(ulong)); if (Blen > Clen) { @@ -570,14 +570,14 @@ void n_fq_poly_scalar_addmul_n_fq( void n_fq_poly_shift_left_scalar_submul( n_poly_t A, slong k, - const mp_limb_t * c, + const ulong * c, const fq_nmod_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx); - mp_limb_t * Acoeffs; + ulong * Acoeffs; slong i; slong Alen = A->length; - mp_limb_t * u = FLINT_ARRAY_ALLOC(d, mp_limb_t); + ulong * u = FLINT_ARRAY_ALLOC(d, ulong); n_poly_fit_length(A, d*(Alen + k)); diff --git a/src/n_poly/n_fq_poly_divrem.c b/src/n_poly/n_fq_poly_divrem.c index 4e8c1f6055..9d40848672 100644 --- a/src/n_poly/n_fq_poly_divrem.c +++ b/src/n_poly/n_fq_poly_divrem.c @@ -20,21 +20,21 @@ FLINT_MAX(FLINT_MAX(4, N_FQ_MUL_ITCH), 2 + (N_FQ_REDUCE_ITCH)) void _n_fq_poly_rem_basecase_( - mp_limb_t * Q, - mp_limb_t * A, - const mp_limb_t * AA, slong Alen, - const mp_limb_t * B, slong Blen, - const mp_limb_t * invB, + ulong * Q, + ulong * A, + const ulong * AA, slong Alen, + const ulong * B, slong Blen, + const ulong * invB, const fq_nmod_ctx_t ctx, n_poly_stack_t St) { slong i; slong d = fq_nmod_ctx_degree(ctx); nmod_t mod = fq_nmod_ctx_mod(ctx); - mp_limb_t * tmp = n_poly_stack_vec_init(St, d*(3 + N_FQ_POLY_DIVREM_BASECASE_ITCH)); - mp_limb_t * u = tmp + 
d*N_FQ_POLY_DIVREM_BASECASE_ITCH; - mp_limb_t * q0 = u + d; - mp_limb_t * q1 = q0 + d; + ulong * tmp = n_poly_stack_vec_init(St, d*(3 + N_FQ_POLY_DIVREM_BASECASE_ITCH)); + ulong * u = tmp + d*N_FQ_POLY_DIVREM_BASECASE_ITCH; + ulong * q0 = u + d; + ulong * q1 = q0 + d; if (A != AA) _nmod_vec_set(A, AA, d*Alen); @@ -81,27 +81,27 @@ void _n_fq_poly_rem_basecase_( void _n_fq_poly_divrem_basecase_( - mp_limb_t * Q, - mp_limb_t * A, - const mp_limb_t * AA, slong Alen, - const mp_limb_t * B, slong Blen, - const mp_limb_t * invB, + ulong * Q, + ulong * A, + const ulong * AA, slong Alen, + const ulong * B, slong Blen, + const ulong * invB, const fq_nmod_ctx_t ctx, n_poly_stack_t St) { slong i; slong d = fq_nmod_ctx_degree(ctx); nmod_t mod = fq_nmod_ctx_mod(ctx); - mp_limb_t * tmp = n_poly_stack_vec_init(St, d*(1 + N_FQ_POLY_DIVREM_BASECASE_ITCH)); - mp_limb_t * u = tmp + d*N_FQ_POLY_DIVREM_BASECASE_ITCH; + ulong * tmp = n_poly_stack_vec_init(St, d*(1 + N_FQ_POLY_DIVREM_BASECASE_ITCH)); + ulong * u = tmp + d*N_FQ_POLY_DIVREM_BASECASE_ITCH; if (A != AA) _nmod_vec_set(A, AA, d*Alen); while (Alen - Blen > 3 && Blen > 1) { - mp_limb_t * q1 = Q + d*(Alen - Blen); - mp_limb_t * q0 = Q + d*(Alen - Blen - 1); + ulong * q1 = Q + d*(Alen - Blen); + ulong * q0 = Q + d*(Alen - Blen - 1); _n_fq_mul(q1, A + d*(Alen - 1), invB, ctx, tmp); _n_fq_mul(q0, q1, B + d*(Blen - 2), ctx, tmp); @@ -128,7 +128,7 @@ void _n_fq_poly_divrem_basecase_( while (Alen - Blen >= 0) { - mp_limb_t * q0 = Q + d*(Alen - Blen); + ulong * q0 = Q + d*(Alen - Blen); _n_fq_mul(q0, A + d*(Alen - 1), invB, ctx, tmp); @@ -146,12 +146,12 @@ void _n_fq_poly_divrem_basecase_( } void _n_fq_poly_divrem_divconquer_recursive_( - mp_limb_t * Q, - mp_limb_t * BQ, - mp_limb_t * W, - const mp_limb_t * A, - const mp_limb_t * B, slong lenB, - const mp_limb_t * invB, + ulong * Q, + ulong * BQ, + ulong * W, + const ulong * A, + const ulong * B, slong lenB, + const ulong * invB, const fq_nmod_ctx_t ctx, n_poly_stack_t St) { @@ -171,19 +171,19 @@ void _n_fq_poly_divrem_divconquer_recursive_( { const slong n2 = lenB / 2; const slong n1 = lenB - n2; - mp_limb_t * W1 = W; - mp_limb_t * W2 = W + d*lenB; - const mp_limb_t * p1 = A + d*2*n2; - const mp_limb_t * p2; - const mp_limb_t * d1 = B + d*n2; - const mp_limb_t * d2 = B; - const mp_limb_t * d3 = B + d*n1; - const mp_limb_t * d4 = B; - mp_limb_t * q1 = Q + d*n2; - mp_limb_t * q2 = Q; - mp_limb_t * dq1 = BQ + d*n2; - mp_limb_t * d1q1 = BQ + d*2*n2; - mp_limb_t * d2q1, * d3q2, * d4q2, * t; + ulong * W1 = W; + ulong * W2 = W + d*lenB; + const ulong * p1 = A + d*2*n2; + const ulong * p2; + const ulong * d1 = B + d*n2; + const ulong * d2 = B; + const ulong * d3 = B + d*n1; + const ulong * d4 = B; + ulong * q1 = Q + d*n2; + ulong * q2 = Q; + ulong * dq1 = BQ + d*n2; + ulong * d1q1 = BQ + d*2*n2; + ulong * d2q1, * d3q2, * d4q2, * t; _n_fq_poly_divrem_divconquer_recursive_(q1, d1q1, W1, p1, d1, n1, invB, ctx, St); @@ -210,11 +210,11 @@ void _n_fq_poly_divrem_divconquer_recursive_( } static void __n_fq_poly_divrem_divconquer_( - mp_limb_t * Q, - mp_limb_t * R, - mp_limb_t * A, slong lenA, - mp_limb_t * B, slong lenB, - mp_limb_t * invB, + ulong * Q, + ulong * R, + ulong * A, slong lenA, + ulong * B, slong lenB, + ulong * invB, const fq_nmod_ctx_t ctx, n_poly_stack_t St) { @@ -229,12 +229,12 @@ static void __n_fq_poly_divrem_divconquer_( { const slong n1 = lenA - lenB + 1; const slong n2 = lenB - n1; - const mp_limb_t * p1 = A + d*n2; - const mp_limb_t * d1 = B + d*n2; - const mp_limb_t * d2 = B; - mp_limb_t * W = 
n_poly_stack_vec_init(St, d*((2*n1 - 1) + lenB - 1)); - mp_limb_t * d1q1 = R + d*n2; - mp_limb_t * d2q1 = W + d*(2*n1 - 1); + const ulong * p1 = A + d*n2; + const ulong * d1 = B + d*n2; + const ulong * d2 = B; + ulong * W = n_poly_stack_vec_init(St, d*((2*n1 - 1) + lenB - 1)); + ulong * d1q1 = R + d*n2; + ulong * d2q1 = W + d*(2*n1 - 1); _n_fq_poly_divrem_divconquer_recursive_(Q, d1q1, W, p1, d1, n1, invB, ctx, St); @@ -248,7 +248,7 @@ static void __n_fq_poly_divrem_divconquer_( } else { - mp_limb_t * W = n_poly_stack_vec_init(St, d*lenA); + ulong * W = n_poly_stack_vec_init(St, d*lenA); _n_fq_poly_divrem_divconquer_recursive_(Q, R, W, A, B, lenB, invB, ctx, St); @@ -260,11 +260,11 @@ static void __n_fq_poly_divrem_divconquer_( void _n_fq_poly_divrem_divconquer_( - mp_limb_t * Q, - mp_limb_t * R, - mp_limb_t * A, slong lenA, - mp_limb_t * B, slong lenB, - mp_limb_t * invB, + ulong * Q, + ulong * R, + ulong * A, slong lenA, + ulong * B, slong lenB, + ulong * invB, const fq_nmod_ctx_t ctx, n_poly_stack_t St) { @@ -277,7 +277,7 @@ void _n_fq_poly_divrem_divconquer_( else { slong shift, n = 2*lenB - 1; - mp_limb_t * QB, * W; + ulong * QB, * W; _nmod_vec_set(R, A, d*lenA); W = n_poly_stack_vec_init(St, d*2*n); @@ -314,9 +314,9 @@ void n_fq_poly_divrem_divconquer_( const slong lenA = A->length; const slong lenB = B->length; const slong lenQ = lenA - lenB + 1; - mp_limb_t * tmp, * invB; + ulong * tmp, * invB; n_poly_t Qt, Rt; - mp_limb_t * q, * r; + ulong * q, * r; #if FLINT_WANT_ASSERT fq_nmod_poly_t QQ, RR, AA, BB; #endif diff --git a/src/n_poly/n_fq_poly_gcd.c b/src/n_poly/n_fq_poly_gcd.c index 66a3343121..3cf1fac41f 100644 --- a/src/n_poly/n_fq_poly_gcd.c +++ b/src/n_poly/n_fq_poly_gcd.c @@ -14,18 +14,18 @@ #include "n_poly.h" slong _n_fq_poly_gcd_euclidean_inplace_( - mp_limb_t * A, slong Alen, - mp_limb_t * B, slong Blen, + ulong * A, slong Alen, + ulong * B, slong Blen, const fq_nmod_ctx_t ctx, - mp_limb_t * tmp) + ulong * tmp) { slong d = fq_nmod_ctx_degree(ctx); nmod_t mod = fq_nmod_ctx_mod(ctx); slong i; - mp_limb_t * u = tmp; - mp_limb_t * q0 = u + d; - mp_limb_t * q1 = q0 + d; - mp_limb_t * t = q1 + d; + ulong * u = tmp; + ulong * q0 = u + d; + ulong * q1 = q0 + d; + ulong * t = q1 + d; again: @@ -151,7 +151,7 @@ void n_fq_poly_gcd_( { slong d = fq_nmod_ctx_degree(ctx); slong n; - mp_limb_t * a, * b, * t; + ulong * a, * b, * t; #if FLINT_WANT_ASSERT fq_nmod_poly_t GG, AA, BB; fq_nmod_poly_init(GG, ctx); diff --git a/src/n_poly/n_fq_poly_mul.c b/src/n_poly/n_fq_poly_mul.c index c0d8833a54..4f7005ada3 100644 --- a/src/n_poly/n_fq_poly_mul.c +++ b/src/n_poly/n_fq_poly_mul.c @@ -17,15 +17,15 @@ #endif void _n_fq_poly_mul_( - mp_limb_t * A, /* length d*(Blen + Clen - 1) */ - const mp_limb_t * B, slong Blen, - const mp_limb_t * C, slong Clen, + ulong * A, /* length d*(Blen + Clen - 1) */ + const ulong * B, slong Blen, + const ulong * C, slong Clen, const fq_nmod_ctx_t ctx, n_poly_stack_t St) { slong d = fq_nmod_ctx_degree(ctx); slong Alen = Blen + Clen - 1; - mp_limb_t * tmp, * u; + ulong * tmp, * u; slong i, j; FLINT_ASSERT(Blen > 0); @@ -62,7 +62,7 @@ void _n_fq_poly_mul_( } else { - mp_limb_t * xA, * xB, * xC; + ulong * xA, * xB, * xC; slong xAlen, xBlen, xClen, start; xBlen = (2*d-1)*Blen; diff --git a/src/n_poly/n_fq_poly_mullow.c b/src/n_poly/n_fq_poly_mullow.c index 5a3ccc9e3e..5ad647d736 100644 --- a/src/n_poly/n_fq_poly_mullow.c +++ b/src/n_poly/n_fq_poly_mullow.c @@ -18,9 +18,9 @@ #endif void _n_fq_poly_mullow_( - mp_limb_t * rop, - const mp_limb_t * op1, slong len1, - const 
mp_limb_t * op2, slong len2, + ulong * rop, + const ulong * op1, slong len1, + const ulong * op2, slong len2, slong n, const fq_nmod_ctx_t ctx, n_poly_stack_t St) @@ -35,8 +35,8 @@ void _n_fq_poly_mullow_( const slong clen1 = pfqlen*len1; const slong clen2 = pfqlen*len2; slong i; - mp_limb_t * tmp; - mp_ptr cop1, cop2, crop; + ulong * tmp; + nn_ptr cop1, cop2, crop; if (len1 < 1 || len2 < 1) { diff --git a/src/n_poly/n_fq_pow_cache.c b/src/n_poly/n_fq_pow_cache.c index 07db984296..196455514d 100644 --- a/src/n_poly/n_fq_pow_cache.c +++ b/src/n_poly/n_fq_pow_cache.c @@ -15,7 +15,7 @@ /* hold positive and negative powers of b */ void n_fq_pow_cache_start_n_fq( - const mp_limb_t * b, + const ulong * b, n_poly_t pos, /* b^0, b^1, b^2, ..., b^50 */ n_poly_t bin, /* b^1, b^2, b^3, b^4, b^8, b^12, ... */ n_poly_t neg, /* b^-0, b^-1, b^-2, ..., b^-50 */ @@ -48,18 +48,18 @@ void n_fq_pow_cache_start_fq_nmod( /* r = a*b^e */ static void n_fq_pow_cache_mulpow_ui_array_bin( - mp_limb_t * r, - const mp_limb_t * a, - mp_limb_t * elimbs, slong elen, + ulong * r, + const ulong * a, + ulong * elimbs, slong elen, n_poly_t bin, - const mp_limb_t * b, + const ulong * b, const fq_nmod_ctx_t ctx, - mp_limb_t * tmp) /* size d*N_FQ_MUL_ITCH */ + ulong * tmp) /* size d*N_FQ_MUL_ITCH */ { slong d = fq_nmod_ctx_degree(ctx); - const mp_limb_t * s = a; /* source */ + const ulong * s = a; /* source */ slong ei = 0, i = 0; - mp_limb_t e = (ei < elen) ? elimbs[ei] : 0; + ulong e = (ei < elen) ? elimbs[ei] : 0; int bits_left = FLINT_BITS; /* complicated code needed if an odd number of bits per limb */ @@ -130,8 +130,8 @@ static void n_fq_pow_cache_mulpow_ui_array_bin( /* r = a*b^e */ void n_fq_pow_cache_mulpow_ui( - mp_limb_t * r, - const mp_limb_t * a, + ulong * r, + const ulong * a, ulong e, n_poly_t pos, n_poly_t bin, @@ -182,8 +182,8 @@ void n_fq_pow_cache_mulpow_ui( /* r = a*b^-e */ void n_fq_pow_cache_mulpow_neg_ui( - mp_limb_t * r, - const mp_limb_t * a, + ulong * r, + const ulong * a, ulong e, n_poly_t pos, n_poly_t bin, @@ -191,7 +191,7 @@ void n_fq_pow_cache_mulpow_neg_ui( const fq_nmod_ctx_t ctx) { slong i, d = fq_nmod_ctx_degree(ctx); - mp_limb_t * tmp; + ulong * tmp; fmpz_t f; FLINT_ASSERT(pos->length >= 2); @@ -242,8 +242,8 @@ void n_fq_pow_cache_mulpow_neg_ui( /* r = a*b^-e */ void n_fq_pow_cache_mulpow_fmpz( - mp_limb_t * r, - const mp_limb_t * a, + ulong * r, + const ulong * a, const fmpz_t e, n_poly_t pos, n_poly_t bin, diff --git a/src/n_poly/n_poly.c b/src/n_poly/n_poly.c index 3125e1e46b..a3714cf4a7 100644 --- a/src/n_poly/n_poly.c +++ b/src/n_poly/n_poly.c @@ -33,13 +33,13 @@ void n_poly_realloc(n_poly_t A, slong len) if (old_alloc > 0) { FLINT_ASSERT(A->coeffs != NULL); - A->coeffs = (mp_limb_t *) flint_realloc(A->coeffs, - new_alloc*sizeof(mp_limb_t)); + A->coeffs = (ulong *) flint_realloc(A->coeffs, + new_alloc*sizeof(ulong)); } else { FLINT_ASSERT(A->coeffs == NULL); - A->coeffs = (mp_limb_t *) flint_malloc(new_alloc*sizeof(mp_limb_t)); + A->coeffs = (ulong *) flint_malloc(new_alloc*sizeof(ulong)); } A->alloc = new_alloc; } diff --git a/src/n_poly/n_poly_mod.c b/src/n_poly/n_poly_mod.c index 474681fbe3..29da4c2db8 100644 --- a/src/n_poly/n_poly_mod.c +++ b/src/n_poly/n_poly_mod.c @@ -57,10 +57,10 @@ void n_poly_mod_add_ui(n_poly_t res, const n_poly_t poly, ulong c, nmod_t ctx) } } -mp_limb_t n_poly_mod_div_root(n_poly_t Q, - const n_poly_t A, mp_limb_t c, nmod_t ctx) +ulong n_poly_mod_div_root(n_poly_t Q, + const n_poly_t A, ulong c, nmod_t ctx) { - mp_limb_t rem; + ulong rem; slong len = 
A->length; @@ -243,7 +243,7 @@ void n_poly_mod_mulmod(n_poly_t res, const n_poly_t poly1, const n_poly_t poly2, const n_poly_t f, nmod_t ctx) { slong len1, len2, lenf; - mp_ptr fcoeffs; + nn_ptr fcoeffs; lenf = f->length; len1 = poly1->length; @@ -264,7 +264,7 @@ void n_poly_mod_mulmod(n_poly_t res, const n_poly_t poly1, { if (f == res) { - fcoeffs = flint_malloc(sizeof(mp_limb_t) * lenf); + fcoeffs = flint_malloc(sizeof(ulong) * lenf); _nmod_vec_set(fcoeffs, f->coeffs, lenf); } else @@ -291,7 +291,7 @@ void n_poly_mod_mulmod(n_poly_t res, const n_poly_t poly1, void n_poly_mod_div(n_poly_t Q, const n_poly_t A, const n_poly_t B, nmod_t ctx) { n_poly_t tQ; - mp_ptr q; + nn_ptr q; slong A_len, B_len; B_len = B->length; @@ -342,7 +342,7 @@ void n_poly_mod_div(n_poly_t Q, const n_poly_t A, const n_poly_t B, nmod_t ctx) void n_poly_mod_divexact(n_poly_t Q, const n_poly_t A, const n_poly_t B, nmod_t ctx) { n_poly_t tQ; - mp_ptr q; + nn_ptr q; slong A_len, B_len; B_len = B->length; @@ -394,7 +394,7 @@ void n_poly_mod_rem(n_poly_t R, const n_poly_t A, const n_poly_t B, nmod_t ctx) { const slong lenA = A->length, lenB = B->length; n_poly_t tR; - mp_ptr r; + nn_ptr r; if (lenB == 0) { @@ -435,7 +435,7 @@ void n_poly_mod_divrem(n_poly_t Q, n_poly_t R, { const slong lenA = A->length, lenB = B->length; n_poly_t tQ, tR; - mp_ptr q, r; + nn_ptr q, r; if (lenB == 0) { @@ -503,7 +503,7 @@ void n_poly_mod_divrem(n_poly_t Q, n_poly_t R, int n_poly_mod_invmod(n_poly_t A, const n_poly_t B, const n_poly_t P, nmod_t ctx) { const slong lenB = B->length, lenP = P->length; - mp_limb_t * a; + ulong * a; n_poly_t tA; int ans; @@ -561,7 +561,7 @@ void n_poly_mod_gcd(n_poly_t G, const n_poly_t A, const n_poly_t B, nmod_t ctx) { slong lenA = A->length, lenB = B->length, lenG; n_poly_t tG; - mp_ptr g; + nn_ptr g; if (lenA == 0) /* lenA = lenB = 0 */ { @@ -616,7 +616,7 @@ void n_poly_mod_xgcd( else /* lenA >= lenB >= 0 */ { const slong lenA = A->length, lenB = B->length; - mp_limb_t inv; + ulong inv; if (lenA == 0) /* lenA = lenB = 0 */ { @@ -642,7 +642,7 @@ void n_poly_mod_xgcd( } else /* lenA >= lenB >= 2 */ { - mp_ptr g, s, t; + nn_ptr g, s, t; slong lenG; if (G == A || G == B) @@ -735,7 +735,7 @@ void n_poly_mod_mulmod_preinv( nmod_t ctx) { slong len1, len2, lenf; - mp_ptr fcoeffs; + nn_ptr fcoeffs; lenf = f->length; len1 = poly1->length; @@ -756,7 +756,7 @@ void n_poly_mod_mulmod_preinv( { if (f == res) { - fcoeffs = flint_malloc(sizeof(mp_limb_t) * lenf); + fcoeffs = flint_malloc(sizeof(ulong) * lenf); _nmod_vec_set(fcoeffs, f->coeffs, lenf); } else @@ -846,7 +846,7 @@ void n_poly_mod_div_series(n_poly_t Q, const n_poly_t A, const n_poly_t B, _n_poly_normalise(Q); } -void n_poly_mod_scalar_mul_ui(n_poly_t A, const n_poly_t B, mp_limb_t c, nmod_t ctx) +void n_poly_mod_scalar_mul_ui(n_poly_t A, const n_poly_t B, ulong c, nmod_t ctx) { if (c >= ctx.n) { @@ -864,10 +864,10 @@ void n_poly_mod_scalar_mul_ui(n_poly_t A, const n_poly_t B, mp_limb_t c, nmod_t } /* multiply A by (x^k + c) */ -void n_poly_mod_shift_left_scalar_addmul(n_poly_t A, slong k, mp_limb_t c, +void n_poly_mod_shift_left_scalar_addmul(n_poly_t A, slong k, ulong c, nmod_t ctx) { - mp_limb_t * Acoeffs; + ulong * Acoeffs; slong i; slong Alen = A->length; @@ -889,11 +889,11 @@ void n_poly_mod_addmul_linear( n_poly_t A, const n_poly_t B, const n_poly_t C, - mp_limb_t d1, mp_limb_t d0, + ulong d1, ulong d0, nmod_t ctx) { slong i; - mp_limb_t * Acoeffs, * Bcoeffs, * Ccoeffs; + ulong * Acoeffs, * Bcoeffs, * Ccoeffs; slong Blen = B->length; slong Clen = 
C->length; slong Alen = FLINT_MAX(B->length, C->length + 1); @@ -936,12 +936,12 @@ void n_poly_mod_scalar_addmul_nmod( n_poly_t A, const n_poly_t B, const n_poly_t C, - mp_limb_t d0, + ulong d0, nmod_t ctx) { slong i; - mp_limb_t t0, t1; - mp_limb_t * Acoeffs, * Bcoeffs, * Ccoeffs; + ulong t0, t1; + ulong * Acoeffs, * Bcoeffs, * Ccoeffs; slong Blen = B->length; slong Clen = C->length; slong Alen = FLINT_MAX(B->length, C->length); @@ -1030,12 +1030,12 @@ ulong n_poly_mod_remove(n_poly_t f, const n_poly_t p, nmod_t ctx) return i; } -mp_limb_t _n_poly_eval_pow(n_poly_t P, n_poly_t alphapow, int nlimbs, nmod_t ctx) +ulong _n_poly_eval_pow(n_poly_t P, n_poly_t alphapow, int nlimbs, nmod_t ctx) { - mp_limb_t * Pcoeffs = P->coeffs; + ulong * Pcoeffs = P->coeffs; slong Plen = P->length; - mp_limb_t * alpha_powers = alphapow->coeffs; - mp_limb_t res; + ulong * alpha_powers = alphapow->coeffs; + ulong res; slong k; if (Plen > alphapow->length) @@ -1054,23 +1054,23 @@ mp_limb_t _n_poly_eval_pow(n_poly_t P, n_poly_t alphapow, int nlimbs, nmod_t ctx return res; } -mp_limb_t n_poly_mod_eval_pow(n_poly_t P, n_poly_t alphapow, nmod_t ctx) +ulong n_poly_mod_eval_pow(n_poly_t P, n_poly_t alphapow, nmod_t ctx) { int nlimbs = _nmod_vec_dot_bound_limbs(P->length, ctx); return _n_poly_eval_pow(P, alphapow, nlimbs, ctx); } void n_poly_mod_eval2_pow( - mp_limb_t * vp, - mp_limb_t * vm, + ulong * vp, + ulong * vm, const n_poly_t P, n_poly_t alphapow, nmod_t ctx) { - const mp_limb_t * Pcoeffs = P->coeffs; + const ulong * Pcoeffs = P->coeffs; slong Plen = P->length; - mp_limb_t * alpha_powers = alphapow->coeffs; - mp_limb_t p1, p0, a0, a1, a2, q1, q0, b0, b1, b2; + ulong * alpha_powers = alphapow->coeffs; + ulong p1, p0, a0, a1, a2, q1, q0, b0, b1, b2; slong k; a0 = a1 = a2 = 0; @@ -1114,14 +1114,14 @@ void n_poly_mod_eval2_pow( *vm = nmod_sub(p0, q0, ctx); } -mp_limb_t n_poly_mod_eval_step2( +ulong n_poly_mod_eval_step2( n_poly_t Acur, const n_poly_t Ainc, nmod_t mod) { slong i, Alen = Acur->length; - mp_limb_t * cur = Acur->coeffs; - const mp_limb_t * inc = Ainc->coeffs; + ulong * cur = Acur->coeffs; + const ulong * inc = Ainc->coeffs; ulong t0, t1, t2, p0, p1; FLINT_ASSERT(2*Alen == Ainc->length); diff --git a/src/n_poly/n_polyu.c b/src/n_poly/n_polyu.c index ab8f49f7a1..7e4d7fe6a2 100644 --- a/src/n_poly/n_polyu.c +++ b/src/n_poly/n_polyu.c @@ -40,14 +40,14 @@ void n_polyu_realloc(n_polyu_t A, slong len) if (old_alloc > 0) { A->exps = (ulong *) flint_realloc(A->exps, new_alloc*sizeof(ulong)); - A->coeffs = (mp_limb_t *) flint_realloc(A->coeffs, new_alloc*sizeof(mp_limb_t)); + A->coeffs = (ulong *) flint_realloc(A->coeffs, new_alloc*sizeof(ulong)); } else { FLINT_ASSERT(A->exps == NULL); FLINT_ASSERT(A->coeffs == NULL); A->exps = (ulong *) flint_malloc(new_alloc*sizeof(ulong)); - A->coeffs = (mp_limb_t *) flint_malloc(new_alloc*sizeof(mp_limb_t)); + A->coeffs = (ulong *) flint_malloc(new_alloc*sizeof(ulong)); } A->alloc = new_alloc; diff --git a/src/n_poly/n_polyu1n_gcd.c b/src/n_poly/n_polyu1n_gcd.c index 7ff3dbcb89..051559f184 100644 --- a/src/n_poly/n_polyu1n_gcd.c +++ b/src/n_poly/n_polyu1n_gcd.c @@ -21,7 +21,7 @@ static void n_polyu1n_mod_interp_reduce_2sm_poly( nmod_t ctx) { slong i; - mp_limb_t u, v; + ulong u, v; n_poly_zero(E); n_poly_zero(F); @@ -38,17 +38,17 @@ static void n_polyu1n_mod_interp_lift_2sm_poly( n_polyun_t F, const n_poly_t A, const n_poly_t B, - mp_limb_t alpha, + ulong alpha, nmod_t ctx) { slong lastlen = 0; slong Fi, Aexp, Bexp; - const mp_limb_t * Acoeffs = A->coeffs; - const mp_limb_t 
* Bcoeffs = B->coeffs; + const ulong * Acoeffs = A->coeffs; + const ulong * Bcoeffs = B->coeffs; slong e; - mp_limb_t d0 = (1 + ctx.n)/2; - mp_limb_t d1 = nmod_inv(nmod_add(alpha, alpha, ctx), ctx); - mp_limb_t Avalue, Bvalue, u, v; + ulong d0 = (1 + ctx.n)/2; + ulong d1 = nmod_inv(nmod_add(alpha, alpha, ctx), ctx); + ulong Avalue, Bvalue, u, v; Aexp = n_poly_degree(A); Bexp = n_poly_degree(B); @@ -126,10 +126,10 @@ static int n_polyu1n_mod_interp_crt_2sm_poly( int changed = 0, Finc; slong lastlen = 0; n_poly_struct * Fvalue; - mp_limb_t u, v, FvalueA, FvalueB; + ulong u, v, FvalueA, FvalueB; slong Fi, Ti, Aexp, Bexp, e, fexp; - const mp_limb_t * Acoeff = A->coeffs; - const mp_limb_t * Bcoeff = B->coeffs; + const ulong * Acoeff = A->coeffs; + const ulong * Bcoeff = B->coeffs; slong Flen = F->length; n_poly_t zero; @@ -250,7 +250,7 @@ int n_polyu1n_mod_gcd_brown_smprime( { int success; slong bound; - mp_limb_t alpha, temp, gammaevalp, gammaevalm; + ulong alpha, temp, gammaevalp, gammaevalm; n_poly_struct * Aevalp, * Bevalp, * Gevalp, * Abarevalp, * Bbarevalp; n_poly_struct * Aevalm, * Bevalm, * Gevalm, * Abarevalm, * Bbarevalm; n_polyun_struct * T; diff --git a/src/n_poly/nmod_n_fq_interp.c b/src/n_poly/nmod_n_fq_interp.c index 0704472645..8512c81f09 100644 --- a/src/n_poly/nmod_n_fq_interp.c +++ b/src/n_poly/nmod_n_fq_interp.c @@ -25,7 +25,7 @@ #define MAC(h, m, l, a, b) \ { \ - mp_limb_t p1, p0; \ + ulong p1, p0; \ umul_ppmm(p1, p0, a, b); \ add_sssaaaaaa(h, m, l, h, m, l, 0, p1, p0); \ } @@ -36,13 +36,13 @@ /* p = 1 mod 4 */ static slong _find_eval_points4( - mp_limb_t * list, + ulong * list, slong d, nmod_t ctx) { slong i, len; - mp_limb_t p = ctx.n; - mp_limb_t n; + ulong p = ctx.n; + ulong n; FLINT_ASSERT(d > 0); FLINT_ASSERT((p & UWORD(3)) == 1); @@ -53,7 +53,7 @@ static slong _find_eval_points4( for (n = 2; len < d && n <= (p - 1)/2; n++) { int ok = 1; - mp_limb_t mn2 = p - nmod_mul(n, n, ctx); + ulong mn2 = p - nmod_mul(n, n, ctx); for (i = 0; ok && i < len; i++) ok = (nmod_mul(list[i], list[i], ctx) != mn2); if (ok) @@ -63,17 +63,17 @@ static slong _find_eval_points4( } static int _fill_matrices4( - mp_limb_t * M, /* length d by 4d */ - mp_limb_t * Q, /* length d by 4d+1 */ + ulong * M, /* length d by 4d */ + ulong * Q, /* length d by 4d+1 */ slong d, nmod_t ctx) { slong i, j; n_poly_t g, h; - mp_limb_t * list; - mp_limb_t g0i, c; + ulong * list; + ulong g0i, c; - list = FLINT_ARRAY_ALLOC(d, mp_limb_t); + list = FLINT_ARRAY_ALLOC(d, ulong); if (d != _find_eval_points4(list, d, ctx)) { flint_free(list); @@ -114,12 +114,12 @@ static int _fill_matrices4( static void _from_coeffs4( - mp_limb_t * v, /* length 4d+1 */ - const mp_limb_t * a, + ulong * v, /* length 4d+1 */ + const ulong * a, slong alen, - const mp_limb_t * M, /* length d by 4d */ + const ulong * M, /* length d by 4d */ slong d, - mp_limb_t w, + ulong w, nmod_t ctx) { slong i, j; @@ -129,7 +129,7 @@ static void _from_coeffs4( if (alen <= 1) { - mp_limb_t t = (alen == 1) ? a[0] : 0; + ulong t = (alen == 1) ? 
a[0] : 0; for (i = 0; i < 4*d+1; i++) v[i] = t; return; @@ -138,11 +138,11 @@ static void _from_coeffs4( v[0] = a[0]; for (i = 0; i < d; i++) { - mp_limb_t t1, t2, t3, t4; - mp_limb_t c1h, c1m, c1; - mp_limb_t c2h, c2m, c2; - mp_limb_t c3h, c3m, c3; - mp_limb_t c4h, c4m, c4; + ulong t1, t2, t3, t4; + ulong c1h, c1m, c1; + ulong c2h, c2m, c2; + ulong c3h, c3m, c3; + ulong c4h, c4m, c4; c1h = c1m = c1 = 0; c2h = c2m = c2 = 0; c3h = c3m = c3 = 0; @@ -184,17 +184,17 @@ static void _from_coeffs4( static void _from_coeffs4_n_fq( - mp_limb_t * v, /* length 4d+1 */ - const mp_limb_t * a, + ulong * v, /* length 4d+1 */ + const ulong * a, slong alen, - const mp_limb_t * M_, /* length d by 4d */ + const ulong * M_, /* length d by 4d */ slong D, - mp_limb_t w, + ulong w, slong d, nmod_t ctx) { slong i, j, k; - const mp_limb_t * Mrow; + const ulong * Mrow; FLINT_ASSERT(0 <= alen); FLINT_ASSERT(alen <= 1 + 4*D); @@ -221,11 +221,11 @@ static void _from_coeffs4_n_fq( Mrow = M_; for (i = 0; i < D; i++) { - mp_limb_t t1, t2, t3, t4; - mp_limb_t c1h, c1m, c1; - mp_limb_t c2h, c2m, c2; - mp_limb_t c3h, c3m, c3; - mp_limb_t c4h, c4m, c4; + ulong t1, t2, t3, t4; + ulong c1h, c1m, c1; + ulong c2h, c2m, c2; + ulong c3h, c3m, c3; + ulong c4h, c4m, c4; c1h = c1m = c1 = 0; c2h = c2m = c2 = 0; c3h = c3m = c3 = 0; @@ -267,12 +267,12 @@ static void _from_coeffs4_n_fq( } static void _to_coeffs4( - mp_limb_t * a, /* length 4d+1 */ - const mp_limb_t * v, /* length 4d+1 */ - mp_limb_t * t, /* length 4d */ - const mp_limb_t * Q, /* length d by 4d+1 */ + ulong * a, /* length 4d+1 */ + const ulong * v, /* length 4d+1 */ + ulong * t, /* length 4d */ + const ulong * Q, /* length d by 4d+1 */ slong d, - mp_limb_t w, + ulong w, nmod_t ctx) { slong i, j; @@ -281,10 +281,10 @@ static void _to_coeffs4( for (i = 0; i < d; i++) { - mp_limb_t t2 = nmod_add(v[1+4*i+0], v[1+4*i+2], ctx); - mp_limb_t t1 = nmod_sub(v[1+4*i+0], v[1+4*i+2], ctx); - mp_limb_t t3 = nmod_add(v[1+4*i+1], v[1+4*i+3], ctx); - mp_limb_t t4 = nmod_mul(nmod_sub(v[1+4*i+1], v[1+4*i+3], ctx), w, ctx); + ulong t2 = nmod_add(v[1+4*i+0], v[1+4*i+2], ctx); + ulong t1 = nmod_sub(v[1+4*i+0], v[1+4*i+2], ctx); + ulong t3 = nmod_add(v[1+4*i+1], v[1+4*i+3], ctx); + ulong t4 = nmod_mul(nmod_sub(v[1+4*i+1], v[1+4*i+3], ctx), w, ctx); t[4*i+0] = nmod_sub(t1, t4, ctx); t[4*i+1] = nmod_sub(t2, t3, ctx); t[4*i+2] = nmod_add(t1, t4, ctx); @@ -293,10 +293,10 @@ static void _to_coeffs4( for (i = 0; i < d; i++) { - mp_limb_t c1h, c1m, c1; - mp_limb_t c2h, c2m, c2; - mp_limb_t c3h, c3m, c3; - mp_limb_t c4h, c4m, c4; + ulong c1h, c1m, c1; + ulong c2h, c2m, c2; + ulong c3h, c3m, c3; + ulong c4h, c4m, c4; c1h = c1m = c1 = 0; c2h = c2m = c2 = 0; c3h = c3m = c3 = 0; @@ -320,17 +320,17 @@ static void _to_coeffs4( } static void _to_coeffs4_n_fq( - mp_limb_t * a, /* length 4D+1 */ - const mp_limb_t * v, /* length 4D+1 */ - mp_limb_t * t, /* length 4D */ - const mp_limb_t * Q_, /* length D by 4D+1 */ + ulong * a, /* length 4D+1 */ + const ulong * v, /* length 4D+1 */ + ulong * t, /* length 4D */ + const ulong * Q_, /* length D by 4D+1 */ slong D, - mp_limb_t w, + ulong w, slong d, nmod_t ctx) { slong i, j, k; - const mp_limb_t * Qrow; + const ulong * Qrow; _n_fq_set(a + d*0, v + d*0, d); @@ -339,10 +339,10 @@ static void _to_coeffs4_n_fq( for (i = 0; i < D; i++) { - mp_limb_t t2 = nmod_add(v[d*(1+4*i+0)+k], v[d*(1+4*i+2)+k], ctx); - mp_limb_t t1 = nmod_sub(v[d*(1+4*i+0)+k], v[d*(1+4*i+2)+k], ctx); - mp_limb_t t3 = nmod_add(v[d*(1+4*i+1)+k], v[d*(1+4*i+3)+k], ctx); - mp_limb_t t4 = 
nmod_mul(nmod_sub(v[d*(1+4*i+1)+k], v[d*(1+4*i+3)+k], ctx), w, ctx); + ulong t2 = nmod_add(v[d*(1+4*i+0)+k], v[d*(1+4*i+2)+k], ctx); + ulong t1 = nmod_sub(v[d*(1+4*i+0)+k], v[d*(1+4*i+2)+k], ctx); + ulong t3 = nmod_add(v[d*(1+4*i+1)+k], v[d*(1+4*i+3)+k], ctx); + ulong t4 = nmod_mul(nmod_sub(v[d*(1+4*i+1)+k], v[d*(1+4*i+3)+k], ctx), w, ctx); t[4*i+0] = nmod_sub(t1, t4, ctx); t[4*i+1] = nmod_sub(t2, t3, ctx); t[4*i+2] = nmod_add(t1, t4, ctx); @@ -352,10 +352,10 @@ static void _to_coeffs4_n_fq( Qrow = Q_; for (i = 0; i < D; i++) { - mp_limb_t c1h, c1m, c1; - mp_limb_t c2h, c2m, c2; - mp_limb_t c3h, c3m, c3; - mp_limb_t c4h, c4m, c4; + ulong c1h, c1m, c1; + ulong c2h, c2m, c2; + ulong c3h, c3m, c3; + ulong c4h, c4m, c4; c1h = c1m = c1 = 0; c2h = c2m = c2 = 0; c3h = c3m = c3 = 0; @@ -381,14 +381,14 @@ static void _to_coeffs4_n_fq( static int _fill_matrices2( - mp_limb_t * M, /* length d by 2d */ - mp_limb_t * Q, /* length d by 2d+1 */ + ulong * M, /* length d by 2d */ + ulong * Q, /* length d by 2d+1 */ slong d, nmod_t ctx) { slong i, j; n_poly_t g, h; - mp_limb_t g0i, c; + ulong g0i, c; if (2*d >= ctx.n) return 0; @@ -426,10 +426,10 @@ static int _fill_matrices2( static void _from_coeffs2( - mp_limb_t * v, /* length 2d+1 */ - const mp_limb_t * a, /* length alen <= 2d+1 */ + ulong * v, /* length 2d+1 */ + const ulong * a, /* length alen <= 2d+1 */ slong alen, - const mp_limb_t * M, /* length d by 2d */ + const ulong * M, /* length d by 2d */ slong d, nmod_t ctx) { @@ -440,7 +440,7 @@ static void _from_coeffs2( if (alen <= 1) { - mp_limb_t t = (alen == 1) ? a[0] : 0; + ulong t = (alen == 1) ? a[0] : 0; for (i = 0; i < 2*d+1; i++) v[i] = t; return; @@ -449,8 +449,8 @@ static void _from_coeffs2( v[0] = a[0]; for (i = 0; i < d; i++) { - mp_limb_t c1h, c1m, c1; - mp_limb_t c2h, c2m, c2; + ulong c1h, c1m, c1; + ulong c2h, c2m, c2; c1h = c1m = c1 = 0; c2h = c2m = c2 = 0; @@ -477,16 +477,16 @@ static void _from_coeffs2( } static void _from_coeffs2_n_fq( - mp_limb_t * v, /* length 4D+1 */ - const mp_limb_t * a, /* length alen <= 2D+1 */ + ulong * v, /* length 4D+1 */ + const ulong * a, /* length alen <= 2D+1 */ slong alen, - const mp_limb_t * M_, /* length D by 4D */ + const ulong * M_, /* length D by 4D */ slong D, slong d, nmod_t ctx) { slong i, j, k; - const mp_limb_t * Mrow; + const ulong * Mrow; FLINT_ASSERT(0 <= alen); FLINT_ASSERT(alen <= 1 + 2*D); @@ -513,8 +513,8 @@ static void _from_coeffs2_n_fq( Mrow = M_; for (i = 0; i < D; i++) { - mp_limb_t c1h, c1m, c1; - mp_limb_t c2h, c2m, c2; + ulong c1h, c1m, c1; + ulong c2h, c2m, c2; c1h = c1m = c1 = 0; c2h = c2m = c2 = 0; @@ -543,10 +543,10 @@ static void _from_coeffs2_n_fq( static void _to_coeffs2( - mp_limb_t * a, /* length 2d+1 */ - const mp_limb_t * v, /* length 2d+1 */ - mp_limb_t * t, /* length 2d */ - const mp_limb_t * Q, /* length d by 2d+1 */ + ulong * a, /* length 2d+1 */ + const ulong * v, /* length 2d+1 */ + ulong * t, /* length 2d */ + const ulong * Q, /* length d by 2d+1 */ slong d, nmod_t ctx) { @@ -562,8 +562,8 @@ static void _to_coeffs2( for (i = 0; i < d; i++) { - mp_limb_t c1h, c1m, c1; - mp_limb_t c2h, c2m, c2; + ulong c1h, c1m, c1; + ulong c2h, c2m, c2; c1h = c1m = c1 = 0; c2h = c2m = c2 = 0; umul_ppmm(c2m, c2, Q[0], v[0]); @@ -581,16 +581,16 @@ static void _to_coeffs2( } static void _to_coeffs2_n_fq( - mp_limb_t * a, /* length 2d+1 */ - const mp_limb_t * v, /* length 2d+1 */ - mp_limb_t * t, /* length 2d */ - const mp_limb_t * Q_, /* length d by 2d+1 */ + ulong * a, /* length 2d+1 */ + const ulong * v, /* length 2d+1 */ + ulong 
* t, /* length 2d */ + const ulong * Q_, /* length d by 2d+1 */ slong D, slong d, nmod_t ctx) { slong i, j, k; - const mp_limb_t * Qrow; + const ulong * Qrow; _n_fq_set(a + d*0, v + d*0, d); @@ -605,8 +605,8 @@ static void _to_coeffs2_n_fq( Qrow = Q_; for (i = 0; i < D; i++) { - mp_limb_t c1h, c1m, c1; - mp_limb_t c2h, c2m, c2; + ulong c1h, c1m, c1; + ulong c2h, c2m, c2; c1h = c1m = c1 = 0; c2h = c2m = c2 = 0; umul_ppmm(c2m, c2, Qrow[0], v[d*0+k]); @@ -649,7 +649,7 @@ int nmod_eval_interp_set_degree_modulus( nmod_t ctx) { slong d, new_alloc; - mp_limb_t p = ctx.n; + ulong p = ctx.n; FLINT_ASSERT(deg >= 0); @@ -664,9 +664,9 @@ int nmod_eval_interp_set_degree_modulus( new_alloc = d*(4*d) + 4*d + d*(4*d + 1); if (E->alloc > 0) - E->array = flint_realloc(E->array, new_alloc*sizeof(mp_limb_t)); + E->array = flint_realloc(E->array, new_alloc*sizeof(ulong)); else - E->array = flint_malloc(new_alloc*sizeof(mp_limb_t)); + E->array = flint_malloc(new_alloc*sizeof(ulong)); E->radix = 4; E->alloc = new_alloc; @@ -686,9 +686,9 @@ int nmod_eval_interp_set_degree_modulus( new_alloc = d*(2*d) + 2*d + d*(2*d + 1); if (E->alloc > 0) - E->array = flint_realloc(E->array, new_alloc*sizeof(mp_limb_t)); + E->array = flint_realloc(E->array, new_alloc*sizeof(ulong)); else - E->array = flint_malloc(new_alloc*sizeof(mp_limb_t)); + E->array = flint_malloc(new_alloc*sizeof(ulong)); E->radix = 2; E->alloc = new_alloc; @@ -703,8 +703,8 @@ int nmod_eval_interp_set_degree_modulus( } static void nmod_eval_interp_to_coeffs( - mp_limb_t * a, - const mp_limb_t * v, + ulong * a, + const ulong * v, nmod_eval_interp_t E, nmod_t ctx) { @@ -715,8 +715,8 @@ static void nmod_eval_interp_to_coeffs( } static void nmod_eval_interp_from_coeffs( - mp_limb_t * v, - const mp_limb_t * a, + ulong * v, + const ulong * a, slong alen, nmod_eval_interp_t E, nmod_t ctx) @@ -728,8 +728,8 @@ static void nmod_eval_interp_from_coeffs( } static void nmod_eval_interp_to_coeffs_n_fq( - mp_limb_t * a, - const mp_limb_t * v, + ulong * a, + const ulong * v, nmod_eval_interp_t E, slong d, nmod_t ctx) @@ -741,8 +741,8 @@ static void nmod_eval_interp_to_coeffs_n_fq( } static void nmod_eval_interp_from_coeffs_n_fq( - mp_limb_t * v, - const mp_limb_t * a, + ulong * v, + const ulong * a, slong alen, nmod_eval_interp_t E, slong d, @@ -944,7 +944,7 @@ void nmod_evals_fmma( for (i = 0; i < len; i++) { - mp_limb_t t = nmod_mul(b->coeffs[i], c->coeffs[i], ctx); + ulong t = nmod_mul(b->coeffs[i], c->coeffs[i], ctx); NMOD_ADDMUL(t, d->coeffs[i], e->coeffs[i], ctx); a->coeffs[i] = t; } @@ -990,7 +990,7 @@ void n_fq_evals_mul( { slong d = fq_nmod_ctx_degree(ctx); slong i; - mp_limb_t * tmp; + ulong * tmp; TMP_INIT; if (b->length == 0 || c->length == 0) @@ -1003,7 +1003,7 @@ void n_fq_evals_mul( TMP_START; - tmp = (mp_limb_t *) TMP_ALLOC(d*N_FQ_MUL_ITCH*sizeof(mp_limb_t)); + tmp = (ulong *) TMP_ALLOC(d*N_FQ_MUL_ITCH*sizeof(ulong)); for (i = 0; i < len; i++) _n_fq_mul(a->coeffs + d*i, b->coeffs + d*i, c->coeffs + d*i, ctx, tmp); @@ -1023,7 +1023,7 @@ void n_fq_evals_addmul( { slong d = fq_nmod_ctx_degree(ctx); slong i; - mp_limb_t * tmp; + ulong * tmp; TMP_INIT; if (b->length == 0 || c->length == 0) @@ -1037,7 +1037,7 @@ void n_fq_evals_addmul( TMP_START; - tmp = (mp_limb_t *) TMP_ALLOC(d*N_FQ_MUL_ITCH*sizeof(mp_limb_t)); + tmp = (ulong *) TMP_ALLOC(d*N_FQ_MUL_ITCH*sizeof(ulong)); for (i = 0; i < len; i++) _n_fq_addmul(a->coeffs + d*i, a->coeffs + d*i, @@ -1059,7 +1059,7 @@ void n_fq_evals_fmma( { slong d = fq_nmod_ctx_degree(ctx); slong i; - mp_limb_t * tmp, * t; + ulong 
* tmp, * t; TMP_INIT; if (b->length == 0 || c->length == 0) @@ -1078,7 +1078,7 @@ void n_fq_evals_fmma( TMP_START; - tmp = (mp_limb_t *) TMP_ALLOC(d*(1 + N_FQ_MUL_ITCH)*sizeof(mp_limb_t)); + tmp = (ulong *) TMP_ALLOC(d*(1 + N_FQ_MUL_ITCH)*sizeof(ulong)); t = tmp + d*N_FQ_MUL_ITCH; for (i = 0; i < len; i++) diff --git a/src/n_poly/nmod_pow_cache.c b/src/n_poly/nmod_pow_cache.c index fee23e883a..b99f58e8b7 100644 --- a/src/n_poly/nmod_pow_cache.c +++ b/src/n_poly/nmod_pow_cache.c @@ -15,7 +15,7 @@ /* hold positive and negative powers of b */ void nmod_pow_cache_start( - mp_limb_t b, + ulong b, n_poly_t pos, /* b^0, b^1, b^2, ..., b^50 */ n_poly_t bin, /* b^1, b^2, b^3, b^4, b^8, b^12, ... */ n_poly_t neg) /* b^-0, b^-1, b^-2, ..., b^-50 */ @@ -29,15 +29,15 @@ void nmod_pow_cache_start( } /* return a*b^e */ -static mp_limb_t nmod_pow_cache_mulpow_ui_array_bin( - mp_limb_t a, - mp_limb_t * elimbs, slong elen, +static ulong nmod_pow_cache_mulpow_ui_array_bin( + ulong a, + ulong * elimbs, slong elen, n_poly_t bin, - mp_limb_t b, + ulong b, nmod_t ctx) { slong ei = 0, i = 0; - mp_limb_t e = (ei < elen) ? elimbs[ei] : 0; + ulong e = (ei < elen) ? elimbs[ei] : 0; int bits_left = FLINT_BITS; /* complicated code needed if an odd number of bits per limb */ @@ -95,8 +95,8 @@ static mp_limb_t nmod_pow_cache_mulpow_ui_array_bin( } /* return a*b^e */ -mp_limb_t nmod_pow_cache_mulpow_ui( - mp_limb_t a, +ulong nmod_pow_cache_mulpow_ui( + ulong a, ulong e, n_poly_t pos, n_poly_t bin, @@ -104,7 +104,7 @@ mp_limb_t nmod_pow_cache_mulpow_ui( nmod_t ctx) { slong i; - mp_limb_t b; + ulong b; FLINT_ASSERT(pos->length >= 2); @@ -129,8 +129,8 @@ mp_limb_t nmod_pow_cache_mulpow_ui( } /* return a*b^-e, assume ctx.n is prime */ -mp_limb_t nmod_pow_cache_mulpow_neg_ui( - mp_limb_t a, +ulong nmod_pow_cache_mulpow_neg_ui( + ulong a, ulong e, n_poly_t pos, n_poly_t bin, @@ -138,7 +138,7 @@ mp_limb_t nmod_pow_cache_mulpow_neg_ui( nmod_t ctx) { slong i; - mp_limb_t b; + ulong b; FLINT_ASSERT(pos->length >= 2); @@ -177,15 +177,15 @@ mp_limb_t nmod_pow_cache_mulpow_neg_ui( } /* return a*b^-e */ -mp_limb_t nmod_pow_cache_mulpow_fmpz( - mp_limb_t a, +ulong nmod_pow_cache_mulpow_fmpz( + ulong a, const fmpz_t e, n_poly_t pos, n_poly_t bin, n_poly_t neg, nmod_t ctx) { - mp_limb_t b = pos->coeffs[1]; + ulong b = pos->coeffs[1]; FLINT_ASSERT(pos->length >= 2); diff --git a/src/n_poly/test/main.c b/src/n_poly/test/main.c index 1993390bf8..bba5d45f9c 100644 --- a/src/n_poly/test/main.c +++ b/src/n_poly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-n_fq_poly_add.c" diff --git a/src/n_poly/zippel_helpers.c b/src/n_poly/zippel_helpers.c index 3011bcac3e..d68a2209f5 100644 --- a/src/n_poly/zippel_helpers.c +++ b/src/n_poly/zippel_helpers.c @@ -24,7 +24,7 @@ void fq_nmod_poly_product_roots( void n_fq_poly_product_roots_n_fq( n_poly_t master, - const mp_limb_t * monomials, + const ulong * monomials, slong mlength, const fq_nmod_ctx_t ctx, n_poly_stack_t St) @@ -100,10 +100,10 @@ slong n_fq_polyun_product_roots( and multiply cur pointwise by inc */ -mp_limb_t _nmod_zip_eval_step( - mp_limb_t * cur, /* in Fp */ - const mp_limb_t * inc, /* in Fp */ - const mp_limb_t * coeffs, /* in Fp */ +ulong _nmod_zip_eval_step( + ulong * cur, /* in Fp */ + const ulong * inc, /* in Fp */ + const ulong * coeffs, /* in Fp */ slong length, nmod_t ctx) { @@ -122,16 +122,16 @@ mp_limb_t _nmod_zip_eval_step( void _n_fq_zip_eval_step( - mp_limb_t * res, /* in Fq: size d */ - mp_limb_t * cur, /* in Fq: size d*length */ - const mp_limb_t * inc, /* in Fq: size d*length */ - const mp_limb_t * coeffs, /* in Fq: size d*length */ + ulong * res, /* in Fq: size d */ + ulong * cur, /* in Fq: size d*length */ + const ulong * inc, /* in Fq: size d*length */ + const ulong * coeffs, /* in Fq: size d*length */ slong length, const fq_nmod_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx); slong i; - mp_limb_t * tmp, * sum; + ulong * tmp, * sum; TMP_INIT; if (length < 1) @@ -141,7 +141,7 @@ void _n_fq_zip_eval_step( } TMP_START; - tmp = (mp_limb_t *) TMP_ALLOC(8*d*sizeof(mp_limb_t)); + tmp = (ulong *) TMP_ALLOC(8*d*sizeof(ulong)); sum = tmp + 4*d; i = 0; @@ -159,17 +159,17 @@ void _n_fq_zip_eval_step( void _n_fqp_zip_eval_step( - mp_limb_t * res, /* in Fq: size d */ - mp_limb_t * cur, /* in Fp: size length */ - const mp_limb_t * inc, /* in Fp: size length */ - const mp_limb_t * coeffs, /* in Fq: size d*length */ + ulong * res, /* in Fq: size d */ + ulong * cur, /* in Fp: size length */ + const ulong * inc, /* in Fp: size length */ + const ulong * coeffs, /* in Fq: size d*length */ slong length, slong d, nmod_t mod) { slong i, j; - mp_limb_t p0, p1; - mp_limb_t * tmp; + ulong p0, p1; + ulong * tmp; TMP_INIT; if (length < 1) @@ -179,7 +179,7 @@ void _n_fqp_zip_eval_step( } TMP_START; - tmp = (mp_limb_t *) TMP_ALLOC(3*d*sizeof(mp_limb_t)); + tmp = (ulong *) TMP_ALLOC(3*d*sizeof(ulong)); i = 0; @@ -217,17 +217,17 @@ void _n_fqp_zip_eval_step( 1: success */ int _nmod_zip_vand_solve( - mp_limb_t * coeffs, /* in Fp: size mlength */ - const mp_limb_t * monomials, /* in Fp: size mlength */ + ulong * coeffs, /* in Fp: size mlength */ + const ulong * monomials, /* in Fp: size mlength */ slong mlength, - const mp_limb_t * evals, /* in Fp: size elength */ + const ulong * evals, /* in Fp: size elength */ slong elength, - const mp_limb_t * master, /* in Fp: size mlength + 1 */ - mp_limb_t * scratch, /* in Fp: size mlength */ + const ulong * master, /* in Fp: size mlength + 1 */ + ulong * scratch, /* in Fp: size mlength */ nmod_t ctx) { slong i, j; - mp_limb_t V, V0, V1, V2, T, S, r, p0, p1; + ulong V, V0, V1, V2, T, S, r, p0, p1; FLINT_ASSERT(elength >= mlength); @@ -274,26 +274,26 @@ int _nmod_zip_vand_solve( } int _n_fq_zip_vand_solve( - mp_limb_t * coeffs, /* in Fq: size d*mlength */ - const mp_limb_t * monomials, /* in Fq: size d*mlength */ + ulong * coeffs, /* in Fq: size d*mlength */ + const ulong * monomials, /* in Fq: size d*mlength */ slong mlength, - const mp_limb_t * 
evals, /* in Fq: size d*elength */ + const ulong * evals, /* in Fq: size d*elength */ slong elength, - const mp_limb_t * master, /* in Fq: size d*(mlength + 1) */ - mp_limb_t * scratch, /* in Fq: size d*mlength */ + const ulong * master, /* in Fq: size d*(mlength + 1) */ + ulong * scratch, /* in Fq: size d*mlength */ const fq_nmod_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx); nmod_t mod = fq_nmod_ctx_mod(ctx); int success; slong i, j; - mp_limb_t * tmp = FLINT_ARRAY_ALLOC(12*d, mp_limb_t); - mp_limb_t * V = tmp + 6*d; - mp_limb_t * V0 = V + d; - mp_limb_t * T = V0 + d; - mp_limb_t * S = T + d; - mp_limb_t * r = S + d; - mp_limb_t * p0 = r + d; + ulong * tmp = FLINT_ARRAY_ALLOC(12*d, ulong); + ulong * V = tmp + 6*d; + ulong * V0 = V + d; + ulong * T = V0 + d; + ulong * S = T + d; + ulong * r = S + d; + ulong * p0 = r + d; FLINT_ASSERT(elength >= mlength); @@ -373,28 +373,28 @@ int _n_fq_zip_vand_solve( int _n_fqp_zip_vand_solve( - mp_limb_t * coeffs, /* in Fq: size d*mlength */ - const mp_limb_t * monomials, /* in Fp: size mlength */ + ulong * coeffs, /* in Fq: size d*mlength */ + const ulong * monomials, /* in Fp: size mlength */ slong mlength, - const mp_limb_t * evals, /* in Fq: size d*elength */ + const ulong * evals, /* in Fq: size d*elength */ slong elength, - const mp_limb_t * master, /* in Fp: size (mlength + 1) */ - mp_limb_t * scratch, /* in Fp: size mlength */ + const ulong * master, /* in Fp: size (mlength + 1) */ + ulong * scratch, /* in Fp: size mlength */ const fq_nmod_ctx_t ctx) { slong d = fq_nmod_ctx_degree(ctx); nmod_t mod = fq_nmod_ctx_mod(ctx); int success; slong i, j, k; - mp_limb_t * tmp = FLINT_ARRAY_ALLOC(d*20, mp_limb_t); - mp_limb_t * V = tmp + 6*d; - mp_limb_t * V0 = V + d; - mp_limb_t * T = V0 + d; - mp_limb_t * S = T + d; - mp_limb_t * r = S + d; - mp_limb_t * p0 = r + d; - mp_limb_t * V_p = p0 + d; - mp_limb_t r_p, T_p, S_p; + ulong * tmp = FLINT_ARRAY_ALLOC(d*20, ulong); + ulong * V = tmp + 6*d; + ulong * V0 = V + d; + ulong * T = V0 + d; + ulong * S = T + d; + ulong * r = S + d; + ulong * p0 = r + d; + ulong * V_p = p0 + d; + ulong r_p, T_p, S_p; FLINT_ASSERT(elength >= mlength); @@ -415,7 +415,7 @@ int _n_fqp_zip_vand_solve( S_p = nmod_add(nmod_mul(r_p, S_p, mod), T_p, mod); for (k = 0; k < d; k++) { - mp_limb_t hi, lo; + ulong hi, lo; umul_ppmm(hi, lo, T_p, (evals + d*(j - 1))[k]); add_sssaaaaaa(V_p[3*k+2], V_p[3*k+1], V_p[3*k+0], V_p[3*k+2], V_p[3*k+1], V_p[3*k+0], 0, hi, lo); @@ -434,7 +434,7 @@ int _n_fqp_zip_vand_solve( for (k = 0; k < d; k++) { - mp_limb_t vk; + ulong vk; NMOD_RED3(vk, V_p[3*k+2], V_p[3*k+1], V_p[3*k+0], mod); (coeffs + d*i)[k] = nmod_mul(vk, S_p, mod); } @@ -453,7 +453,7 @@ int _n_fqp_zip_vand_solve( scratch[j] = nmod_mul(scratch[j], monomials[j], mod); for (k = 0; k < d; k++) { - mp_limb_t hi, lo; + ulong hi, lo; umul_ppmm(hi, lo, scratch[j], (coeffs + d*j)[k]); add_sssaaaaaa(V_p[3*k+2], V_p[3*k+1], V_p[3*k+0], V_p[3*k+2], V_p[3*k+1], V_p[3*k+0], 0, hi, lo); @@ -462,7 +462,7 @@ int _n_fqp_zip_vand_solve( for (k = 0; k < d; k++) { - mp_limb_t vk; + ulong vk; NMOD_RED3(vk, V_p[3*k+2], V_p[3*k+1], V_p[3*k+0], mod); if (vk != (evals + d*i)[k]) { diff --git a/src/n_poly_types.h b/src/n_poly_types.h index 5e0ac4c471..a15944a988 100644 --- a/src/n_poly_types.h +++ b/src/n_poly_types.h @@ -21,7 +21,7 @@ extern "C" { /* arrays of ulong */ typedef struct { - mp_limb_t * coeffs; + ulong * coeffs; slong alloc; slong length; } n_poly_struct; @@ -58,7 +58,7 @@ typedef n_tpoly_t n_fq_tpoly_t; typedef struct { ulong * exps; - mp_limb_t * coeffs; 
+ ulong * coeffs; slong length; slong alloc; } n_polyu_struct; @@ -113,14 +113,14 @@ typedef struct { typedef n_poly_bpoly_stack_struct n_poly_bpoly_stack_t[1]; typedef struct { - mp_limb_t * M; - mp_limb_t * T; - mp_limb_t * Q; - mp_limb_t * array; + ulong * M; + ulong * T; + ulong * Q; + ulong * array; slong alloc; slong d; slong radix; - mp_limb_t w; + ulong w; } nmod_eval_interp_struct; typedef nmod_eval_interp_struct nmod_eval_interp_t[1]; diff --git a/src/nf.h b/src/nf.h index 19334a3a02..6971306493 100644 --- a/src/nf.h +++ b/src/nf.h @@ -52,7 +52,7 @@ typedef nf_struct nf_t[1]; void nf_init(nf_t nf, const fmpq_poly_t pol); -void nf_init_randtest(nf_t nf, flint_rand_t state, slong len, mp_bitcnt_t bits_in); +void nf_init_randtest(nf_t nf, flint_rand_t state, slong len, flint_bitcnt_t bits_in); void nf_clear(nf_t nf); diff --git a/src/nf/init_randtest.c b/src/nf/init_randtest.c index 4115b74a49..f8396fc340 100644 --- a/src/nf/init_randtest.c +++ b/src/nf/init_randtest.c @@ -14,7 +14,7 @@ void nf_init_randtest(nf_t nf, flint_rand_t state, slong len, - mp_bitcnt_t bits_in) + flint_bitcnt_t bits_in) { fmpq_poly_t pol; fmpz_poly_t q; diff --git a/src/nf/test/main.c b/src/nf/test/main.c index 3e0524c8b3..b439f0d12c 100644 --- a/src/nf/test/main.c +++ b/src/nf/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-init_clear.c" diff --git a/src/nf_elem.h b/src/nf_elem.h index 0acf2cf40f..7ff7348fe0 100644 --- a/src/nf_elem.h +++ b/src/nf_elem.h @@ -75,10 +75,10 @@ void nf_elem_clear(nf_elem_t a, const nf_t nf); void nf_elem_swap(nf_elem_t a, nf_elem_t b, const nf_t nf); void nf_elem_randtest(nf_elem_t a, flint_rand_t state, - mp_bitcnt_t bits, const nf_t nf); + flint_bitcnt_t bits, const nf_t nf); void nf_elem_randtest_not_zero(nf_elem_t a, flint_rand_t state, - mp_bitcnt_t bits, const nf_t nf); + flint_bitcnt_t bits, const nf_t nf); NF_ELEM_INLINE void nf_elem_canonicalise(nf_elem_t a, const nf_t nf) diff --git a/src/nf_elem/randtest.c b/src/nf_elem/randtest.c index 2f942e0b7f..a87d692c31 100644 --- a/src/nf_elem/randtest.c +++ b/src/nf_elem/randtest.c @@ -14,7 +14,7 @@ #include "nf_elem.h" void nf_elem_randtest(nf_elem_t a, flint_rand_t state, - mp_bitcnt_t bits, const nf_t nf) + flint_bitcnt_t bits, const nf_t nf) { if (nf->flag & NF_LINEAR) { @@ -64,7 +64,7 @@ void nf_elem_randtest(nf_elem_t a, flint_rand_t state, } void nf_elem_randtest_not_zero(nf_elem_t a, flint_rand_t state, - mp_bitcnt_t bits, const nf_t nf) + flint_bitcnt_t bits, const nf_t nf) { if (nf->flag & NF_LINEAR) { diff --git a/src/nf_elem/set_coeff_num_fmpz.c b/src/nf_elem/set_coeff_num_fmpz.c index 0a004b4527..c0d13113af 100644 --- a/src/nf_elem/set_coeff_num_fmpz.c +++ b/src/nf_elem/set_coeff_num_fmpz.c @@ -39,7 +39,7 @@ void _nf_elem_set_coeff_num_fmpz(nf_elem_t a, slong i, const fmpz_t b, const nf_ { fmpq_poly_fit_length(NF_ELEM(a), i + 1); _fmpq_poly_set_length(NF_ELEM(a), i + 1); - flint_mpn_zero((mp_ptr) NF_ELEM(a)->coeffs + len, (i + 1) - len); + flint_mpn_zero((nn_ptr) NF_ELEM(a)->coeffs + len, (i + 1) - len); } if (*NF_ELEM(a)->den == WORD(1)) diff --git a/src/nf_elem/test/main.c b/src/nf_elem/test/main.c index 851fff6006..e93bf0dc61 100644 --- a/src/nf_elem/test/main.c +++ b/src/nf_elem/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-add_sub.c" diff --git a/src/nfloat.h b/src/nfloat.h index 0ed7fbf756..2c01f65403 100644 --- a/src/nfloat.h +++ b/src/nfloat.h @@ -42,10 +42,10 @@ extern "C" { /* Number of header limbs used to encode sign + exponent. We use a whole limb for the sign bit to avoid the overhead of bit fiddling. */ #define NFLOAT_HEADER_LIMBS 2 -#define NFLOAT_EXP(x) (((mp_limb_signed_t *) x)[0]) -#define NFLOAT_SGNBIT(x) (((mp_ptr) x)[1]) -#define NFLOAT_D(x) (((mp_ptr) x) + NFLOAT_HEADER_LIMBS) -#define NFLOAT_DATA(x) (((mp_ptr) x)) +#define NFLOAT_EXP(x) (((slong *) x)[0]) +#define NFLOAT_SGNBIT(x) (((nn_ptr) x)[1]) +#define NFLOAT_D(x) (((nn_ptr) x) + NFLOAT_HEADER_LIMBS) +#define NFLOAT_DATA(x) (((nn_ptr) x)) /* Limbs needed to hold a temporary element of any precision. */ #define NFLOAT_MAX_ALLOC (NFLOAT_HEADER_LIMBS + NFLOAT_MAX_LIMBS) @@ -96,14 +96,14 @@ typedef const void * nfloat_srcptr; #define NFLOAT_CTX_DATA_NLIMBS(ctx) (NFLOAT_CTX_NLIMBS(ctx) + NFLOAT_HEADER_LIMBS) #define NFLOAT_CTX_HAS_INF_NAN(ctx) ((NFLOAT_CTX_FLAGS(ctx) & (NFLOAT_ALLOW_INF | NFLOAT_ALLOW_NAN)) != 0) -typedef struct { mp_limb_t head[NFLOAT_HEADER_LIMBS]; mp_limb_t d[64 / FLINT_BITS]; } nfloat64_struct; -typedef struct { mp_limb_t head[NFLOAT_HEADER_LIMBS]; mp_limb_t d[128 / FLINT_BITS]; } nfloat128_struct; -typedef struct { mp_limb_t head[NFLOAT_HEADER_LIMBS]; mp_limb_t d[192 / FLINT_BITS]; } nfloat192_struct; -typedef struct { mp_limb_t head[NFLOAT_HEADER_LIMBS]; mp_limb_t d[256 / FLINT_BITS]; } nfloat256_struct; -typedef struct { mp_limb_t head[NFLOAT_HEADER_LIMBS]; mp_limb_t d[384 / FLINT_BITS]; } nfloat384_struct; -typedef struct { mp_limb_t head[NFLOAT_HEADER_LIMBS]; mp_limb_t d[512 / FLINT_BITS]; } nfloat512_struct; -typedef struct { mp_limb_t head[NFLOAT_HEADER_LIMBS]; mp_limb_t d[1024 / FLINT_BITS]; } nfloat1024_struct; -typedef struct { mp_limb_t head[NFLOAT_HEADER_LIMBS]; mp_limb_t d[2048 / FLINT_BITS]; } nfloat2048_struct; +typedef struct { ulong head[NFLOAT_HEADER_LIMBS]; ulong d[64 / FLINT_BITS]; } nfloat64_struct; +typedef struct { ulong head[NFLOAT_HEADER_LIMBS]; ulong d[128 / FLINT_BITS]; } nfloat128_struct; +typedef struct { ulong head[NFLOAT_HEADER_LIMBS]; ulong d[192 / FLINT_BITS]; } nfloat192_struct; +typedef struct { ulong head[NFLOAT_HEADER_LIMBS]; ulong d[256 / FLINT_BITS]; } nfloat256_struct; +typedef struct { ulong head[NFLOAT_HEADER_LIMBS]; ulong d[384 / FLINT_BITS]; } nfloat384_struct; +typedef struct { ulong head[NFLOAT_HEADER_LIMBS]; ulong d[512 / FLINT_BITS]; } nfloat512_struct; +typedef struct { ulong head[NFLOAT_HEADER_LIMBS]; ulong d[1024 / FLINT_BITS]; } nfloat1024_struct; +typedef struct { ulong head[NFLOAT_HEADER_LIMBS]; ulong d[2048 / FLINT_BITS]; } nfloat2048_struct; typedef nfloat64_struct nfloat64_t[1]; typedef nfloat128_struct nfloat128_t[1]; @@ -114,7 +114,7 @@ typedef nfloat512_struct nfloat512_t[1]; typedef nfloat1024_struct nfloat1024_t[1]; typedef nfloat2048_struct nfloat2048_t[1]; -#define LIMB_MSB_IS_SET(n) ((mp_limb_signed_t) (n) < 0) +#define LIMB_MSB_IS_SET(n) ((slong) (n) < 0) int nfloat_ctx_init(gr_ctx_t ctx, slong prec, int flags); int nfloat_ctx_write(gr_stream_t out, gr_ctx_t ctx); @@ -202,9 +202,9 @@ int nfloat_set_si(nfloat_ptr res, slong x, gr_ctx_t ctx); /* Here exp is understood such that {x, xn} is a fraction in [0, 1). 
*/ NFLOAT_INLINE int -_nfloat_set_mpn_2exp(nfloat_ptr res, mp_srcptr x, mp_size_t xn, slong exp, int xsgnbit, gr_ctx_t ctx) +_nfloat_set_mpn_2exp(nfloat_ptr res, nn_srcptr x, slong xn, slong exp, int xsgnbit, gr_ctx_t ctx) { - mp_limb_t top; + ulong top; slong norm; slong nlimbs = NFLOAT_CTX_NLIMBS(ctx); @@ -251,7 +251,7 @@ _nfloat_set_mpn_2exp(nfloat_ptr res, mp_srcptr x, mp_size_t xn, slong exp, int x } NFLOAT_INLINE int -nfloat_set_mpn_2exp(nfloat_ptr res, mp_srcptr x, mp_size_t xn, slong exp, int xsgnbit, gr_ctx_t ctx) +nfloat_set_mpn_2exp(nfloat_ptr res, nn_srcptr x, slong xn, slong exp, int xsgnbit, gr_ctx_t ctx) { while (xn != 0 && x[xn - 1] == 0) { @@ -289,10 +289,10 @@ int nfloat_cmpabs(int * res, nfloat_srcptr x, nfloat_srcptr y, gr_ctx_t ctx); int nfloat_sgn(nfloat_ptr res, nfloat_srcptr x, gr_ctx_t ctx); int nfloat_im(nfloat_ptr res, nfloat_srcptr x, gr_ctx_t ctx); -int _nfloat_add_1(nfloat_ptr res, mp_limb_t x0, slong xexp, int xsgnbit, mp_limb_t y0, slong delta, gr_ctx_t ctx); -int _nfloat_sub_1(nfloat_ptr res, mp_limb_t x0, slong xexp, int xsgnbit, mp_limb_t y0, slong delta, gr_ctx_t ctx); -int _nfloat_add_n(nfloat_ptr res, mp_srcptr xd, slong xexp, int xsgnbit, mp_srcptr yd, slong delta, slong nlimbs, gr_ctx_t ctx); -int _nfloat_sub_n(nfloat_ptr res, mp_srcptr xd, slong xexp, int xsgnbit, mp_srcptr yd, slong delta, slong nlimbs, gr_ctx_t ctx); +int _nfloat_add_1(nfloat_ptr res, ulong x0, slong xexp, int xsgnbit, ulong y0, slong delta, gr_ctx_t ctx); +int _nfloat_sub_1(nfloat_ptr res, ulong x0, slong xexp, int xsgnbit, ulong y0, slong delta, gr_ctx_t ctx); +int _nfloat_add_n(nfloat_ptr res, nn_srcptr xd, slong xexp, int xsgnbit, nn_srcptr yd, slong delta, slong nlimbs, gr_ctx_t ctx); +int _nfloat_sub_n(nfloat_ptr res, nn_srcptr xd, slong xexp, int xsgnbit, nn_srcptr yd, slong delta, slong nlimbs, gr_ctx_t ctx); int nfloat_add(nfloat_ptr res, nfloat_srcptr x, nfloat_srcptr y, gr_ctx_t ctx); int nfloat_sub(nfloat_ptr res, nfloat_srcptr x, nfloat_srcptr y, gr_ctx_t ctx); diff --git a/src/nfloat/ctx.c b/src/nfloat/ctx.c index 2fc44e47b8..5a628e7cb2 100644 --- a/src/nfloat/ctx.c +++ b/src/nfloat/ctx.c @@ -187,7 +187,7 @@ nfloat_ctx_init(gr_ctx_t ctx, slong prec, int flags) nlimbs = (prec + FLINT_BITS - 1) / FLINT_BITS; ctx->which_ring = GR_CTX_NFLOAT; - ctx->sizeof_elem = sizeof(mp_limb_t) * (nlimbs + NFLOAT_HEADER_LIMBS); + ctx->sizeof_elem = sizeof(ulong) * (nlimbs + NFLOAT_HEADER_LIMBS); ctx->size_limit = WORD_MAX; NFLOAT_CTX_NLIMBS(ctx) = nlimbs; diff --git a/src/nfloat/dot.c b/src/nfloat/dot.c index 65b549924f..20a6f8255d 100644 --- a/src/nfloat/dot.c +++ b/src/nfloat/dot.c @@ -21,9 +21,9 @@ /* Experimental: use fast but inaccurate product? 
*/ #define FLINT_MPN_MUL_2X2H(r3, r2, r1, a1, a0, b1, b0) \ do { \ - mp_limb_t __t1, __t2, __u1, __u2, __v3, __v2; \ - mp_limb_t __r3, __r2, __r1; \ - mp_limb_t __a1 = (a1), __a0 = (a0), __b1 = (b1), __b0 = (b0); \ + ulong __t1, __t2, __u1, __u2, __v3, __v2; \ + ulong __r3, __r2, __r1; \ + ulong __a1 = (a1), __a0 = (a0), __b1 = (b1), __b0 = (b0); \ umul_ppmm(__t2, __t1, __a0, __b1); \ umul_ppmm(__u2, __u1, __a1, __b0); \ add_sssaaaaaa(__r3, __r2, __r1, 0, __t2, __t1, 0, __u2, __u1); \ @@ -39,7 +39,7 @@ __nfloat_vec_dot(nfloat_ptr res, nfloat_srcptr initial, int subtract, nfloat_src { slong i, xexp, delta, sexp, norm, pad_bits; int xsgnbit; - mp_limb_t t1, t0, s1, s0; + ulong t1, t0, s1, s0; nfloat_srcptr xi, yi; int have_zeros = 0; @@ -198,8 +198,8 @@ __nfloat_vec_dot(nfloat_ptr res, nfloat_srcptr initial, int subtract, nfloat_src slong i, xexp, delta, sexp, norm, pad_bits; int xsgnbit; nfloat_srcptr xi, yi; - mp_limb_t s0, s1, s2; - mp_limb_t t0, t1, t2, t3; + ulong s0, s1, s2; + ulong t0, t1, t2, t3; s0 = s1 = s2 = 0; sexp = WORD_MIN; @@ -391,8 +391,8 @@ __nfloat_vec_dot(nfloat_ptr res, nfloat_srcptr initial, int subtract, nfloat_src { slong i, xexp, delta, sexp, norm, pad_bits; int xsgnbit; - mp_limb_t t[NFLOAT_MAX_LIMBS + 1]; - mp_limb_t s[NFLOAT_MAX_LIMBS + 1]; + ulong t[NFLOAT_MAX_LIMBS + 1]; + ulong s[NFLOAT_MAX_LIMBS + 1]; nfloat_srcptr xi, yi; slong n; diff --git a/src/nfloat/nfloat.c b/src/nfloat/nfloat.c index fb62ad511e..9d44810f40 100644 --- a/src/nfloat/nfloat.c +++ b/src/nfloat/nfloat.c @@ -54,7 +54,7 @@ nfloat_swap(nfloat_ptr x, nfloat_ptr y, gr_ctx_t ctx) slong i, n = NFLOAT_CTX_DATA_NLIMBS(ctx); for (i = 0; i < n; i++) - FLINT_SWAP(mp_limb_t, NFLOAT_DATA(x)[i], NFLOAT_DATA(y)[i]); + FLINT_SWAP(ulong, NFLOAT_DATA(x)[i], NFLOAT_DATA(y)[i]); } int @@ -74,7 +74,7 @@ _nfloat_vec_init(nfloat_ptr res, slong len, gr_ctx_t ctx) slong i, n = NFLOAT_CTX_DATA_NLIMBS(ctx); for (i = 0; i < len; i++) - nfloat_init((mp_ptr) res + i * n, ctx); + nfloat_init((nn_ptr) res + i * n, ctx); } void @@ -95,7 +95,7 @@ _nfloat_vec_zero(nfloat_ptr res, slong len, gr_ctx_t ctx) slong i, n = NFLOAT_CTX_DATA_NLIMBS(ctx); for (i = 0; i < len; i++) - nfloat_zero((mp_ptr) res + i * n, ctx); + nfloat_zero((nn_ptr) res + i * n, ctx); return GR_SUCCESS; } @@ -419,7 +419,7 @@ nfloat_set_arf(nfloat_ptr res, const arf_t x, gr_ctx_t ctx) else { slong exp, xn; - mp_srcptr xp; + nn_srcptr xp; int sgnbit; ARF_GET_MPN_READONLY(xp, xn, x); @@ -641,7 +641,7 @@ nfloat_im(nfloat_ptr res, nfloat_srcptr x, gr_ctx_t ctx) } static mp_limb_pair_t -_flint_mpn_mulhigh_normalised2(mp_ptr rp, mp_srcptr xp, mp_srcptr yp, mp_size_t n) +_flint_mpn_mulhigh_normalised2(nn_ptr rp, nn_srcptr xp, nn_srcptr yp, slong n) { mp_limb_pair_t ret; @@ -649,10 +649,10 @@ _flint_mpn_mulhigh_normalised2(mp_ptr rp, mp_srcptr xp, mp_srcptr yp, mp_size_t if (rp == xp || rp == yp) { - mp_ptr t; + nn_ptr t; TMP_INIT; TMP_START; - t = TMP_ALLOC(sizeof(mp_limb_t) * n); + t = TMP_ALLOC(sizeof(ulong) * n); ret = _flint_mpn_mulhigh_normalised2(t, xp, yp, n); flint_mpn_copyi(rp, t, n); TMP_END; @@ -681,7 +681,7 @@ _flint_mpn_mulhigh_normalised2(mp_ptr rp, mp_srcptr xp, mp_srcptr yp, mp_size_t /* handles aliasing */ FLINT_FORCE_INLINE -mp_limb_pair_t flint_mpn_mulhigh_normalised2(mp_ptr rp, mp_srcptr xp, mp_srcptr yp, mp_size_t n) +mp_limb_pair_t flint_mpn_mulhigh_normalised2(nn_ptr rp, nn_srcptr xp, nn_srcptr yp, slong n) { FLINT_ASSERT(n >= 1); @@ -707,9 +707,9 @@ n_signed_sub(ulong * r, ulong x, ulong y) } int -_nfloat_add_1(nfloat_ptr res, mp_limb_t x0, 
slong xexp, int xsgnbit, mp_limb_t y0, slong delta, gr_ctx_t ctx) +_nfloat_add_1(nfloat_ptr res, ulong x0, slong xexp, int xsgnbit, ulong y0, slong delta, gr_ctx_t ctx) { - mp_limb_t hi, lo; + ulong hi, lo; NFLOAT_SGNBIT(res) = xsgnbit; @@ -740,9 +740,9 @@ _nfloat_add_1(nfloat_ptr res, mp_limb_t x0, slong xexp, int xsgnbit, mp_limb_t y } int -_nfloat_add_2(nfloat_ptr res, mp_srcptr x, slong xexp, int xsgnbit, mp_srcptr y, slong delta, gr_ctx_t ctx) +_nfloat_add_2(nfloat_ptr res, nn_srcptr x, slong xexp, int xsgnbit, nn_srcptr y, slong delta, gr_ctx_t ctx) { - mp_limb_t x1, x0, y1, y0, s2, s1, s0; + ulong x1, x0, y1, y0, s2, s1, s0; NFLOAT_SGNBIT(res) = xsgnbit; @@ -787,9 +787,9 @@ _nfloat_add_2(nfloat_ptr res, mp_srcptr x, slong xexp, int xsgnbit, mp_srcptr y, } int -_nfloat_sub_1(nfloat_ptr res, mp_limb_t x0, slong xexp, int xsgnbit, mp_limb_t y0, slong delta, gr_ctx_t ctx) +_nfloat_sub_1(nfloat_ptr res, ulong x0, slong xexp, int xsgnbit, ulong y0, slong delta, gr_ctx_t ctx) { - mp_limb_t u; + ulong u; slong norm; if (delta == 0) @@ -820,9 +820,9 @@ _nfloat_sub_1(nfloat_ptr res, mp_limb_t x0, slong xexp, int xsgnbit, mp_limb_t y } int -_nfloat_sub_2(nfloat_ptr res, mp_srcptr x, slong xexp, int xsgnbit, mp_srcptr y, slong delta, gr_ctx_t ctx) +_nfloat_sub_2(nfloat_ptr res, nn_srcptr x, slong xexp, int xsgnbit, nn_srcptr y, slong delta, gr_ctx_t ctx) { - mp_limb_t x1, x0, y1, y0, s1, s0; + ulong x1, x0, y1, y0, s1, s0; slong norm; NFLOAT_SGNBIT(res) = xsgnbit; @@ -899,11 +899,11 @@ _nfloat_sub_2(nfloat_ptr res, mp_srcptr x, slong xexp, int xsgnbit, mp_srcptr y, } int -_nfloat_add_n(nfloat_ptr res, mp_srcptr xd, slong xexp, int xsgnbit, mp_srcptr yd, slong delta, slong nlimbs, gr_ctx_t ctx) +_nfloat_add_n(nfloat_ptr res, nn_srcptr xd, slong xexp, int xsgnbit, nn_srcptr yd, slong delta, slong nlimbs, gr_ctx_t ctx) { slong shift_limbs, shift_bits; - mp_limb_t cy; - mp_limb_t t[NFLOAT_MAX_LIMBS]; + ulong cy; + ulong t[NFLOAT_MAX_LIMBS]; NFLOAT_SGNBIT(res) = xsgnbit; @@ -948,10 +948,10 @@ _nfloat_add_n(nfloat_ptr res, mp_srcptr xd, slong xexp, int xsgnbit, mp_srcptr y } int -_nfloat_sub_n(nfloat_ptr res, mp_srcptr xd, slong xexp, int xsgnbit, mp_srcptr yd, slong delta, slong nlimbs, gr_ctx_t ctx) +_nfloat_sub_n(nfloat_ptr res, nn_srcptr xd, slong xexp, int xsgnbit, nn_srcptr yd, slong delta, slong nlimbs, gr_ctx_t ctx) { slong shift_limbs, shift_bits, n, norm; - mp_limb_t t[NFLOAT_MAX_LIMBS]; + ulong t[NFLOAT_MAX_LIMBS]; if (delta == 0) { @@ -1142,9 +1142,9 @@ _nfloat_vec_aors_1(nfloat_ptr res, nfloat_srcptr x, nfloat_srcptr y, int subtrac for (i = 0; i < len; i++) { - xi = (mp_srcptr) x + i * stride; - yi = (mp_srcptr) y + i * stride; - ri = (mp_ptr) res + i * stride; + xi = (nn_srcptr) x + i * stride; + yi = (nn_srcptr) y + i * stride; + ri = (nn_ptr) res + i * stride; xexp = NFLOAT_EXP(xi); yexp = NFLOAT_EXP(yi); @@ -1201,9 +1201,9 @@ _nfloat_vec_aors_2(nfloat_ptr res, nfloat_srcptr x, nfloat_srcptr y, int subtrac for (i = 0; i < len; i++) { - xi = (mp_srcptr) x + i * stride; - yi = (mp_srcptr) y + i * stride; - ri = (mp_ptr) res + i * stride; + xi = (nn_srcptr) x + i * stride; + yi = (nn_srcptr) y + i * stride; + ri = (nn_ptr) res + i * stride; xexp = NFLOAT_EXP(xi); yexp = NFLOAT_EXP(yi); @@ -1268,9 +1268,9 @@ _nfloat_vec_add(nfloat_ptr res, nfloat_srcptr x, nfloat_srcptr y, slong len, gr_ for (i = 0; i < len; i++) { - xi = (mp_srcptr) x + i * stride; - yi = (mp_srcptr) y + i * stride; - ri = (mp_ptr) res + i * stride; + xi = (nn_srcptr) x + i * stride; + yi = (nn_srcptr) y + i * stride; + 
ri = (nn_ptr) res + i * stride; status |= nfloat_add(ri, xi, yi, ctx); } @@ -1300,9 +1300,9 @@ _nfloat_vec_sub(nfloat_ptr res, nfloat_srcptr x, nfloat_srcptr y, slong len, gr_ for (i = 0; i < len; i++) { - xi = (mp_srcptr) x + i * stride; - yi = (mp_srcptr) y + i * stride; - ri = (mp_ptr) res + i * stride; + xi = (nn_srcptr) x + i * stride; + yi = (nn_srcptr) y + i * stride; + ri = (nn_ptr) res + i * stride; status |= nfloat_sub(ri, xi, yi, ctx); } @@ -1336,7 +1336,7 @@ nfloat_mul(nfloat_ptr res, nfloat_srcptr x, nfloat_srcptr y, gr_ctx_t ctx) if (nlimbs == 1) { - mp_limb_t hi, lo; + ulong hi, lo; umul_ppmm(hi, lo, NFLOAT_D(x)[0], NFLOAT_D(y)[0]); @@ -1353,7 +1353,7 @@ nfloat_mul(nfloat_ptr res, nfloat_srcptr x, nfloat_srcptr y, gr_ctx_t ctx) } else if (nlimbs == 2) { - mp_limb_t r3, r2, r1, FLINT_SET_BUT_UNUSED(r0); + ulong r3, r2, r1, FLINT_SET_BUT_UNUSED(r0); FLINT_MPN_MUL_2X2(r3, r2, r1, r0, NFLOAT_D(x)[1], NFLOAT_D(x)[0], NFLOAT_D(y)[1], NFLOAT_D(y)[0]); @@ -1389,7 +1389,7 @@ _nfloat_vec_mul_1(nfloat_ptr res, nfloat_srcptr x, nfloat_srcptr y, slong len, g slong xexp, yexp; nfloat_srcptr xi, yi; nfloat_ptr ri; - mp_limb_t hi, lo; + ulong hi, lo; int xsgnbit, ysgnbit; int status = GR_SUCCESS; @@ -1397,9 +1397,9 @@ _nfloat_vec_mul_1(nfloat_ptr res, nfloat_srcptr x, nfloat_srcptr y, slong len, g for (i = 0; i < len; i++) { - xi = (mp_srcptr) x + i * stride; - yi = (mp_srcptr) y + i * stride; - ri = (mp_ptr) res + i * stride; + xi = (nn_srcptr) x + i * stride; + yi = (nn_srcptr) y + i * stride; + ri = (nn_ptr) res + i * stride; xexp = NFLOAT_EXP(xi); yexp = NFLOAT_EXP(yi); @@ -1445,7 +1445,7 @@ _nfloat_vec_mul_2(nfloat_ptr res, nfloat_srcptr x, nfloat_srcptr y, slong len, g slong xexp, yexp; nfloat_srcptr xi, yi; nfloat_ptr ri; - mp_limb_t r3, r2, r1, FLINT_SET_BUT_UNUSED(r0); + ulong r3, r2, r1, FLINT_SET_BUT_UNUSED(r0); int xsgnbit, ysgnbit; int status = GR_SUCCESS; @@ -1453,9 +1453,9 @@ _nfloat_vec_mul_2(nfloat_ptr res, nfloat_srcptr x, nfloat_srcptr y, slong len, g for (i = 0; i < len; i++) { - xi = (mp_srcptr) x + i * stride; - yi = (mp_srcptr) y + i * stride; - ri = (mp_ptr) res + i * stride; + xi = (nn_srcptr) x + i * stride; + yi = (nn_srcptr) y + i * stride; + ri = (nn_ptr) res + i * stride; xexp = NFLOAT_EXP(xi); yexp = NFLOAT_EXP(yi); @@ -1518,9 +1518,9 @@ _nfloat_vec_mul(nfloat_ptr res, nfloat_srcptr x, nfloat_srcptr y, slong len, gr_ for (i = 0; i < len; i++) { - xi = (mp_srcptr) x + i * stride; - yi = (mp_srcptr) y + i * stride; - ri = (mp_ptr) res + i * stride; + xi = (nn_srcptr) x + i * stride; + yi = (nn_srcptr) y + i * stride; + ri = (nn_ptr) res + i * stride; status |= nfloat_mul(ri, xi, yi, ctx); } @@ -1535,7 +1535,7 @@ _nfloat_vec_mul_scalar_1(nfloat_ptr res, nfloat_srcptr x, slong len, nfloat_srcp slong xexp, yexp; nfloat_srcptr xi; nfloat_ptr ri; - mp_limb_t y0, hi, lo; + ulong y0, hi, lo; int xsgnbit, ysgnbit; int status = GR_SUCCESS; @@ -1550,8 +1550,8 @@ _nfloat_vec_mul_scalar_1(nfloat_ptr res, nfloat_srcptr x, slong len, nfloat_srcp for (i = 0; i < len; i++) { - xi = (mp_srcptr) x + i * stride; - ri = (mp_ptr) res + i * stride; + xi = (nn_srcptr) x + i * stride; + ri = (nn_ptr) res + i * stride; xexp = NFLOAT_EXP(xi); @@ -1595,8 +1595,8 @@ _nfloat_vec_mul_scalar_2(nfloat_ptr res, nfloat_srcptr x, slong len, nfloat_srcp slong xexp, yexp; nfloat_srcptr xi; nfloat_ptr ri; - mp_limb_t y0, y1; - mp_limb_t r3, r2, r1, FLINT_SET_BUT_UNUSED(r0); + ulong y0, y1; + ulong r3, r2, r1, FLINT_SET_BUT_UNUSED(r0); int xsgnbit, ysgnbit; int status = GR_SUCCESS; @@ -1612,8 +1612,8 
@@ _nfloat_vec_mul_scalar_2(nfloat_ptr res, nfloat_srcptr x, slong len, nfloat_srcp for (i = 0; i < len; i++) { - xi = (mp_srcptr) x + i * stride; - ri = (mp_ptr) res + i * stride; + xi = (nn_srcptr) x + i * stride; + ri = (nn_ptr) res + i * stride; xexp = NFLOAT_EXP(xi); @@ -1674,8 +1674,8 @@ _nfloat_vec_mul_scalar(nfloat_ptr res, nfloat_srcptr x, slong len, nfloat_srcptr for (i = 0; i < len; i++) { - xi = (mp_srcptr) x + i * stride; - ri = (mp_ptr) res + i * stride; + xi = (nn_srcptr) x + i * stride; + ri = (nn_ptr) res + i * stride; status |= nfloat_mul(ri, xi, y, ctx); } diff --git a/src/nfloat/test/main.c b/src/nfloat/test/main.c index 2bce47d042..328cfa3dda 100644 --- a/src/nfloat/test/main.c +++ b/src/nfloat/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-nfloat.c" diff --git a/src/nmod.h b/src/nmod.h index 7bb5d5bf29..13f61f7bc0 100644 --- a/src/nmod.h +++ b/src/nmod.h @@ -28,10 +28,10 @@ extern "C" { #define NMOD_RED2(r, a_hi, a_lo, mod) \ do { \ - mp_limb_t q0xx, q1xx, r1xx; \ - const mp_limb_t u1xx = ((a_hi)<<(mod).norm) + r_shift((a_lo), FLINT_BITS - (mod).norm); \ - const mp_limb_t u0xx = ((a_lo)<<(mod).norm); \ - const mp_limb_t nxx = ((mod).n<<(mod).norm); \ + ulong q0xx, q1xx, r1xx; \ + const ulong u1xx = ((a_hi)<<(mod).norm) + r_shift((a_lo), FLINT_BITS - (mod).norm); \ + const ulong u0xx = ((a_lo)<<(mod).norm); \ + const ulong nxx = ((mod).n<<(mod).norm); \ umul_ppmm(q1xx, q0xx, (mod).ninv, u1xx); \ add_ssaaaa(q1xx, q0xx, q1xx, q0xx, u1xx, u0xx); \ r1xx = (u0xx - (q1xx + 1)*nxx); \ @@ -47,14 +47,14 @@ extern "C" { #define NMOD2_RED2(r, a_hi, a_lo, mod) \ do { \ - mp_limb_t v_hi; \ + ulong v_hi; \ NMOD_RED(v_hi, a_hi, mod); \ NMOD_RED2(r, v_hi, a_lo, mod); \ } while (0) #define NMOD_RED3(r, a_hi, a_me, a_lo, mod) \ do { \ - mp_limb_t v_hi; \ + ulong v_hi; \ NMOD_RED2(v_hi, a_hi, a_me, mod); \ NMOD_RED2(r, v_hi, a_lo, mod); \ } while (0) @@ -64,8 +64,8 @@ extern "C" { #define NMOD_MUL_PRENORM(res, a, b, mod) \ do { \ - mp_limb_t q0xx, q1xx, rxx, p_hixx, p_loxx; \ - mp_limb_t nxx, ninvxx; \ + ulong q0xx, q1xx, rxx, p_hixx, p_loxx; \ + ulong nxx, ninvxx; \ unsigned int normxx; \ ninvxx = (mod).ninv; \ normxx = (mod).norm; \ @@ -82,8 +82,8 @@ extern "C" { #define NMOD_MUL_FULLWORD(res, a, b, mod) \ do { \ - mp_limb_t q0xx, q1xx, rxx, p_hixx, p_loxx; \ - mp_limb_t nxx, ninvxx; \ + ulong q0xx, q1xx, rxx, p_hixx, p_loxx; \ + ulong nxx, ninvxx; \ ninvxx = (mod).ninv; \ nxx = (mod).n; \ umul_ppmm(p_hixx, p_loxx, (a), (b)); \ @@ -96,7 +96,7 @@ extern "C" { (res) = rxx; \ } while (0) -NMOD_INLINE mp_limb_t nmod_set_ui(ulong x, nmod_t mod) +NMOD_INLINE ulong nmod_set_ui(ulong x, nmod_t mod) { if (x < mod.n) return x; @@ -106,7 +106,7 @@ NMOD_INLINE mp_limb_t nmod_set_ui(ulong x, nmod_t mod) } NMOD_INLINE -mp_limb_t nmod_set_si(slong x, nmod_t mod) +ulong nmod_set_si(slong x, nmod_t mod) { ulong res = FLINT_ABS(x); NMOD_RED(res, res, mod); @@ -114,23 +114,23 @@ mp_limb_t nmod_set_si(slong x, nmod_t mod) } NMOD_INLINE -mp_limb_t _nmod_add(mp_limb_t a, mp_limb_t b, nmod_t mod) +ulong _nmod_add(ulong a, ulong b, nmod_t mod) { - const mp_limb_t sum = a + b; - return sum - mod.n + ((((mp_limb_signed_t)(sum - mod.n))>>(FLINT_BITS - 1)) & mod.n); + const ulong sum = a + b; + return sum - mod.n + ((((slong)(sum - mod.n))>>(FLINT_BITS - 1)) & mod.n); } NMOD_INLINE -mp_limb_t _nmod_sub(mp_limb_t a, mp_limb_t b, nmod_t mod) +ulong _nmod_sub(ulong a, ulong b, 
nmod_t mod) { - const mp_limb_t diff = a - b; - return ((((mp_limb_signed_t)diff)>>(FLINT_BITS - 1)) & mod.n) + diff; + const ulong diff = a - b; + return ((((slong)diff)>>(FLINT_BITS - 1)) & mod.n) + diff; } NMOD_INLINE -mp_limb_t nmod_add(mp_limb_t a, mp_limb_t b, nmod_t mod) +ulong nmod_add(ulong a, ulong b, nmod_t mod) { - const mp_limb_t neg = mod.n - a; + const ulong neg = mod.n - a; if (neg > b) return a + b; else @@ -138,9 +138,9 @@ mp_limb_t nmod_add(mp_limb_t a, mp_limb_t b, nmod_t mod) } NMOD_INLINE -mp_limb_t nmod_sub(mp_limb_t a, mp_limb_t b, nmod_t mod) +ulong nmod_sub(ulong a, ulong b, nmod_t mod) { - const mp_limb_t diff = a - b; + const ulong diff = a - b; if (a < b) return mod.n + diff; @@ -149,7 +149,7 @@ mp_limb_t nmod_sub(mp_limb_t a, mp_limb_t b, nmod_t mod) } NMOD_INLINE -mp_limb_t nmod_neg(mp_limb_t a, nmod_t mod) +ulong nmod_neg(ulong a, nmod_t mod) { if (a) return mod.n - a; @@ -158,23 +158,23 @@ mp_limb_t nmod_neg(mp_limb_t a, nmod_t mod) } NMOD_INLINE -mp_limb_t nmod_mul(mp_limb_t a, mp_limb_t b, nmod_t mod) +ulong nmod_mul(ulong a, ulong b, nmod_t mod) { - mp_limb_t res; + ulong res; NMOD_MUL_PRENORM(res, a, b << mod.norm, mod); return res; } NMOD_INLINE -mp_limb_t _nmod_mul_fullword(mp_limb_t a, mp_limb_t b, nmod_t mod) +ulong _nmod_mul_fullword(ulong a, ulong b, nmod_t mod) { - mp_limb_t res; + ulong res; NMOD_MUL_FULLWORD(res, a, b, mod); return res; } NMOD_INLINE -mp_limb_t nmod_addmul(mp_limb_t a, mp_limb_t b, mp_limb_t c, nmod_t mod) +ulong nmod_addmul(ulong a, ulong b, ulong c, nmod_t mod) { return nmod_add(a, nmod_mul(b, c, mod), mod); } @@ -185,33 +185,33 @@ mp_limb_t nmod_addmul(mp_limb_t a, mp_limb_t b, mp_limb_t c, nmod_t mod) } while (0) NMOD_INLINE -mp_limb_t nmod_inv(mp_limb_t a, nmod_t mod) +ulong nmod_inv(ulong a, nmod_t mod) { return n_invmod(a, mod.n); } NMOD_INLINE -mp_limb_t nmod_div(mp_limb_t a, mp_limb_t b, nmod_t mod) +ulong nmod_div(ulong a, ulong b, nmod_t mod) { return nmod_mul(a, n_invmod(b, mod.n), mod); } -int nmod_divides(mp_limb_t * a, mp_limb_t b, mp_limb_t c, nmod_t mod); +int nmod_divides(ulong * a, ulong b, ulong c, nmod_t mod); NMOD_INLINE -mp_limb_t nmod_pow_ui(mp_limb_t a, ulong exp, nmod_t mod) +ulong nmod_pow_ui(ulong a, ulong exp, nmod_t mod) { return n_powmod2_ui_preinv(a, exp, mod.n, mod.ninv); } NMOD_INLINE -mp_limb_t nmod_pow_fmpz(mp_limb_t a, const fmpz_t exp, nmod_t mod) +ulong nmod_pow_fmpz(ulong a, const fmpz_t exp, nmod_t mod) { return n_powmod2_fmpz_preinv(a, exp, mod.n, mod.ninv); } NMOD_INLINE -void nmod_init(nmod_t * mod, mp_limb_t n) +void nmod_init(nmod_t * mod, ulong n) { mod->n = n; mod->ninv = n_preinvert_limb(n); @@ -221,16 +221,16 @@ void nmod_init(nmod_t * mod, mp_limb_t n) /* discrete logs a la Pohlig - Hellman ***************************************/ typedef struct { - mp_limb_t gammapow; + ulong gammapow; ulong cm; } nmod_discrete_log_pohlig_hellman_table_entry_struct; typedef struct { slong exp; ulong prime; - mp_limb_t gamma; - mp_limb_t gammainv; - mp_limb_t startingbeta; + ulong gamma; + ulong gammainv; + ulong startingbeta; ulong co; ulong startinge; ulong idem; @@ -241,8 +241,8 @@ typedef struct { typedef struct { nmod_t mod; /* p is mod.n */ - mp_limb_t alpha; /* p.r. of p */ - mp_limb_t alphainv; + ulong alpha; /* p.r. 
of p */ + ulong alphainv; slong num_factors; /* factors of p - 1*/ nmod_discrete_log_pohlig_hellman_entry_struct * entries; } nmod_discrete_log_pohlig_hellman_struct; @@ -257,13 +257,13 @@ void nmod_discrete_log_pohlig_hellman_clear( double nmod_discrete_log_pohlig_hellman_precompute_prime( nmod_discrete_log_pohlig_hellman_t L, - mp_limb_t p); + ulong p); ulong nmod_discrete_log_pohlig_hellman_run( const nmod_discrete_log_pohlig_hellman_t L, - mp_limb_t y); + ulong y); -NMOD_INLINE mp_limb_t nmod_discrete_log_pohlig_hellman_primitive_root( +NMOD_INLINE ulong nmod_discrete_log_pohlig_hellman_primitive_root( const nmod_discrete_log_pohlig_hellman_t L) { return L->alpha; diff --git a/src/nmod/divides.c b/src/nmod/divides.c index 4172cecb7e..cab18566b8 100644 --- a/src/nmod/divides.c +++ b/src/nmod/divides.c @@ -13,7 +13,7 @@ #include "nmod.h" #include "ulong_extras.h" -int nmod_divides(mp_limb_t * a, mp_limb_t b, mp_limb_t c, nmod_t mod) +int nmod_divides(ulong * a, ulong b, ulong c, nmod_t mod) { int success; ulong g, x, y, q; diff --git a/src/nmod/test/main.c b/src/nmod/test/main.c index c64084bde8..8562f910c2 100644 --- a/src/nmod/test/main.c +++ b/src/nmod/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-divides.c" diff --git a/src/nmod/test/t-divides.c b/src/nmod/test/t-divides.c index e284f775d3..843af261fd 100644 --- a/src/nmod/test/t-divides.c +++ b/src/nmod/test/t-divides.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(nmod_divides, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_t mod; - mp_limb_t n, x, y, xy, z; + ulong n, x, y, xy, z; int div; n = n_randtest_not_zero(state); diff --git a/src/nmod_mat.h b/src/nmod_mat.h index 629cc50541..d0f185c228 100644 --- a/src/nmod_mat.h +++ b/src/nmod_mat.h @@ -29,13 +29,13 @@ extern "C" { #define nmod_mat_entry(mat,i,j) ((mat)->rows[(i)][(j)]) NMOD_MAT_INLINE -mp_limb_t nmod_mat_get_entry(const nmod_mat_t mat, slong i, slong j) +ulong nmod_mat_get_entry(const nmod_mat_t mat, slong i, slong j) { return mat->rows[i][j]; } NMOD_MAT_INLINE -mp_limb_t * nmod_mat_entry_ptr(const nmod_mat_t mat, slong i, slong j) +ulong * nmod_mat_entry_ptr(const nmod_mat_t mat, slong i, slong j) { return mat->rows[i] + j; } @@ -55,10 +55,10 @@ slong nmod_mat_ncols(const nmod_mat_t mat) } /* TODO: Document */ -void nmod_mat_set_mod(nmod_mat_t mat, mp_limb_t n); +void nmod_mat_set_mod(nmod_mat_t mat, ulong n); /* Memory management */ -void nmod_mat_init(nmod_mat_t mat, slong rows, slong cols, mp_limb_t n); +void nmod_mat_init(nmod_mat_t mat, slong rows, slong cols, ulong n); void nmod_mat_init_set(nmod_mat_t mat, const nmod_mat_t src); void nmod_mat_clear(nmod_mat_t mat); void nmod_mat_one(nmod_mat_t mat); @@ -71,10 +71,10 @@ nmod_mat_swap_entrywise(nmod_mat_t mat1, nmod_mat_t mat2) slong i, j; for (i = 0; i < nmod_mat_nrows(mat1); i++) { - mp_limb_t * row1 = mat1->rows[i]; - mp_limb_t * row2 = mat2->rows[i]; + ulong * row1 = mat1->rows[i]; + ulong * row2 = mat2->rows[i]; for (j = 0; j < nmod_mat_ncols(mat1); j++) - FLINT_SWAP(mp_limb_t, row1[j], row2[j]); + FLINT_SWAP(ulong, row1[j], row2[j]); } } @@ -92,7 +92,7 @@ void nmod_mat_concat_vertical(nmod_mat_t res, void nmod_mat_randtest(nmod_mat_t mat, flint_rand_t state); void nmod_mat_randfull(nmod_mat_t mat, flint_rand_t state); int nmod_mat_randpermdiag(nmod_mat_t mat, flint_rand_t state, - mp_srcptr diag, slong n); + nn_srcptr diag, slong n); void 
nmod_mat_randrank(nmod_mat_t, flint_rand_t state, slong rank); void nmod_mat_randops(nmod_mat_t mat, flint_rand_t state, slong count); void nmod_mat_randtril(nmod_mat_t mat, flint_rand_t state, int unit); @@ -138,9 +138,9 @@ void nmod_mat_neg(nmod_mat_t B, const nmod_mat_t A); /* Matrix-scalar arithmetic */ -void nmod_mat_scalar_mul(nmod_mat_t B, const nmod_mat_t A, mp_limb_t c); +void nmod_mat_scalar_mul(nmod_mat_t B, const nmod_mat_t A, ulong c); void nmod_mat_scalar_addmul_ui(nmod_mat_t dest, - const nmod_mat_t X, const nmod_mat_t Y, const mp_limb_t b); + const nmod_mat_t X, const nmod_mat_t Y, const ulong b); void nmod_mat_scalar_mul_fmpz(nmod_mat_t res, const nmod_mat_t M, const fmpz_t c); @@ -171,17 +171,17 @@ void nmod_mat_addmul(nmod_mat_t D, const nmod_mat_t C, void nmod_mat_submul(nmod_mat_t D, const nmod_mat_t C, const nmod_mat_t A, const nmod_mat_t B); -void nmod_mat_mul_nmod_vec(mp_limb_t * c, const nmod_mat_t A, - const mp_limb_t * b, slong blen); +void nmod_mat_mul_nmod_vec(ulong * c, const nmod_mat_t A, + const ulong * b, slong blen); -void nmod_mat_mul_nmod_vec_ptr(mp_limb_t * const * c, - const nmod_mat_t A, const mp_limb_t * const * b, slong blen); +void nmod_mat_mul_nmod_vec_ptr(ulong * const * c, + const nmod_mat_t A, const ulong * const * b, slong blen); -void nmod_mat_nmod_vec_mul(mp_limb_t * c, const mp_limb_t * a, +void nmod_mat_nmod_vec_mul(ulong * c, const ulong * a, slong alen, const nmod_mat_t B); -void nmod_mat_nmod_vec_mul_ptr(mp_limb_t * const * c, - const mp_limb_t * const * a, slong alen, const nmod_mat_t B); +void nmod_mat_nmod_vec_mul_ptr(ulong * const * c, + const ulong * const * a, slong alen, const nmod_mat_t B); /* Exponent */ @@ -190,15 +190,15 @@ void nmod_mat_pow(nmod_mat_t dest, const nmod_mat_t mat, ulong pow); /* Trace */ -mp_limb_t nmod_mat_trace(const nmod_mat_t mat); +ulong nmod_mat_trace(const nmod_mat_t mat); /* Determinant */ -mp_limb_t _nmod_mat_det(nmod_mat_t A); -mp_limb_t nmod_mat_det(const nmod_mat_t A); +ulong _nmod_mat_det(nmod_mat_t A); +ulong nmod_mat_det(const nmod_mat_t A); -mp_limb_t _nmod_mat_det_howell(nmod_mat_t A); -mp_limb_t nmod_mat_det_howell(const nmod_mat_t A); +ulong _nmod_mat_det_howell(nmod_mat_t A); +ulong nmod_mat_det_howell(const nmod_mat_t A); /* Rank */ @@ -218,7 +218,7 @@ void nmod_mat_swap_rows(nmod_mat_t mat, slong * perm, slong r, slong s) if (perm) FLINT_SWAP(slong, perm[r], perm[s]); - FLINT_SWAP(mp_ptr, mat->rows[r], mat->rows[s]); + FLINT_SWAP(nn_ptr, mat->rows[r], mat->rows[s]); } } @@ -242,7 +242,7 @@ void nmod_mat_swap_cols(nmod_mat_t mat, slong * perm, slong r, slong s) FLINT_SWAP(slong, perm[r], perm[s]); for (i = 0; i < mat->r; i++) - FLINT_SWAP(mp_limb_t, mat->rows[i][r], mat->rows[i][s]); + FLINT_SWAP(ulong, mat->rows[i][r], mat->rows[i][s]); } } @@ -261,7 +261,7 @@ void nmod_mat_invert_cols(nmod_mat_t mat, slong * perm) for (t = 0; t < mat->r; t++) for (i = 0; i < k; i++) - FLINT_SWAP(mp_limb_t, mat->rows[t][i], mat->rows[t][c - i - 1]); + FLINT_SWAP(ulong, mat->rows[t][i], mat->rows[t][c - i - 1]); } } @@ -287,7 +287,7 @@ slong nmod_mat_lu_recursive(slong * P, nmod_mat_t A, int rank_check); /* Nonsingular solving */ int nmod_mat_solve(nmod_mat_t X, const nmod_mat_t A, const nmod_mat_t B); -int nmod_mat_solve_vec(mp_ptr x, const nmod_mat_t A, mp_srcptr b); +int nmod_mat_solve_vec(nn_ptr x, const nmod_mat_t A, nn_srcptr b); /* Solving */ @@ -351,7 +351,7 @@ void nmod_mat_similarity(nmod_mat_t M, slong r, ulong d); /* Inlines *******************************************************************/ 
-void nmod_mat_set_entry(nmod_mat_t mat, slong i, slong j, mp_limb_t x); +void nmod_mat_set_entry(nmod_mat_t mat, slong i, slong j, ulong x); #ifdef __cplusplus } diff --git a/src/nmod_mat/charpoly.c b/src/nmod_mat/charpoly.c index 75d78396d9..c5f870d4f9 100644 --- a/src/nmod_mat/charpoly.c +++ b/src/nmod_mat/charpoly.c @@ -16,7 +16,7 @@ #include "nmod_poly.h" void -_nmod_mat_charpoly_berkowitz(mp_ptr cp, const nmod_mat_t mat, nmod_t mod) +_nmod_mat_charpoly_berkowitz(nn_ptr cp, const nmod_mat_t mat, nmod_t mod) { const slong n = mat->r; @@ -44,12 +44,12 @@ _nmod_mat_charpoly_berkowitz(mp_ptr cp, const nmod_mat_t mat, nmod_t mod) else { slong i, k, t; - mp_ptr a, A, s; + nn_ptr a, A, s; int nlimbs; TMP_INIT; TMP_START; - a = TMP_ALLOC(sizeof(mp_limb_t) * (n * n)); + a = TMP_ALLOC(sizeof(ulong) * (n * n)); A = a + (n - 1) * n; nlimbs = _nmod_vec_dot_bound_limbs(n, mod); diff --git a/src/nmod_mat/det.c b/src/nmod_mat/det.c index ebcb9c3d37..97439c9c73 100644 --- a/src/nmod_mat/det.c +++ b/src/nmod_mat/det.c @@ -14,19 +14,19 @@ #include "nmod_poly.h" #include "perm.h" -static mp_limb_t -_nmod_mat_det_2x2(mp_limb_t a, mp_limb_t b, mp_limb_t c, mp_limb_t d, nmod_t mod) +static ulong +_nmod_mat_det_2x2(ulong a, ulong b, ulong c, ulong d, nmod_t mod) { b = nmod_neg(b, mod); return nmod_addmul(nmod_mul(a, d, mod), b, c, mod); } -static mp_limb_t -_nmod_mat_det_3x3(mp_limb_t a, mp_limb_t b, mp_limb_t c, - mp_limb_t d, mp_limb_t e, mp_limb_t f, - mp_limb_t g, mp_limb_t h, mp_limb_t i, nmod_t mod) +static ulong +_nmod_mat_det_3x3(ulong a, ulong b, ulong c, + ulong d, ulong e, ulong f, + ulong g, ulong h, ulong i, nmod_t mod) { - mp_limb_t s, t, u; + ulong s, t, u; s = _nmod_mat_det_2x2(e, f, h, i, mod); t = _nmod_mat_det_2x2(g, i, d, f, mod); @@ -39,10 +39,10 @@ _nmod_mat_det_3x3(mp_limb_t a, mp_limb_t b, mp_limb_t c, return s; } -static mp_limb_t -_nmod_mat_det_4x4(mp_limb_t ** const mat, nmod_t mod) +static ulong +_nmod_mat_det_4x4(ulong ** const mat, nmod_t mod) { - mp_limb_t s, t, u, v; + ulong s, t, u, v; s = _nmod_mat_det_3x3(mat[1][1], mat[1][2], mat[1][3], mat[2][1], mat[2][2], mat[2][3], @@ -71,10 +71,10 @@ _nmod_mat_det_4x4(mp_limb_t ** const mat, nmod_t mod) return s; } -mp_limb_t +ulong _nmod_mat_det(nmod_mat_t A) { - mp_limb_t det; + ulong det; slong * P; slong m = A->r; @@ -101,11 +101,11 @@ _nmod_mat_det(nmod_mat_t A) return det; } -mp_limb_t +ulong nmod_mat_det(const nmod_mat_t A) { nmod_mat_t tmp; - mp_limb_t det; + ulong det; slong dim = A->r; if (dim != A->c) @@ -130,7 +130,7 @@ nmod_mat_det(const nmod_mat_t A) if (dim <= 8) { - mp_limb_t cp[9]; + ulong cp[9]; _nmod_mat_charpoly_berkowitz(cp, A, A->mod); if (dim % 2) return nmod_neg(cp[0], A->mod); diff --git a/src/nmod_mat/det_howell.c b/src/nmod_mat/det_howell.c index b900e55c09..51e80de0cf 100644 --- a/src/nmod_mat/det_howell.c +++ b/src/nmod_mat/det_howell.c @@ -25,10 +25,10 @@ Find s, t such that g = s*a - t*b is the gcd of a and b mod n and where s is a unit mod n. Assumes a and b are reduced mod n and no aliasing. 
*/ -static inline mp_limb_t -_nmod_xgcd_unit(mp_limb_t * s, mp_limb_t * t, mp_limb_t a, mp_limb_t b, nmod_t mod) +static inline ulong +_nmod_xgcd_unit(ulong * s, ulong * t, ulong a, ulong b, nmod_t mod) { - mp_limb_t g, ag, bg; + ulong g, ag, bg; if (a >= b) g = n_xgcd(s, t, a, b); @@ -55,7 +55,7 @@ static inline int _nmod_mat_pivot(nmod_mat_t A, slong start_row, slong col) { slong j; - mp_ptr u; + nn_ptr u; if (nmod_mat_entry(A, start_row, col) != 0) return 1; @@ -76,9 +76,9 @@ _nmod_mat_pivot(nmod_mat_t A, slong start_row, slong col) /* test whether q*a = b mod N has a solution */ static int -_n_is_divisible(mp_ptr q, mp_limb_t b, mp_limb_t a, nmod_t N) +_n_is_divisible(nn_ptr q, ulong b, ulong a, nmod_t N) { - mp_limb_t e, g; + ulong e, g; g = n_gcdinv(&e, a, N.n); if (( b % g ) == 0) @@ -90,9 +90,9 @@ _n_is_divisible(mp_ptr q, mp_limb_t b, mp_limb_t a, nmod_t N) return 0; } -mp_limb_t _nmod_mat_det_howell(nmod_mat_t A) +ulong _nmod_mat_det_howell(nmod_mat_t A) { - mp_limb_t s, t, t1, det = 1, unit = 1; + ulong s, t, t1, det = 1, unit = 1; slong m, n, row, col, i, k; nmod_t mod = A->mod; @@ -158,11 +158,11 @@ mp_limb_t _nmod_mat_det_howell(nmod_mat_t A) return nmod_mul(det, unit, mod); } -mp_limb_t +ulong nmod_mat_det_howell(const nmod_mat_t A) { nmod_mat_t tmp; - mp_limb_t det; + ulong det; slong dim = A->r; if (dim != A->c) diff --git a/src/nmod_mat/init.c b/src/nmod_mat/init.c index abd87ed1ba..8f327857e8 100644 --- a/src/nmod_mat/init.c +++ b/src/nmod_mat/init.c @@ -14,18 +14,18 @@ #include "nmod_mat.h" void -nmod_mat_init(nmod_mat_t mat, slong rows, slong cols, mp_limb_t n) +nmod_mat_init(nmod_mat_t mat, slong rows, slong cols, ulong n) { slong i; if (rows != 0) - mat->rows = (mp_limb_t **) flint_malloc(rows * sizeof(mp_limb_t *)); + mat->rows = (ulong **) flint_malloc(rows * sizeof(ulong *)); else mat->rows = NULL; if (rows != 0 && cols != 0) { - mat->entries = (mp_limb_t *) flint_calloc(flint_mul_sizes(rows, cols), sizeof(mp_limb_t)); + mat->entries = (ulong *) flint_calloc(flint_mul_sizes(rows, cols), sizeof(ulong)); for (i = 0; i < rows; i++) mat->rows[i] = mat->entries + i * cols; @@ -54,13 +54,13 @@ nmod_mat_init_set(nmod_mat_t mat, const nmod_mat_t src) slong i; if (rows != 0) - mat->rows = flint_malloc(rows * sizeof(mp_limb_t *)); + mat->rows = flint_malloc(rows * sizeof(ulong *)); else mat->rows = NULL; if ((rows) && (cols)) { - mat->entries = flint_malloc(flint_mul_sizes(rows, cols) * sizeof(mp_limb_t)); + mat->entries = flint_malloc(flint_mul_sizes(rows, cols) * sizeof(ulong)); for (i = 0; i < rows; i++) { diff --git a/src/nmod_mat/inlines.c b/src/nmod_mat/inlines.c index 3c99d2004d..651bda5757 100644 --- a/src/nmod_mat/inlines.c +++ b/src/nmod_mat/inlines.c @@ -14,7 +14,7 @@ #include "nmod_mat.h" -void nmod_mat_set_entry(nmod_mat_t mat, slong i, slong j, mp_limb_t x) +void nmod_mat_set_entry(nmod_mat_t mat, slong i, slong j, ulong x) { nmod_mat_entry(mat, i, j) = x; } diff --git a/src/nmod_mat/lu_classical.c b/src/nmod_mat/lu_classical.c index a7d3a16cdb..f351961c95 100644 --- a/src/nmod_mat/lu_classical.c +++ b/src/nmod_mat/lu_classical.c @@ -17,7 +17,7 @@ static inline int nmod_mat_pivot(nmod_mat_t A, slong * P, slong start_row, slong col) { slong j, t; - mp_ptr u; + nn_ptr u; if (nmod_mat_entry(A, start_row, col) != 0) return 1; @@ -44,7 +44,7 @@ nmod_mat_pivot(nmod_mat_t A, slong * P, slong start_row, slong col) slong nmod_mat_lu_classical(slong * P, nmod_mat_t A, int rank_check) { - mp_limb_t d, e, **a; + ulong d, e, **a; nmod_t mod; slong i, m, n, rank, length, 
row, col; diff --git a/src/nmod_mat/lu_classical_delayed.c b/src/nmod_mat/lu_classical_delayed.c index 899b381a25..9f9bd52bc6 100644 --- a/src/nmod_mat/lu_classical_delayed.c +++ b/src/nmod_mat/lu_classical_delayed.c @@ -13,7 +13,7 @@ #include "nmod_vec.h" #include "nmod_mat.h" -static mp_limb_t +static ulong nmod_set_uiuiui(ulong s2, ulong s1, ulong s0, nmod_t mod) { NMOD_RED(s2, s2, mod); @@ -24,10 +24,10 @@ nmod_set_uiuiui(ulong s2, ulong s1, ulong s0, nmod_t mod) slong nmod_mat_lu_classical_delayed_1(slong * P, nmod_mat_t A, int rank_check) { - mp_limb_t d, e, f, **a; + ulong d, e, f, **a; nmod_t mod; slong i, j, nrows, ncols, rank, row, col, pivot_row, tmp_index; - mp_ptr tmp_ptr; + nn_ptr tnn_ptr; nrows = A->r; ncols = A->c; @@ -72,9 +72,9 @@ nmod_mat_lu_classical_delayed_1(slong * P, nmod_mat_t A, int rank_check) /* swap rows */ if (pivot_row != row) { - tmp_ptr = a[pivot_row]; + tnn_ptr = a[pivot_row]; a[pivot_row] = a[row]; - a[row] = tmp_ptr; + a[row] = tnn_ptr; tmp_index = P[pivot_row]; P[pivot_row] = P[row]; @@ -97,7 +97,7 @@ nmod_mat_lu_classical_delayed_1(slong * P, nmod_mat_t A, int rank_check) for (j = col + 1; j + 4 < ncols; j += 4) { - mp_limb_t x0, x1, x2, x3; + ulong x0, x1, x2, x3; x0 = a[row][j + 0]; x1 = a[row][j + 1]; x2 = a[row][j + 2]; @@ -124,11 +124,11 @@ nmod_mat_lu_classical_delayed_1(slong * P, nmod_mat_t A, int rank_check) slong nmod_mat_lu_classical_delayed_2(slong * P, nmod_mat_t A, int rank_check) { - mp_limb_t d, e, f, **a; + ulong d, e, f, **a; nmod_t mod; slong i, j, nrows, ncols, rank, row, col, pivot_row, tmp_index; - mp_ptr tmp_ptr; - mp_ptr b; + nn_ptr tnn_ptr; + nn_ptr b; TMP_INIT; nrows = A->r; @@ -142,7 +142,7 @@ nmod_mat_lu_classical_delayed_2(slong * P, nmod_mat_t A, int rank_check) P[i] = i; TMP_START; - b = TMP_ALLOC(2 * sizeof(mp_limb_t) * nrows * ncols); + b = TMP_ALLOC(2 * sizeof(ulong) * nrows * ncols); #define UNREDUCED_LO(ii, jj) b[2 * ((ii) * ncols + jj)] #define UNREDUCED_HI(ii, jj) b[2 * ((ii) * ncols + jj) + 1] @@ -189,9 +189,9 @@ nmod_mat_lu_classical_delayed_2(slong * P, nmod_mat_t A, int rank_check) /* swap rows */ if (pivot_row != row) { - tmp_ptr = a[pivot_row]; + tnn_ptr = a[pivot_row]; a[pivot_row] = a[row]; - a[row] = tmp_ptr; + a[row] = tnn_ptr; tmp_index = P[pivot_row]; P[pivot_row] = P[row]; @@ -200,7 +200,7 @@ nmod_mat_lu_classical_delayed_2(slong * P, nmod_mat_t A, int rank_check) /* swap rows in unreduced submatrix, and reduce new pivot row */ for (j = col + 1; j < ncols; j++) { - mp_limb_t hi, lo; + ulong hi, lo; lo = UNREDUCED_LO(row, j); hi = UNREDUCED_HI(row, j); NMOD2_RED2(a[row][j], UNREDUCED_HI(pivot_row, j), UNREDUCED_LO(pivot_row, j), mod); @@ -228,7 +228,7 @@ nmod_mat_lu_classical_delayed_2(slong * P, nmod_mat_t A, int rank_check) { for (j = col + 1; j + 4 < ncols; j += 4) { - mp_limb_t x0, x1, x2, x3; + ulong x0, x1, x2, x3; x0 = a[row][j + 0] * f; x1 = a[row][j + 1] * f; x2 = a[row][j + 2] * f; @@ -245,7 +245,7 @@ nmod_mat_lu_classical_delayed_2(slong * P, nmod_mat_t A, int rank_check) for ( ; j < ncols; j++) { - mp_limb_t hi, lo; + ulong hi, lo; hi = 0; lo = a[row][j] * f; add_ssaaaa(UNREDUCED_HI(i, j), UNREDUCED_LO(i, j), @@ -256,7 +256,7 @@ nmod_mat_lu_classical_delayed_2(slong * P, nmod_mat_t A, int rank_check) { for (j = col + 1; j + 4 < ncols; j += 4) { - mp_limb_t x0, x1, x2, x3, h0, h1, h2, h3; + ulong x0, x1, x2, x3, h0, h1, h2, h3; umul_ppmm(h0, x0, a[row][j + 0], f); umul_ppmm(h1, x1, a[row][j + 1], f); umul_ppmm(h2, x2, a[row][j + 2], f); @@ -273,7 +273,7 @@ 
nmod_mat_lu_classical_delayed_2(slong * P, nmod_mat_t A, int rank_check) for ( ; j < ncols; j++) { - mp_limb_t hi, lo; + ulong hi, lo; umul_ppmm(hi, lo, a[row][j], f); add_ssaaaa(UNREDUCED_HI(i, j), UNREDUCED_LO(i, j), UNREDUCED_HI(i, j), UNREDUCED_LO(i, j), hi, lo); @@ -294,11 +294,11 @@ nmod_mat_lu_classical_delayed_2(slong * P, nmod_mat_t A, int rank_check) slong nmod_mat_lu_classical_delayed_3(slong * P, nmod_mat_t A, int rank_check) { - mp_limb_t d, e, f, **a; + ulong d, e, f, **a; nmod_t mod; slong i, j, nrows, ncols, rank, row, col, pivot_row, tmp_index; - mp_ptr tmp_ptr; - mp_ptr b; + nn_ptr tnn_ptr; + nn_ptr b; TMP_INIT; nrows = A->r; @@ -312,7 +312,7 @@ nmod_mat_lu_classical_delayed_3(slong * P, nmod_mat_t A, int rank_check) P[i] = i; TMP_START; - b = TMP_ALLOC(3 * sizeof(mp_limb_t) * nrows * ncols); + b = TMP_ALLOC(3 * sizeof(ulong) * nrows * ncols); #define UNREDUCED3_L0(ii, jj) b[3 * ((ii) * ncols + jj)] #define UNREDUCED3_L1(ii, jj) b[3 * ((ii) * ncols + jj) + 1] @@ -363,9 +363,9 @@ nmod_mat_lu_classical_delayed_3(slong * P, nmod_mat_t A, int rank_check) /* swap rows */ if (pivot_row != row) { - tmp_ptr = a[pivot_row]; + tnn_ptr = a[pivot_row]; a[pivot_row] = a[row]; - a[row] = tmp_ptr; + a[row] = tnn_ptr; tmp_index = P[pivot_row]; P[pivot_row] = P[row]; @@ -374,7 +374,7 @@ nmod_mat_lu_classical_delayed_3(slong * P, nmod_mat_t A, int rank_check) /* swap rows in unreduced submatrix, and reduce new pivot row */ for (j = col + 1; j < ncols; j++) { - mp_limb_t t2, t1, t0; + ulong t2, t1, t0; t0 = UNREDUCED3_L0(row, j); t1 = UNREDUCED3_L1(row, j); t2 = UNREDUCED3_L2(row, j); @@ -408,7 +408,7 @@ nmod_mat_lu_classical_delayed_3(slong * P, nmod_mat_t A, int rank_check) for (j = col + 1; j < ncols; j++) { - mp_limb_t hi, lo; + ulong hi, lo; umul_ppmm(hi, lo, a[row][j], f); add_sssaaaaaa(UNREDUCED3_L2(i, j), UNREDUCED3_L1(i, j), UNREDUCED3_L0(i, j), UNREDUCED3_L2(i, j), UNREDUCED3_L1(i, j), UNREDUCED3_L0(i, j), diff --git a/src/nmod_mat/lu_recursive.c b/src/nmod_mat/lu_recursive.c index cfc11e7ad1..6a8c483d1d 100644 --- a/src/nmod_mat/lu_recursive.c +++ b/src/nmod_mat/lu_recursive.c @@ -17,11 +17,11 @@ _apply_permutation(slong * AP, nmod_mat_t A, slong * P, { if (n != 0) { - mp_ptr * Atmp; + nn_ptr * Atmp; slong * APtmp; slong i; - Atmp = flint_malloc(sizeof(mp_ptr) * n); + Atmp = flint_malloc(sizeof(nn_ptr) * n); APtmp = flint_malloc(sizeof(slong) * n); for (i = 0; i < n; i++) Atmp[i] = A->rows[P[i] + offset]; @@ -101,7 +101,7 @@ nmod_mat_lu_recursive(slong * P, nmod_mat_t A, int rank_check) { for (i = 0; i < m - r1; i++) { - mp_ptr row = A->rows[r1 + i]; + nn_ptr row = A->rows[r1 + i]; for (j = 0; j < FLINT_MIN(i, r2); j++) { row[r1 + j] = row[n1 + j]; diff --git a/src/nmod_mat/mul_blas.c b/src/nmod_mat/mul_blas.c index 82da56663f..15c6b67e64 100644 --- a/src/nmod_mat/mul_blas.c +++ b/src/nmod_mat/mul_blas.c @@ -88,11 +88,11 @@ typedef struct { slong Astoprow; slong Bstartrow; slong Bstoprow; - mp_limb_t ctxn; + ulong ctxn; float * dA; float * dB; - mp_limb_t ** Arows; - mp_limb_t ** Brows; + ulong ** Arows; + ulong ** Brows; } _lift_sp_worker_arg_struct; void _lift_sp_worker(void * arg_ptr) @@ -104,11 +104,11 @@ void _lift_sp_worker(void * arg_ptr) slong Astoprow = arg->Astoprow; slong Bstartrow = arg->Bstartrow; slong Bstoprow = arg->Bstoprow; - mp_limb_t ctxn = arg->ctxn; + ulong ctxn = arg->ctxn; float * dA = arg->dA; float * dB = arg->dB; - mp_limb_t ** Arows = arg->Arows; - mp_limb_t ** Brows = arg->Brows; + ulong ** Arows = arg->Arows; + ulong ** Brows = arg->Brows; slong i; 
for (i = Astartrow; i < Astoprow; i++) @@ -123,9 +123,9 @@ typedef struct { slong Cstartrow; slong Cstoprow; nmod_t * ctx; - mp_limb_t shift; + ulong shift; float * dC; - mp_limb_t ** Crows; + ulong ** Crows; } _reduce_sp_worker_arg_struct; void _reduce_sp_worker(void * arg_ptr) @@ -135,9 +135,9 @@ void _reduce_sp_worker(void * arg_ptr) slong Cstartrow = arg->Cstartrow; slong Cstoprow = arg->Cstoprow; nmod_t ctx = *arg->ctx; - mp_limb_t shift = arg->shift; + ulong shift = arg->shift; float * dC = arg->dC; - mp_limb_t ** Crows = arg->Crows; + ulong ** Crows = arg->Crows; slong i, j; for (i = Cstartrow; i < Cstoprow; i++) @@ -145,7 +145,7 @@ void _reduce_sp_worker(void * arg_ptr) for (j = 0; j < n; j++) { slong a = (slong) dC[i*n + j]; - mp_limb_t b = (a < 0) ? a + shift : a; + ulong b = (a < 0) ? a + shift : a; NMOD_RED(Crows[i][j], b, ctx); } } @@ -256,7 +256,7 @@ static void _lift_vec_crt(double * a, ulong * b, slong len, nmod_t ctx) slong i; for (i = 0; i < len; i++) { - mp_limb_t bn; + ulong bn; NMOD_RED(bn, b[i], ctx); a[i] = (int)(bn - (ctx.n & FLINT_SIGN_EXT(ctx.n/2 - bn))); } @@ -273,8 +273,8 @@ typedef struct { nmod_t crtmod; double * dA; double * dB; - mp_limb_t ** Arows; - mp_limb_t ** Brows; + ulong ** Arows; + ulong ** Brows; } _lift_crt_worker_arg_struct; void _lift_crt_worker(void * arg_ptr) @@ -289,8 +289,8 @@ void _lift_crt_worker(void * arg_ptr) nmod_t crtmod = arg->crtmod; double * dA = arg->dA; double * dB = arg->dB; - mp_limb_t ** Arows = arg->Arows; - mp_limb_t ** Brows = arg->Brows; + ulong ** Arows = arg->Arows; + ulong ** Brows = arg->Brows; slong i; for (i = Astartrow; i < Astoprow; i++) @@ -309,7 +309,7 @@ typedef struct { nmod_t * crtmod; nmod_t * ctx; double * dC; - mp_limb_t ** Crows; + ulong ** Crows; } _reduce_crt_worker_arg_struct; void _reduce_crt_worker(void * arg_ptr) @@ -324,10 +324,10 @@ void _reduce_crt_worker(void * arg_ptr) nmod_t ctx = *arg->ctx; ulong s, t, hi, lo, reshi, reslo; slong crtnum = arg->crtnum; - mp_limb_t ** Crows = arg->Crows; + ulong ** Crows = arg->Crows; nmod_t crtmod[MAX_CRT_NUM]; - mp_limb_t q[MAX_CRT_NUM], v[MAX_CRT_NUM], u[MAX_CRT_NUM]; - mp_limb_t shifts[MAX_CRT_NUM], pmodinv[MAX_CRT_NUM*MAX_CRT_NUM]; + ulong q[MAX_CRT_NUM], v[MAX_CRT_NUM], u[MAX_CRT_NUM]; + ulong shifts[MAX_CRT_NUM], pmodinv[MAX_CRT_NUM*MAX_CRT_NUM]; for (i = 0; i < crtnum; i++) crtmod[i] = arg->crtmod[i]; @@ -366,7 +366,7 @@ void _reduce_crt_worker(void * arg_ptr) for (pi = 0; pi < crtnum; pi++) { slong a = (slong) dC[i*n + j + pi*m*n]; - mp_limb_t b = (a < 0) ? a + shifts[pi] : a; + ulong b = (a < 0) ? 
a + shifts[pi] : a; NMOD_RED(u[pi], b, crtmod[pi]); } @@ -537,11 +537,11 @@ typedef struct { slong Astoprow; slong Bstartrow; slong Bstoprow; - mp_limb_t ctxn; + ulong ctxn; double * dA; double * dB; - mp_limb_t ** Arows; - mp_limb_t ** Brows; + ulong ** Arows; + ulong ** Brows; } _lift_dp_worker_arg_struct; void _lift_dp_worker(void * arg_ptr) @@ -553,11 +553,11 @@ void _lift_dp_worker(void * arg_ptr) slong Astoprow = arg->Astoprow; slong Bstartrow = arg->Bstartrow; slong Bstoprow = arg->Bstoprow; - mp_limb_t ctxn = arg->ctxn; + ulong ctxn = arg->ctxn; double * dA = arg->dA; double * dB = arg->dB; - mp_limb_t ** Arows = arg->Arows; - mp_limb_t ** Brows = arg->Brows; + ulong ** Arows = arg->Arows; + ulong ** Brows = arg->Brows; slong i; for (i = Astartrow; i < Astoprow; i++) @@ -572,9 +572,9 @@ typedef struct { slong Cstartrow; slong Cstoprow; nmod_t * ctx; - mp_limb_t shift; + ulong shift; double * dC; - mp_limb_t ** Crows; + ulong ** Crows; } _reduce_dp_worker_arg_struct; void _reduce_dp_worker(void * arg_ptr) @@ -584,9 +584,9 @@ void _reduce_dp_worker(void * arg_ptr) slong Cstartrow = arg->Cstartrow; slong Cstoprow = arg->Cstoprow; nmod_t ctx = *arg->ctx; - mp_limb_t shift = arg->shift; + ulong shift = arg->shift; double * dC = arg->dC; - mp_limb_t ** Crows = arg->Crows; + ulong ** Crows = arg->Crows; slong i, j; for (i = Cstartrow; i < Cstoprow; i++) @@ -594,7 +594,7 @@ void _reduce_dp_worker(void * arg_ptr) for (j = 0; j < n; j++) { slong a = (slong) dC[i*n + j]; - mp_limb_t b = (a < 0) ? a + shift : a; + ulong b = (a < 0) ? a + shift : a; NMOD_RED(Crows[i][j], b, ctx); } } diff --git a/src/nmod_mat/mul_classical.c b/src/nmod_mat/mul_classical.c index 061948a642..ff05ffb864 100644 --- a/src/nmod_mat/mul_classical.c +++ b/src/nmod_mat/mul_classical.c @@ -23,11 +23,11 @@ with op = -1, computes D = C - A*B */ static inline void -_nmod_mat_addmul_basic_op(mp_ptr * D, mp_ptr * const C, mp_ptr * const A, - mp_ptr * const B, slong m, slong k, slong n, int op, nmod_t mod, int nlimbs) +_nmod_mat_addmul_basic_op(nn_ptr * D, nn_ptr * const C, nn_ptr * const A, + nn_ptr * const B, slong m, slong k, slong n, int op, nmod_t mod, int nlimbs) { slong i, j; - mp_limb_t c; + ulong c; for (i = 0; i < m; i++) { @@ -46,14 +46,14 @@ _nmod_mat_addmul_basic_op(mp_ptr * D, mp_ptr * const C, mp_ptr * const A, } static inline void -_nmod_mat_addmul_transpose_op(mp_ptr * D, const mp_ptr * C, const mp_ptr * A, - const mp_ptr * B, slong m, slong k, slong n, int op, nmod_t mod, int nlimbs) +_nmod_mat_addmul_transpose_op(nn_ptr * D, const nn_ptr * C, const nn_ptr * A, + const nn_ptr * B, slong m, slong k, slong n, int op, nmod_t mod, int nlimbs) { - mp_ptr tmp; - mp_limb_t c; + nn_ptr tmp; + ulong c; slong i, j; - tmp = flint_malloc(sizeof(mp_limb_t) * k * n); + tmp = flint_malloc(sizeof(ulong) * k * n); for (i = 0; i < k; i++) for (j = 0; j < n; j++) @@ -79,15 +79,15 @@ _nmod_mat_addmul_transpose_op(mp_ptr * D, const mp_ptr * C, const mp_ptr * A, /* Assumes nlimbs = 1 */ static void -_nmod_mat_addmul_packed_op(mp_ptr * D, const mp_ptr * C, const mp_ptr * A, - const mp_ptr * B, slong M, slong N, slong K, int op, nmod_t mod) +_nmod_mat_addmul_packed_op(nn_ptr * D, const nn_ptr * C, const nn_ptr * A, + const nn_ptr * B, slong M, slong N, slong K, int op, nmod_t mod) { slong i, j, k; slong Kpack; int pack, pack_bits; - mp_limb_t c, d, mask; - mp_ptr tmp; - mp_ptr Aptr, Tptr; + ulong c, d, mask; + nn_ptr tmp; + nn_ptr Aptr, Tptr; /* bound unreduced entry */ c = N * (mod.n-1) * (mod.n-1); diff --git 
a/src/nmod_mat/mul_classical_threaded.c b/src/nmod_mat/mul_classical_threaded.c index 56bb17121f..24b6d3ec5d 100644 --- a/src/nmod_mat/mul_classical_threaded.c +++ b/src/nmod_mat/mul_classical_threaded.c @@ -25,11 +25,11 @@ with op = -1, computes D = C - A*B */ static inline void -_nmod_mat_addmul_basic_op(mp_ptr * D, mp_ptr * const C, mp_ptr * const A, - mp_ptr * const B, slong m, slong k, slong n, int op, nmod_t mod, int nlimbs) +_nmod_mat_addmul_basic_op(nn_ptr * D, nn_ptr * const C, nn_ptr * const A, + nn_ptr * const B, slong m, slong k, slong n, int op, nmod_t mod, int nlimbs) { slong i, j; - mp_limb_t c; + ulong c; for (i = 0; i < m; i++) { @@ -56,10 +56,10 @@ typedef struct slong m; slong n; slong nlimbs; - const mp_ptr * A; - const mp_ptr * C; - mp_ptr * D; - mp_ptr tmp; + const nn_ptr * A; + const nn_ptr * C; + nn_ptr * D; + nn_ptr tmp; nmod_t mod; #if FLINT_USES_PTHREAD pthread_mutex_t * mutex; @@ -77,13 +77,13 @@ _nmod_mat_addmul_transpose_worker(void * arg_ptr) slong m = arg.m; slong n = arg.n; slong nlimbs = arg.nlimbs; - const mp_ptr * A = arg.A; - const mp_ptr * C = arg.C; - mp_ptr * D = arg.D; - mp_ptr tmp = arg.tmp; + const nn_ptr * A = arg.A; + const nn_ptr * C = arg.C; + nn_ptr * D = arg.D; + nn_ptr tmp = arg.tmp; nmod_t mod = arg.mod; int op = arg.op; - mp_limb_t c; + ulong c; while (1) { @@ -128,12 +128,12 @@ _nmod_mat_addmul_transpose_worker(void * arg_ptr) } static inline void -_nmod_mat_addmul_transpose_threaded_pool_op(mp_ptr * D, const mp_ptr * C, - const mp_ptr * A, const mp_ptr * B, slong m, +_nmod_mat_addmul_transpose_threaded_pool_op(nn_ptr * D, const nn_ptr * C, + const nn_ptr * A, const nn_ptr * B, slong m, slong k, slong n, int op, nmod_t mod, int nlimbs, thread_pool_handle * threads, slong num_threads) { - mp_ptr tmp; + nn_ptr tmp; slong i, j, block; slong shared_i = 0, shared_j = 0; nmod_mat_transpose_arg_t * args; @@ -141,7 +141,7 @@ _nmod_mat_addmul_transpose_threaded_pool_op(mp_ptr * D, const mp_ptr * C, pthread_mutex_t mutex; #endif - tmp = flint_malloc(sizeof(mp_limb_t) * k * n); + tmp = flint_malloc(sizeof(ulong) * k * n); /* transpose B */ for (i = 0; i < k; i++) @@ -211,12 +211,12 @@ typedef struct slong K; slong N; slong Kpack; - const mp_ptr * A; - const mp_ptr * C; - mp_ptr * D; - mp_ptr tmp; + const nn_ptr * A; + const nn_ptr * C; + nn_ptr * D; + nn_ptr tmp; nmod_t mod; - mp_limb_t mask; + ulong mask; #if FLINT_USES_PTHREAD pthread_mutex_t * mutex; #endif @@ -235,17 +235,17 @@ _nmod_mat_addmul_packed_worker(void * arg_ptr) slong K = arg.K; slong N = arg.N; slong Kpack = arg.Kpack; - const mp_ptr * A = arg.A; - const mp_ptr * C = arg.C; - mp_ptr * D = arg.D; - mp_ptr tmp = arg.tmp; + const nn_ptr * A = arg.A; + const nn_ptr * C = arg.C; + nn_ptr * D = arg.D; + nn_ptr tmp = arg.tmp; nmod_t mod = arg.mod; - mp_limb_t mask = arg.mask; + ulong mask = arg.mask; int pack = arg.pack; int pack_bits = arg.pack_bits; int op = arg.op; - mp_limb_t c, d; - mp_ptr Aptr, Tptr; + ulong c, d; + nn_ptr Aptr, Tptr; while (1) { @@ -314,16 +314,16 @@ _nmod_mat_addmul_packed_worker(void * arg_ptr) /* Assumes nlimbs = 1 */ static void -_nmod_mat_addmul_packed_threaded_pool_op(mp_ptr * D, - const mp_ptr * C, const mp_ptr * A, const mp_ptr * B, +_nmod_mat_addmul_packed_threaded_pool_op(nn_ptr * D, + const nn_ptr * C, const nn_ptr * A, const nn_ptr * B, slong M, slong N, slong K, int op, nmod_t mod, thread_pool_handle * threads, slong num_threads) { slong i, j, k; slong Kpack, block; int pack, pack_bits; - mp_limb_t c, mask; - mp_ptr tmp; + ulong c, mask; + nn_ptr tmp; 
slong shared_i = 0, shared_j = 0; nmod_mat_packed_arg_t * args; #if FLINT_USES_PTHREAD diff --git a/src/nmod_mat/mul_nmod_vec.c b/src/nmod_mat/mul_nmod_vec.c index be8258665c..ab888196bd 100644 --- a/src/nmod_mat/mul_nmod_vec.c +++ b/src/nmod_mat/mul_nmod_vec.c @@ -14,9 +14,9 @@ #include "nmod_mat.h" void nmod_mat_mul_nmod_vec( - mp_limb_t * c, + ulong * c, const nmod_mat_t A, - const mp_limb_t * b, slong blen) + const ulong * b, slong blen) { nmod_t mod = A->mod; slong i, j; @@ -25,26 +25,26 @@ void nmod_mat_mul_nmod_vec( for (i = A->r - 1; i >= 0; i--) { - const mp_limb_t * Ai = A->rows[i]; + const ulong * Ai = A->rows[i]; NMOD_VEC_DOT(c[i], j, len, Ai[j], b[j], mod, nlimbs); } } void nmod_mat_mul_nmod_vec_ptr( - mp_limb_t * const * c, + ulong * const * c, const nmod_mat_t A, - const mp_limb_t * const * b, slong blen) + const ulong * const * b, slong blen) { slong i; slong len = FLINT_MIN(A->c, blen); slong nrows = A->r; - mp_limb_t * bb, * cc; + ulong * bb, * cc; TMP_INIT; TMP_START; - bb = TMP_ARRAY_ALLOC(len, mp_limb_t); - cc = TMP_ARRAY_ALLOC(nrows, mp_limb_t); + bb = TMP_ARRAY_ALLOC(len, ulong); + cc = TMP_ARRAY_ALLOC(nrows, ulong); for (i = 0; i < len; i++) bb[i] = b[i][0]; diff --git a/src/nmod_mat/nmod_vec_mul.c b/src/nmod_mat/nmod_vec_mul.c index 3cf5e0b705..a0da2651f3 100644 --- a/src/nmod_mat/nmod_vec_mul.c +++ b/src/nmod_mat/nmod_vec_mul.c @@ -14,8 +14,8 @@ /* TODO try delaying the reductions */ void nmod_mat_nmod_vec_mul( - mp_limb_t * c, - const mp_limb_t * a, slong alen, + ulong * c, + const ulong * a, slong alen, const nmod_mat_t B) { slong i; @@ -36,20 +36,20 @@ void nmod_mat_nmod_vec_mul( } void nmod_mat_nmod_vec_mul_ptr( - mp_limb_t * const * c, - const mp_limb_t * const * a, slong alen, + ulong * const * c, + const ulong * const * a, slong alen, const nmod_mat_t B) { slong i; slong len = FLINT_MIN(B->r, alen); slong ncols = B->c; - mp_limb_t * aa, * cc; + ulong * aa, * cc; TMP_INIT; TMP_START; - aa = TMP_ARRAY_ALLOC(len, mp_limb_t); - cc = TMP_ARRAY_ALLOC(ncols, mp_limb_t); + aa = TMP_ARRAY_ALLOC(len, ulong); + cc = TMP_ARRAY_ALLOC(ncols, ulong); for (i = 0; i < len; i++) aa[i] = a[i][0]; diff --git a/src/nmod_mat/nullspace.c b/src/nmod_mat/nullspace.c index 1b8f96cd34..fad9a915d6 100644 --- a/src/nmod_mat/nullspace.c +++ b/src/nmod_mat/nullspace.c @@ -64,7 +64,7 @@ nmod_mat_nullspace(nmod_mat_t X, const nmod_mat_t A) { for (j = 0; j < rank; j++) { - mp_limb_t c = nmod_mat_entry(tmp, j, nonpivots[i]); + ulong c = nmod_mat_entry(tmp, j, nonpivots[i]); nmod_mat_entry(X, pivots[j], i) = nmod_neg(c, A->mod); } diff --git a/src/nmod_mat/permute_rows.c b/src/nmod_mat/permute_rows.c index 72bd5794d2..ac57a6ec9a 100644 --- a/src/nmod_mat/permute_rows.c +++ b/src/nmod_mat/permute_rows.c @@ -21,7 +21,7 @@ void nmod_mat_permute_rows(nmod_mat_t mat, const slong * perm_act, slong * perm_store) { slong i; - mp_limb_t ** mat_tmp = (mp_limb_t **) flint_malloc(mat->r * sizeof(mp_limb_t *)); + ulong ** mat_tmp = (ulong **) flint_malloc(mat->r * sizeof(ulong *)); /* perm_store[i] <- perm_store[perm_act[i]] */ if (perm_store) diff --git a/src/nmod_mat/profile/p-lu.c b/src/nmod_mat/profile/p-lu.c index 3eaa3480c8..4720c1b007 100644 --- a/src/nmod_mat/profile/p-lu.c +++ b/src/nmod_mat/profile/p-lu.c @@ -21,7 +21,7 @@ typedef struct { slong n; - mp_limb_t modulus; + ulong modulus; int algorithm; } mat_lu_t; diff --git a/src/nmod_mat/profile/p-mul.c b/src/nmod_mat/profile/p-mul.c index e748d0a5d3..93ca29d1bd 100644 --- a/src/nmod_mat/profile/p-mul.c +++ b/src/nmod_mat/profile/p-mul.c @@ 
-26,7 +26,7 @@ typedef struct slong dim_m; slong dim_n; slong dim_k; - mp_limb_t modulus; + ulong modulus; int algorithm; } mat_mul_t; diff --git a/src/nmod_mat/randpermdiag.c b/src/nmod_mat/randpermdiag.c index 3f841bf32f..fbfff12daf 100644 --- a/src/nmod_mat/randpermdiag.c +++ b/src/nmod_mat/randpermdiag.c @@ -14,7 +14,7 @@ int nmod_mat_randpermdiag(nmod_mat_t mat, flint_rand_t state, - mp_srcptr diag, slong n) + nn_srcptr diag, slong n) { int parity; slong i; diff --git a/src/nmod_mat/randrank.c b/src/nmod_mat/randrank.c index c1bae69b7a..fd838aea27 100644 --- a/src/nmod_mat/randrank.c +++ b/src/nmod_mat/randrank.c @@ -16,7 +16,7 @@ void nmod_mat_randrank(nmod_mat_t mat, flint_rand_t state, slong rank) { slong i; - mp_limb_t * diag; + ulong * diag; if (rank < 0 || rank > mat->r || rank > mat->c) { diff --git a/src/nmod_mat/rref.c b/src/nmod_mat/rref.c index 141b5ef83e..b2b38e381d 100644 --- a/src/nmod_mat/rref.c +++ b/src/nmod_mat/rref.c @@ -113,7 +113,7 @@ nmod_mat_rref(nmod_mat_t A) if (A->r == 1) { - mp_limb_t c, cinv; + ulong c, cinv; slong i, j; slong r = 0; diff --git a/src/nmod_mat/scalar.c b/src/nmod_mat/scalar.c index 4f6fea48d2..0e751a92e8 100644 --- a/src/nmod_mat/scalar.c +++ b/src/nmod_mat/scalar.c @@ -17,7 +17,7 @@ void nmod_mat_scalar_addmul_ui(nmod_mat_t dest, const nmod_mat_t X, - const nmod_mat_t Y, const mp_limb_t b) + const nmod_mat_t Y, const ulong b) { slong i, j; @@ -43,7 +43,7 @@ nmod_mat_scalar_addmul_ui(nmod_mat_t dest, const nmod_mat_t X, #define UWORD_HALF (UWORD_MAX / 2 + 1) void -nmod_mat_scalar_mul(nmod_mat_t B, const nmod_mat_t A, mp_limb_t c) +nmod_mat_scalar_mul(nmod_mat_t B, const nmod_mat_t A, ulong c) { if (c == UWORD(0)) { @@ -60,7 +60,7 @@ nmod_mat_scalar_mul(nmod_mat_t B, const nmod_mat_t A, mp_limb_t c) else if (A->r * A->c > 10 && A->mod.n < UWORD_HALF) { slong i, j; - mp_limb_t w_pr = n_mulmod_precomp_shoup(c, A->mod.n); + ulong w_pr = n_mulmod_precomp_shoup(c, A->mod.n); for (i = 0; i < A->r; i++) for (j = 0; j < A->c; j++) diff --git a/src/nmod_mat/set_mod.c b/src/nmod_mat/set_mod.c index 0012e4b109..84a3cf92dc 100644 --- a/src/nmod_mat/set_mod.c +++ b/src/nmod_mat/set_mod.c @@ -12,7 +12,7 @@ #include "ulong_extras.h" #include "nmod_mat.h" -void nmod_mat_set_mod(nmod_mat_t mat, mp_limb_t n) +void nmod_mat_set_mod(nmod_mat_t mat, ulong n) { mat->mod.n = n; mat->mod.norm = flint_clz(n); diff --git a/src/nmod_mat/solve_tril.c b/src/nmod_mat/solve_tril.c index 9214e13f24..d378c09d17 100644 --- a/src/nmod_mat/solve_tril.c +++ b/src/nmod_mat/solve_tril.c @@ -19,7 +19,7 @@ nmod_mat_solve_tril_classical(nmod_mat_t X, const nmod_mat_t L, const nmod_mat_t int nlimbs; slong i, j, n, m; nmod_t mod; - mp_ptr inv, tmp; + nn_ptr inv, tmp; n = L->r; m = B->c; @@ -44,7 +44,7 @@ nmod_mat_solve_tril_classical(nmod_mat_t X, const nmod_mat_t L, const nmod_mat_t for (j = 0; j < n; j++) { - mp_limb_t s; + ulong s; s = _nmod_vec_dot(L->rows[j], tmp, j, mod, nlimbs); s = nmod_sub(nmod_mat_entry(B, j, i), s, mod); if (!unit) diff --git a/src/nmod_mat/solve_triu.c b/src/nmod_mat/solve_triu.c index 6ccab04231..9a3e2cf05f 100644 --- a/src/nmod_mat/solve_triu.c +++ b/src/nmod_mat/solve_triu.c @@ -19,7 +19,7 @@ nmod_mat_solve_triu_classical(nmod_mat_t X, const nmod_mat_t U, const nmod_mat_t int nlimbs; slong i, j, n, m; nmod_t mod; - mp_ptr inv, tmp; + nn_ptr inv, tmp; n = U->r; m = B->c; @@ -44,7 +44,7 @@ nmod_mat_solve_triu_classical(nmod_mat_t X, const nmod_mat_t U, const nmod_mat_t for (j = n - 1; j >= 0; j--) { - mp_limb_t s; + ulong s; s = _nmod_vec_dot(U->rows[j] + 
j + 1, tmp + j + 1, n - j - 1, mod, nlimbs); s = nmod_sub(nmod_mat_entry(B, j, i), s, mod); diff --git a/src/nmod_mat/solve_vec.c b/src/nmod_mat/solve_vec.c index 3f0d3c4224..d2e910ca8d 100644 --- a/src/nmod_mat/solve_vec.c +++ b/src/nmod_mat/solve_vec.c @@ -12,7 +12,7 @@ #include "nmod_mat.h" int -nmod_mat_solve_vec(mp_ptr x, const nmod_mat_t A, mp_srcptr b) +nmod_mat_solve_vec(nn_ptr x, const nmod_mat_t A, nn_srcptr b) { nmod_mat_t X, B; int result; @@ -29,7 +29,7 @@ nmod_mat_solve_vec(mp_ptr x, const nmod_mat_t A, mp_srcptr b) nmod_mat_window_init(B, A, 0, 0, m, 1); for (i = 0; i < m; i++) X->rows[i] = x + i; - for (i = 0; i < m; i++) B->rows[i] = (mp_ptr) (b + i); + for (i = 0; i < m; i++) B->rows[i] = (nn_ptr) (b + i); result = nmod_mat_solve(X, A, B); diff --git a/src/nmod_mat/strong_echelon_form.c b/src/nmod_mat/strong_echelon_form.c index e23491269f..34394045a5 100644 --- a/src/nmod_mat/strong_echelon_form.c +++ b/src/nmod_mat/strong_echelon_form.c @@ -17,7 +17,7 @@ static inline int _nmod_mat_pivot(nmod_mat_t A, slong start_row, slong col) { slong j; - mp_ptr u; + nn_ptr u; if (nmod_mat_entry(A, start_row, col) != 0) return 1; @@ -37,9 +37,9 @@ _nmod_mat_pivot(nmod_mat_t A, slong start_row, slong col) } static void -_n_ppio(mp_ptr ppi, mp_ptr ppo, mp_limb_t a, mp_limb_t b) +_n_ppio(nn_ptr ppi, nn_ptr ppo, ulong a, ulong b) { - mp_limb_t c, n, g; + ulong c, n, g; c = n_gcd(a, b); n = a/c; @@ -54,20 +54,20 @@ _n_ppio(mp_ptr ppi, mp_ptr ppo, mp_limb_t a, mp_limb_t b) *ppo = n; } -static mp_limb_t -_n_stab(mp_limb_t a, mp_limb_t b, nmod_t N) +static ulong +_n_stab(ulong a, ulong b, nmod_t N) { - mp_limb_t g, s, t; + ulong g, s, t; g = n_gcd(a, b); b = n_gcd(g, N.n); _n_ppio(&s, &t, N.n/b, a/b); return t; } -static mp_limb_t -_n_unit(mp_limb_t a, nmod_t N) +static ulong +_n_unit(ulong a, nmod_t N) { - mp_limb_t g, s, l, d; + ulong g, s, l, d; g = n_gcdinv(&s, a, N.n); @@ -85,9 +85,9 @@ _n_unit(mp_limb_t a, nmod_t N) /* test whether q*a = b mod N has a solution */ static int -_n_is_divisible(mp_ptr q, mp_limb_t b, mp_limb_t a, nmod_t N) +_n_is_divisible(nn_ptr q, ulong b, ulong a, nmod_t N) { - mp_limb_t e, g; + ulong e, g; g = n_gcdinv(&e, a, N.n); if (( b % g ) == 0) @@ -102,11 +102,11 @@ _n_is_divisible(mp_ptr q, mp_limb_t b, mp_limb_t a, nmod_t N) void nmod_mat_strong_echelon_form(nmod_mat_t A) { - mp_limb_t s, t, u, v, q, t1, t2, g; + ulong s, t, u, v, q, t1, t2, g; slong m, n, row, col, i, k, l; - mp_limb_t **r; + ulong **r; nmod_t mod; - mp_ptr extra_row; + nn_ptr extra_row; if (nmod_mat_is_empty(A)) return; diff --git a/src/nmod_mat/test/main.c b/src/nmod_mat/test/main.c index 50426575ff..93fc20c62f 100644 --- a/src/nmod_mat/test/main.c +++ b/src/nmod_mat/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/nmod_mat/test/t-addmul_submul.c b/src/nmod_mat/test/t-addmul_submul.c index d5db056b97..4fbc8b5cf9 100644 --- a/src/nmod_mat/test/t-addmul_submul.c +++ b/src/nmod_mat/test/t-addmul_submul.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_mat_addmul_submul, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_mat_t A, B, C, D, T, E; - mp_limb_t mod = n_randtest_not_zero(state); + ulong mod = n_randtest_not_zero(state); int type, operation; slong m, k, n; diff --git a/src/nmod_mat/test/t-can_solve.c b/src/nmod_mat/test/t-can_solve.c index 5b80f7c889..0f3e4cca52 100644 --- a/src/nmod_mat/test/t-can_solve.c +++ b/src/nmod_mat/test/t-can_solve.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(nmod_mat_can_solve, state) { nmod_mat_t A, X, X2, B, AX; slong i, k, m, n; - mp_limb_t mod; + ulong mod; int solved; /* test random systems */ diff --git a/src/nmod_mat/test/t-det.c b/src/nmod_mat/test/t-det.c index d143448511..0381c9bd1d 100644 --- a/src/nmod_mat/test/t-det.c +++ b/src/nmod_mat/test/t-det.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_mat_det, state) { nmod_mat_t A; fmpz_mat_t B; - mp_limb_t Adet; + ulong Adet; fmpz_t Bdet; ulong t; diff --git a/src/nmod_mat/test/t-det_howell.c b/src/nmod_mat/test/t-det_howell.c index ab08958574..ddae2b0c1e 100644 --- a/src/nmod_mat/test/t-det_howell.c +++ b/src/nmod_mat/test/t-det_howell.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_mat_det_howell, state) { nmod_mat_t A; fmpz_mat_t B; - mp_limb_t Adet; + ulong Adet; fmpz_t Bdet; ulong t; diff --git a/src/nmod_mat/test/t-howell_form.c b/src/nmod_mat/test/t-howell_form.c index b8c5faf68a..d17c59c039 100644 --- a/src/nmod_mat/test/t-howell_form.c +++ b/src/nmod_mat/test/t-howell_form.c @@ -22,8 +22,8 @@ nmod_mat_is_in_howell_form(const nmod_mat_t A) slong i, j, r; int numberpivots = 0; int prevrowzero = 0; - mp_ptr extra_row; - mp_limb_t g; + nn_ptr extra_row; + ulong g; if (nmod_mat_is_zero(A)) return 1; @@ -150,11 +150,11 @@ TEST_FUNCTION_START(nmod_mat_howell_form, state) for (i = 0; i < 10000*flint_test_multiplier(); i++) { nmod_mat_t A, B, D; - mp_limb_t mod; + ulong mod; slong j, k, m, n, r1, r2; slong *perm; int equal; - mp_limb_t c; + ulong c; mod = n_randtest_not_zero(state); diff --git a/src/nmod_mat/test/t-inv.c b/src/nmod_mat/test/t-inv.c index 491ea62ac9..7ccb52523a 100644 --- a/src/nmod_mat/test/t-inv.c +++ b/src/nmod_mat/test/t-inv.c @@ -17,7 +17,7 @@ TEST_FUNCTION_START(nmod_mat_inv, state) { nmod_mat_t A, B, C, I; slong i, j, m, r; - mp_limb_t mod; + ulong mod; int result; for (i = 0; i < 1000 * flint_test_multiplier(); i++) diff --git a/src/nmod_mat/test/t-lu_classical.c b/src/nmod_mat/test/t-lu_classical.c index baeeea23ae..f95ebadc8f 100644 --- a/src/nmod_mat/test/t-lu_classical.c +++ b/src/nmod_mat/test/t-lu_classical.c @@ -19,12 +19,12 @@ void perm(nmod_mat_t A, slong * P) { slong i; - mp_ptr * tmp; + nn_ptr * tmp; if (A->c == 0 || A->r == 0) return; - tmp = flint_malloc(sizeof(mp_ptr) * A->r); + tmp = flint_malloc(sizeof(nn_ptr) * A->r); for (i = 0; i < A->r; i++) tmp[P[i]] = A->rows[i]; for (i = 0; i < A->r; i++) A->rows[i] = tmp[i]; @@ -87,7 +87,7 @@ TEST_FUNCTION_START(nmod_mat_lu_classical, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_mat_t A, LU; - mp_limb_t mod; + ulong mod; slong m, n, r, d, rank; slong * P; diff --git a/src/nmod_mat/test/t-lu_classical_delayed.c b/src/nmod_mat/test/t-lu_classical_delayed.c index 
54f9db1940..8b4431fb02 100644 --- a/src/nmod_mat/test/t-lu_classical_delayed.c +++ b/src/nmod_mat/test/t-lu_classical_delayed.c @@ -20,12 +20,12 @@ void perm(nmod_mat_t A, slong * P) { slong i; - mp_ptr * tmp; + nn_ptr * tmp; if (A->c == 0 || A->r == 0) return; - tmp = flint_malloc(sizeof(mp_ptr) * A->r); + tmp = flint_malloc(sizeof(nn_ptr) * A->r); for (i = 0; i < A->r; i++) tmp[P[i]] = A->rows[i]; for (i = 0; i < A->r; i++) A->rows[i] = tmp[i]; @@ -88,7 +88,7 @@ TEST_FUNCTION_START(nmod_mat_lu_classical_delayed, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_mat_t A, LU, LU2; - mp_limb_t mod; + ulong mod; slong m, n, r, d, rank, rank2; slong *P, *P2; diff --git a/src/nmod_mat/test/t-lu_recursive.c b/src/nmod_mat/test/t-lu_recursive.c index a77f0b88b9..7daf297131 100644 --- a/src/nmod_mat/test/t-lu_recursive.c +++ b/src/nmod_mat/test/t-lu_recursive.c @@ -19,12 +19,12 @@ void perm(nmod_mat_t A, slong * P) { slong i; - mp_ptr * tmp; + nn_ptr * tmp; if (A->c == 0 || A->r == 0) return; - tmp = flint_malloc(sizeof(mp_ptr) * A->r); + tmp = flint_malloc(sizeof(nn_ptr) * A->r); for (i = 0; i < A->r; i++) tmp[P[i]] = A->rows[i]; for (i = 0; i < A->r; i++) A->rows[i] = tmp[i]; @@ -87,7 +87,7 @@ TEST_FUNCTION_START(nmod_mat_lu_recursive, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_mat_t A, LU; - mp_limb_t mod; + ulong mod; slong m, n, r, d, rank; slong * P; diff --git a/src/nmod_mat/test/t-mul.c b/src/nmod_mat/test/t-mul.c index cc2c98dcda..631510a804 100644 --- a/src/nmod_mat/test/t-mul.c +++ b/src/nmod_mat/test/t-mul.c @@ -22,8 +22,8 @@ nmod_mat_mul_check(nmod_mat_t C, const nmod_mat_t A, const nmod_mat_t B) { slong i, j, k; - mp_limb_t s0, s1, s2; - mp_limb_t t0, t1; + ulong s0, s1, s2; + ulong t0, t1; for (i = 0; i < A->r; i++) { @@ -52,7 +52,7 @@ TEST_FUNCTION_START(nmod_mat_mul, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_mat_t A, B, C, D; - mp_limb_t mod; + ulong mod; slong m, k, n; diff --git a/src/nmod_mat/test/t-mul_blas.c b/src/nmod_mat/test/t-mul_blas.c index c48fad8c76..f777ea367d 100644 --- a/src/nmod_mat/test/t-mul_blas.c +++ b/src/nmod_mat/test/t-mul_blas.c @@ -36,7 +36,7 @@ TEST_FUNCTION_START(nmod_mat_mul_blas, state) for (i = 0; i < 1 * flint_test_multiplier(); i++) { nmod_mat_t A, B, C, D; - mp_limb_t modulus; + ulong modulus; slong m, k, n; m = n_randint(state, 150) + 2; diff --git a/src/nmod_mat/test/t-mul_classical_threaded.c b/src/nmod_mat/test/t-mul_classical_threaded.c index 0038a60704..9bb6bf774d 100644 --- a/src/nmod_mat/test/t-mul_classical_threaded.c +++ b/src/nmod_mat/test/t-mul_classical_threaded.c @@ -26,8 +26,8 @@ nmod_mat_mul_check(nmod_mat_t C, const nmod_mat_t A, const nmod_mat_t B) { slong i, j, k; - mp_limb_t s0, s1, s2; - mp_limb_t t0, t1; + ulong s0, s1, s2; + ulong t0, t1; for (i = 0; i < A->r; i++) { @@ -57,7 +57,7 @@ TEST_FUNCTION_START(nmod_mat_mul_classical_threaded, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_mat_t A, B, C, D; - mp_limb_t mod; + ulong mod; slong m, k, n; diff --git a/src/nmod_mat/test/t-mul_nmod_vec.c b/src/nmod_mat/test/t-mul_nmod_vec.c index 8d6209907c..d4a543f402 100644 --- a/src/nmod_mat/test/t-mul_nmod_vec.c +++ b/src/nmod_mat/test/t-mul_nmod_vec.c @@ -19,10 +19,10 @@ TEST_FUNCTION_START(nmod_mat_mul_nmod_vec, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { - mp_limb_t p; + ulong p; nmod_mat_t A, B, C; - mp_limb_t * b, * c; - mp_limb_t ** bb, ** cc; + ulong * b, * c; + ulong ** bb, ** cc; slong j, m, n, blen; p = 
n_randtest_not_zero(state); @@ -40,22 +40,22 @@ TEST_FUNCTION_START(nmod_mat_mul_nmod_vec, state) _nmod_vec_randtest(c, state, m, A->mod); _nmod_vec_randtest(b, state, blen, A->mod); - cc = FLINT_ARRAY_ALLOC(m, mp_limb_t*); + cc = FLINT_ARRAY_ALLOC(m, ulong*); for (j = 0; j < m; j++) { - cc[j] = FLINT_ARRAY_ALLOC(1, mp_limb_t); + cc[j] = FLINT_ARRAY_ALLOC(1, ulong); cc[j][0] = c[j]; } - bb = FLINT_ARRAY_ALLOC(blen, mp_limb_t*); + bb = FLINT_ARRAY_ALLOC(blen, ulong*); for (j = 0; j < blen; j++) { - bb[j] = FLINT_ARRAY_ALLOC(1, mp_limb_t); + bb[j] = FLINT_ARRAY_ALLOC(1, ulong); bb[j][0] = b[j]; } nmod_mat_mul_nmod_vec(c, A, b, blen); - nmod_mat_mul_nmod_vec_ptr(cc, A, (const mp_limb_t * const *)bb, blen); + nmod_mat_mul_nmod_vec_ptr(cc, A, (const ulong * const *)bb, blen); /* supposed to match mul of the chopped or zero-extended b */ for (j = 0; j < n && j < blen; j++) diff --git a/src/nmod_mat/test/t-mul_strassen.c b/src/nmod_mat/test/t-mul_strassen.c index dcef56ec39..b5e4446eb0 100644 --- a/src/nmod_mat/test/t-mul_strassen.c +++ b/src/nmod_mat/test/t-mul_strassen.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(nmod_mat_mul_strassen, state) for (i = 0; i < 20 * flint_test_multiplier(); i++) { nmod_mat_t A, B, C, D; - mp_limb_t mod = n_randtest_not_zero(state); + ulong mod = n_randtest_not_zero(state); slong m, k, n; diff --git a/src/nmod_mat/test/t-nmod_vec_mul.c b/src/nmod_mat/test/t-nmod_vec_mul.c index 0ecd4655ed..891e4121f1 100644 --- a/src/nmod_mat/test/t-nmod_vec_mul.c +++ b/src/nmod_mat/test/t-nmod_vec_mul.c @@ -19,10 +19,10 @@ TEST_FUNCTION_START(nmod_mat_nmod_vec_mul, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { - mp_limb_t p; + ulong p; nmod_mat_t A, B, C; - mp_limb_t * a, * c; - mp_limb_t ** aa, ** cc; + ulong * a, * c; + ulong ** aa, ** cc; slong j, m, n, alen; p = n_randtest_not_zero(state); @@ -40,22 +40,22 @@ TEST_FUNCTION_START(nmod_mat_nmod_vec_mul, state) _nmod_vec_randtest(c, state, n, B->mod); _nmod_vec_randtest(a, state, alen, B->mod); - cc = FLINT_ARRAY_ALLOC(n, mp_limb_t*); + cc = FLINT_ARRAY_ALLOC(n, ulong*); for (j = 0; j < n; j++) { - cc[j] = FLINT_ARRAY_ALLOC(1, mp_limb_t); + cc[j] = FLINT_ARRAY_ALLOC(1, ulong); cc[j][0] = c[j]; } - aa = FLINT_ARRAY_ALLOC(alen, mp_limb_t*); + aa = FLINT_ARRAY_ALLOC(alen, ulong*); for (j = 0; j < alen; j++) { - aa[j] = FLINT_ARRAY_ALLOC(1, mp_limb_t); + aa[j] = FLINT_ARRAY_ALLOC(1, ulong); aa[j][0] = a[j]; } nmod_mat_nmod_vec_mul(c, a, alen, B); - nmod_mat_nmod_vec_mul_ptr(cc, (const mp_limb_t * const *)aa, alen, B); + nmod_mat_nmod_vec_mul_ptr(cc, (const ulong * const *)aa, alen, B); /* supposed to match mul of the chopped or zero-extended a */ for (j = 0; j < m && j < alen; j++) diff --git a/src/nmod_mat/test/t-nullspace.c b/src/nmod_mat/test/t-nullspace.c index e2b5548242..58ea714a7b 100644 --- a/src/nmod_mat/test/t-nullspace.c +++ b/src/nmod_mat/test/t-nullspace.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_mat_nullspace, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_mat_t A, B, ker; - mp_limb_t mod; + ulong mod; slong m, n, d, r, nullity, nulrank; m = n_randint(state, 30); diff --git a/src/nmod_mat/test/t-pow.c b/src/nmod_mat/test/t-pow.c index a9b65e4897..3f2b9610dd 100644 --- a/src/nmod_mat/test/t-pow.c +++ b/src/nmod_mat/test/t-pow.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(nmod_mat_pow, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_mat_t A, B, C, D; - mp_limb_t mod; + ulong mod; slong m, j; ulong exp; diff --git a/src/nmod_mat/test/t-rank.c b/src/nmod_mat/test/t-rank.c index 
4f53209dec..48fe8d8c51 100644 --- a/src/nmod_mat/test/t-rank.c +++ b/src/nmod_mat/test/t-rank.c @@ -17,7 +17,7 @@ TEST_FUNCTION_START(nmod_mat_rank, state) { nmod_mat_t A; slong i, m, n, d, r; - mp_limb_t mod; + ulong mod; /* Maximally sparse matrices of given rank */ for (i = 0; i < 1000 * flint_test_multiplier(); i++) diff --git a/src/nmod_mat/test/t-rref.c b/src/nmod_mat/test/t-rref.c index 8b75266f91..ca4de2ed27 100644 --- a/src/nmod_mat/test/t-rref.c +++ b/src/nmod_mat/test/t-rref.c @@ -57,11 +57,11 @@ TEST_FUNCTION_START(nmod_mat_rref, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_mat_t A, B, C, D; - mp_limb_t mod; + ulong mod; slong j, k, m, n, rank1, rank2; slong *perm; int equal; - mp_limb_t c; + ulong c; mod = n_randtest_prime(state, 0); diff --git a/src/nmod_mat/test/t-scalar_addmul_ui.c b/src/nmod_mat/test/t-scalar_addmul_ui.c index fcea17f464..53e7843314 100644 --- a/src/nmod_mat/test/t-scalar_addmul_ui.c +++ b/src/nmod_mat/test/t-scalar_addmul_ui.c @@ -15,7 +15,7 @@ TEST_FUNCTION_START(nmod_mat_scalar_addmul_ui, state) { slong m, n, mod, rep; - mp_limb_t x; + ulong x; for (rep = 0; rep < 1000 * flint_test_multiplier(); rep++) { diff --git a/src/nmod_mat/test/t-scalar_mul.c b/src/nmod_mat/test/t-scalar_mul.c index fd269b0362..e0cea4c6fb 100644 --- a/src/nmod_mat/test/t-scalar_mul.c +++ b/src/nmod_mat/test/t-scalar_mul.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_mat_scalar_mul, state) for (rep = 0; rep < 1000 * flint_test_multiplier(); rep++) { nmod_mat_t A, B, C, D; - mp_limb_t c; + ulong c; m = n_randint(state, 20); n = n_randint(state, 20); diff --git a/src/nmod_mat/test/t-solve.c b/src/nmod_mat/test/t-solve.c index ba176bc5be..6ded3dd9a5 100644 --- a/src/nmod_mat/test/t-solve.c +++ b/src/nmod_mat/test/t-solve.c @@ -17,7 +17,7 @@ TEST_FUNCTION_START(nmod_mat_solve, state) { nmod_mat_t A, X, B, AX; slong i, m, n, r; - mp_limb_t mod; + ulong mod; int solved; for (i = 0; i < 1000 * flint_test_multiplier(); i++) diff --git a/src/nmod_mat/test/t-solve_tril.c b/src/nmod_mat/test/t-solve_tril.c index 4611cc8b43..e0dcb34824 100644 --- a/src/nmod_mat/test/t-solve_tril.c +++ b/src/nmod_mat/test/t-solve_tril.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_mat_solve_tril, state) for (i = 0; i < 10 * flint_test_multiplier(); i++) { nmod_mat_t A, X, B, Y; - mp_limb_t m; + ulong m; slong rows, cols; int unit; diff --git a/src/nmod_mat/test/t-solve_tril_classical.c b/src/nmod_mat/test/t-solve_tril_classical.c index 81cb7b1ee1..5672014289 100644 --- a/src/nmod_mat/test/t-solve_tril_classical.c +++ b/src/nmod_mat/test/t-solve_tril_classical.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_mat_solve_tril_classical, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_mat_t A, X, B, Y; - mp_limb_t m; + ulong m; slong rows, cols; int unit; diff --git a/src/nmod_mat/test/t-solve_tril_recursive.c b/src/nmod_mat/test/t-solve_tril_recursive.c index 47d3aa6a73..6d722b19f6 100644 --- a/src/nmod_mat/test/t-solve_tril_recursive.c +++ b/src/nmod_mat/test/t-solve_tril_recursive.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_mat_solve_tril_recursive, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_mat_t A, X, B, Y; - mp_limb_t m; + ulong m; slong rows, cols; int unit; diff --git a/src/nmod_mat/test/t-solve_triu.c b/src/nmod_mat/test/t-solve_triu.c index 497e72a721..243fbc2592 100644 --- a/src/nmod_mat/test/t-solve_triu.c +++ b/src/nmod_mat/test/t-solve_triu.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_mat_solve_triu, state) for (i = 0; i < 10 * 
flint_test_multiplier(); i++) { nmod_mat_t A, X, B, Y; - mp_limb_t m; + ulong m; slong rows, cols; int unit; diff --git a/src/nmod_mat/test/t-solve_triu_classical.c b/src/nmod_mat/test/t-solve_triu_classical.c index 80e727315f..629e86d475 100644 --- a/src/nmod_mat/test/t-solve_triu_classical.c +++ b/src/nmod_mat/test/t-solve_triu_classical.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_mat_solve_triu_classical, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_mat_t A, X, B, Y; - mp_limb_t m; + ulong m; slong rows, cols; int unit; diff --git a/src/nmod_mat/test/t-solve_triu_recursive.c b/src/nmod_mat/test/t-solve_triu_recursive.c index a1529f9711..6ece5e12a5 100644 --- a/src/nmod_mat/test/t-solve_triu_recursive.c +++ b/src/nmod_mat/test/t-solve_triu_recursive.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_mat_solve_triu_recursive, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_mat_t A, X, B, Y; - mp_limb_t m; + ulong m; slong rows, cols; int unit; diff --git a/src/nmod_mat/test/t-solve_vec.c b/src/nmod_mat/test/t-solve_vec.c index 2787809f3f..18fb83210d 100644 --- a/src/nmod_mat/test/t-solve_vec.c +++ b/src/nmod_mat/test/t-solve_vec.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(nmod_mat_solve_vec, state) nmod_mat_t A, x, b, Ax; slong i, m, r; int solved; - mp_limb_t mod; + ulong mod; for (i = 0; i < 2000 * flint_test_multiplier(); i++) { diff --git a/src/nmod_mat/test/t-trace.c b/src/nmod_mat/test/t-trace.c index 2fd077782b..5905bf7fd7 100644 --- a/src/nmod_mat/test/t-trace.c +++ b/src/nmod_mat/test/t-trace.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_mat_trace, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_mat_t A, B, AB, BA; - mp_limb_t mod, trab, trba; + ulong mod, trab, trba; slong m, n; mod = n_randtest_prime(state, 0); diff --git a/src/nmod_mat/trace.c b/src/nmod_mat/trace.c index 7a26448b31..9da13d60fe 100644 --- a/src/nmod_mat/trace.c +++ b/src/nmod_mat/trace.c @@ -12,10 +12,10 @@ #include "nmod.h" #include "nmod_mat.h" -mp_limb_t +ulong nmod_mat_trace(const nmod_mat_t mat) { - mp_limb_t t; + ulong t; slong i, n = nmod_mat_nrows(mat); if (n == 0) diff --git a/src/nmod_mat/transpose.c b/src/nmod_mat/transpose.c index 495682d2ec..09f4e46261 100644 --- a/src/nmod_mat/transpose.c +++ b/src/nmod_mat/transpose.c @@ -14,7 +14,7 @@ void nmod_mat_transpose(nmod_mat_t B, const nmod_mat_t A) { - mp_limb_t tmp; + ulong tmp; slong i, j; diff --git a/src/nmod_mat/window.c b/src/nmod_mat/window.c index 14fab17a75..be5dd3a1b9 100644 --- a/src/nmod_mat/window.c +++ b/src/nmod_mat/window.c @@ -22,7 +22,7 @@ nmod_mat_window_init(nmod_mat_t window, const nmod_mat_t mat, window->entries = NULL; if (r2 > r1) - window->rows = (mp_limb_t **) flint_malloc((r2 - r1) * sizeof(mp_limb_t *)); + window->rows = (ulong **) flint_malloc((r2 - r1) * sizeof(ulong *)); else window->rows = NULL; diff --git a/src/nmod_mpoly.h b/src/nmod_mpoly.h index b7d582268b..ac5dd77f18 100644 --- a/src/nmod_mpoly.h +++ b/src/nmod_mpoly.h @@ -31,7 +31,7 @@ extern "C" { #endif FLINT_FORCE_INLINE -mp_limb_t * nmod_mpoly_term_coeff_ref(nmod_mpoly_t A, slong i, +ulong * nmod_mpoly_term_coeff_ref(nmod_mpoly_t A, slong i, const nmod_mpoly_ctx_t FLINT_UNUSED(ctx)) { FLINT_ASSERT(i < A->length); @@ -96,7 +96,7 @@ typedef struct slong degb_alloc; slong * deg_bounds; slong coeff_alloc; - mp_limb_t * coeffs; + ulong * coeffs; } nmod_mpolyd_struct; typedef nmod_mpolyd_struct nmod_mpolyd_t[1]; @@ -241,14 +241,14 @@ slong nmod_poly_stack_size_mpolyn(const nmod_poly_stack_t S) /* Context object 
************************************************************/ void nmod_mpoly_ctx_init(nmod_mpoly_ctx_t ctx, - slong nvars, const ordering_t ord, mp_limb_t modulus); + slong nvars, const ordering_t ord, ulong modulus); void nmod_mpoly_ctx_init_rand(nmod_mpoly_ctx_t ctx, flint_rand_t state, - slong max_nvars, mp_limb_t modulus); + slong max_nvars, ulong modulus); void nmod_mpoly_ctx_clear(nmod_mpoly_ctx_t ctx); -void nmod_mpoly_ctx_set_modulus(nmod_mpoly_ctx_t ctx, mp_limb_t modulus); +void nmod_mpoly_ctx_set_modulus(nmod_mpoly_ctx_t ctx, ulong modulus); NMOD_MPOLY_INLINE slong nmod_mpoly_ctx_nvars(const nmod_mpoly_ctx_t ctx) @@ -263,7 +263,7 @@ ordering_t nmod_mpoly_ctx_ord(const nmod_mpoly_ctx_t ctx) } NMOD_MPOLY_INLINE -mp_limb_t nmod_mpoly_ctx_modulus(const nmod_mpoly_ctx_t ctx) +ulong nmod_mpoly_ctx_modulus(const nmod_mpoly_ctx_t ctx) { return ctx->mod.n; } @@ -312,7 +312,7 @@ void nmod_mpoly_fit_length_reset_bits(nmod_mpoly_t A, NMOD_MPOLY_INLINE void _nmod_mpoly_fit_length( - mp_limb_t ** coeffs, + ulong ** coeffs, slong * coeffs_alloc, ulong ** exps, slong * exps_alloc, @@ -322,14 +322,14 @@ void _nmod_mpoly_fit_length( if (length > *coeffs_alloc) { *coeffs_alloc = FLINT_MAX(length, *coeffs_alloc*2); - *coeffs = (mp_limb_t *) flint_realloc(*coeffs, - *coeffs_alloc*sizeof(mp_limb_t)); + *coeffs = (ulong *) flint_realloc(*coeffs, + *coeffs_alloc*sizeof(ulong)); } if (N*length > *exps_alloc) { *exps_alloc = FLINT_MAX(N*length, *exps_alloc*2); - *exps = (mp_limb_t *) flint_realloc(*exps, *exps_alloc*sizeof(ulong)); + *exps = (ulong *) flint_realloc(*exps, *exps_alloc*sizeof(ulong)); } } @@ -472,7 +472,7 @@ void nmod_mpoly_get_coeff_vars_ui(nmod_mpoly_t C, const nmod_mpoly_t A, const slong * vars, const ulong * exps, slong length, const nmod_mpoly_ctx_t ctx); -NMOD_MPOLY_INLINE mp_limb_t nmod_mpoly_leadcoeff( +NMOD_MPOLY_INLINE ulong nmod_mpoly_leadcoeff( nmod_mpoly_t A, const nmod_mpoly_ctx_t FLINT_UNUSED(ctx)) { FLINT_ASSERT(A->length > 0); @@ -491,7 +491,7 @@ int nmod_mpoly_get_nmod_poly(nmod_poly_t A, const nmod_mpoly_t B, slong var, const nmod_mpoly_ctx_t ctx); void _nmod_mpoly_set_nmod_poly(nmod_mpoly_t A, flint_bitcnt_t Abits, - const mp_limb_t * Bcoeffs, slong Blen, + const ulong * Bcoeffs, slong Blen, slong var, const nmod_mpoly_ctx_t ctx); void nmod_mpoly_set_n_poly_mod(nmod_mpoly_t A, const n_poly_t B, @@ -650,9 +650,9 @@ void nmod_mpoly_add(nmod_mpoly_t A, const nmod_mpoly_t B, void nmod_mpoly_sub(nmod_mpoly_t A, const nmod_mpoly_t B, const nmod_mpoly_t C, const nmod_mpoly_ctx_t ctx); -slong _nmod_mpoly_add(mp_limb_t * coeff1, ulong * exp1, - const mp_limb_t * coeff2, const ulong * exp2, slong len2, - const mp_limb_t * coeff3, const ulong * exp3, slong len3, +slong _nmod_mpoly_add(ulong * coeff1, ulong * exp1, + const ulong * coeff2, const ulong * exp2, slong len2, + const ulong * coeff3, const ulong * exp3, slong len3, slong N, const ulong * cmpmask, nmod_t fctx); slong _nmod_mpoly_sub(ulong * coeff1, ulong * exp1, @@ -673,7 +673,7 @@ void nmod_mpoly_make_monic(nmod_mpoly_t A, const nmod_mpoly_t B, const nmod_mpoly_ctx_t ctx); void nmod_mpoly_scalar_mul_nmod_invertible(nmod_mpoly_t A, - const nmod_mpoly_t B, mp_limb_t c, const nmod_mpoly_ctx_t ctx); + const nmod_mpoly_t B, ulong c, const nmod_mpoly_ctx_t ctx); void nmod_mpoly_scalar_addmul_ui(nmod_mpoly_t A, const nmod_mpoly_t B, const nmod_mpoly_t C, ulong d, @@ -691,9 +691,9 @@ int _ff_poly_pow_fmpz_is_not_feasible(slong length, const fmpz_t e); int _ff_poly_pow_ui_is_not_feasible(slong length, ulong e); -mp_limb_t 
_nmod_mpoly_eval_all_ui(const mp_limb_t * Acoeffs, +ulong _nmod_mpoly_eval_all_ui(const ulong * Acoeffs, const ulong * Aexps, slong Alen, flint_bitcnt_t Abits, - const mp_limb_t * alphas, const mpoly_ctx_t mctx, nmod_t mod); + const ulong * alphas, const mpoly_ctx_t mctx, nmod_t mod); ulong nmod_mpoly_evaluate_all_ui(const nmod_mpoly_t A, const ulong * vals, const nmod_mpoly_ctx_t ctx); @@ -747,8 +747,8 @@ int nmod_mpoly_mul_dense(nmod_mpoly_t A, const nmod_mpoly_t B, const nmod_mpoly_t C, const nmod_mpoly_ctx_t ctx); slong _nmod_mpoly_mul_johnson(nmod_mpoly_t A, - const mp_limb_t * coeff2, const ulong * exp2, slong len2, - const mp_limb_t * coeff3, const ulong * exp3, slong len3, + const ulong * coeff2, const ulong * exp2, slong len2, + const ulong * coeff3, const ulong * exp3, slong len3, flint_bitcnt_t bits, slong N, const ulong * cmpmask, nmod_t fctx); void _nmod_mpoly_mul_johnson_maxfields(nmod_mpoly_t A, @@ -788,7 +788,7 @@ int _nmod_mpoly_mul_dense(nmod_mpoly_t P, /* Powering ******************************************************************/ -void _nmod_mpoly_pow_rmul(nmod_mpoly_t A, const mp_limb_t * Bcoeffs, +void _nmod_mpoly_pow_rmul(nmod_mpoly_t A, const ulong * Bcoeffs, const ulong * Bexps, slong Blen, ulong k, slong N, const ulong * cmpmask, nmod_t mod, nmod_mpoly_t T); @@ -849,8 +849,8 @@ void nmod_mpoly_divexact(nmod_mpoly_t Q, const nmod_mpoly_t A, } int _nmod_mpoly_divides_monagan_pearce(nmod_mpoly_t Q, - const mp_limb_t * coeff2, const ulong * exp2, slong len2, - const mp_limb_t * coeff3, const ulong * exp3, slong len3, + const ulong * coeff2, const ulong * exp2, slong len2, + const ulong * coeff3, const ulong * exp3, slong len3, flint_bitcnt_t bits, slong N, const ulong * cmpmask, nmod_t fctx); @@ -991,7 +991,7 @@ typedef struct _nmod_mpoly_stripe_struct slong N; flint_bitcnt_t bits; nmod_t mod; - mp_limb_t lc_minus_inv; + ulong lc_minus_inv; const ulong * cmpmask; slong * startidx; slong * endidx; @@ -1241,7 +1241,7 @@ void nmod_mpolyu_shift_left(nmod_mpolyu_t A, ulong s); int nmod_mpolyu_content_mpoly(nmod_mpoly_t g, const nmod_mpolyu_t A, const nmod_mpoly_ctx_t ctx); -void nmod_mpolyu_scalar_mul_nmod(nmod_mpolyu_t A, mp_limb_t c, +void nmod_mpolyu_scalar_mul_nmod(nmod_mpolyu_t A, ulong c, const nmod_mpoly_ctx_t ctx); void nmod_mpolyu_set(nmod_mpolyu_t A, const nmod_mpolyu_t B, @@ -1286,7 +1286,7 @@ int nmod_mpolyu_gcdm_zippel(nmod_mpolyu_t G, nmod_mpolyu_t Abar, nmod_mpolyu_t Bbar, nmod_mpolyu_t A, nmod_mpolyu_t B, nmod_mpoly_ctx_t ctx, flint_rand_t randstate); -NMOD_MPOLY_INLINE mp_limb_t nmod_mpolyu_leadcoeff( +NMOD_MPOLY_INLINE ulong nmod_mpolyu_leadcoeff( nmod_mpolyu_t A, const nmod_mpoly_ctx_t ctx) { FLINT_ASSERT(A->length > 0); @@ -1336,7 +1336,7 @@ void nmod_mpolyn_mul_poly(nmod_mpolyn_t A, const nmod_mpolyn_t B, void nmod_mpoly_cvtto_mpolyn(nmod_mpolyn_t A, const nmod_mpoly_t B, slong var, const nmod_mpoly_ctx_t ctx); -NMOD_MPOLY_INLINE mp_limb_t nmod_mpolyn_leadcoeff(nmod_mpolyn_t A, +NMOD_MPOLY_INLINE ulong nmod_mpolyn_leadcoeff(nmod_mpolyn_t A, const nmod_mpoly_ctx_t FLINT_UNUSED(ctx)) { n_poly_struct * leadpoly; @@ -1393,7 +1393,7 @@ void nmod_mpolyn_one(nmod_mpolyn_t A, const nmod_mpoly_ctx_t ctx); void nmod_mpolyun_one(nmod_mpolyun_t A, const nmod_mpoly_ctx_t ctx); -mp_limb_t nmod_mpolyun_leadcoeff_last(nmod_mpolyun_t A, +ulong nmod_mpolyun_leadcoeff_last(nmod_mpolyun_t A, const nmod_mpoly_ctx_t ctx); void nmod_mpolyn_set_mod(nmod_mpolyn_t FLINT_UNUSED(A), const nmod_t FLINT_UNUSED(mod)); @@ -1406,10 +1406,10 @@ int nmod_mpolyun_is_nonzero_nmod(const 
nmod_mpolyun_t A, const nmod_mpoly_ctx_t void nmod_mpolyn_scalar_mul_nmod( nmod_mpolyn_t A, - mp_limb_t c, + ulong c, const nmod_mpoly_ctx_t ctx); -void nmod_mpolyun_scalar_mul_nmod(nmod_mpolyun_t A, mp_limb_t c, +void nmod_mpolyun_scalar_mul_nmod(nmod_mpolyun_t A, ulong c, const nmod_mpoly_ctx_t ctx); void nmod_mpolyn_mul_last(nmod_mpolyn_t A, n_poly_t b, @@ -1481,7 +1481,7 @@ void nmod_mpoly_from_mpolyn_perm_inflate(nmod_mpoly_t A, const nmod_mpolyn_t B, const nmod_mpoly_ctx_t nctx, const slong * perm, const ulong * shift, const ulong * stride); -NMOD_MPOLY_INLINE mp_limb_t nmod_mpolyun_leadcoeff( +NMOD_MPOLY_INLINE ulong nmod_mpolyun_leadcoeff( nmod_mpolyun_t A, const nmod_mpoly_ctx_t ctx) { FLINT_ASSERT(A->length > 0); @@ -1594,16 +1594,16 @@ int nmod_mpolyl_gcd_hensel_medprime( void _nmod_mpoly_monomial_evals_cache(n_poly_t E, const ulong * Aexps, flint_bitcnt_t Abits, slong Alen, - const mp_limb_t * betas, slong start, slong stop, + const ulong * betas, slong start, slong stop, const mpoly_ctx_t mctx, nmod_t mod); void _nmod_mpoly_monomial_evals2_cache(n_polyun_t E, const ulong * Aexps, flint_bitcnt_t Abits, slong Alen, - const mp_limb_t * betas, slong m, const mpoly_ctx_t ctx, nmod_t mod); + const ulong * betas, slong m, const mpoly_ctx_t ctx, nmod_t mod); /* interp ********************************************************************/ -void _nmod_poly_eval2_pow(mp_limb_t * vp, mp_limb_t * vm, +void _nmod_poly_eval2_pow(ulong * vp, ulong * vm, n_poly_t P, n_poly_t alphapow, nmod_t fctx); void nmod_mpolyn_interp_reduce_2sm_poly(n_poly_t E, @@ -1612,7 +1612,7 @@ void nmod_mpolyn_interp_reduce_2sm_poly(n_poly_t E, void nmod_mpolyn_interp_lift_2sm_poly(slong * lastdeg_, nmod_mpolyn_t F, const n_poly_t A, const n_poly_t B, - mp_limb_t alpha, const nmod_mpoly_ctx_t ctx); + ulong alpha, const nmod_mpoly_ctx_t ctx); int nmod_mpolyn_interp_crt_2sm_poly(slong * lastdeg_, nmod_mpolyn_t F, nmod_mpolyn_t T, const n_poly_t A, @@ -1632,7 +1632,7 @@ void nmod_mpolyn_interp_reduce_2sm_mpolyn(nmod_mpolyn_t E, void nmod_mpolyn_interp_lift_2sm_mpolyn(slong * lastdeg, nmod_mpolyn_t T, nmod_mpolyn_t A, nmod_mpolyn_t B, - slong var, mp_limb_t alpha, const nmod_mpoly_ctx_t ctx); + slong var, ulong alpha, const nmod_mpoly_ctx_t ctx); int nmod_mpolyn_interp_crt_2sm_mpolyn(slong * lastdeg, nmod_mpolyn_t F, nmod_mpolyn_t T, nmod_mpolyn_t A, @@ -1640,7 +1640,7 @@ int nmod_mpolyn_interp_crt_2sm_mpolyn(slong * lastdeg, n_poly_t alphapow, const nmod_mpoly_ctx_t ctx); void nmod_mpolyun_interp_reduce_sm_mpolyu(nmod_mpolyu_t B, - nmod_mpolyun_t A, mp_limb_t alpha, const nmod_mpoly_ctx_t ctx); + nmod_mpolyun_t A, ulong alpha, const nmod_mpoly_ctx_t ctx); void nmod_mpolyn_interp_lift_sm_mpoly(nmod_mpolyn_t A, const nmod_mpoly_t B, const nmod_mpoly_ctx_t ctx); @@ -1650,11 +1650,11 @@ void nmod_mpolyun_interp_lift_sm_mpolyu(nmod_mpolyun_t A, int nmod_mpolyn_interp_crt_sm_mpoly(slong * lastdeg, nmod_mpolyn_t F, nmod_mpolyn_t T, nmod_mpoly_t A, n_poly_t modulus, - mp_limb_t alpha, const nmod_mpoly_ctx_t ctx); + ulong alpha, const nmod_mpoly_ctx_t ctx); int nmod_mpolyun_interp_crt_sm_mpolyu(slong * lastdeg, nmod_mpolyun_t F, nmod_mpolyun_t T, nmod_mpolyu_t A, - n_poly_t modulus, mp_limb_t alpha, const nmod_mpoly_ctx_t ctx); + n_poly_t modulus, ulong alpha, const nmod_mpoly_ctx_t ctx); int nmod_mpolyn_interp_mcrt_sm_mpoly(slong * lastdeg_, nmod_mpolyn_t F, const nmod_mpoly_t A, const n_poly_t modulus, diff --git a/src/nmod_mpoly/add.c b/src/nmod_mpoly/add.c index 15611ab7ca..468f01ca83 100644 --- a/src/nmod_mpoly/add.c +++ 
b/src/nmod_mpoly/add.c @@ -14,9 +14,9 @@ #include "nmod_mpoly.h" slong _nmod_mpoly_add1( - mp_limb_t * Acoeffs, ulong * Aexps, - const mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, - const mp_limb_t * Ccoeffs, const ulong * Cexps, slong Clen, + ulong * Acoeffs, ulong * Aexps, + const ulong * Bcoeffs, const ulong * Bexps, slong Blen, + const ulong * Ccoeffs, const ulong * Cexps, slong Clen, ulong maskhi, nmod_t fctx) { @@ -66,9 +66,9 @@ slong _nmod_mpoly_add1( return k; } -slong _nmod_mpoly_add(mp_limb_t * Acoeffs, ulong * Aexps, - const mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, - const mp_limb_t * Ccoeffs, const ulong * Cexps, slong Clen, +slong _nmod_mpoly_add(ulong * Acoeffs, ulong * Aexps, + const ulong * Bcoeffs, const ulong * Bexps, slong Blen, + const ulong * Ccoeffs, const ulong * Cexps, slong Clen, slong N, const ulong * cmpmask, nmod_t fctx) { slong i = 0, j = 0, k = 0; diff --git a/src/nmod_mpoly/cmp.c b/src/nmod_mpoly/cmp.c index 31ef6bbc0c..cab42ebde4 100644 --- a/src/nmod_mpoly/cmp.c +++ b/src/nmod_mpoly/cmp.c @@ -18,8 +18,8 @@ int nmod_mpoly_cmp(const nmod_mpoly_t A, const nmod_mpoly_t B, int cmp; slong i; slong length = A->length; - mp_limb_t * Acoeffs = A->coeffs; - mp_limb_t * Bcoeffs = B->coeffs; + ulong * Acoeffs = A->coeffs; + ulong * Bcoeffs = B->coeffs; if (A->length != B->length) return A->length < B->length ? -1 : 1; diff --git a/src/nmod_mpoly/compose_mat.c b/src/nmod_mpoly/compose_mat.c index cd55e6ab47..06e4e46ad2 100644 --- a/src/nmod_mpoly/compose_mat.c +++ b/src/nmod_mpoly/compose_mat.c @@ -30,7 +30,7 @@ void _nmod_mpoly_compose_mat( flint_bitcnt_t Bbits = B->bits; slong BN = mpoly_words_per_exp(Bbits, ctxB->minfo); const ulong * Bexp = B->exps; - const mp_limb_t * Bcoeffs = B->coeffs; + const ulong * Bcoeffs = B->coeffs; slong AN; FLINT_ASSERT(A != B); diff --git a/src/nmod_mpoly/compose_nmod_mpoly_geobucket.c b/src/nmod_mpoly/compose_nmod_mpoly_geobucket.c index ecc33d079a..5030c62008 100644 --- a/src/nmod_mpoly/compose_nmod_mpoly_geobucket.c +++ b/src/nmod_mpoly/compose_nmod_mpoly_geobucket.c @@ -21,7 +21,7 @@ int nmod_mpoly_compose_nmod_mpoly_geobucket(nmod_mpoly_t A, int success = 1; slong i, j; slong Blen = B->length; - const mp_limb_t * Bcoeff = B->coeffs; + const ulong * Bcoeff = B->coeffs; const ulong * Bexp = B->exps; flint_bitcnt_t Bbits = B->bits; slong BN = mpoly_words_per_exp(Bbits, ctxB->minfo); diff --git a/src/nmod_mpoly/compose_nmod_mpoly_horner.c b/src/nmod_mpoly/compose_nmod_mpoly_horner.c index beb7e39f22..9461011c4c 100644 --- a/src/nmod_mpoly/compose_nmod_mpoly_horner.c +++ b/src/nmod_mpoly/compose_nmod_mpoly_horner.c @@ -90,7 +90,7 @@ int nmod_mpoly_compose_nmod_mpoly_horner(nmod_mpoly_t A, ulong * counts; slong Blen = B->length; slong * Blist; - const mp_limb_t * Bcoeff = B->coeffs; + const ulong * Bcoeff = B->coeffs; ulong * Bexp = B->exps; flint_bitcnt_t Bbits = B->bits; slong BN = mpoly_words_per_exp(Bbits, ctxB->minfo); diff --git a/src/nmod_mpoly/compose_nmod_poly.c b/src/nmod_mpoly/compose_nmod_poly.c index a84b2ce999..82867030d9 100644 --- a/src/nmod_mpoly/compose_nmod_poly.c +++ b/src/nmod_mpoly/compose_nmod_poly.c @@ -49,7 +49,7 @@ int _nmod_mpoly_compose_nmod_poly_sp(nmod_poly_t A, const nmod_mpoly_t B, slong shift, off; slong entries, k_len; slong Blen = B->length; - const mp_limb_t * Bcoeff = B->coeffs; + const ulong * Bcoeff = B->coeffs; ulong * Bexp = B->exps; slong * degrees; slong * offs; @@ -146,7 +146,7 @@ int _nmod_mpoly_compose_nmod_poly_mp(nmod_poly_t A, const nmod_mpoly_t B, slong i, k, N, 
nvars = ctx->minfo->nvars; slong off, entries, k_len; slong Blen = B->length; - const mp_limb_t * Bcoeff = B->coeffs; + const ulong * Bcoeff = B->coeffs; ulong * Bexp = B->exps; fmpz * degrees; slong * offs; diff --git a/src/nmod_mpoly/ctx.c b/src/nmod_mpoly/ctx.c index 84f53bfbce..acaa81e241 100644 --- a/src/nmod_mpoly/ctx.c +++ b/src/nmod_mpoly/ctx.c @@ -14,20 +14,20 @@ #include "nmod_mpoly.h" void nmod_mpoly_ctx_init(nmod_mpoly_ctx_t ctx, slong nvars, - const ordering_t ord, mp_limb_t modulus) + const ordering_t ord, ulong modulus) { mpoly_ctx_init(ctx->minfo, nvars, ord); nmod_init(&ctx->mod, modulus); } void nmod_mpoly_ctx_init_rand(nmod_mpoly_ctx_t ctx, flint_rand_t state, - slong max_nvars, mp_limb_t modulus) + slong max_nvars, ulong modulus) { mpoly_ctx_init_rand(ctx->minfo, state, max_nvars); nmod_init(&ctx->mod, modulus); } -void nmod_mpoly_ctx_set_modulus(nmod_mpoly_ctx_t ctx, mp_limb_t modulus) +void nmod_mpoly_ctx_set_modulus(nmod_mpoly_ctx_t ctx, ulong modulus) { nmod_init(&ctx->mod, modulus); } diff --git a/src/nmod_mpoly/derivative.c b/src/nmod_mpoly/derivative.c index 061fadbc00..de5c22eac8 100644 --- a/src/nmod_mpoly/derivative.c +++ b/src/nmod_mpoly/derivative.c @@ -14,8 +14,8 @@ #include "nmod_mpoly.h" static slong _nmod_mpoly_derivative( - mp_limb_t * coeff1, ulong * exp1, - const mp_limb_t * coeff2, const ulong * exp2, slong len2, + ulong * coeff1, ulong * exp1, + const ulong * coeff2, const ulong * exp2, slong len2, flint_bitcnt_t bits, slong N, slong offset, @@ -30,7 +30,7 @@ static slong _nmod_mpoly_derivative( len1 = 0; for (i = 0; i < len2; i++) { - mp_limb_t cr; + ulong cr; ulong c = (exp2[N*i + offset] >> shift) & mask; if (c == 0) continue; @@ -47,8 +47,8 @@ static slong _nmod_mpoly_derivative( static slong _nmod_mpoly_derivative_mp( - mp_limb_t * coeff1, ulong * exp1, - const mp_limb_t * coeff2, const ulong * exp2, slong len2, + ulong * coeff1, ulong * exp1, + const ulong * coeff2, const ulong * exp2, slong len2, flint_bitcnt_t bits, slong N, slong offset, @@ -57,17 +57,17 @@ static slong _nmod_mpoly_derivative_mp( { slong i, len1; slong esize = bits/FLINT_BITS; - mp_limb_t * t; + ulong * t; TMP_INIT; TMP_START; - t = (mp_limb_t *) TMP_ALLOC(esize*sizeof(mp_limb_t)); + t = (ulong *) TMP_ALLOC(esize*sizeof(ulong)); /* x^c -> c*x^(c-1) */ len1 = 0; for (i = 0; i < len2; i++) { - mp_limb_t cr = mpn_divrem_1(t, 0, exp2 + N*i + offset, esize, fctx.n); + ulong cr = mpn_divrem_1(t, 0, exp2 + N*i + offset, esize, fctx.n); coeff1[len1] = nmod_mul(coeff2[i], cr, fctx); if (coeff1[len1] == 0) continue; diff --git a/src/nmod_mpoly/div_monagan_pearce.c b/src/nmod_mpoly/div_monagan_pearce.c index 9cf44f56bb..ac82811903 100644 --- a/src/nmod_mpoly/div_monagan_pearce.c +++ b/src/nmod_mpoly/div_monagan_pearce.c @@ -15,8 +15,8 @@ static int _nmod_mpoly_div_monagan_pearce1( nmod_mpoly_t Q, - const mp_limb_t * Acoeffs, const ulong * Aexps, slong Alen, - const mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, + const ulong * Bcoeffs, const ulong * Bexps, slong Blen, flint_bitcnt_t bits, ulong maskhi, nmod_t fctx) @@ -27,12 +27,12 @@ static int _nmod_mpoly_div_monagan_pearce1( mpoly_heap_t * chain; slong * store, * store_base; mpoly_heap_t * x; - mp_limb_t * Qcoeffs = Q->coeffs; + ulong * Qcoeffs = Q->coeffs; ulong * Qexps = Q->exps; slong * hind; ulong mask, exp; int lt_divides; - mp_limb_t lc_minus_inv, acc0, acc1, acc2, pp1, pp0; + ulong lc_minus_inv, acc0, acc1, acc2, pp1, pp0; TMP_INIT; TMP_START; @@ -227,8 +227,8 
@@ static int _nmod_mpoly_div_monagan_pearce1( static int _nmod_mpoly_div_monagan_pearce( nmod_mpoly_t Q, - const mp_limb_t * Acoeffs, const ulong * Aexps, slong Alen, - const mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, + const ulong * Bcoeffs, const ulong * Bexps, slong Blen, flint_bitcnt_t bits, slong N, const ulong * cmpmask, @@ -241,7 +241,7 @@ static int _nmod_mpoly_div_monagan_pearce( mpoly_heap_t * chain; slong * store, * store_base; mpoly_heap_t * x; - mp_limb_t * Qcoeffs = Q->coeffs; + ulong * Qcoeffs = Q->coeffs; ulong * Qexps = Q->exps; ulong * exp, * exps; ulong ** exp_list; @@ -249,7 +249,7 @@ static int _nmod_mpoly_div_monagan_pearce( ulong mask; slong * hind; int lt_divides; - mp_limb_t lc_minus_inv, acc0, acc1, acc2, pp1, pp0; + ulong lc_minus_inv, acc0, acc1, acc2, pp1, pp0; TMP_INIT; if (N == 1) diff --git a/src/nmod_mpoly/divides_heap_threaded.c b/src/nmod_mpoly/divides_heap_threaded.c index fcec2294fd..179ebad0b3 100644 --- a/src/nmod_mpoly/divides_heap_threaded.c +++ b/src/nmod_mpoly/divides_heap_threaded.c @@ -84,20 +84,20 @@ static void vec_slong_print(const vec_slong_t v) */ typedef struct _nmod_mpoly_ts_struct { - mp_limb_t * volatile coeffs; /* this is coeff_array[idx] */ + ulong * volatile coeffs; /* this is coeff_array[idx] */ ulong * volatile exps; /* this is exp_array[idx] */ volatile slong length; slong alloc; flint_bitcnt_t bits; flint_bitcnt_t idx; - mp_limb_t * exp_array[FLINT_BITS]; + ulong * exp_array[FLINT_BITS]; ulong * coeff_array[FLINT_BITS]; } nmod_mpoly_ts_struct; typedef nmod_mpoly_ts_struct nmod_mpoly_ts_t[1]; static void nmod_mpoly_ts_init(nmod_mpoly_ts_t A, - mp_limb_t * Bcoeff, ulong * Bexp, slong Blen, + ulong * Bcoeff, ulong * Bexp, slong Blen, flint_bitcnt_t bits, slong N) { slong i; @@ -114,7 +114,7 @@ static void nmod_mpoly_ts_init(nmod_mpoly_ts_t A, A->exps = A->exp_array[idx] = (ulong *) flint_malloc(N*A->alloc*sizeof(ulong)); A->coeffs = A->coeff_array[idx] - = (mp_limb_t *) flint_malloc(A->alloc*sizeof(mp_limb_t)); + = (ulong *) flint_malloc(A->alloc*sizeof(ulong)); A->length = Blen; for (i = 0; i < Blen; i++) { @@ -140,7 +140,7 @@ static void nmod_mpoly_ts_clear(nmod_mpoly_ts_t A) /* put B on the end of A */ static void nmod_mpoly_ts_append(nmod_mpoly_ts_t A, - mp_limb_t * Bcoeff, ulong * Bexps, slong Blen, slong N) + ulong * Bcoeff, ulong * Bexps, slong Blen, slong N) { /* TODO: this needs barriers on non-x86 */ @@ -163,7 +163,7 @@ static void nmod_mpoly_ts_append(nmod_mpoly_ts_t A, { slong newalloc; ulong * newexps; - mp_limb_t * newcoeffs; + ulong * newcoeffs; flint_bitcnt_t newidx; newidx = FLINT_BIT_COUNT(newlength - 1); newidx = (newidx > 8) ? 
newidx - 8 : 0; @@ -174,7 +174,7 @@ static void nmod_mpoly_ts_append(nmod_mpoly_ts_t A, newexps = A->exp_array[newidx] = (ulong *) flint_malloc(N*newalloc*sizeof(ulong)); newcoeffs = A->coeff_array[newidx] - = (mp_limb_t *) flint_malloc(newalloc*sizeof(mp_limb_t)); + = (ulong *) flint_malloc(newalloc*sizeof(ulong)); for (i = 0; i < oldlength; i++) { @@ -242,7 +242,7 @@ typedef struct slong length; slong N; flint_bitcnt_t bits; - mp_limb_t lc_inv; + ulong lc_inv; ulong * cmpmask; int failed; #if PROFILE_THIS @@ -367,9 +367,9 @@ static void divides_heap_base_add_chunk(divides_heap_base_t H, divides_heap_chun */ static void _nmod_mpoly_mulsub_stripe1( nmod_mpoly_t A, - const mp_limb_t * Dcoeff, const ulong * Dexp, slong Dlen, - const mp_limb_t * Bcoeff, const ulong * Bexp, slong Blen, - const mp_limb_t * Ccoeff, const ulong * Cexp, slong Clen, + const ulong * Dcoeff, const ulong * Dexp, slong Dlen, + const ulong * Bcoeff, const ulong * Bexp, slong Blen, + const ulong * Ccoeff, const ulong * Cexp, slong Clen, const nmod_mpoly_stripe_t S) { int upperclosed; @@ -387,7 +387,7 @@ static void _nmod_mpoly_mulsub_stripe1( mpoly_heap_t * x; slong Di; slong Alen; - mp_limb_t * Acoeff = A->coeffs; + ulong * Acoeff = A->coeffs; ulong * Aexp = A->exps; ulong acc0, acc1, acc2, pp0, pp1; ulong exp; @@ -589,9 +589,9 @@ static void _nmod_mpoly_mulsub_stripe1( static void _nmod_mpoly_mulsub_stripe( nmod_mpoly_t A, - const mp_limb_t * Dcoeff, const ulong * Dexp, slong Dlen, - const mp_limb_t * Bcoeff, const ulong * Bexp, slong Blen, - const mp_limb_t * Ccoeff, const ulong * Cexp, slong Clen, + const ulong * Dcoeff, const ulong * Dexp, slong Dlen, + const ulong * Bcoeff, const ulong * Bexp, slong Blen, + const ulong * Ccoeff, const ulong * Cexp, slong Clen, const nmod_mpoly_stripe_t S) { int upperclosed; @@ -609,7 +609,7 @@ static void _nmod_mpoly_mulsub_stripe( mpoly_heap_t * x; slong Di; slong Alen; - mp_limb_t * Acoeff = A->coeffs; + ulong * Acoeff = A->coeffs; ulong * Aexp = A->exps; ulong acc0, acc1, acc2, pp0, pp1; ulong * exp, * exps; @@ -836,8 +836,8 @@ static void _nmod_mpoly_mulsub_stripe( */ static int _nmod_mpoly_divides_stripe1( nmod_mpoly_t Q, - const mp_limb_t * Acoeff, const ulong * Aexp, slong Alen, - const mp_limb_t * Bcoeff, const ulong * Bexp, slong Blen, + const ulong * Acoeff, const ulong * Aexp, slong Alen, + const ulong * Bcoeff, const ulong * Bexp, slong Blen, const nmod_mpoly_stripe_t S) { flint_bitcnt_t bits = S->bits; @@ -852,10 +852,10 @@ static int _nmod_mpoly_divides_stripe1( slong * store, * store_base; mpoly_heap_t * x; slong Qlen; - mp_limb_t * Qcoeff = Q->coeffs; + ulong * Qcoeff = Q->coeffs; ulong * Qexp = Q->exps; ulong exp; - mp_limb_t acc0, acc1, acc2, pp1, pp0; + ulong acc0, acc1, acc2, pp1, pp0; ulong mask; slong * hind; @@ -1062,8 +1062,8 @@ static int _nmod_mpoly_divides_stripe1( static int _nmod_mpoly_divides_stripe( nmod_mpoly_t Q, - const mp_limb_t * Acoeff, const ulong * Aexp, slong Alen, - const mp_limb_t * Bcoeff, const ulong * Bexp, slong Blen, + const ulong * Acoeff, const ulong * Aexp, slong Alen, + const ulong * Bcoeff, const ulong * Bexp, slong Blen, const nmod_mpoly_stripe_t S) { flint_bitcnt_t bits = S->bits; @@ -1076,12 +1076,12 @@ static int _nmod_mpoly_divides_stripe( slong * store, * store_base; mpoly_heap_t * x; slong Qlen; - mp_limb_t * Qcoeff = Q->coeffs; + ulong * Qcoeff = Q->coeffs; ulong * Qexp = Q->exps; ulong * exp, * exps; ulong ** exp_list; slong exp_next; - mp_limb_t acc0, acc1, acc2, pp1, pp0; + ulong acc0, acc1, acc2, pp1, pp0; ulong mask; 
slong * hind; @@ -1513,7 +1513,7 @@ static void trychunk(worker_arg_t W, divides_heap_chunk_t L) if (L->producer == 1) { divides_heap_chunk_struct * next; - mp_limb_t * Rcoeff; + ulong * Rcoeff; ulong * Rexp; slong Rlen; @@ -1709,7 +1709,7 @@ int _nmod_mpoly_divides_heap_threaded_pool( ulong * Aexp, * Bexp; int freeAexp, freeBexp; worker_arg_struct * worker_args; - mp_limb_t qcoeff; + ulong qcoeff; ulong * texps, * qexps; divides_heap_base_t H; #if PROFILE_THIS diff --git a/src/nmod_mpoly/divides_monagan_pearce.c b/src/nmod_mpoly/divides_monagan_pearce.c index 7045a11893..96e10dac55 100644 --- a/src/nmod_mpoly/divides_monagan_pearce.c +++ b/src/nmod_mpoly/divides_monagan_pearce.c @@ -17,8 +17,8 @@ static int _nmod_mpoly_divides_monagan_pearce1( nmod_mpoly_t Q, - const mp_limb_t * coeff2, const ulong * exp2, slong len2, - const mp_limb_t * coeff3, const ulong * exp3, slong len3, + const ulong * coeff2, const ulong * exp2, slong len2, + const ulong * coeff3, const ulong * exp3, slong len3, slong bits, ulong maskhi, nmod_t fctx) @@ -30,11 +30,11 @@ static int _nmod_mpoly_divides_monagan_pearce1( mpoly_heap_t * chain; slong * store, * store_base; mpoly_heap_t * x; - mp_limb_t * q_coeff = Q->coeffs; + ulong * q_coeff = Q->coeffs; ulong * q_exp = Q->exps; slong * hind; ulong mask, exp, maxexp = exp2[len2 - 1]; - mp_limb_t lc_minus_inv, acc0, acc1, acc2, pp1, pp0; + ulong lc_minus_inv, acc0, acc1, acc2, pp1, pp0; TMP_INIT; TMP_START; @@ -204,8 +204,8 @@ static int _nmod_mpoly_divides_monagan_pearce1( int _nmod_mpoly_divides_monagan_pearce( nmod_mpoly_t Q, - const mp_limb_t * coeff2, const ulong * exp2, slong len2, - const mp_limb_t * coeff3, const ulong * exp3, slong len3, + const ulong * coeff2, const ulong * exp2, slong len2, + const ulong * coeff3, const ulong * exp3, slong len3, flint_bitcnt_t bits, slong N, const ulong * cmpmask, @@ -218,12 +218,12 @@ int _nmod_mpoly_divides_monagan_pearce( mpoly_heap_t * chain; slong * store, * store_base; mpoly_heap_t * x; - mp_limb_t * q_coeff = Q->coeffs; + ulong * q_coeff = Q->coeffs; ulong * q_exp = Q->exps; ulong * exp, * exps; ulong ** exp_list; slong exp_next; - mp_limb_t lc_minus_inv, acc0, acc1, acc2, pp1, pp0; + ulong lc_minus_inv, acc0, acc1, acc2, pp1, pp0; ulong mask; slong * hind; TMP_INIT; diff --git a/src/nmod_mpoly/divrem_ideal_monagan_pearce.c b/src/nmod_mpoly/divrem_ideal_monagan_pearce.c index 9428ffde55..e323575fb6 100644 --- a/src/nmod_mpoly/divrem_ideal_monagan_pearce.c +++ b/src/nmod_mpoly/divrem_ideal_monagan_pearce.c @@ -21,7 +21,7 @@ int _nmod_mpoly_divrem_ideal_monagan_pearce1( nmod_mpoly_struct ** Q, nmod_mpoly_t R, - const mp_limb_t * Acoeffs, const ulong * Aexps, slong Alen, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, nmod_mpoly_struct * const * Bs, ulong * const * Bexps, slong Blen, flint_bitcnt_t bits, const nmod_mpoly_ctx_t ctx, @@ -36,13 +36,13 @@ int _nmod_mpoly_divrem_ideal_monagan_pearce1( mpoly_nheap_t ** chains, * chains_ptr; slong ** hinds, * hinds_ptr; mpoly_nheap_t * x; - mp_limb_t * r_coeff = R->coeffs; + ulong * r_coeff = R->coeffs; ulong * r_exp = R->exps; slong r_len; ulong exp, texp; ulong mask; slong * q_len, * s; - mp_limb_t * lc_minus_inv, acc0, acc1, acc2, pp1, pp0; + ulong * lc_minus_inv, acc0, acc1, acc2, pp1, pp0; TMP_INIT; TMP_START; @@ -91,7 +91,7 @@ int _nmod_mpoly_divrem_ideal_monagan_pearce1( HEAP_ASSIGN(heap[1], Aexps[0], x); /* precompute leading coeff info */ - lc_minus_inv = (mp_limb_t *) TMP_ALLOC(Blen*sizeof(mp_limb_t)); + lc_minus_inv = (ulong *) TMP_ALLOC(Blen*sizeof(ulong)); 
for (w = 0; w < Blen; w++) lc_minus_inv[w] = ctx->mod.n - nmod_inv(Bs[w]->coeffs[0], ctx->mod); @@ -250,7 +250,7 @@ break_continue:; int _nmod_mpoly_divrem_ideal_monagan_pearce( nmod_mpoly_struct ** Q, nmod_mpoly_t R, - const mp_limb_t * Acoeffs, const ulong * Aexps, slong Alen, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, nmod_mpoly_struct * const * Bs, ulong * const * Bexps, slong Blen, slong N, flint_bitcnt_t bits, @@ -266,7 +266,7 @@ int _nmod_mpoly_divrem_ideal_monagan_pearce( mpoly_nheap_t ** chains; slong ** hinds; mpoly_nheap_t * x; - mp_limb_t * r_coeff = R->coeffs; + ulong * r_coeff = R->coeffs; ulong * r_exp = R->exps; slong r_len; ulong * exp, * exps, * texp; @@ -274,7 +274,7 @@ int _nmod_mpoly_divrem_ideal_monagan_pearce( slong exp_next; ulong mask; slong * q_len, * s; - mp_limb_t * lc_minus_inv, acc0, acc1, acc2, pp1, pp0; + ulong * lc_minus_inv, acc0, acc1, acc2, pp1, pp0; TMP_INIT; if (N == 1) @@ -331,7 +331,7 @@ int _nmod_mpoly_divrem_ideal_monagan_pearce( mpoly_monomial_set(heap[1].exp, Aexps, N); /* precompute leading coeff info */ - lc_minus_inv = (mp_limb_t *) TMP_ALLOC(Blen*sizeof(mp_limb_t)); + lc_minus_inv = (ulong *) TMP_ALLOC(Blen*sizeof(ulong)); for (w = 0; w < Blen; w++) lc_minus_inv[w] = ctx->mod.n - nmod_inv(Bs[w]->coeffs[0], ctx->mod); diff --git a/src/nmod_mpoly/divrem_monagan_pearce.c b/src/nmod_mpoly/divrem_monagan_pearce.c index c7a313d1b7..53c306d3b9 100644 --- a/src/nmod_mpoly/divrem_monagan_pearce.c +++ b/src/nmod_mpoly/divrem_monagan_pearce.c @@ -16,20 +16,20 @@ static int _nmod_mpoly_divrem_monagan_pearce1_binomial( nmod_mpoly_t Q, nmod_mpoly_t R, - const mp_limb_t * Acoeffs, const ulong * Aexps, slong Alen, - const mp_limb_t * Bcoeffs, const ulong * Bexps, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, + const ulong * Bcoeffs, const ulong * Bexps, flint_bitcnt_t bits, ulong maskhi, nmod_t mod) { - mp_limb_t * Qcoeffs = Q->coeffs; - mp_limb_t * Rcoeffs = R->coeffs; + ulong * Qcoeffs = Q->coeffs; + ulong * Rcoeffs = R->coeffs; ulong * Qexps = Q->exps; ulong * Rexps = R->exps; ulong lexp, mask = mpoly_overflow_mask_sp(bits); - mp_limb_t lcoeff; - mp_limb_t lc_inv = nmod_inv(Bcoeffs[0], mod); - mp_limb_t mBcoeff1 = mod.n - Bcoeffs[1]; + ulong lcoeff; + ulong lc_inv = nmod_inv(Bcoeffs[0], mod); + ulong mBcoeff1 = mod.n - Bcoeffs[1]; slong Qlen = 0; slong Rlen = 0; slong Aidx = 0; @@ -139,8 +139,8 @@ static int _nmod_mpoly_divrem_monagan_pearce1_binomial( static int _nmod_mpoly_divrem_monagan_pearce1( nmod_mpoly_t Q, nmod_mpoly_t R, - const mp_limb_t * Acoeffs, const ulong * Aexps, slong Alen, - const mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, + const ulong * Bcoeffs, const ulong * Bexps, slong Blen, flint_bitcnt_t bits, ulong maskhi, nmod_t fctx) @@ -151,14 +151,14 @@ static int _nmod_mpoly_divrem_monagan_pearce1( mpoly_heap_t * chain; slong * store, * store_base; mpoly_heap_t * x; - mp_limb_t * Qcoeffs = Q->coeffs; - mp_limb_t * Rcoeffs = R->coeffs; + ulong * Qcoeffs = Q->coeffs; + ulong * Rcoeffs = R->coeffs; ulong * Qexps = Q->exps; ulong * Rexps = R->exps; slong * hind; ulong mask, exp; int lt_divides; - mp_limb_t lc_minus_inv, acc0, acc1, acc2, pp1, pp0; + ulong lc_minus_inv, acc0, acc1, acc2, pp1, pp0; TMP_INIT; TMP_START; @@ -346,8 +346,8 @@ static int _nmod_mpoly_divrem_monagan_pearce1( static int _nmod_mpoly_divrem_monagan_pearce( nmod_mpoly_t Q, nmod_mpoly_t R, - const mp_limb_t * Acoeffs, const ulong * Aexps, slong Alen, - const mp_limb_t * Bcoeffs, 
const ulong * Bexps, slong Blen, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, + const ulong * Bcoeffs, const ulong * Bexps, slong Blen, slong bits, slong N, const ulong * cmpmask, @@ -360,8 +360,8 @@ static int _nmod_mpoly_divrem_monagan_pearce( mpoly_heap_t * chain; slong * store, * store_base; mpoly_heap_t * x; - mp_limb_t * Qcoeffs = Q->coeffs; - mp_limb_t * Rcoeffs = R->coeffs; + ulong * Qcoeffs = Q->coeffs; + ulong * Rcoeffs = R->coeffs; ulong * Qexps = Q->exps; ulong * Rexps = R->exps; ulong * exp, * exps; @@ -370,7 +370,7 @@ static int _nmod_mpoly_divrem_monagan_pearce( ulong mask; slong * hind; int lt_divides; - mp_limb_t lc_minus_inv, acc0, acc1, acc2, pp1, pp0; + ulong lc_minus_inv, acc0, acc1, acc2, pp1, pp0; TMP_INIT; if (N == 1) diff --git a/src/nmod_mpoly/equal.c b/src/nmod_mpoly/equal.c index c6b2a43508..365cc711f2 100644 --- a/src/nmod_mpoly/equal.c +++ b/src/nmod_mpoly/equal.c @@ -13,8 +13,8 @@ #include "mpoly.h" #include "nmod_mpoly.h" -int _nmod_mpoly_equal(const mp_limb_t * coeff1, const ulong * exp1, - const mp_limb_t * coeff2, const ulong * exp2, +int _nmod_mpoly_equal(const ulong * coeff1, const ulong * exp1, + const ulong * coeff2, const ulong * exp2, slong len, slong N) { slong i; diff --git a/src/nmod_mpoly/evaluate_all.c b/src/nmod_mpoly/evaluate_all.c index 46409c0699..a5c836662d 100644 --- a/src/nmod_mpoly/evaluate_all.c +++ b/src/nmod_mpoly/evaluate_all.c @@ -15,12 +15,12 @@ #include "mpoly.h" #include "nmod_mpoly.h" -mp_limb_t _nmod_mpoly_eval_all_ui( - const mp_limb_t * Acoeffs, +ulong _nmod_mpoly_eval_all_ui( + const ulong * Acoeffs, const ulong * Aexps, slong Alen, flint_bitcnt_t Abits, - const mp_limb_t * alphas, + const ulong * alphas, const mpoly_ctx_t mctx, nmod_t mod) { @@ -32,7 +32,7 @@ mp_limb_t _nmod_mpoly_eval_all_ui( fmpz_t varexp_mp; slong * offsets, * shifts; n_poly_struct * caches; - mp_limb_t eval, t; + ulong eval, t; TMP_INIT; TMP_START; diff --git a/src/nmod_mpoly/evaluate_one.c b/src/nmod_mpoly/evaluate_one.c index 153ff21757..1f167d23d2 100644 --- a/src/nmod_mpoly/evaluate_one.c +++ b/src/nmod_mpoly/evaluate_one.c @@ -27,11 +27,11 @@ void _nmod_mpoly_evaluate_one_ui_sp( slong i, N, off, shift; ulong * cmpmask, * one; slong Blen = B->length; - const mp_limb_t * Bcoeffs = B->coeffs; + const ulong * Bcoeffs = B->coeffs; const ulong * Bexps = B->exps; flint_bitcnt_t bits = B->bits; slong Alen; - mp_limb_t * Acoeffs; + ulong * Acoeffs; ulong * Aexps; ulong mask, k; int need_sort = 0, cmp; @@ -112,11 +112,11 @@ static void _nmod_mpoly_evaluate_one_ui_mp( slong i, N, off; ulong * cmpmask, * one, * tmp; slong Blen = B->length; - const mp_limb_t * Bcoeffs = B->coeffs; + const ulong * Bcoeffs = B->coeffs; const ulong * Bexps = B->exps; flint_bitcnt_t bits = B->bits; slong Alen; - mp_limb_t * Acoeffs; + ulong * Acoeffs; ulong * Aexps; fmpz_t k; int need_sort = 0, cmp; diff --git a/src/nmod_mpoly/fit_length.c b/src/nmod_mpoly/fit_length.c index 4e4e659480..6f38f054bc 100644 --- a/src/nmod_mpoly/fit_length.c +++ b/src/nmod_mpoly/fit_length.c @@ -34,7 +34,7 @@ void nmod_mpoly_fit_length_fit_bits( if (len > A->coeffs_alloc) { A->coeffs_alloc = FLINT_MAX(len, 2*A->coeffs_alloc); - A->coeffs = flint_realloc(A->coeffs, A->coeffs_alloc*sizeof(mp_limb_t)); + A->coeffs = flint_realloc(A->coeffs, A->coeffs_alloc*sizeof(ulong)); } if (bits > A->bits) diff --git a/src/nmod_mpoly/gcd.c b/src/nmod_mpoly/gcd.c index 3e545af537..8104e3f6e1 100644 --- a/src/nmod_mpoly/gcd.c +++ b/src/nmod_mpoly/gcd.c @@ -41,7 +41,7 @@ static void nmod_mpoly_evals( ulong 
* Amin_exp, ulong * FLINT_UNUSED(Amax_exp), ulong * Astride, - mp_limb_t * alpha, + ulong * alpha, const nmod_mpoly_ctx_t ctx) { slong i, j; @@ -52,7 +52,7 @@ static void nmod_mpoly_evals( ulong * varexps; ulong varexp; slong total_degree, lo, hi; - mp_limb_t meval, t; + ulong meval, t; n_poly_struct * caches; offsets = FLINT_ARRAY_ALLOC(2*nvars, slong); @@ -260,8 +260,8 @@ static void nmod_mpoly_evals_lgprime( ulong varexp, lo, hi; slong total_degree; n_poly_struct * caches; - mp_limb_t * t = FLINT_ARRAY_ALLOC(2*d, mp_limb_t); - mp_limb_t * meval = t + d; + ulong * t = FLINT_ARRAY_ALLOC(2*d, ulong); + ulong * meval = t + d; offsets = FLINT_ARRAY_ALLOC(2*nvars, slong); shifts = offsets + nvars; @@ -354,7 +354,7 @@ static void _set_estimates( slong i, j; n_poly_t Geval; n_poly_struct * Aevals, * Bevals; - mp_limb_t * alpha; + ulong * alpha; flint_rand_t state; slong ignore_limit; int * ignore; @@ -362,7 +362,7 @@ static void _set_estimates( flint_rand_init(state); ignore = FLINT_ARRAY_ALLOC(nvars, int); - alpha = FLINT_ARRAY_ALLOC(nvars, mp_limb_t); + alpha = FLINT_ARRAY_ALLOC(nvars, ulong); Aevals = FLINT_ARRAY_ALLOC(nvars, n_poly_struct); Bevals = FLINT_ARRAY_ALLOC(nvars, n_poly_struct); @@ -844,7 +844,7 @@ static int _try_monomial_cofactors( slong NA, NG; slong nvars = ctx->minfo->nvars; fmpz * Abarexps, * Bbarexps, * Texps; - mp_limb_t a0, b0, a0inv; + ulong a0, b0, a0inv; nmod_mpoly_t T; flint_bitcnt_t Gbits = FLINT_MIN(A->bits, B->bits); flint_bitcnt_t Abarbits = A->bits; diff --git a/src/nmod_mpoly/gcd_brown.c b/src/nmod_mpoly/gcd_brown.c index 393b723284..b2c13ebd10 100644 --- a/src/nmod_mpoly/gcd_brown.c +++ b/src/nmod_mpoly/gcd_brown.c @@ -36,7 +36,7 @@ typedef struct _splitbase_struct * base; nmod_mpolyn_t G, Abar, Bbar; n_poly_t modulus; - mp_limb_t alpha; + ulong alpha; slong required_images; } _splitworker_arg_struct; @@ -50,8 +50,8 @@ static void _splitworker_bivar(void * varg) n_poly_t Aevalp, Bevalp, Gevalp, Abarevalp, Bbarevalp; n_poly_t Aevalm, Bevalm, Gevalm, Abarevalm, Bbarevalm; nmod_mpolyn_t T; - mp_limb_t gammaevalp, alpha, temp; - mp_limb_t gammaevalm; + ulong gammaevalp, alpha, temp; + ulong gammaevalm; int gstab, astab, bstab, use_stab; slong ldeg; slong N, off, shift; @@ -267,8 +267,8 @@ static void _splitworker(void * varg) nmod_mpolyn_t Aevalp, Bevalp, Gevalp, Abarevalp, Bbarevalp; nmod_mpolyn_t Aevalm, Bevalm, Gevalm, Abarevalm, Bbarevalm; nmod_mpolyn_t T; - mp_limb_t gammaevalp, alpha, temp; - mp_limb_t gammaevalm; + ulong gammaevalp, alpha, temp; + ulong gammaevalm; slong ldeg; int success; nmod_poly_stack_t Sp; @@ -825,7 +825,7 @@ int nmod_mpolyn_gcd_brown_smprime_threaded_pool( int success; slong bound, best_est; slong g_stab_est, abar_stab_est, bbar_stab_est, upper_limit; - mp_limb_t alpha; + ulong alpha; slong deggamma, ldegA, ldegB; slong ldegGs_Abars_Bbars[3]; n_poly_t cA, cB, cG, cAbar, cBbar, gamma; diff --git a/src/nmod_mpoly/gcd_hensel.c b/src/nmod_mpoly/gcd_hensel.c index ab59aa128a..ef34498025 100644 --- a/src/nmod_mpoly/gcd_hensel.c +++ b/src/nmod_mpoly/gcd_hensel.c @@ -50,8 +50,8 @@ int nmod_mpolyl_gcd_hensel_smprime( const slong n = ctx->minfo->nvars - 1; slong i, k; flint_bitcnt_t bits = A->bits; - mp_limb_t * alphas, * prev_alphas; - mp_limb_t q, mu1, mu2; + ulong * alphas, * prev_alphas; + ulong q, mu1, mu2; nmod_mpoly_struct * Aevals, * Bevals, * Hevals; nmod_mpoly_struct * H; /* points to A, B, or Hevals + n */ nmod_mpoly_struct * Glcs, * Hlcs; @@ -83,7 +83,7 @@ int nmod_mpolyl_gcd_hensel_smprime( nmod_mpoly_init(Hevals + i, ctx); } - 
alphas = FLINT_ARRAY_ALLOC(2*n, mp_limb_t); + alphas = FLINT_ARRAY_ALLOC(2*n, ulong); prev_alphas = alphas + n; Aevals = FLINT_ARRAY_ALLOC(2*(n + 1), nmod_mpoly_struct); Bevals = Aevals + (n + 1); diff --git a/src/nmod_mpoly/gcd_zippel2.c b/src/nmod_mpoly/gcd_zippel2.c index c7f3442d3b..791bdf90a4 100644 --- a/src/nmod_mpoly/gcd_zippel2.c +++ b/src/nmod_mpoly/gcd_zippel2.c @@ -31,7 +31,7 @@ void _nmod_mpoly_monomial_evals_cache( const ulong * Aexps, flint_bitcnt_t Abits, slong Alen, - const mp_limb_t * betas, + const ulong * betas, slong start, slong stop, const mpoly_ctx_t mctx, @@ -42,7 +42,7 @@ void _nmod_mpoly_monomial_evals_cache( slong N = mpoly_words_per_exp_sp(Abits, mctx); slong * off, * shift; n_poly_struct * caches; - mp_limb_t * c; + ulong * c; slong num = stop - start; FLINT_ASSERT(Abits <= FLINT_BITS); @@ -104,7 +104,7 @@ void _nmod_mpoly_monomial_evals2_cache( const ulong * Aexps, flint_bitcnt_t Abits, slong Alen, - const mp_limb_t * betas, + const ulong * betas, slong m, const mpoly_ctx_t mctx, nmod_t mod) @@ -115,7 +115,7 @@ void _nmod_mpoly_monomial_evals2_cache( slong N = mpoly_words_per_exp_sp(Abits, mctx); slong * off, * shift; n_poly_struct * caches; - mp_limb_t * c; + ulong * c; FLINT_ASSERT(Abits <= FLINT_BITS); FLINT_ASSERT(Alen > 0); @@ -351,7 +351,7 @@ int nmod_mpoly_gcd_get_use_new( return use; } -mp_limb_t n_poly_mod_eval_step_sep( +ulong n_poly_mod_eval_step_sep( n_poly_t cur, const n_poly_t inc, const nmod_mpoly_t A, @@ -363,7 +363,7 @@ mp_limb_t n_poly_mod_eval_step_sep( } static void n_fq_poly_eval_step_sep( - mp_limb_t * res, + ulong * res, n_fq_poly_t cur, const n_fq_poly_t inc, const fq_nmod_mpoly_t A, @@ -383,7 +383,7 @@ static void n_bpoly_mod_eval_step_sep( { slong i, Ai; slong e0, e1; - mp_limb_t c; + ulong c; n_bpoly_zero(E); @@ -412,7 +412,7 @@ static void n_bpoly_mod_eval_step_sep( static void nmod_mpoly_monomial_evals( n_poly_t E, const nmod_mpoly_t A, - const mp_limb_t * betas, + const ulong * betas, slong start, slong stop, const nmod_mpoly_ctx_t ctx) @@ -436,7 +436,7 @@ static void fq_nmod_mpoly_monomial_evals( static void nmod_mpoly_monomial_evals2( n_polyun_t E, const nmod_mpoly_t A, - const mp_limb_t * betas, + const ulong * betas, slong m, const nmod_mpoly_ctx_t ctx) { @@ -499,13 +499,13 @@ int nmod_mpolyl_gcd_zippel_smprime( slong i, m; slong nvars = ctx->minfo->nvars; flint_bitcnt_t bits = A->bits; - mp_limb_t * alphas, * betas; + ulong * alphas, * betas; flint_rand_t state; nmod_mpoly_t cont; nmod_mpoly_t T, G, Abar, Bbar; n_polyun_t HG, HAbar, HBbar, MG, MAbar, MBbar, ZG, ZAbar, ZBbar; n_bpoly_t Aev, Bev, Gev, Abarev, Bbarev; - mp_limb_t gammaev; + ulong gammaev; nmod_mpolyn_t Tn, Gn, Abarn, Bbarn; slong lastdeg; slong cur_zip_image, req_zip_images, this_length; @@ -515,7 +515,7 @@ int nmod_mpolyl_gcd_zippel_smprime( nmod_mpoly_struct * Aevals, * Bevals; nmod_mpoly_struct * gammaevals; n_poly_bpoly_stack_t St; - mp_limb_t c, start_alpha; + ulong c, start_alpha; ulong GdegboundXY, newdegXY, Abideg, Bbideg; slong degxAB, degyAB; @@ -591,8 +591,8 @@ int nmod_mpolyl_gcd_zippel_smprime( n_poly_stack_init(St->poly_stack); n_bpoly_stack_init(St->bpoly_stack); - betas = FLINT_ARRAY_ALLOC(nvars, mp_limb_t); - alphas = FLINT_ARRAY_ALLOC(nvars, mp_limb_t); + betas = FLINT_ARRAY_ALLOC(nvars, ulong); + alphas = FLINT_ARRAY_ALLOC(nvars, ulong); flint_rand_init(state); Aevals = FLINT_ARRAY_ALLOC(nvars + 1, nmod_mpoly_struct); @@ -1209,7 +1209,7 @@ int nmod_mpolyl_gcd_zippel_lgprime( fq_nmod_mpolyn_t qTn, qGn, qAbarn, qBbarn; n_fq_polyun_t HG, HAbar, 
HBbar, MG, MAbar, MBbar, ZG, ZAbar, ZBbar; n_fq_bpoly_t Aev, Bev, Gev, Abarev, Bbarev; - const mp_limb_t * gammaev; + const ulong * gammaev; slong lastdeg; slong cur_zip_image, req_zip_images, this_length; n_polyun_t Aeh_cur, Aeh_inc, Beh_cur, Beh_inc; diff --git a/src/nmod_mpoly/get_coeff.c b/src/nmod_mpoly/get_coeff.c index 3476e60fdf..4db747c40a 100644 --- a/src/nmod_mpoly/get_coeff.c +++ b/src/nmod_mpoly/get_coeff.c @@ -81,7 +81,7 @@ void nmod_mpoly_get_coeff_vars_ui(nmod_mpoly_t C, const nmod_mpoly_t A, ulong * uexp; ulong * tmask, * texp; slong nvars = ctx->minfo->nvars; - mp_limb_t * Ccoeff; + ulong * Ccoeff; ulong * Cexp; slong Clen; TMP_INIT; diff --git a/src/nmod_mpoly/get_set_is_nmod_poly.c b/src/nmod_mpoly/get_set_is_nmod_poly.c index e77617c4f4..4f2d245ac0 100644 --- a/src/nmod_mpoly/get_set_is_nmod_poly.c +++ b/src/nmod_mpoly/get_set_is_nmod_poly.c @@ -38,7 +38,7 @@ int nmod_mpoly_get_n_poly( const nmod_mpoly_ctx_t ctx) { slong Blen = B->length; - const mp_limb_t * Bcoeffs = B->coeffs; + const ulong * Bcoeffs = B->coeffs; const ulong * Bexps = B->exps; flint_bitcnt_t Bbits = B->bits; slong i, N = mpoly_words_per_exp(Bbits, ctx->minfo); @@ -89,7 +89,7 @@ int nmod_mpoly_get_n_poly( void _nmod_mpoly_set_nmod_poly( nmod_mpoly_t A, flint_bitcnt_t Abits, - const mp_limb_t * Bcoeffs, + const ulong * Bcoeffs, slong Blen, slong var, const nmod_mpoly_ctx_t ctx) diff --git a/src/nmod_mpoly/get_str_pretty.c b/src/nmod_mpoly/get_str_pretty.c index bf17729082..f8aaf2f21e 100644 --- a/src/nmod_mpoly/get_str_pretty.c +++ b/src/nmod_mpoly/get_str_pretty.c @@ -17,7 +17,7 @@ #define ALLOC_PER_VAR ((FLINT_BITS+4)/3) static char * -_nmod_mpoly_get_str_pretty(const mp_limb_t * coeff, const ulong * exp, slong len, +_nmod_mpoly_get_str_pretty(const ulong * coeff, const ulong * exp, slong len, const char ** x_in, slong bits, const mpoly_ctx_t mctx, nmod_t fctx) { char * str, ** x = (char **) x_in, *xtmp; diff --git a/src/nmod_mpoly/init.c b/src/nmod_mpoly/init.c index 468bd66b8b..bbf951952b 100644 --- a/src/nmod_mpoly/init.c +++ b/src/nmod_mpoly/init.c @@ -23,7 +23,7 @@ void nmod_mpoly_init3( if (alloc > 0) { A->coeffs_alloc = alloc; - A->coeffs = FLINT_ARRAY_ALLOC(A->coeffs_alloc, mp_limb_t); + A->coeffs = FLINT_ARRAY_ALLOC(A->coeffs_alloc, ulong); A->exps_alloc = N*alloc; A->exps = FLINT_ARRAY_ALLOC(A->exps_alloc, ulong); } diff --git a/src/nmod_mpoly/interp.c b/src/nmod_mpoly/interp.c index ddf9561c9c..5545d25dc8 100644 --- a/src/nmod_mpoly/interp.c +++ b/src/nmod_mpoly/interp.c @@ -23,16 +23,16 @@ void _nmod_poly_eval2_pow( - mp_limb_t * vp, - mp_limb_t * vm, + ulong * vp, + ulong * vm, n_poly_t P, n_poly_t alphapow, nmod_t fctx) { - mp_limb_t * Pcoeffs = P->coeffs; + ulong * Pcoeffs = P->coeffs; slong Plen = P->length; - mp_limb_t * alpha_powers = alphapow->coeffs; - mp_limb_t p1, p0, a0, a1, a2, q1, q0, b0, b1, b2; + ulong * alpha_powers = alphapow->coeffs; + ulong p1, p0, a0, a1, a2, q1, q0, b0, b1, b2; slong k; a0 = a1 = a2 = UWORD(0); @@ -85,7 +85,7 @@ void nmod_mpolyn_interp_reduce_2sm_poly( n_poly_t alphapow, const nmod_mpoly_ctx_t ctx) { - mp_limb_t u, v; + ulong u, v; slong Ai, Alen, k; n_poly_struct * Acoeff; ulong * Aexp; @@ -114,14 +114,14 @@ void nmod_mpolyn_interp_lift_2sm_poly( nmod_mpolyn_t F, const n_poly_t A, const n_poly_t B, - mp_limb_t alpha, + ulong alpha, const nmod_mpoly_ctx_t ctx) { slong lastdeg = -WORD(1); - mp_limb_t u, v, d0, d1, Avalue, Bvalue; + ulong u, v, d0, d1, Avalue, Bvalue; slong Fi, Aexp, Bexp; - mp_limb_t * Acoeff = A->coeffs; - mp_limb_t * Bcoeff = 
B->coeffs; + ulong * Acoeff = A->coeffs; + ulong * Bcoeff = B->coeffs; n_poly_struct * Fcoeff; ulong * Fexp; slong e; @@ -209,12 +209,12 @@ int nmod_mpolyn_interp_crt_2sm_poly( const nmod_mpoly_ctx_t ctx) { int changed = 0, Finc; - mp_limb_t alpha = n_poly_get_coeff(alphapow, 1); + ulong alpha = n_poly_get_coeff(alphapow, 1); slong lastdeg = -WORD(1); - mp_limb_t u, v, FvalueA, FvalueB; + ulong u, v, FvalueA, FvalueB; slong Fi, Toff, Aexp, Bexp, e, fexp; - mp_limb_t * Acoeff = A->coeffs; - mp_limb_t * Bcoeff = B->coeffs; + ulong * Acoeff = A->coeffs; + ulong * Bcoeff = B->coeffs; slong Flen = F->length; n_poly_struct * Fcoeff = F->coeffs; ulong * Fexp = F->exps; @@ -403,7 +403,7 @@ int nmod_mpolyn_interp_crt_sm_bpoly( n_poly_struct * Fcoeffs = F->coeffs; ulong * Texps = T->exps; n_poly_struct * Tcoeffs = T->coeffs; - mp_limb_t v; + ulong v; ulong Fexpi, mask; mask = (-UWORD(1)) >> (FLINT_BITS - F->bits); @@ -539,13 +539,13 @@ void nmod_mpolyn_interp_reduce_sm_mpolyn( nmod_mpolyn_t E, nmod_mpolyn_t A, slong var, - mp_limb_t alpha, + ulong alpha, const nmod_mpoly_ctx_t ctx) { slong N = mpoly_words_per_exp_sp(A->bits, ctx->minfo); slong offset, shift, k; ulong mask; - mp_limb_t v; + ulong v; n_poly_struct * Acoeff = A->coeffs; ulong * Aexp = A->exps; slong Alen = A->length; @@ -661,7 +661,7 @@ int nmod_mpolyn_interp_crt_sm_mpolyn( nmod_mpolyn_t A, slong var, n_poly_t modulus, - mp_limb_t alpha, + ulong alpha, const nmod_mpoly_ctx_t ctx) { int changed = 0; @@ -669,7 +669,7 @@ int nmod_mpolyn_interp_crt_sm_mpolyn( slong lastdeg = -WORD(1); slong offset, shift; slong vi; - mp_limb_t v; + ulong v; n_poly_t tp; n_poly_struct * Tcoeff; ulong * Texp; @@ -810,7 +810,7 @@ void nmod_mpolyn_interp_reduce_2sm_mpolyn( slong N = mpoly_words_per_exp(A->bits, ctx->minfo); slong offset, shift, k; ulong mask; - mp_limb_t e, f; + ulong e, f; n_poly_struct * Acoeff = A->coeffs; ulong * Aexp = A->exps; slong Alen = A->length; @@ -898,7 +898,7 @@ void nmod_mpolyn_interp_lift_2sm_mpolyn( nmod_mpolyn_t A, nmod_mpolyn_t B, slong var, - mp_limb_t alpha, + ulong alpha, const nmod_mpoly_ctx_t ctx) { slong N = mpoly_words_per_exp_sp(A->bits, ctx->minfo); @@ -917,9 +917,9 @@ void nmod_mpolyn_interp_lift_2sm_mpolyn( slong Blen = B->length; ulong * Bexp = B->exps; slong Bi, bi; - mp_limb_t u, v, Avalue, Bvalue, FvalueA, FvalueB; + ulong u, v, Avalue, Bvalue, FvalueA, FvalueB; int cmp; - mp_limb_t d0 = n_invmod(alpha + alpha, ctx->mod.n); + ulong d0 = n_invmod(alpha + alpha, ctx->mod.n); n_poly_init(tp); n_poly_init(zero); @@ -1062,9 +1062,9 @@ int nmod_mpolyn_interp_crt_2sm_mpolyn( ulong * Bexp = B->exps; slong Bi, bi; n_poly_struct * Fvalue; - mp_limb_t u, v, Avalue, Bvalue, FvalueA, FvalueB; + ulong u, v, Avalue, Bvalue, FvalueA, FvalueB; int texp_set, cmp; - mp_limb_t alpha = n_poly_get_coeff(alphapow, 1); + ulong alpha = n_poly_get_coeff(alphapow, 1); #if FLINT_WANT_ASSERT u = n_poly_mod_evaluate_nmod(modulus, alpha, ctx->mod); @@ -1245,7 +1245,7 @@ int nmod_mpolyn_interp_crt_2sm_mpolyn( void nmod_mpolyn_interp_reduce_sm_mpoly( nmod_mpoly_t B, nmod_mpolyn_t A, - mp_limb_t alpha, + ulong alpha, const nmod_mpoly_ctx_t ctx) { slong i, N, k; @@ -1270,7 +1270,7 @@ void nmod_mpolyn_interp_reduce_sm_mpoly( void nmod_mpolyun_interp_reduce_sm_mpolyu( nmod_mpolyu_t B, nmod_mpolyun_t A, - mp_limb_t alpha, + ulong alpha, const nmod_mpoly_ctx_t ctx) { slong i, k; @@ -1296,7 +1296,7 @@ void nmod_mpolyn_interp_lift_sm_mpoly( { slong i, N; n_poly_struct * Acoeff; - mp_limb_t * Bcoeff; + ulong * Bcoeff; ulong * Aexp, * Bexp; slong Blen; 
@@ -1348,18 +1348,18 @@ int nmod_mpolyn_interp_crt_sm_mpoly( nmod_mpolyn_t T, nmod_mpoly_t A, n_poly_t modulus, - mp_limb_t alpha, + ulong alpha, const nmod_mpoly_ctx_t ctx) { int changed = 0; slong i, j, k; slong N; - mp_limb_t v; + ulong v; flint_bitcnt_t bits = A->bits; slong Flen = F->length, Alen = A->length; ulong * Fexp = F->exps, * Aexp = A->exps; ulong * Texp; - mp_limb_t * Acoeff = A->coeffs; + ulong * Acoeff = A->coeffs; n_poly_struct * Fcoeff = F->coeffs; n_poly_struct * Tcoeff; n_poly_t tp; @@ -1463,7 +1463,7 @@ int nmod_mpolyun_interp_crt_sm_mpolyu( nmod_mpolyun_t T, nmod_mpolyu_t A, n_poly_t modulus, - mp_limb_t alpha, + ulong alpha, const nmod_mpoly_ctx_t ctx) { int changed = 0; @@ -1567,8 +1567,8 @@ int nmod_mpolyn_interp_mcrt_sm_mpoly( slong lastdeg = -1; int changed = 0; slong i; - mp_limb_t v; - mp_limb_t * Acoeff = A->coeffs; + ulong v; + ulong * Acoeff = A->coeffs; slong Flen = F->length; FLINT_ASSERT(Flen == A->length); diff --git a/src/nmod_mpoly/io.c b/src/nmod_mpoly/io.c index 29899258a4..bf9bbb79d4 100644 --- a/src/nmod_mpoly/io.c +++ b/src/nmod_mpoly/io.c @@ -18,7 +18,7 @@ /* printing *******************************************************************/ static int _nmod_mpoly_fprint_pretty(FILE * file, - const mp_limb_t * coeff, const ulong * exp, slong len, + const ulong * coeff, const ulong * exp, slong len, const char ** x_in, slong bits, const mpoly_ctx_t mctx) { slong i, j, N; diff --git a/src/nmod_mpoly/mpolyd.c b/src/nmod_mpoly/mpolyd.c index ef897b58dc..e75e432f7f 100644 --- a/src/nmod_mpoly/mpolyd.c +++ b/src/nmod_mpoly/mpolyd.c @@ -42,7 +42,7 @@ void nmod_mpolyd_init(nmod_mpolyd_t poly, slong nvars) poly->deg_bounds[i] = WORD(1); } poly->coeff_alloc = WORD(16); - poly->coeffs = (mp_limb_t *) flint_malloc(poly->coeff_alloc*sizeof(mp_limb_t)); + poly->coeffs = (ulong *) flint_malloc(poly->coeff_alloc*sizeof(ulong)); for (i = 0; i < poly->coeff_alloc; i++) { poly->coeffs[i] = UWORD(0); @@ -52,7 +52,7 @@ void nmod_mpolyd_init(nmod_mpolyd_t poly, slong nvars) void nmod_mpolyd_fit_length(nmod_mpolyd_t poly, slong len) { if (poly->coeff_alloc < len) { /*flint_printf("realloc %wd -> %wd\n",poly->coeff_alloc, len);*/ - poly->coeffs = (mp_limb_t *) flint_realloc(poly->coeffs, len*sizeof(mp_limb_t)); + poly->coeffs = (ulong *) flint_realloc(poly->coeffs, len*sizeof(ulong)); poly->coeff_alloc = len; } } diff --git a/src/nmod_mpoly/mpolyn_gcd_brown.c b/src/nmod_mpoly/mpolyn_gcd_brown.c index 99dbcc35bb..78d9f2fd64 100644 --- a/src/nmod_mpoly/mpolyn_gcd_brown.c +++ b/src/nmod_mpoly/mpolyn_gcd_brown.c @@ -28,7 +28,7 @@ int nmod_mpolyn_gcd_brown_smprime_bivar( { int success; slong bound; - mp_limb_t alpha, temp, gammaevalp, gammaevalm; + ulong alpha, temp, gammaevalp, gammaevalm; n_poly_struct * Aevalp, * Bevalp, * Gevalp, * Abarevalp, * Bbarevalp; n_poly_struct * Aevalm, * Bevalm, * Gevalm, * Abarevalm, * Bbarevalm; nmod_mpolyn_struct * T; @@ -338,7 +338,7 @@ int nmod_mpolyn_gcd_brown_smprime( slong bound; slong upper_limit; slong offset, shift; - mp_limb_t alpha, temp, gammaevalp, gammaevalm; + ulong alpha, temp, gammaevalp, gammaevalm; nmod_mpolyn_struct * Aevalp, * Bevalp, * Gevalp, * Abarevalp, * Bbarevalp; nmod_mpolyn_struct * Aevalm, * Bevalm, * Gevalm, * Abarevalm, * Bbarevalm; nmod_mpolyn_struct * T1, * T2; diff --git a/src/nmod_mpoly/mpolyu.c b/src/nmod_mpoly/mpolyu.c index 34793b46ed..ee67f05d62 100644 --- a/src/nmod_mpoly/mpolyu.c +++ b/src/nmod_mpoly/mpolyu.c @@ -362,7 +362,7 @@ void nmod_mpoly_from_mpolyu_perm_inflate( slong i, j, k, l; slong NA, NB; 
slong Alen; - mp_limb_t * Acoeff; + ulong * Acoeff; ulong * Aexp; ulong * uexps; ulong * Aexps; @@ -679,7 +679,7 @@ static void nmod_mpoly_from_mpolyuu_perm_inflate( /* only for 2 main vars */ slong i, j, k, l; slong NA, NB; slong Alen; - mp_limb_t * Acoeff; + ulong * Acoeff; ulong * Aexp; ulong * uexps; ulong * Aexps; @@ -758,7 +758,7 @@ void nmod_mpolyu_shift_left(nmod_mpolyu_t A, ulong s) } } -void nmod_mpolyu_scalar_mul_nmod(nmod_mpolyu_t A, mp_limb_t c, +void nmod_mpolyu_scalar_mul_nmod(nmod_mpolyu_t A, ulong c, const nmod_mpoly_ctx_t ctx) { slong i, j; @@ -829,7 +829,7 @@ void nmod_mpoly_cvtfrom_poly_notmain(nmod_mpoly_t A, nmod_poly_t a, k = 0; for (i = nmod_poly_length(a) - 1; i >= 0; i--) { - mp_limb_t c = nmod_poly_get_coeff_ui(a, i); + ulong c = nmod_poly_get_coeff_ui(a, i); if (c != UWORD(0)) { A->coeffs[k] = c; @@ -886,7 +886,7 @@ void nmod_mpolyu_cvtfrom_poly(nmod_mpolyu_t A, nmod_poly_t a, k = 0; for (i = nmod_poly_length(a) - 1; i >= 0; i--) { - mp_limb_t c = nmod_poly_get_coeff_ui(a, i); + ulong c = nmod_poly_get_coeff_ui(a, i); if (c != UWORD(0)) { nmod_mpolyu_fit_length(A, k + 1, ctx); diff --git a/src/nmod_mpoly/mpolyu_divides.c b/src/nmod_mpoly/mpolyu_divides.c index 1b07226985..8382fab252 100644 --- a/src/nmod_mpoly/mpolyu_divides.c +++ b/src/nmod_mpoly/mpolyu_divides.c @@ -16,9 +16,9 @@ /* A = D - B*C */ slong _nmod_mpoly_mulsub1(nmod_mpoly_t A, - const mp_limb_t * Dcoeff, const ulong * Dexp, slong Dlen, - const mp_limb_t * Bcoeff, const ulong * Bexp, slong Blen, - const mp_limb_t * Ccoeff, const ulong * Cexp, slong Clen, + const ulong * Dcoeff, const ulong * Dexp, slong Dlen, + const ulong * Bcoeff, const ulong * Bexp, slong Blen, + const ulong * Ccoeff, const ulong * Cexp, slong Clen, ulong maskhi, nmod_t fctx) { slong i, j; @@ -30,11 +30,11 @@ slong _nmod_mpoly_mulsub1(nmod_mpoly_t A, mpoly_heap_t * x; slong Di; slong Alen; - mp_limb_t * Acoeff = A->coeffs; + ulong * Acoeff = A->coeffs; ulong * Aexp = A->exps; ulong exp; slong * hind; - mp_limb_t acc0, acc1, acc2, pp1, pp0; + ulong acc0, acc1, acc2, pp1, pp0; TMP_INIT; FLINT_ASSERT(Blen > 0); @@ -175,9 +175,9 @@ slong _nmod_mpoly_mulsub1(nmod_mpoly_t A, /* A = D - B*C */ void _nmod_mpoly_mulsub(nmod_mpoly_t A, - const mp_limb_t * Dcoeff, const ulong * Dexp, slong Dlen, - const mp_limb_t * Bcoeff, const ulong * Bexp, slong Blen, - const mp_limb_t * Ccoeff, const ulong * Cexp, slong Clen, + const ulong * Dcoeff, const ulong * Dexp, slong Dlen, + const ulong * Bcoeff, const ulong * Bexp, slong Blen, + const ulong * Ccoeff, const ulong * Cexp, slong Clen, flint_bitcnt_t bits, slong N, const ulong * cmpmask, nmod_t fctx) { slong i, j; @@ -189,13 +189,13 @@ void _nmod_mpoly_mulsub(nmod_mpoly_t A, mpoly_heap_t * x; slong Di; slong Alen; - mp_limb_t * Acoeff = A->coeffs; + ulong * Acoeff = A->coeffs; ulong * Aexp = A->exps; ulong * exp, * exps; ulong ** exp_list; slong exp_next; slong * hind; - mp_limb_t acc0, acc1, acc2, pp1, pp0; + ulong acc0, acc1, acc2, pp1, pp0; TMP_INIT; FLINT_ASSERT(Blen > 0); diff --git a/src/nmod_mpoly/mpolyu_gcdp_zippel.c b/src/nmod_mpoly/mpolyu_gcdp_zippel.c index ba4ec13439..9e9d84e8c0 100644 --- a/src/nmod_mpoly/mpolyu_gcdp_zippel.c +++ b/src/nmod_mpoly/mpolyu_gcdp_zippel.c @@ -18,7 +18,7 @@ /* store in each coefficient the evaluation of the corresponding monomial */ void nmod_mpoly_evalsk(nmod_mpoly_t A, nmod_mpoly_t B, - slong entries, slong * offs, ulong * masks, mp_limb_t * powers, + slong entries, slong * offs, ulong * masks, ulong * powers, const nmod_mpoly_ctx_t ctx) { slong i, j; @@ 
-29,7 +29,7 @@ void nmod_mpoly_evalsk(nmod_mpoly_t A, nmod_mpoly_t B, N = mpoly_words_per_exp(B->bits, ctx->minfo); for (i = 0; i < B->length; i++) { - mp_limb_t prod = UWORD(1); + ulong prod = UWORD(1); for (j = 0; j < entries; j++) { @@ -46,7 +46,7 @@ void nmod_mpoly_evalsk(nmod_mpoly_t A, nmod_mpoly_t B, } void nmod_mpolyu_evalsk(nmod_mpolyu_t A, nmod_mpolyu_t B, - slong entries, slong * offs, ulong * masks, mp_limb_t * powers, + slong entries, slong * offs, ulong * masks, ulong * powers, const nmod_mpoly_ctx_t ctx) { slong i; @@ -98,7 +98,7 @@ int nmod_mpolyu_evalfromsk(nmod_poly_t e, nmod_mpolyu_t A, nmod_poly_zero(e); for (i = 0; i < A->length; i++) { - mp_limb_t v, pp0, pp1, ac0 = 0, ac1 = 0, ac2 = 0; + ulong v, pp0, pp1, ac0 = 0, ac1 = 0, ac2 = 0; FLINT_ASSERT((A->coeffs + i)->length == (SK->coeffs + i)->length); @@ -129,13 +129,13 @@ int nmod_mpolyu_evalfromsk(nmod_poly_t e, nmod_mpolyu_t A, for x */ -int nmod_vandsolve(mp_limb_t * x, mp_limb_t * a, mp_limb_t * b, +int nmod_vandsolve(ulong * x, ulong * a, ulong * b, slong n, nmod_t mod) { int success = 0; slong i, j; - mp_limb_t t; - mp_limb_t Dinv; + ulong t; + ulong Dinv; nmod_poly_t Q, P, R, u; for (i = 0; i < n; i++) @@ -197,7 +197,7 @@ nmod_gcds_ret_t nmod_mpolyu_gcds_zippel(nmod_mpolyu_t G, nmod_gcds_ret_t success; nmod_mpolyu_t Aevalsk1, Bevalsk1, fevalsk1, Aevalski, Bevalski, fevalski; nmod_poly_t Aeval, Beval, Geval; - mp_limb_t * alpha, * b; + ulong * alpha, * b; nmod_mat_struct * M, * ML; nmod_mat_t MF, Msol; int lc_ok; @@ -207,11 +207,11 @@ nmod_gcds_ret_t nmod_mpolyu_gcds_zippel(nmod_mpolyu_t G, slong i, j, k, s, S, nullity; slong * d; slong l; - mp_limb_t * W; + ulong * W; slong entries; slong * offs; ulong * masks; - mp_limb_t * powers; + ulong * powers; TMP_INIT; FLINT_ASSERT(A->length > 0); @@ -298,10 +298,10 @@ nmod_gcds_ret_t nmod_mpolyu_gcds_zippel(nmod_mpolyu_t G, /* one extra test image */ l += 1; - alpha = (mp_limb_t *) TMP_ALLOC(var*sizeof(mp_limb_t)); + alpha = (ulong *) TMP_ALLOC(var*sizeof(ulong)); ML = (nmod_mat_struct *) TMP_ALLOC(f->length*sizeof(nmod_mat_struct)); - b = (mp_limb_t *) TMP_ALLOC((f->coeffs + d[f->length - 1])->length - *sizeof(mp_limb_t)); + b = (ulong *) TMP_ALLOC((f->coeffs + d[f->length - 1])->length + *sizeof(ulong)); nmod_mat_init(MF, 0, l, ctx->mod.n); @@ -313,7 +313,7 @@ nmod_gcds_ret_t nmod_mpolyu_gcds_zippel(nmod_mpolyu_t G, ML_is_initialized[i] = 0; } - W = (mp_limb_t *) flint_malloc(l*f->length*sizeof(mp_limb_t)); + W = (ulong *) flint_malloc(l*f->length*sizeof(ulong)); nmod_mat_init(Msol, l, 1, ctx->mod.n); @@ -321,7 +321,7 @@ nmod_gcds_ret_t nmod_mpolyu_gcds_zippel(nmod_mpolyu_t G, entries = f->bits * var; offs = (slong *) TMP_ALLOC(entries*sizeof(slong)); masks = (ulong *) TMP_ALLOC(entries*sizeof(slong)); - powers = (mp_limb_t *) TMP_ALLOC(entries*sizeof(mp_limb_t)); + powers = (ulong *) TMP_ALLOC(entries*sizeof(ulong)); /***** evaluation loop head *******/ @@ -421,7 +421,7 @@ nmod_gcds_ret_t nmod_mpolyu_gcds_zippel(nmod_mpolyu_t G, j = WORD(0); while ((--k) >= 0) { - mp_limb_t ck = nmod_poly_get_coeff_ui(Geval, k); + ulong ck = nmod_poly_get_coeff_ui(Geval, k); if (ck != UWORD(0)) { while (j < f->length && f->exps[j] > (ulong) k) @@ -556,7 +556,7 @@ nmod_gcds_ret_t nmod_mpolyu_gcds_zippel(nmod_mpolyu_t G, /* check solution */ for (s = 0; s < f->length; s++) { - mp_limb_t pp0, pp1, ac0, ac1, ac2, u, v; + ulong pp0, pp1, ac0, ac1, ac2, u, v; for (i = 0; i < l; i++) { @@ -680,7 +680,7 @@ int nmod_mpolyu_gcdp_zippel_bivar( n_poly_t a, b, c, g, modulus, tempmod; 
nmod_mpolyu_t Aeval, Beval, Geval; nmod_mpolyun_t An, Bn, H, Ht; - mp_limb_t geval, temp, alpha; + ulong geval, temp, alpha; FLINT_ASSERT(ctx->minfo->ord == ORD_LEX); FLINT_ASSERT(var >= -WORD(1)); @@ -796,7 +796,7 @@ int nmod_mpolyu_gcdp_zippel_bivar( /* update interpolant H */ if (n_poly_degree(modulus) > 0) { - mp_limb_t t = n_poly_mod_evaluate_nmod(modulus, alpha, ctx->mod); + ulong t = n_poly_mod_evaluate_nmod(modulus, alpha, ctx->mod); t = nmod_inv(t, ctx->mod); _n_poly_mod_scalar_mul_nmod_inplace(modulus, t, ctx->mod); @@ -885,8 +885,8 @@ int nmod_mpolyu_gcdp_zippel( n_poly_t modulus, tempmod; nmod_mpolyu_t Aeval, Beval, Geval, Abareval, Bbareval, Gform; nmod_mpolyun_t H, Ht; - mp_limb_t geval, temp; - mp_limb_t alpha, start_alpha; + ulong geval, temp; + ulong alpha, start_alpha; FLINT_ASSERT(ctx->minfo->ord == ORD_LEX); FLINT_ASSERT(var >= -WORD(1)); diff --git a/src/nmod_mpoly/mpolyun.c b/src/nmod_mpoly/mpolyun.c index c929e4a002..bae8c64b37 100644 --- a/src/nmod_mpoly/mpolyun.c +++ b/src/nmod_mpoly/mpolyun.c @@ -265,7 +265,7 @@ void nmod_mpolyun_set_mod(nmod_mpolyun_t FLINT_UNUSED(A), const nmod_t FLINT_UNU void nmod_mpolyn_scalar_mul_nmod( nmod_mpolyn_t A, - mp_limb_t c, + ulong c, const nmod_mpoly_ctx_t ctx) { slong i; @@ -279,7 +279,7 @@ void nmod_mpolyn_scalar_mul_nmod( void nmod_mpolyun_scalar_mul_nmod( nmod_mpolyun_t A, - mp_limb_t c, + ulong c, const nmod_mpoly_ctx_t ctx) { slong i; @@ -785,7 +785,7 @@ void nmod_mpoly_from_mpolyun_perm_inflate( slong i, j, h, k, l; slong NA, NB; slong Alen; - mp_limb_t * Acoeff; + ulong * Acoeff; ulong * Aexp; ulong * uexps; ulong * Aexps, * tAexp, * tAgexp; @@ -844,7 +844,7 @@ void nmod_mpoly_from_mpolyun_perm_inflate( &Aexp, &A->exps_alloc, NA, Alen + h); for (h--; h >= 0; h--) { - mp_limb_t c = (Bc->coeffs + j)->coeffs[h]; + ulong c = (Bc->coeffs + j)->coeffs[h]; if (c == 0) continue; mpoly_monomial_madd(Aexp + NA*Alen, tAexp, h, tAgexp, NA); @@ -876,7 +876,7 @@ void nmod_mpoly_from_mpolyn_perm_inflate( slong i, h, k, l; slong NA, NB; slong Alen; - mp_limb_t * Acoeff; + ulong * Acoeff; ulong * Aexp; ulong * Bexps; ulong * Aexps, * tAexp, * tAgexp; @@ -926,7 +926,7 @@ void nmod_mpoly_from_mpolyn_perm_inflate( &Aexp, &A->exps_alloc, NA, Alen + h); for (h--; h >= 0; h--) { - mp_limb_t c = (B->coeffs + i)->coeffs[h]; + ulong c = (B->coeffs + i)->coeffs[h]; if (c == 0) continue; mpoly_monomial_madd(Aexp + NA*Alen, tAexp, h, tAgexp, NA); @@ -1048,7 +1048,7 @@ void nmod_mpoly_cvtfrom_mpolyn( { for (j = B->coeffs[i].length - 1; j >= 0; j--) { - mp_limb_t c = B->coeffs[i].coeffs[j]; + ulong c = B->coeffs[i].coeffs[j]; if (c == 0) continue; diff --git a/src/nmod_mpoly/mul_array.c b/src/nmod_mpoly/mul_array.c index 045fb2e889..3ca9039b79 100644 --- a/src/nmod_mpoly/mul_array.c +++ b/src/nmod_mpoly/mul_array.c @@ -248,7 +248,7 @@ void _nmod_mpoly_mul_array_chunked_LEX( { /* compute bound on coeffs of output chunk */ slong len = 0; - mp_limb_t t2, t1, t0, u1, u0; + ulong t2, t1, t0, u1, u0; for (i = 0, j = Pi; i < Al && j >= 0; i++, j--) { @@ -720,7 +720,7 @@ void _nmod_mpoly_mul_array_chunked_DEG( { /* compute bound on coeffs of output chunk */ slong len = 0; - mp_limb_t t2, t1, t0, u1, u0; + ulong t2, t1, t0, u1, u0; for (i = 0, j = Pi; i < Al && j >= 0; i++, j--) { diff --git a/src/nmod_mpoly/mul_array_threaded.c b/src/nmod_mpoly/mul_array_threaded.c index ea8c1f84c4..07a148af7d 100644 --- a/src/nmod_mpoly/mul_array_threaded.c +++ b/src/nmod_mpoly/mul_array_threaded.c @@ -39,7 +39,7 @@ typedef struct volatile int idx; slong nthreads; slong Al, 
Bl, Pl; - mp_limb_t * Acoeffs, * Bcoeffs; + ulong * Acoeffs, * Bcoeffs; slong * Amain, * Bmain; ulong * Apexp, * Bpexp; slong * perm; @@ -101,7 +101,7 @@ static void _nmod_mpoly_mul_array_threaded_worker_LEX(void * varg) while (Pi < Pl) { slong len; - mp_limb_t t2, t1, t0, u1, u0; + ulong t2, t1, t0, u1, u0; Pi = base->perm[Pi]; @@ -332,7 +332,7 @@ void _nmod_mpoly_mul_array_chunked_threaded_LEX( FLINT_ASSERT((Pchunks + Pi)->poly->exps != NULL); memcpy(P->exps + Plen, (Pchunks + Pi)->poly->exps, (Pchunks + Pi)->len*sizeof(ulong)); - memcpy(P->coeffs + Plen, (Pchunks + Pi)->poly->coeffs, (Pchunks + Pi)->len*sizeof(mp_limb_t)); + memcpy(P->coeffs + Plen, (Pchunks + Pi)->poly->coeffs, (Pchunks + Pi)->len*sizeof(ulong)); Plen += (Pchunks + Pi)->len; @@ -490,7 +490,7 @@ static void _nmod_mpoly_mul_array_threaded_worker_DEG(void * varg) while (Pi < Pl) { slong len; - mp_limb_t t2, t1, t0, u1, u0; + ulong t2, t1, t0, u1, u0; Pi = base->perm[Pi]; @@ -716,7 +716,7 @@ void _nmod_mpoly_mul_array_chunked_threaded_DEG( FLINT_ASSERT((Pchunks + Pi)->poly->exps != NULL); memcpy(P->exps + Plen, (Pchunks + Pi)->poly->exps, (Pchunks + Pi)->len*sizeof(ulong)); - memcpy(P->coeffs + Plen, (Pchunks + Pi)->poly->coeffs, (Pchunks + Pi)->len*sizeof(mp_limb_t)); + memcpy(P->coeffs + Plen, (Pchunks + Pi)->poly->coeffs, (Pchunks + Pi)->len*sizeof(ulong)); Plen += (Pchunks + Pi)->len; diff --git a/src/nmod_mpoly/mul_heap_threaded.c b/src/nmod_mpoly/mul_heap_threaded.c index ff1b706296..975d1ebb9a 100644 --- a/src/nmod_mpoly/mul_heap_threaded.c +++ b/src/nmod_mpoly/mul_heap_threaded.c @@ -25,8 +25,8 @@ */ static void _nmod_mpoly_mul_heap_part1( nmod_mpoly_t A, - const mp_limb_t * Bcoeff, const ulong * Bexp, slong Blen, - const mp_limb_t * Ccoeff, const ulong * Cexp, slong FLINT_UNUSED(Clen), + const ulong * Bcoeff, const ulong * Bexp, slong Blen, + const ulong * Ccoeff, const ulong * Cexp, slong FLINT_UNUSED(Clen), slong * start, slong * end, slong * hind, @@ -43,8 +43,8 @@ static void _nmod_mpoly_mul_heap_part1( slong * store, * store_base; slong Alen; ulong * Aexp = A->exps; - mp_limb_t * Acoeff = A->coeffs; - mp_limb_t acc0, acc1, acc2, pp0, pp1; + ulong * Acoeff = A->coeffs; + ulong acc0, acc1, acc2, pp0, pp1; FLINT_ASSERT(S->N == 1); @@ -165,8 +165,8 @@ static void _nmod_mpoly_mul_heap_part1( static void _nmod_mpoly_mul_heap_part( nmod_mpoly_t A, - const mp_limb_t * Bcoeff, const ulong * Bexp, slong Blen, - const mp_limb_t * Ccoeff, const ulong * Cexp, slong FLINT_UNUSED(Clen), + const ulong * Bcoeff, const ulong * Bexp, slong Blen, + const ulong * Ccoeff, const ulong * Cexp, slong FLINT_UNUSED(Clen), slong * start, slong * end, slong * hind, @@ -187,7 +187,7 @@ static void _nmod_mpoly_mul_heap_part( slong * store, * store_base; slong Alen; ulong * Aexp = A->exps; - mp_limb_t * Acoeff = A->coeffs; + ulong * Acoeff = A->coeffs; ulong acc0, acc1, acc2, pp0, pp1; /* tmp allocs from S->big_mem */ @@ -352,12 +352,12 @@ typedef struct slong nthreads; slong ndivs; const nmod_mpoly_ctx_struct * ctx; - mp_limb_t * Acoeff; + ulong * Acoeff; ulong * Aexp; - const mp_limb_t * Bcoeff; + const ulong * Bcoeff; const ulong * Bexp; slong Blen; - const mp_limb_t * Ccoeff; + const ulong * Ccoeff; const ulong * Cexp; slong Clen; slong N; @@ -580,7 +580,7 @@ static void _join_worker(void * varg) FLINT_ASSERT(divs[i].A->exps != NULL); memcpy(base->Acoeff + divs[i].Aoffset, divs[i].A->coeffs, - divs[i].A->length*sizeof(mp_limb_t)); + divs[i].A->length*sizeof(ulong)); memcpy(base->Aexp + N*divs[i].Aoffset, divs[i].A->exps, 
N*divs[i].A->length*sizeof(ulong)); @@ -592,8 +592,8 @@ static void _join_worker(void * varg) static void _nmod_mpoly_mul_heap_threaded( nmod_mpoly_t A, - const mp_limb_t * Bcoeff, const ulong * Bexp, slong Blen, - const mp_limb_t * Ccoeff, const ulong * Cexp, slong Clen, + const ulong * Bcoeff, const ulong * Bexp, slong Blen, + const ulong * Ccoeff, const ulong * Cexp, slong Clen, flint_bitcnt_t bits, slong N, const ulong * cmpmask, diff --git a/src/nmod_mpoly/mul_johnson.c b/src/nmod_mpoly/mul_johnson.c index 9de8b601ec..1160c327ec 100644 --- a/src/nmod_mpoly/mul_johnson.c +++ b/src/nmod_mpoly/mul_johnson.c @@ -17,8 +17,8 @@ slong _nmod_mpoly_mul_johnson1( nmod_mpoly_t A, - const mp_limb_t * coeff2, const ulong * exp2, slong len2, - const mp_limb_t * coeff3, const ulong * exp3, slong len3, + const ulong * coeff2, const ulong * exp2, slong len2, + const ulong * coeff3, const ulong * exp3, slong len3, ulong maskhi, nmod_t fctx) { @@ -30,7 +30,7 @@ slong _nmod_mpoly_mul_johnson1( slong * Q; mpoly_heap_t * x; slong len1; - mp_limb_t * p1 = A->coeffs; + ulong * p1 = A->coeffs; ulong * e1 = A->exps; slong * hind; ulong exp; @@ -144,8 +144,8 @@ slong _nmod_mpoly_mul_johnson1( slong _nmod_mpoly_mul_johnson( nmod_mpoly_t A, - const mp_limb_t * coeff2, const ulong * exp2, slong len2, - const mp_limb_t * coeff3, const ulong * exp3, slong len3, + const ulong * coeff2, const ulong * exp2, slong len2, + const ulong * coeff3, const ulong * exp3, slong len3, flint_bitcnt_t bits, slong N, const ulong * cmpmask, @@ -159,7 +159,7 @@ slong _nmod_mpoly_mul_johnson( slong * Q; mpoly_heap_t * x; slong len1; - mp_limb_t * p1 = A->coeffs; + ulong * p1 = A->coeffs; ulong * e1 = A->exps; ulong * exp, * exps; ulong ** exp_list; diff --git a/src/nmod_mpoly/pow_rmul.c b/src/nmod_mpoly/pow_rmul.c index 76db9526f1..64714ad25e 100644 --- a/src/nmod_mpoly/pow_rmul.c +++ b/src/nmod_mpoly/pow_rmul.c @@ -15,7 +15,7 @@ void _nmod_mpoly_pow_rmul( nmod_mpoly_t A, - const mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, + const ulong * Bcoeffs, const ulong * Bexps, slong Blen, ulong k, slong N, const ulong * cmpmask, diff --git a/src/nmod_mpoly/profile/p-sqrt.c b/src/nmod_mpoly/profile/p-sqrt.c index a28b9621d8..cacb22ec01 100644 --- a/src/nmod_mpoly/profile/p-sqrt.c +++ b/src/nmod_mpoly/profile/p-sqrt.c @@ -21,7 +21,7 @@ int main(void) timeit_t timer; slong iters, j, n; nmod_mpoly_ctx_t ctx; - mp_limb_t p = n_nextprime(UWORD(1) << (SMALL_FMPZ_BITCOUNT_MAX), 1); + ulong p = n_nextprime(UWORD(1) << (SMALL_FMPZ_BITCOUNT_MAX), 1); const char * vars[] = {"x", "y", "z", "t", "u"}; nmod_mpoly_ctx_init(ctx, 5, ORD_LEX, p); diff --git a/src/nmod_mpoly/quadratic_root.c b/src/nmod_mpoly/quadratic_root.c index d95f70fda7..39c7750c80 100644 --- a/src/nmod_mpoly/quadratic_root.c +++ b/src/nmod_mpoly/quadratic_root.c @@ -37,7 +37,7 @@ static int _nmod_mpoly_quadratic_root_heap( mpoly_heap_t * chain; slong * store, * store_base; mpoly_heap_t * x; - mp_limb_t * Qcoeffs = Q->coeffs; + ulong * Qcoeffs = Q->coeffs; ulong * Qexps = Q->exps; ulong * exp, * exps; ulong ** exp_list; @@ -345,7 +345,7 @@ int nmod_mpoly_quadratic_root( if (ctx->mod.n != 2) { - mp_limb_t c = (ctx->mod.n - 1)/2; + ulong c = (ctx->mod.n - 1)/2; nmod_mpoly_t t1, t2; nmod_mpoly_init(t1, ctx); diff --git a/src/nmod_mpoly/randtest.c b/src/nmod_mpoly/randtest.c index ffd431b254..23e4a43ab0 100644 --- a/src/nmod_mpoly/randtest.c +++ b/src/nmod_mpoly/randtest.c @@ -16,7 +16,7 @@ void nmod_mpoly_randtest_bits(nmod_mpoly_t A, flint_rand_t state, slong length, flint_bitcnt_t 
exp_bits, const nmod_mpoly_ctx_t ctx) { - mp_limb_t p = ctx->mod.n; + ulong p = ctx->mod.n; slong i, j, nvars = ctx->minfo->nvars; fmpz * exp; TMP_INIT; @@ -46,7 +46,7 @@ void nmod_mpoly_randtest_bits(nmod_mpoly_t A, flint_rand_t state, void nmod_mpoly_randtest_bound(nmod_mpoly_t A, flint_rand_t state, slong length, ulong exp_bound, const nmod_mpoly_ctx_t ctx) { - mp_limb_t p = ctx->mod.n; + ulong p = ctx->mod.n; slong i, j, nvars = ctx->minfo->nvars; ulong * exp; TMP_INIT; @@ -71,7 +71,7 @@ void nmod_mpoly_randtest_bound(nmod_mpoly_t A, flint_rand_t state, void nmod_mpoly_randtest_bounds(nmod_mpoly_t A, flint_rand_t state, slong length, ulong * exp_bounds, const nmod_mpoly_ctx_t ctx) { - mp_limb_t p = ctx->mod.n; + ulong p = ctx->mod.n; slong i, j, nvars = ctx->minfo->nvars; ulong * exp; TMP_INIT; diff --git a/src/nmod_mpoly/realloc.c b/src/nmod_mpoly/realloc.c index 4da87cc640..3bb155ca0a 100644 --- a/src/nmod_mpoly/realloc.c +++ b/src/nmod_mpoly/realloc.c @@ -30,5 +30,5 @@ void nmod_mpoly_realloc( A->exps = (ulong *) flint_realloc(A->exps, A->exps_alloc*sizeof(ulong)); A->coeffs_alloc = alloc; - A->coeffs = (mp_limb_t *) flint_realloc(A->coeffs, A->coeffs_alloc*sizeof(ulong)); + A->coeffs = (ulong *) flint_realloc(A->coeffs, A->coeffs_alloc*sizeof(ulong)); } diff --git a/src/nmod_mpoly/reverse.c b/src/nmod_mpoly/reverse.c index 7f94755e11..e83c712a9f 100644 --- a/src/nmod_mpoly/reverse.c +++ b/src/nmod_mpoly/reverse.c @@ -29,7 +29,7 @@ void nmod_mpoly_reverse(nmod_mpoly_t A, else { for (i = 0; i < Blen/2; i++) - FLINT_SWAP(mp_limb_t, A->coeffs[i], A->coeffs[Blen - i - 1]); + FLINT_SWAP(ulong, A->coeffs[i], A->coeffs[Blen - i - 1]); } mpoly_reverse(A->exps, B->exps, Blen, N); diff --git a/src/nmod_mpoly/scalar.c b/src/nmod_mpoly/scalar.c index fe4bdf8daf..142252058a 100644 --- a/src/nmod_mpoly/scalar.c +++ b/src/nmod_mpoly/scalar.c @@ -16,10 +16,10 @@ #include "nmod_mpoly.h" slong _nmod_mpoly_scalar_addmul_ui1( - mp_limb_t * Acoeffs, ulong * Aexps, - const mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, - const mp_limb_t * Ccoeffs, const ulong * Cexps, slong Clen, - mp_limb_t d, + ulong * Acoeffs, ulong * Aexps, + const ulong * Bcoeffs, const ulong * Bexps, slong Blen, + const ulong * Ccoeffs, const ulong * Cexps, slong Clen, + ulong d, ulong maskhi, nmod_t fctx) { @@ -71,10 +71,10 @@ slong _nmod_mpoly_scalar_addmul_ui1( } static slong _nmod_mpoly_scalar_addmul_ui( - mp_limb_t * Acoeffs, ulong * Aexps, - const mp_limb_t * Bcoeffs, const ulong * Bexps, slong Blen, - const mp_limb_t * Ccoeffs, const ulong * Cexps, slong Clen, - mp_limb_t d, + ulong * Acoeffs, ulong * Aexps, + const ulong * Bcoeffs, const ulong * Bexps, slong Blen, + const ulong * Ccoeffs, const ulong * Cexps, slong Clen, + ulong d, slong N, const ulong * cmpmask, nmod_t fctx) @@ -139,7 +139,7 @@ void nmod_mpoly_scalar_addmul_ui( nmod_mpoly_t A, const nmod_mpoly_t B, const nmod_mpoly_t C, - mp_limb_t d, + ulong d, const nmod_mpoly_ctx_t ctx) { ulong Abits; @@ -220,7 +220,7 @@ void nmod_mpoly_scalar_addmul_ui( void nmod_mpoly_scalar_mul_nmod_invertible( nmod_mpoly_t A, const nmod_mpoly_t B, - mp_limb_t c, + ulong c, const nmod_mpoly_ctx_t ctx) { FLINT_ASSERT(c != 0); @@ -258,13 +258,13 @@ void nmod_mpoly_scalar_mul_nmod_invertible( void nmod_mpoly_scalar_mul_nmod_general( nmod_mpoly_t A, const nmod_mpoly_t B, - mp_limb_t c, + ulong c, const nmod_mpoly_ctx_t ctx) { slong i, N; slong Alen, Blen; ulong * Aexp, * Bexp; - mp_limb_t * Acoeff, * Bcoeff; + ulong * Acoeff, * Bcoeff; FLINT_ASSERT(c < ctx->mod.n); diff --git 
a/src/nmod_mpoly/sort_terms.c b/src/nmod_mpoly/sort_terms.c index af87fd3d33..22738c0cab 100644 --- a/src/nmod_mpoly/sort_terms.c +++ b/src/nmod_mpoly/sort_terms.c @@ -60,7 +60,7 @@ void _nmod_mpoly_radix_sort1(nmod_mpoly_t A, slong left, slong right, if (((A->exps + 1*cur)[0] & mask) != cmp) { { - mp_limb_t t; + ulong t; t = A->coeffs[mid]; A->coeffs[mid] = A->coeffs[cur]; A->coeffs[cur] = t; @@ -118,7 +118,7 @@ void _nmod_mpoly_radix_sort(nmod_mpoly_t A, slong left, slong right, if (((A->exps + N*check)[off] & mask) != cmp) { { - mp_limb_t t; + ulong t; t = A->coeffs[mid]; A->coeffs[mid] = A->coeffs[check]; A->coeffs[check] = t; diff --git a/src/nmod_mpoly/sqrt_heap.c b/src/nmod_mpoly/sqrt_heap.c index 4f2c27a067..795f975693 100644 --- a/src/nmod_mpoly/sqrt_heap.c +++ b/src/nmod_mpoly/sqrt_heap.c @@ -24,7 +24,7 @@ static int _is_proved_not_square_medprime( int count, flint_rand_t state, - const mp_limb_t * Acoeffs, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, flint_bitcnt_t Abits, @@ -94,7 +94,7 @@ static int _is_proved_not_square_medprime( static int _is_proved_not_square( int count, flint_rand_t state, - const mp_limb_t * Acoeffs, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, flint_bitcnt_t Abits, @@ -103,7 +103,7 @@ static int _is_proved_not_square( { int tries_left, success = 0; slong i, N = mpoly_words_per_exp(Abits, mctx); - mp_limb_t eval, * alphas; + ulong eval, * alphas; ulong * t; TMP_INIT; @@ -121,7 +121,7 @@ static int _is_proved_not_square( tries_left = 3*count; - alphas = (mp_limb_t *) TMP_ALLOC(mctx->nvars*sizeof(mp_limb_t)); + alphas = (ulong *) TMP_ALLOC(mctx->nvars*sizeof(ulong)); next_p: @@ -148,7 +148,7 @@ static int _is_proved_not_square( static int _nmod_mpoly_sqrt_heap1( nmod_mpoly_t Q, - const mp_limb_t * Acoeffs, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, flint_bitcnt_t bits, @@ -163,7 +163,7 @@ static int _nmod_mpoly_sqrt_heap1( slong exp_alloc; slong * store, * store_base; mpoly_heap_t * x; - mp_limb_t * Qcoeffs = Q->coeffs; + ulong * Qcoeffs = Q->coeffs; ulong * Qexps = Q->exps; ulong mask, exp, exp3 = 0; ulong maskhi; @@ -367,7 +367,7 @@ static int _nmod_mpoly_sqrt_heap1( static int _nmod_mpoly_sqrt_heap( nmod_mpoly_t Q, - const mp_limb_t * Acoeffs, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, flint_bitcnt_t bits, @@ -385,7 +385,7 @@ static int _nmod_mpoly_sqrt_heap( mpoly_heap_t ** chain; slong * store, * store_base; mpoly_heap_t * x; - mp_limb_t * Qcoeffs = Q->coeffs; + ulong * Qcoeffs = Q->coeffs; ulong * Qexps = Q->exps; ulong * exp, * exp3; ulong * exps[64]; @@ -663,7 +663,7 @@ int nmod_mpoly_sqrt_heap(nmod_mpoly_t Q, const nmod_mpoly_t A, if ((ctx->mod.n % 2) == 0) { flint_bitcnt_t bits = A->bits; - mp_limb_t * Aexps = A->exps; + ulong * Aexps = A->exps; slong Alen = A->length; slong i, N = mpoly_words_per_exp(bits, ctx->minfo); ulong mask = (bits <= FLINT_BITS) ? 
mpoly_overflow_mask_sp(bits) : 0; diff --git a/src/nmod_mpoly/sub.c b/src/nmod_mpoly/sub.c index 698542c73a..235027a9ee 100644 --- a/src/nmod_mpoly/sub.c +++ b/src/nmod_mpoly/sub.c @@ -13,9 +13,9 @@ #include "mpoly.h" #include "nmod_mpoly.h" -slong _nmod_mpoly_sub1(mp_limb_t * coeff1, ulong * exp1, - const mp_limb_t * coeff2, const ulong * exp2, slong len2, - const mp_limb_t * coeff3, const ulong * exp3, slong len3, +slong _nmod_mpoly_sub1(ulong * coeff1, ulong * exp1, + const ulong * coeff2, const ulong * exp2, slong len2, + const ulong * coeff3, const ulong * exp3, slong len3, ulong maskhi, nmod_t fctx) { slong i = 0, j = 0, k = 0; diff --git a/src/nmod_mpoly/test/main.c b/src/nmod_mpoly/test/main.c index 589c7c5ce2..77e978029e 100644 --- a/src/nmod_mpoly/test/main.c +++ b/src/nmod_mpoly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add_sub.c" diff --git a/src/nmod_mpoly/test/t-add_sub.c b/src/nmod_mpoly/test/t-add_sub.c index f232d5f9d6..c70c205c29 100644 --- a/src/nmod_mpoly/test/t-add_sub.c +++ b/src/nmod_mpoly/test/t-add_sub.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_mpoly_add_sub, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; slong exp_bits, exp_bits1, exp_bits2; @@ -76,7 +76,7 @@ TEST_FUNCTION_START(nmod_mpoly_add_sub, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; slong exp_bits, exp_bits1, exp_bits2; @@ -130,7 +130,7 @@ TEST_FUNCTION_START(nmod_mpoly_add_sub, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; slong exp_bits, exp_bits1, exp_bits2; @@ -185,7 +185,7 @@ TEST_FUNCTION_START(nmod_mpoly_add_sub, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k1, k2; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; slong exp_bits, exp_bits1, exp_bits2; @@ -244,7 +244,7 @@ TEST_FUNCTION_START(nmod_mpoly_add_sub, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k1, k2; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; slong exp_bits, exp_bits1, exp_bits2; @@ -303,7 +303,7 @@ TEST_FUNCTION_START(nmod_mpoly_add_sub, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; slong exp_bits, exp_bits1, exp_bits2; @@ -357,7 +357,7 @@ TEST_FUNCTION_START(nmod_mpoly_add_sub, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; slong exp_bits, exp_bits1, exp_bits2; diff --git a/src/nmod_mpoly/test/t-add_sub_ui.c b/src/nmod_mpoly/test/t-add_sub_ui.c index 829de72885..1b618a9481 100644 --- a/src/nmod_mpoly/test/t-add_sub_ui.c +++ b/src/nmod_mpoly/test/t-add_sub_ui.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_mpoly_add_sub_ui, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h; - mp_limb_t modulus; + ulong modulus; ulong c; slong len, len1, len2; slong exp_bits, exp_bits1, exp_bits2; @@ -77,7 +77,7 @@ TEST_FUNCTION_START(nmod_mpoly_add_sub_ui, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g; - mp_limb_t modulus; + ulong modulus; ulong c; slong len1, len2; slong exp_bits1, exp_bits2; diff --git a/src/nmod_mpoly/test/t-cmp.c b/src/nmod_mpoly/test/t-cmp.c index 3b6594e8ea..cb3332ebcc 100644 --- a/src/nmod_mpoly/test/t-cmp.c +++ b/src/nmod_mpoly/test/t-cmp.c @@ -25,7 +25,7 @@ TEST_FUNCTION_START(nmod_mpoly_cmp, 
state) nmod_mpoly_t f, g, mf, mg; slong len; flint_bitcnt_t exp_bits; - mp_limb_t modulus; + ulong modulus; modulus = UWORD(1) + n_randint(state, -UWORD(1)); nmod_mpoly_ctx_init_rand(ctx, state, 20, modulus); @@ -78,7 +78,7 @@ TEST_FUNCTION_START(nmod_mpoly_cmp, state) int b_a, b_b, b_c, b_aa, b_bb, b_cc; int c_a, c_b, c_c, c_aa, c_bb, c_cc; flint_bitcnt_t newbits; - mp_limb_t modulus; + ulong modulus; modulus = UWORD(1) + n_randint(state, -UWORD(1)); nmod_mpoly_ctx_init_rand(ctx, state, 20, modulus); diff --git a/src/nmod_mpoly/test/t-compose_nmod_mpoly.c b/src/nmod_mpoly/test/t-compose_nmod_mpoly.c index 669d6be262..5127a5dc82 100644 --- a/src/nmod_mpoly/test/t-compose_nmod_mpoly.c +++ b/src/nmod_mpoly/test/t-compose_nmod_mpoly.c @@ -99,7 +99,7 @@ TEST_FUNCTION_START(nmod_mpoly_compose_nmod_mpoly, state) slong nvarsB, nvarsAC; slong len; flint_bitcnt_t exp_bits; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -195,7 +195,7 @@ TEST_FUNCTION_START(nmod_mpoly_compose_nmod_mpoly, state) nmod_mpoly_struct ** vals1; nmod_mpoly_t f, g, g1, g2; nmod_mpoly_ctx_t ctx; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -265,13 +265,13 @@ TEST_FUNCTION_START(nmod_mpoly_compose_nmod_mpoly, state) nmod_mpoly_ctx_t ctx1, ctx2; nmod_mpoly_t f, g, g1, g2; nmod_mpoly_struct ** vals1; - mp_limb_t fe, ge; - mp_limb_t * vals2, * vals3; + ulong fe, ge; + ulong * vals2, * vals3; slong nvars1, nvars2; slong len1, len2; slong exp_bound1; flint_bitcnt_t exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -302,13 +302,13 @@ TEST_FUNCTION_START(nmod_mpoly_compose_nmod_mpoly, state) nmod_mpoly_randtest_bound(vals1[v], state, len2, exp_bits2, ctx2); } - vals2 = (mp_limb_t *) flint_malloc(nvars2*sizeof(mp_limb_t)); + vals2 = (ulong *) flint_malloc(nvars2*sizeof(ulong)); for (v = 0; v < nvars2; v++) { vals2[v] = n_randlimb(state); } - vals3 = (mp_limb_t *) flint_malloc(nvars1*sizeof(mp_limb_t)); + vals3 = (ulong *) flint_malloc(nvars1*sizeof(ulong)); for (v = 0; v < nvars1; v++) { vals3[v] = nmod_mpoly_evaluate_all_ui(vals1[v], vals2, ctx2); @@ -367,11 +367,11 @@ TEST_FUNCTION_START(nmod_mpoly_compose_nmod_mpoly, state) nmod_mpoly_ctx_t ctx1, ctx2; nmod_mpoly_t f, g, g1, g2; nmod_mpoly_struct ** vals1; - mp_limb_t * vals2; + ulong * vals2; slong nvars1; slong len1; flint_bitcnt_t exp_bits1; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -391,7 +391,7 @@ TEST_FUNCTION_START(nmod_mpoly_compose_nmod_mpoly, state) vals1 = (nmod_mpoly_struct **) flint_malloc(nvars1 * sizeof(nmod_mpoly_struct *)); - vals2 = (mp_limb_t *) flint_malloc(nvars1*sizeof(mp_limb_t)); + vals2 = (ulong *) flint_malloc(nvars1*sizeof(ulong)); for (v = 0; v < nvars1; v++) { vals1[v] = (nmod_mpoly_struct *) flint_malloc( diff --git a/src/nmod_mpoly/test/t-compose_nmod_poly.c b/src/nmod_mpoly/test/t-compose_nmod_poly.c index e4f3671db0..a03b246e98 100644 --- a/src/nmod_mpoly/test/t-compose_nmod_poly.c +++ b/src/nmod_mpoly/test/t-compose_nmod_poly.c @@ -89,12 +89,12 @@ TEST_FUNCTION_START(nmod_mpoly_compose_nmod_poly, state) nmod_mpoly_t f; nmod_poly_t g; nmod_poly_struct ** vals1; - mp_limb_t fe, ge; - mp_limb_t vals2, * vals3; + ulong fe, ge; + ulong vals2, * vals3; slong nvars1; slong len1, len2; slong exp_bound1; - mp_limb_t modulus; + 
ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -122,7 +122,7 @@ TEST_FUNCTION_START(nmod_mpoly_compose_nmod_poly, state) vals2 = n_randint(state, modulus); - vals3 = (mp_limb_t *) flint_malloc(nvars1*sizeof(mp_limb_t)); + vals3 = (ulong *) flint_malloc(nvars1*sizeof(ulong)); for (v = 0; v < nvars1; v++) { vals3[v] = nmod_poly_evaluate_nmod(vals1[v], vals2); diff --git a/src/nmod_mpoly/test/t-content_vars.c b/src/nmod_mpoly/test/t-content_vars.c index f17bc92e8b..dea7791f24 100644 --- a/src/nmod_mpoly/test/t-content_vars.c +++ b/src/nmod_mpoly/test/t-content_vars.c @@ -49,7 +49,7 @@ TEST_FUNCTION_START(nmod_mpoly_content_vars, state) slong nvars, num_vars, len; ulong * exp_bounds; slong * vars; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); diff --git a/src/nmod_mpoly/test/t-degree.c b/src/nmod_mpoly/test/t-degree.c index 8fe63e17af..01f8ee7286 100644 --- a/src/nmod_mpoly/test/t-degree.c +++ b/src/nmod_mpoly/test/t-degree.c @@ -26,7 +26,7 @@ TEST_FUNCTION_START(nmod_mpoly_degree, state) fmpz_t fdeg, gdeg, hdeg; slong len1, len2; flint_bitcnt_t exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randbits(state, n_randint(state, FLINT_BITS)); modulus = FLINT_MAX(UWORD(2), modulus); @@ -83,7 +83,7 @@ TEST_FUNCTION_START(nmod_mpoly_degree, state) fmpz_t fdeg, gdeg, hdeg; slong len1, len2; flint_bitcnt_t exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randbits(state, n_randint(state, FLINT_BITS)); modulus = FLINT_MAX(UWORD(2), modulus); diff --git a/src/nmod_mpoly/test/t-derivative.c b/src/nmod_mpoly/test/t-derivative.c index 1ccab71f81..03286de173 100644 --- a/src/nmod_mpoly/test/t-derivative.c +++ b/src/nmod_mpoly/test/t-derivative.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_mpoly_derivative, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, fp, gp, hp, t1, t2; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; slong idx; @@ -114,7 +114,7 @@ TEST_FUNCTION_START(nmod_mpoly_derivative, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, fp, gp, t1, t2; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; slong idx; diff --git a/src/nmod_mpoly/test/t-div.c b/src/nmod_mpoly/test/t-div.c index 9955ec162d..4a5c35feea 100644 --- a/src/nmod_mpoly/test/t-div.c +++ b/src/nmod_mpoly/test/t-div.c @@ -56,7 +56,7 @@ TEST_FUNCTION_START(nmod_mpoly_div, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k, l; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; @@ -139,9 +139,9 @@ TEST_FUNCTION_START(nmod_mpoly_div, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, q, r, k; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; fmpz * shifts, * strides; slong n, nvars; @@ -163,9 +163,9 @@ TEST_FUNCTION_START(nmod_mpoly_div, state) n = FLINT_MAX(WORD(1), nvars); max_bound = 1 + 400/n/n; - exp_bound = (mp_limb_t *) flint_malloc(nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(nvars*sizeof(ulong)); + exp_bound2 = 
(ulong *) flint_malloc(nvars*sizeof(ulong)); shifts = (fmpz *) flint_malloc(nvars*sizeof(fmpz)); strides = (fmpz *) flint_malloc(nvars*sizeof(fmpz)); for (j = 0; j < nvars; j++) diff --git a/src/nmod_mpoly/test/t-div_monagan_pearce.c b/src/nmod_mpoly/test/t-div_monagan_pearce.c index fedb2bd8d5..1304a7d99c 100644 --- a/src/nmod_mpoly/test/t-div_monagan_pearce.c +++ b/src/nmod_mpoly/test/t-div_monagan_pearce.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_mpoly_div_monagan_pearce, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k, l; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; @@ -105,7 +105,7 @@ TEST_FUNCTION_START(nmod_mpoly_div_monagan_pearce, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, q, r, k; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2, exp_bound, exp_bound1, exp_bound2; slong n; diff --git a/src/nmod_mpoly/test/t-divides.c b/src/nmod_mpoly/test/t-divides.c index 75d1fd4720..bc5c821753 100644 --- a/src/nmod_mpoly/test/t-divides.c +++ b/src/nmod_mpoly/test/t-divides.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_mpoly_divides, state) nmod_mpoly_t f, g, h, k, hsave, gsave; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 4 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -101,8 +101,8 @@ TEST_FUNCTION_START(nmod_mpoly_divides, state) nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; - mp_limb_t modulus; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong modulus; slong n; modulus = n_randint(state, (i % 4 == 0) ? 4: FLINT_BITS - 1) + 1; @@ -122,9 +122,9 @@ TEST_FUNCTION_START(nmod_mpoly_divides, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 150/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); @@ -176,8 +176,8 @@ TEST_FUNCTION_START(nmod_mpoly_divides, state) nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; - mp_limb_t modulus; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong modulus; fmpz * shifts, * strides; modulus = n_randint(state, (i % 4 == 0) ? 
4: FLINT_BITS - 1) + 1; @@ -196,9 +196,9 @@ TEST_FUNCTION_START(nmod_mpoly_divides, state) len2 = n_randint(state, 50); max_bound = 1 + 20/FLINT_MAX(WORD(1), ctx->minfo->nvars); - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); shifts = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); strides = (fmpz *) flint_malloc(ctx->minfo->nvars*sizeof(fmpz)); for (j = 0; j < ctx->minfo->nvars; j++) @@ -268,8 +268,8 @@ TEST_FUNCTION_START(nmod_mpoly_divides, state) nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; - mp_limb_t modulus; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong modulus; slong n; modulus = n_randint(state, (i % 4 == 0) ? 4: FLINT_BITS - 1) + 1; @@ -289,9 +289,9 @@ TEST_FUNCTION_START(nmod_mpoly_divides, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 100/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); @@ -345,8 +345,8 @@ TEST_FUNCTION_START(nmod_mpoly_divides, state) nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; - mp_limb_t modulus; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong modulus; slong n; modulus = n_randint(state, (i % 4 == 0) ? 4: FLINT_BITS - 1) + 1; @@ -366,9 +366,9 @@ TEST_FUNCTION_START(nmod_mpoly_divides, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 100/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); diff --git a/src/nmod_mpoly/test/t-divides_dense.c b/src/nmod_mpoly/test/t-divides_dense.c index e39515666c..04ee8121d8 100644 --- a/src/nmod_mpoly/test/t-divides_dense.c +++ b/src/nmod_mpoly/test/t-divides_dense.c @@ -22,8 +22,8 @@ TEST_FUNCTION_START(nmod_mpoly_divides_dense, state) nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; - mp_limb_t modulus; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong modulus; slong n; modulus = n_randint(state, (i % 4 == 0) ? 
4: FLINT_BITS - 1) + 1; @@ -43,9 +43,9 @@ TEST_FUNCTION_START(nmod_mpoly_divides_dense, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 100/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); @@ -96,8 +96,8 @@ TEST_FUNCTION_START(nmod_mpoly_divides_dense, state) nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; - mp_limb_t modulus; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong modulus; slong n; modulus = n_randint(state, (i % 4 == 0) ? 4: FLINT_BITS - 1) + 1; @@ -117,9 +117,9 @@ TEST_FUNCTION_START(nmod_mpoly_divides_dense, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 20/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); @@ -169,8 +169,8 @@ TEST_FUNCTION_START(nmod_mpoly_divides_dense, state) nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; - mp_limb_t modulus; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong modulus; slong n; modulus = n_randint(state, (i % 4 == 0) ? 4: FLINT_BITS - 1) + 1; @@ -190,9 +190,9 @@ TEST_FUNCTION_START(nmod_mpoly_divides_dense, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 100/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); @@ -244,8 +244,8 @@ TEST_FUNCTION_START(nmod_mpoly_divides_dense, state) nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k; slong len, len1, len2; - mp_limb_t max_bound, * exp_bound, * exp_bound1, * exp_bound2; - mp_limb_t modulus; + ulong max_bound, * exp_bound, * exp_bound1, * exp_bound2; + ulong modulus; slong n; modulus = n_randint(state, (i % 4 == 0) ? 
4: FLINT_BITS - 1) + 1; @@ -265,9 +265,9 @@ TEST_FUNCTION_START(nmod_mpoly_divides_dense, state) n = FLINT_MAX(WORD(1), ctx->minfo->nvars); max_bound = 1 + 100/n/n; - exp_bound = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound1 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); - exp_bound2 = (mp_limb_t *) flint_malloc(ctx->minfo->nvars*sizeof(mp_limb_t)); + exp_bound = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound1 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); + exp_bound2 = (ulong *) flint_malloc(ctx->minfo->nvars*sizeof(ulong)); for (j = 0; j < ctx->minfo->nvars; j++) { exp_bound[j] = UWORD(1) << (FLINT_BITS - 1); diff --git a/src/nmod_mpoly/test/t-divides_heap_threaded.c b/src/nmod_mpoly/test/t-divides_heap_threaded.c index 11eb1f31e8..a0199e4451 100644 --- a/src/nmod_mpoly/test/t-divides_heap_threaded.c +++ b/src/nmod_mpoly/test/t-divides_heap_threaded.c @@ -69,7 +69,7 @@ TEST_FUNCTION_START(nmod_mpoly_divides_heap_threaded, state) nmod_mpoly_t f, g, h, k; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -130,7 +130,7 @@ TEST_FUNCTION_START(nmod_mpoly_divides_heap_threaded, state) nmod_mpoly_t f, g, h; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -188,7 +188,7 @@ TEST_FUNCTION_START(nmod_mpoly_divides_heap_threaded, state) nmod_mpoly_t f, g, h; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -246,7 +246,7 @@ TEST_FUNCTION_START(nmod_mpoly_divides_heap_threaded, state) nmod_mpoly_t f, g, p, h1, h2; slong len1, len2, len3; flint_bitcnt_t exp_bits1, exp_bits2, exp_bound3; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -310,7 +310,7 @@ TEST_FUNCTION_START(nmod_mpoly_divides_heap_threaded, state) nmod_mpoly_t f, g, p, h1; slong len1, len2, len3; flint_bitcnt_t exp_bits1, exp_bits2, exp_bound3; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -372,7 +372,7 @@ TEST_FUNCTION_START(nmod_mpoly_divides_heap_threaded, state) nmod_mpoly_t f, g, p, h1; slong len1, len2, len3; flint_bitcnt_t exp_bits1, exp_bits2, exp_bound3; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); diff --git a/src/nmod_mpoly/test/t-divides_monagan_pearce.c b/src/nmod_mpoly/test/t-divides_monagan_pearce.c index 5a7e93d8c3..0882dd4c41 100644 --- a/src/nmod_mpoly/test/t-divides_monagan_pearce.c +++ b/src/nmod_mpoly/test/t-divides_monagan_pearce.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_mpoly_divides_monagan_pearce, state) nmod_mpoly_t f, g, h, k; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -80,7 +80,7 @@ TEST_FUNCTION_START(nmod_mpoly_divides_monagan_pearce, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k; - mp_limb_t modulus; + ulong modulus; slong nvars, len, len1, len2, exp_bound, exp_bound1, exp_bound2; slong n; @@ 
-145,7 +145,7 @@ TEST_FUNCTION_START(nmod_mpoly_divides_monagan_pearce, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; slong exp_bits, exp_bits1, exp_bits2; @@ -208,7 +208,7 @@ TEST_FUNCTION_START(nmod_mpoly_divides_monagan_pearce, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h; - mp_limb_t modulus; + ulong modulus; slong nvars, len, len1, len2, exp_bound, exp_bound1, exp_bound2; slong n; @@ -267,7 +267,7 @@ TEST_FUNCTION_START(nmod_mpoly_divides_monagan_pearce, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; slong exp_bits, exp_bits1, exp_bits2; @@ -330,7 +330,7 @@ TEST_FUNCTION_START(nmod_mpoly_divides_monagan_pearce, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h; - mp_limb_t modulus; + ulong modulus; slong nvars, len, len1, len2, exp_bound, exp_bound1, exp_bound2; slong n; diff --git a/src/nmod_mpoly/test/t-divrem.c b/src/nmod_mpoly/test/t-divrem.c index b657c29d4d..614d41af81 100644 --- a/src/nmod_mpoly/test/t-divrem.c +++ b/src/nmod_mpoly/test/t-divrem.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_mpoly_divrem, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k, r; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; @@ -85,7 +85,7 @@ TEST_FUNCTION_START(nmod_mpoly_divrem, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k, r; - mp_limb_t modulus; + ulong modulus; slong nvars, len, len1, len2, exp_bound, exp_bound1, exp_bound2; modulus = n_randint(state, FLINT_BITS - 1) + 1; @@ -150,7 +150,7 @@ TEST_FUNCTION_START(nmod_mpoly_divrem, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, r1, r2; - mp_limb_t modulus; + ulong modulus; slong nvars, len, len1, len2, exp_bound, exp_bound1, exp_bound2; modulus = n_randint(state, FLINT_BITS - 1) + 1; @@ -216,7 +216,7 @@ TEST_FUNCTION_START(nmod_mpoly_divrem, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, r1, r2; - mp_limb_t modulus; + ulong modulus; slong nvars, len, len1, len2, exp_bound, exp_bound1, exp_bound2; modulus = n_randint(state, FLINT_BITS - 1) + 1; @@ -281,7 +281,7 @@ TEST_FUNCTION_START(nmod_mpoly_divrem, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k, r1; - mp_limb_t modulus; + ulong modulus; slong nvars, len, len1, len2, exp_bound, exp_bound1, exp_bound2; modulus = n_randint(state, FLINT_BITS - 1) + 1; @@ -350,7 +350,7 @@ TEST_FUNCTION_START(nmod_mpoly_divrem, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k, r1; - mp_limb_t modulus; + ulong modulus; slong nvars, len, len1, len2, exp_bound, exp_bound1, exp_bound2; modulus = n_randint(state, FLINT_BITS - 1) + 1; diff --git a/src/nmod_mpoly/test/t-divrem_ideal.c b/src/nmod_mpoly/test/t-divrem_ideal.c index 950c2fb76b..c9afc64cb7 100644 --- a/src/nmod_mpoly/test/t-divrem_ideal.c +++ b/src/nmod_mpoly/test/t-divrem_ideal.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_mpoly_divrem_ideal, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k, r; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; nmod_mpoly_struct * qarr[1], * darr[1]; @@ -91,7 +91,7 @@ TEST_FUNCTION_START(nmod_mpoly_divrem_ideal, state) nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, r, k1, k2; nmod_mpoly_struct * g, * q; - mp_limb_t modulus; + ulong modulus; slong nvars, len, len1, len2, exp_bound, exp_bound1, exp_bound2, num; nmod_mpoly_struct * qarr[5], * darr[5]; fmpz * shifts, * strides; @@ -210,7 +210,7 @@ 
TEST_FUNCTION_START(nmod_mpoly_divrem_ideal, state) nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, r, k1, k2; nmod_mpoly_struct * g, * q; - mp_limb_t modulus; + ulong modulus; slong nvars, len, len1, len2, exp_bound, exp_bound1, exp_bound2, num; nmod_mpoly_struct * qarr[5], * darr[5]; diff --git a/src/nmod_mpoly/test/t-divrem_ideal_monagan_pearce.c b/src/nmod_mpoly/test/t-divrem_ideal_monagan_pearce.c index 33947aeb5e..d10ad633cf 100644 --- a/src/nmod_mpoly/test/t-divrem_ideal_monagan_pearce.c +++ b/src/nmod_mpoly/test/t-divrem_ideal_monagan_pearce.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_mpoly_divrem_ideal_monagan_pearce, state) nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k, r; ordering_t ord; - mp_limb_t modulus; + ulong modulus; slong maxbits; slong nvars, len, len1, len2, exp_bound, exp_bound1, exp_bound2; slong exp_bits, exp_bits1, exp_bits2; @@ -100,7 +100,7 @@ TEST_FUNCTION_START(nmod_mpoly_divrem_ideal_monagan_pearce, state) nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, r, k1, k2; nmod_mpoly_struct * g, * q; - mp_limb_t modulus; + ulong modulus; slong nvars, len, len1, len2, exp_bound, exp_bound1, exp_bound2, num; nmod_mpoly_struct * qarr[5], * darr[5]; @@ -197,7 +197,7 @@ TEST_FUNCTION_START(nmod_mpoly_divrem_ideal_monagan_pearce, state) nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, r, k1, k2; nmod_mpoly_struct * g, * q; - mp_limb_t modulus; + ulong modulus; slong nvars, len, len1, len2, exp_bound, exp_bound1, exp_bound2, num; nmod_mpoly_struct * qarr[5], * darr[5]; diff --git a/src/nmod_mpoly/test/t-divrem_monagan_pearce.c b/src/nmod_mpoly/test/t-divrem_monagan_pearce.c index 034fca2389..8692a434aa 100644 --- a/src/nmod_mpoly/test/t-divrem_monagan_pearce.c +++ b/src/nmod_mpoly/test/t-divrem_monagan_pearce.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_mpoly_divrem_monagan_pearce, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k, r; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; @@ -85,7 +85,7 @@ TEST_FUNCTION_START(nmod_mpoly_divrem_monagan_pearce, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k, r; - mp_limb_t modulus; + ulong modulus; slong nvars, len, len1, len2, exp_bound, exp_bound1, exp_bound2; modulus = n_randint(state, FLINT_BITS - 1) + 1; @@ -150,7 +150,7 @@ TEST_FUNCTION_START(nmod_mpoly_divrem_monagan_pearce, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, r1, r2; - mp_limb_t modulus; + ulong modulus; slong nvars, len, len1, len2, exp_bound, exp_bound1, exp_bound2; modulus = n_randint(state, FLINT_BITS - 1) + 1; @@ -216,7 +216,7 @@ TEST_FUNCTION_START(nmod_mpoly_divrem_monagan_pearce, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, r1, r2; - mp_limb_t modulus; + ulong modulus; slong nvars, len, len1, len2, exp_bound, exp_bound1, exp_bound2; modulus = n_randint(state, FLINT_BITS - 1) + 1; @@ -281,7 +281,7 @@ TEST_FUNCTION_START(nmod_mpoly_divrem_monagan_pearce, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k, r1; - mp_limb_t modulus; + ulong modulus; slong nvars, len, len1, len2, exp_bound, exp_bound1, exp_bound2; modulus = n_randint(state, FLINT_BITS - 1) + 1; @@ -350,7 +350,7 @@ TEST_FUNCTION_START(nmod_mpoly_divrem_monagan_pearce, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k, r1; - mp_limb_t modulus; + ulong modulus; slong nvars, len, len1, len2, exp_bound, exp_bound1, exp_bound2; modulus = n_randint(state, FLINT_BITS - 1) + 1; diff --git a/src/nmod_mpoly/test/t-evaluate.c b/src/nmod_mpoly/test/t-evaluate.c index b3d78b7512..37e58c6b62 100644 --- a/src/nmod_mpoly/test/t-evaluate.c +++ 
b/src/nmod_mpoly/test/t-evaluate.c @@ -23,12 +23,12 @@ TEST_FUNCTION_START(nmod_mpoly_evaluate, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g; - mp_limb_t fe; - mp_limb_t * vals; + ulong fe; + ulong * vals; slong * perm; slong nvars, len; flint_bitcnt_t exp_bits; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -42,7 +42,7 @@ TEST_FUNCTION_START(nmod_mpoly_evaluate, state) exp_bits = n_randint(state, 200) + 1; perm = (slong *) flint_malloc(nvars*sizeof(slong)); - vals = (mp_limb_t *) flint_malloc(nvars*sizeof(mp_limb_t)); + vals = (ulong *) flint_malloc(nvars*sizeof(ulong)); for (v = 0; v < nvars; v++) { vals[v] = n_randlimb(state); @@ -101,11 +101,11 @@ TEST_FUNCTION_START(nmod_mpoly_evaluate, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, fg; - mp_limb_t fe, ge, fge; - mp_limb_t * vals; + ulong fe, ge, fge; + ulong * vals; slong nvars, len1, len2; flint_bitcnt_t exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -121,7 +121,7 @@ TEST_FUNCTION_START(nmod_mpoly_evaluate, state) exp_bits1 = n_randint(state, 200) + 1; exp_bits2 = n_randint(state, 200) + 1; - vals = (mp_limb_t *) flint_malloc(nvars*sizeof(mp_limb_t)); + vals = (ulong *) flint_malloc(nvars*sizeof(ulong)); for (v = 0; v < nvars; v++) { vals[v] = n_randlimb(state); @@ -159,11 +159,11 @@ TEST_FUNCTION_START(nmod_mpoly_evaluate, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, fg; - mp_limb_t fe, ge, fge; - mp_limb_t * vals; + ulong fe, ge, fge; + ulong * vals; slong nvars, len1, len2; flint_bitcnt_t exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -179,7 +179,7 @@ TEST_FUNCTION_START(nmod_mpoly_evaluate, state) exp_bits1 = n_randint(state, 200) + 1; exp_bits2 = n_randint(state, 200) + 1; - vals = (mp_limb_t *) flint_malloc(nvars*sizeof(mp_limb_t)); + vals = (ulong *) flint_malloc(nvars*sizeof(ulong)); for (v = 0; v < nvars; v++) { vals[v] = n_randlimb(state); diff --git a/src/nmod_mpoly/test/t-gcd.c b/src/nmod_mpoly/test/t-gcd.c index 9c3e74de8f..0a89ea040f 100644 --- a/src/nmod_mpoly/test/t-gcd.c +++ b/src/nmod_mpoly/test/t-gcd.c @@ -276,7 +276,7 @@ TEST_FUNCTION_START(nmod_mpoly_gcd, state) nmod_mpoly_t a, b, g, t; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -331,7 +331,7 @@ TEST_FUNCTION_START(nmod_mpoly_gcd, state) nmod_mpoly_t a, b, g, t1, t2; slong len, len1; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -383,12 +383,12 @@ TEST_FUNCTION_START(nmod_mpoly_gcd, state) /* one input divides the other */ for (i = 0; i < tmul * flint_test_multiplier(); i++) { - mp_limb_t c; + ulong c; nmod_mpoly_ctx_t ctx; nmod_mpoly_t a, b, g, t1, t2; slong len, len1, len2; - mp_limb_t exp_bound, exp_bound1, exp_bound2; - mp_limb_t modulus; + ulong exp_bound, exp_bound1, exp_bound2; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 
4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -445,7 +445,7 @@ TEST_FUNCTION_START(nmod_mpoly_gcd, state) nmod_mpoly_t a, b, g, t; slong len, len1, len2; slong degbound; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -492,11 +492,11 @@ TEST_FUNCTION_START(nmod_mpoly_gcd, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t a, b, g, t; - mp_limb_t rlimb; + ulong rlimb; flint_bitcnt_t newbits; slong len, len1, len2; slong degbound; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -562,7 +562,7 @@ TEST_FUNCTION_START(nmod_mpoly_gcd, state) flint_bitcnt_t stride_bits, shift_bits; slong len, len1, len2; slong degbound; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -645,7 +645,7 @@ TEST_FUNCTION_START(nmod_mpoly_gcd, state) ulong degbounds2[4]; ulong degbounds3[4]; flint_bitcnt_t bits4; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -699,14 +699,14 @@ TEST_FUNCTION_START(nmod_mpoly_gcd, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t a, b, g, t; - mp_limb_t rlimb; + ulong rlimb; flint_bitcnt_t newbits; slong len1, len2, len3, len4; ulong degbounds1[4]; ulong degbounds2[4]; ulong degbounds3[4]; flint_bitcnt_t bits4; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -783,7 +783,7 @@ TEST_FUNCTION_START(nmod_mpoly_gcd, state) ulong degbounds2[4]; ulong degbounds3[4]; flint_bitcnt_t bits4; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); diff --git a/src/nmod_mpoly/test/t-gcd_brown.c b/src/nmod_mpoly/test/t-gcd_brown.c index 3c9569f442..2db44784df 100644 --- a/src/nmod_mpoly/test/t-gcd_brown.c +++ b/src/nmod_mpoly/test/t-gcd_brown.c @@ -144,7 +144,7 @@ TEST_FUNCTION_START(nmod_mpoly_gcd_brown, state) nmod_mpoly_t a, b, g, t; slong len, len1, len2; slong degbound; - mp_limb_t p; + ulong p; p = n_randint(state, FLINT_BITS - 1) + 1; p = n_randbits(state, p); @@ -192,7 +192,7 @@ TEST_FUNCTION_START(nmod_mpoly_gcd_brown, state) nmod_mpoly_t a, b, g, t; slong len, len1, len2; slong n, degbound; - mp_limb_t p; + ulong p; p = n_randint(state, FLINT_BITS - 1) + 1; p = n_randbits(state, p); diff --git a/src/nmod_mpoly/test/t-gcd_cofactors.c b/src/nmod_mpoly/test/t-gcd_cofactors.c index eb2ecf4962..71e7ec2da9 100644 --- a/src/nmod_mpoly/test/t-gcd_cofactors.c +++ b/src/nmod_mpoly/test/t-gcd_cofactors.c @@ -389,7 +389,7 @@ TEST_FUNCTION_START(nmod_mpoly_gcd_cofactors, state) nmod_mpoly_t a, b, g, abar, bbar, t; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -448,7 +448,7 @@ TEST_FUNCTION_START(nmod_mpoly_gcd_cofactors, state) nmod_mpoly_t a, b, g, abar, bbar, t1, t2; slong len, len1; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 
4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -504,12 +504,12 @@ TEST_FUNCTION_START(nmod_mpoly_gcd_cofactors, state) /* one input divides the other */ for (i = 0; i < tmul * flint_test_multiplier(); i++) { - mp_limb_t c; + ulong c; nmod_mpoly_ctx_t ctx; nmod_mpoly_t a, b, g, abar, bbar, t1, t2; slong len, len1, len2; - mp_limb_t exp_bound, exp_bound1, exp_bound2; - mp_limb_t modulus; + ulong exp_bound, exp_bound1, exp_bound2; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -570,7 +570,7 @@ TEST_FUNCTION_START(nmod_mpoly_gcd_cofactors, state) nmod_mpoly_t a, b, g, abar, bbar, t; slong len, len1, len2; slong degbound; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -621,11 +621,11 @@ TEST_FUNCTION_START(nmod_mpoly_gcd_cofactors, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t a, b, g, abar, bbar, t; - mp_limb_t rlimb; + ulong rlimb; flint_bitcnt_t newbits; slong len, len1, len2; slong degbound; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -696,7 +696,7 @@ TEST_FUNCTION_START(nmod_mpoly_gcd_cofactors, state) flint_bitcnt_t stride_bits, shift_bits; slong len, len1, len2; slong degbound; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -791,7 +791,7 @@ TEST_FUNCTION_START(nmod_mpoly_gcd_cofactors, state) ulong degbounds2[4]; ulong degbounds3[4]; flint_bitcnt_t bits4; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -850,14 +850,14 @@ TEST_FUNCTION_START(nmod_mpoly_gcd_cofactors, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t a, b, g, abar, bbar, t; - mp_limb_t rlimb; + ulong rlimb; flint_bitcnt_t newbits; slong len1, len2, len3, len4; ulong degbounds1[4]; ulong degbounds2[4]; ulong degbounds3[4]; flint_bitcnt_t bits4; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -939,7 +939,7 @@ TEST_FUNCTION_START(nmod_mpoly_gcd_cofactors, state) ulong degbounds2[4]; ulong degbounds3[4]; flint_bitcnt_t bits4; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); diff --git a/src/nmod_mpoly/test/t-gcd_hensel.c b/src/nmod_mpoly/test/t-gcd_hensel.c index d15bb6751a..5aec1e9e46 100644 --- a/src/nmod_mpoly/test/t-gcd_hensel.c +++ b/src/nmod_mpoly/test/t-gcd_hensel.c @@ -114,7 +114,7 @@ TEST_FUNCTION_START(nmod_mpoly_gcd_hensel, state) slong len, len1, len2; ulong degbound; ulong * degbounds, * degbounds1, * degbounds2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); diff --git a/src/nmod_mpoly/test/t-gcd_zippel.c b/src/nmod_mpoly/test/t-gcd_zippel.c index 8797f7bb86..f54e44407f 100644 --- a/src/nmod_mpoly/test/t-gcd_zippel.c +++ b/src/nmod_mpoly/test/t-gcd_zippel.c @@ -114,7 +114,7 @@ TEST_FUNCTION_START(nmod_mpoly_gcd_zippel, state) slong len, len1, len2; ulong degbound; ulong * degbounds, * degbounds1, * degbounds2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 
4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); diff --git a/src/nmod_mpoly/test/t-gcd_zippel2.c b/src/nmod_mpoly/test/t-gcd_zippel2.c index fc97133921..c446a9098f 100644 --- a/src/nmod_mpoly/test/t-gcd_zippel2.c +++ b/src/nmod_mpoly/test/t-gcd_zippel2.c @@ -114,7 +114,7 @@ TEST_FUNCTION_START(nmod_mpoly_gcd_zippel2, state) slong len, len1, len2; ulong degbound; ulong * degbounds, * degbounds1, * degbounds2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); diff --git a/src/nmod_mpoly/test/t-gen.c b/src/nmod_mpoly/test/t-gen.c index 2640ba590d..e74d86de6c 100644 --- a/src/nmod_mpoly/test/t-gen.c +++ b/src/nmod_mpoly/test/t-gen.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_mpoly_gen, state) nmod_mpoly_ctx_t ctx; nmod_mpoly_t f1, f2; slong len, exp_bits, k1, k2; - mp_limb_t modulus; + ulong modulus; modulus = UWORD(2) + n_randint(state, -UWORD(2)); nmod_mpoly_ctx_init_rand(ctx, state, 20, modulus); diff --git a/src/nmod_mpoly/test/t-get_coeff_vars_ui.c b/src/nmod_mpoly/test/t-get_coeff_vars_ui.c index 10318c3eee..8a0a12fb8f 100644 --- a/src/nmod_mpoly/test/t-get_coeff_vars_ui.c +++ b/src/nmod_mpoly/test/t-get_coeff_vars_ui.c @@ -53,7 +53,7 @@ TEST_FUNCTION_START(nmod_mpoly_get_coeff_vars_ui, state) slong vars[1]; ulong exps[1]; slong var1; - mp_limb_t modulus; + ulong modulus; modulus = UWORD(2) + n_randint(state, -UWORD(2)); nvars = 1 + n_randint(state, 20); @@ -116,7 +116,7 @@ TEST_FUNCTION_START(nmod_mpoly_get_coeff_vars_ui, state) slong vars[2]; ulong exps[2]; slong var1, var2; - mp_limb_t modulus; + ulong modulus; modulus = UWORD(2) + n_randint(state, -UWORD(2)); nvars = 2 + n_randint(state, 20); diff --git a/src/nmod_mpoly/test/t-get_set_string_pretty.c b/src/nmod_mpoly/test/t-get_set_string_pretty.c index c7baaacbf3..2da4195343 100644 --- a/src/nmod_mpoly/test/t-get_set_string_pretty.c +++ b/src/nmod_mpoly/test/t-get_set_string_pretty.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(nmod_mpoly_get_set_string_pretty, state) { slong len1; flint_bitcnt_t exp_bits1; - mp_limb_t modulus; + ulong modulus; nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, f1; char * str; diff --git a/src/nmod_mpoly/test/t-get_set_term_coeff_ui.c b/src/nmod_mpoly/test/t-get_set_term_coeff_ui.c index 26bdc71551..3a3cdfe42b 100644 --- a/src/nmod_mpoly/test/t-get_set_term_coeff_ui.c +++ b/src/nmod_mpoly/test/t-get_set_term_coeff_ui.c @@ -23,8 +23,8 @@ TEST_FUNCTION_START(nmod_mpoly_get_set_term_coeff_ui, state) nmod_mpoly_t f; slong len, index; flint_bitcnt_t exp_bits; - mp_limb_t c, d; - mp_limb_t modulus; + ulong c, d; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); diff --git a/src/nmod_mpoly/test/t-get_set_term_exp_si.c b/src/nmod_mpoly/test/t-get_set_term_exp_si.c index 1416a5eed5..ae97f5b367 100644 --- a/src/nmod_mpoly/test/t-get_set_term_exp_si.c +++ b/src/nmod_mpoly/test/t-get_set_term_exp_si.c @@ -25,7 +25,7 @@ TEST_FUNCTION_START(nmod_mpoly_get_set_term_exp_si, state) nmod_mpoly_t f; slong nvars, len, index; flint_bitcnt_t exp_bits; - mp_limb_t modulus; + ulong modulus; modulus = UWORD(2) + n_randint(state, -UWORD(2)); nmod_mpoly_ctx_init_rand(ctx, state, 20, modulus); diff --git a/src/nmod_mpoly/test/t-get_set_term_exp_ui.c b/src/nmod_mpoly/test/t-get_set_term_exp_ui.c index ad7b13710e..ed6280465f 100644 --- a/src/nmod_mpoly/test/t-get_set_term_exp_ui.c +++ b/src/nmod_mpoly/test/t-get_set_term_exp_ui.c @@ -25,7 +25,7 @@ 
TEST_FUNCTION_START(nmod_mpoly_get_set_term_exp_ui, state) nmod_mpoly_t f; slong nvars, len, index; flint_bitcnt_t exp_bits; - mp_limb_t modulus; + ulong modulus; modulus = UWORD(2) + n_randint(state, -UWORD(2)); nmod_mpoly_ctx_init_rand(ctx, state, 20, modulus); diff --git a/src/nmod_mpoly/test/t-get_term.c b/src/nmod_mpoly/test/t-get_term.c index 2b73150dad..e83f17e839 100644 --- a/src/nmod_mpoly/test/t-get_term.c +++ b/src/nmod_mpoly/test/t-get_term.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_mpoly_get_term, state) nmod_mpoly_t f, g, h; flint_bitcnt_t exp_bits1, exp_bits2, exp_bits3; slong len1, len2, len3; - mp_limb_t modulus; + ulong modulus; modulus = UWORD(2) + n_randint(state, -UWORD(2)); nmod_mpoly_ctx_init_rand(ctx, state, 20, modulus); diff --git a/src/nmod_mpoly/test/t-get_term_monomial.c b/src/nmod_mpoly/test/t-get_term_monomial.c index a612128e5c..9be60c2494 100644 --- a/src/nmod_mpoly/test/t-get_term_monomial.c +++ b/src/nmod_mpoly/test/t-get_term_monomial.c @@ -19,12 +19,12 @@ TEST_FUNCTION_START(nmod_mpoly_get_term_monomial, state) /* Check getting a coeff by its monomial */ for (i = 0; i < 100 * flint_test_multiplier(); i++) { - mp_limb_t c, d; + ulong c, d; nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h; flint_bitcnt_t exp_bits1, exp_bits2, exp_bits3; slong len1, len2, len3; - mp_limb_t modulus; + ulong modulus; modulus = UWORD(2) + n_randint(state, -UWORD(2)); nmod_mpoly_ctx_init_rand(ctx, state, 20, modulus); diff --git a/src/nmod_mpoly/test/t-inflate_deflate.c b/src/nmod_mpoly/test/t-inflate_deflate.c index 073fc72123..920bf268bd 100644 --- a/src/nmod_mpoly/test/t-inflate_deflate.c +++ b/src/nmod_mpoly/test/t-inflate_deflate.c @@ -26,7 +26,7 @@ TEST_FUNCTION_START(nmod_mpoly_inflate_deflate, state) slong len1, len2, len3; flint_bitcnt_t exp_bits1, exp_bits2, exp_bits3; flint_bitcnt_t stride_bits, shift_bits; - mp_limb_t modulus; + ulong modulus; modulus = FLINT_MAX(UWORD(2), n_randlimb(state)); @@ -121,7 +121,7 @@ TEST_FUNCTION_START(nmod_mpoly_inflate_deflate, state) slong len1, len2, len3; flint_bitcnt_t exp_bits1, exp_bits2, exp_bits3; flint_bitcnt_t stride_bits, shift_bits; - mp_limb_t modulus; + ulong modulus; modulus = FLINT_MAX(UWORD(2), n_randlimb(state)); diff --git a/src/nmod_mpoly/test/t-mpolyn_divides_threaded_pool.c b/src/nmod_mpoly/test/t-mpolyn_divides_threaded_pool.c index 3e8dbd2e9d..1f78208ef8 100644 --- a/src/nmod_mpoly/test/t-mpolyn_divides_threaded_pool.c +++ b/src/nmod_mpoly/test/t-mpolyn_divides_threaded_pool.c @@ -142,7 +142,7 @@ TEST_FUNCTION_START(nmod_mpolyn_divides_threaded_pool, state) nmod_mpoly_t a, b; slong len1, len2; flint_bitcnt_t exp_bits; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -180,7 +180,7 @@ TEST_FUNCTION_START(nmod_mpolyn_divides_threaded_pool, state) nmod_mpoly_t a, b, c; slong len1, len2, len3; ulong exp_bound, exp_bound2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); diff --git a/src/nmod_mpoly/test/t-mpolyuu_divides.c b/src/nmod_mpoly/test/t-mpolyuu_divides.c index 39a6bccac4..81d8657b45 100644 --- a/src/nmod_mpoly/test/t-mpolyuu_divides.c +++ b/src/nmod_mpoly/test/t-mpolyuu_divides.c @@ -132,7 +132,7 @@ TEST_FUNCTION_START(nmod_mpoly_mpolyuu_divides, state) nmod_mpoly_t a, b; slong len1, len2; flint_bitcnt_t exp_bits; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 
4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -168,7 +168,7 @@ TEST_FUNCTION_START(nmod_mpoly_mpolyuu_divides, state) nmod_mpoly_t a, b, c; slong len1, len2, len3; flint_bitcnt_t exp_bits; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 10 == 0) ? 4: FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); diff --git a/src/nmod_mpoly/test/t-mul.c b/src/nmod_mpoly/test/t-mul.c index 58446fdddb..e27008f973 100644 --- a/src/nmod_mpoly/test/t-mul.c +++ b/src/nmod_mpoly/test/t-mul.c @@ -27,7 +27,7 @@ TEST_FUNCTION_START(nmod_mpoly_mul, state) nmod_mpoly_t f, g, h, k1, k2, t1, t2; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, SMALL_FMPZ_BITCOUNT_MAX) + 2; modulus = n_randbits(state, modulus); @@ -96,7 +96,7 @@ TEST_FUNCTION_START(nmod_mpoly_mul, state) nmod_mpoly_t f, g, h; slong len, len1, len2; flint_bitcnt_t exp_bound, exp_bound1, exp_bound2; - mp_limb_t modulus; + ulong modulus; slong n; modulus = n_randint(state, SMALL_FMPZ_BITCOUNT_MAX) + 2; @@ -160,7 +160,7 @@ TEST_FUNCTION_START(nmod_mpoly_mul, state) nmod_mpoly_t f, g, h; slong len, len1, len2; flint_bitcnt_t exp_bound, exp_bound1, exp_bound2; - mp_limb_t modulus; + ulong modulus; slong n; modulus = n_randint(state, SMALL_FMPZ_BITCOUNT_MAX) + 2; diff --git a/src/nmod_mpoly/test/t-mul_array.c b/src/nmod_mpoly/test/t-mul_array.c index 4d8916838e..8a566bb770 100644 --- a/src/nmod_mpoly/test/t-mul_array.c +++ b/src/nmod_mpoly/test/t-mul_array.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_mpoly_mul_array, state) nmod_mpoly_t f, g, h, k; slong len, len1, len2, exp_bound, exp_bound1, exp_bound2; slong n, max_bound; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, -UWORD(2)) + UWORD(2); nmod_mpoly_ctx_init_rand(ctx, state, 5, modulus); @@ -84,7 +84,7 @@ TEST_FUNCTION_START(nmod_mpoly_mul_array, state) nmod_mpoly_t f, g, h; slong len, len1, len2, exp_bound, exp_bound1, exp_bound2; slong n, max_bound; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, -UWORD(2)) + UWORD(2); nmod_mpoly_ctx_init_rand(ctx, state, 5, modulus); @@ -140,7 +140,7 @@ TEST_FUNCTION_START(nmod_mpoly_mul_array, state) nmod_mpoly_t f, g, h; slong len, len1, len2, exp_bound, exp_bound1, exp_bound2; slong n, max_bound; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, -UWORD(2)) + UWORD(2); nmod_mpoly_ctx_init_rand(ctx, state, 5, modulus); diff --git a/src/nmod_mpoly/test/t-mul_array_threaded.c b/src/nmod_mpoly/test/t-mul_array_threaded.c index beaf43131b..2199d452b1 100644 --- a/src/nmod_mpoly/test/t-mul_array_threaded.c +++ b/src/nmod_mpoly/test/t-mul_array_threaded.c @@ -27,7 +27,7 @@ TEST_FUNCTION_START(nmod_mpoly_mul_array_threaded, state) nmod_mpoly_t f, g, h, k; slong len, len1, len2, exp_bound, exp_bound1, exp_bound2; slong n, max_bound; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, SMALL_FMPZ_BITCOUNT_MAX) + 2; modulus = n_randbits(state, modulus); @@ -90,7 +90,7 @@ TEST_FUNCTION_START(nmod_mpoly_mul_array_threaded, state) nmod_mpoly_t f, g, h; slong len, len1, len2, exp_bound, exp_bound1, exp_bound2; slong n, max_bound; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, SMALL_FMPZ_BITCOUNT_MAX) + 2; modulus = n_randbits(state, modulus); @@ -148,7 +148,7 @@ TEST_FUNCTION_START(nmod_mpoly_mul_array_threaded, state) nmod_mpoly_t f, g, h; slong len, len1, len2, exp_bound, exp_bound1, exp_bound2; slong n, max_bound; - mp_limb_t modulus; + ulong modulus; modulus = 
n_randint(state, SMALL_FMPZ_BITCOUNT_MAX) + 2; modulus = n_randbits(state, modulus); diff --git a/src/nmod_mpoly/test/t-mul_dense.c b/src/nmod_mpoly/test/t-mul_dense.c index 69f7ed07a8..227d273cc8 100644 --- a/src/nmod_mpoly/test/t-mul_dense.c +++ b/src/nmod_mpoly/test/t-mul_dense.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_mpoly_mul_dense, state) nmod_mpoly_t f, g, h, k; slong len, len1, len2; slong max_bound, exp_bound, exp_bound1, exp_bound2; - mp_limb_t modulus; + ulong modulus; slong n; modulus = n_randint(state, FLINT_BITS - 1) + 1; @@ -85,7 +85,7 @@ TEST_FUNCTION_START(nmod_mpoly_mul_dense, state) nmod_mpoly_t f, g, h; slong len, len1, len2; slong max_bound, exp_bound, exp_bound1, exp_bound2; - mp_limb_t modulus; + ulong modulus; slong n; modulus = n_randint(state, FLINT_BITS - 1) + 1; @@ -144,7 +144,7 @@ TEST_FUNCTION_START(nmod_mpoly_mul_dense, state) nmod_mpoly_t f, g, h; slong len, len1, len2; slong max_bound, exp_bound, exp_bound1, exp_bound2; - mp_limb_t modulus; + ulong modulus; slong n; modulus = n_randint(state, FLINT_BITS - 1) + 1; diff --git a/src/nmod_mpoly/test/t-mul_heap_threaded.c b/src/nmod_mpoly/test/t-mul_heap_threaded.c index 2c7c52b60c..02a660ba7e 100644 --- a/src/nmod_mpoly/test/t-mul_heap_threaded.c +++ b/src/nmod_mpoly/test/t-mul_heap_threaded.c @@ -58,7 +58,7 @@ TEST_FUNCTION_START(nmod_mpoly_mul_heap_threaded, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; @@ -117,7 +117,7 @@ TEST_FUNCTION_START(nmod_mpoly_mul_heap_threaded, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; slong exp_bits, exp_bits1, exp_bits2; @@ -173,7 +173,7 @@ TEST_FUNCTION_START(nmod_mpoly_mul_heap_threaded, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; slong exp_bits, exp_bits1, exp_bits2; diff --git a/src/nmod_mpoly/test/t-mul_johnson.c b/src/nmod_mpoly/test/t-mul_johnson.c index e5ee131fa3..54009ba99e 100644 --- a/src/nmod_mpoly/test/t-mul_johnson.c +++ b/src/nmod_mpoly/test/t-mul_johnson.c @@ -24,7 +24,7 @@ TEST_FUNCTION_START(nmod_mpoly_mul_johnson, state) nmod_mpoly_t f, g, h, k1, k2, t1, t2; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -95,7 +95,7 @@ TEST_FUNCTION_START(nmod_mpoly_mul_johnson, state) nmod_mpoly_t f, g, h; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -149,7 +149,7 @@ TEST_FUNCTION_START(nmod_mpoly_mul_johnson, state) nmod_mpoly_t f, g, h; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); diff --git a/src/nmod_mpoly/test/t-pow_rmul.c b/src/nmod_mpoly/test/t-pow_rmul.c index a3acb06e9a..a44c7e039e 100644 --- a/src/nmod_mpoly/test/t-pow_rmul.c +++ b/src/nmod_mpoly/test/t-pow_rmul.c @@ -73,7 +73,7 @@ TEST_FUNCTION_START(nmod_mpoly_pow_rmul, state) ulong pow_bound; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randbits(state, n_randint(state, FLINT_BITS)); modulus = FLINT_MAX(UWORD(2), modulus); diff --git 
a/src/nmod_mpoly/test/t-pow_ui.c b/src/nmod_mpoly/test/t-pow_ui.c index c6e7140468..ad31584ab8 100644 --- a/src/nmod_mpoly/test/t-pow_ui.c +++ b/src/nmod_mpoly/test/t-pow_ui.c @@ -53,7 +53,7 @@ TEST_FUNCTION_START(nmod_mpoly_pow_ui, state) ulong pow_bound; slong len, len1, len2; flint_bitcnt_t exp_bits, exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randbits(state, n_randint(state, FLINT_BITS)); modulus = FLINT_MAX(UWORD(2), modulus); diff --git a/src/nmod_mpoly/test/t-push_term_ui_fmpz.c b/src/nmod_mpoly/test/t-push_term_ui_fmpz.c index 7a2bb8b302..2eb296d8e3 100644 --- a/src/nmod_mpoly/test/t-push_term_ui_fmpz.c +++ b/src/nmod_mpoly/test/t-push_term_ui_fmpz.c @@ -23,8 +23,8 @@ TEST_FUNCTION_START(nmod_mpoly_push_term_ui_fmpz, state) flint_bitcnt_t exp_bits; fmpz **exp, **exp2, *exp3; slong len, nvars; - mp_limb_t c, c2; - mp_limb_t modulus; + ulong c, c2; + ulong modulus; modulus = n_randtest_bits(state, n_randint(state, FLINT_BITS) + 1); nmod_mpoly_ctx_init_rand(ctx, state, 10, modulus); diff --git a/src/nmod_mpoly/test/t-push_term_ui_ui.c b/src/nmod_mpoly/test/t-push_term_ui_ui.c index 5a9690ea9b..c07dc16d4e 100644 --- a/src/nmod_mpoly/test/t-push_term_ui_ui.c +++ b/src/nmod_mpoly/test/t-push_term_ui_ui.c @@ -23,8 +23,8 @@ TEST_FUNCTION_START(nmod_mpoly_push_term_ui_ui, state) flint_bitcnt_t exp_bits; ulong * exp, * exp2; slong len, nvars; - mp_limb_t c, c2; - mp_limb_t modulus; + ulong c, c2; + ulong modulus; modulus = n_randtest_bits(state, n_randint(state, FLINT_BITS) + 1); nmod_mpoly_ctx_init_rand(ctx, state, 10, modulus); diff --git a/src/nmod_mpoly/test/t-quadratic_root.c b/src/nmod_mpoly/test/t-quadratic_root.c index 6cf11101ef..417616afd4 100644 --- a/src/nmod_mpoly/test/t-quadratic_root.c +++ b/src/nmod_mpoly/test/t-quadratic_root.c @@ -80,7 +80,7 @@ TEST_FUNCTION_START(nmod_mpoly_quadratic_root, state) nmod_mpoly_t f, a, b, x; slong len, len1; flint_bitcnt_t exp_bits, exp_bits1; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS) + 1; modulus = n_randbits(state, modulus); diff --git a/src/nmod_mpoly/test/t-resize.c b/src/nmod_mpoly/test/t-resize.c index 911a999054..5740e5db82 100644 --- a/src/nmod_mpoly/test/t-resize.c +++ b/src/nmod_mpoly/test/t-resize.c @@ -23,8 +23,8 @@ TEST_FUNCTION_START(nmod_mpoly_resize, state) flint_bitcnt_t exp_bits; ulong * exp, * exp2; slong len, nvars; - mp_limb_t c; - mp_limb_t modulus; + ulong c; + ulong modulus; modulus = n_randtest_bits(state, n_randint(state, FLINT_BITS) + 1); nmod_mpoly_ctx_init_rand(ctx, state, 10, modulus); diff --git a/src/nmod_mpoly/test/t-resultant_discriminant.c b/src/nmod_mpoly/test/t-resultant_discriminant.c index 8a8a41d60e..2fc24c4a8d 100644 --- a/src/nmod_mpoly/test/t-resultant_discriminant.c +++ b/src/nmod_mpoly/test/t-resultant_discriminant.c @@ -55,9 +55,9 @@ TEST_FUNCTION_START(nmod_mpoly_resultant_discriminant, state) nmod_mpoly_ctx_t ctx; nmod_mpoly_t a, b, r; nmod_poly_t au, bu; - mp_limb_t ru; + ulong ru; slong len1, len2, exp_bound1, exp_bound2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -105,7 +105,7 @@ TEST_FUNCTION_START(nmod_mpoly_resultant_discriminant, state) nmod_mpoly_ctx_t ctx; nmod_mpoly_t a, b, c, ab, ra, rb, rab, p; slong len1, len2, len3, exp_bound1, exp_bound2, exp_bound3; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -175,7 +175,7 @@ 
TEST_FUNCTION_START(nmod_mpoly_resultant_discriminant, state) nmod_mpoly_ctx_t ctx; nmod_mpoly_t a, b, ab, r, da, db, dab, p; slong len1, len2, exp_bound1, exp_bound2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); diff --git a/src/nmod_mpoly/test/t-scalar_addmul_ui.c b/src/nmod_mpoly/test/t-scalar_addmul_ui.c index 742ca0e191..42f43e79ce 100644 --- a/src/nmod_mpoly/test/t-scalar_addmul_ui.c +++ b/src/nmod_mpoly/test/t-scalar_addmul_ui.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_mpoly_scalar_addmul_ui, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, h, k; - mp_limb_t modulus; + ulong modulus; slong len, len1, len2; slong exp_bits, exp_bits1, exp_bits2; @@ -45,7 +45,7 @@ TEST_FUNCTION_START(nmod_mpoly_scalar_addmul_ui, state) for (j = 0; j < 10; j++) { - mp_limb_t c = n_randlimb(state); + ulong c = n_randlimb(state); nmod_mpoly_randtest_bits(f, state, len1, exp_bits1, ctx); nmod_mpoly_randtest_bits(g, state, len2, exp_bits2, ctx); nmod_mpoly_randtest_bits(h, state, len, exp_bits, ctx); diff --git a/src/nmod_mpoly/test/t-scalar_mul_ui.c b/src/nmod_mpoly/test/t-scalar_mul_ui.c index 98cc22f947..def9d6cf8d 100644 --- a/src/nmod_mpoly/test/t-scalar_mul_ui.c +++ b/src/nmod_mpoly/test/t-scalar_mul_ui.c @@ -25,7 +25,7 @@ TEST_FUNCTION_START(nmod_mpoly_scalar_mul_ui, state) ulong a, b, c; slong len1, len2, len3, len4; flint_bitcnt_t exp_bits1, exp_bits2, exp_bits3, exp_bits4; - mp_limb_t modulus; + ulong modulus; modulus = n_randbits(state, 1 + n_randint(state, FLINT_BITS)); modulus = FLINT_MAX(UWORD(2), modulus); @@ -108,7 +108,7 @@ TEST_FUNCTION_START(nmod_mpoly_scalar_mul_ui, state) ulong a, b; slong len1, len2, len3, len4; flint_bitcnt_t exp_bits1, exp_bits2, exp_bits3, exp_bits4; - mp_limb_t modulus; + ulong modulus; modulus = n_randbits(state, 1 + n_randint(state, FLINT_BITS)); modulus = FLINT_MAX(UWORD(2), modulus); diff --git a/src/nmod_mpoly/test/t-sqrt.c b/src/nmod_mpoly/test/t-sqrt.c index 9404eca729..421c5c8cb8 100644 --- a/src/nmod_mpoly/test/t-sqrt.c +++ b/src/nmod_mpoly/test/t-sqrt.c @@ -24,7 +24,7 @@ TEST_FUNCTION_START(nmod_mpoly_sqrt, state) slong len, len1; flint_bitcnt_t exp_bits, exp_bits1; int sqr; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 4 == 0) ? 4 : FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -85,7 +85,7 @@ TEST_FUNCTION_START(nmod_mpoly_sqrt, state) slong len, len1; flint_bitcnt_t exp_bits, exp_bits1; int sqr; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 4 == 0) ? 4 : FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -143,7 +143,7 @@ TEST_FUNCTION_START(nmod_mpoly_sqrt, state) slong len, len1; flint_bitcnt_t exp_bits, exp_bits1; int sqr1, sqr2; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, (i % 4 == 0) ? 
4 : FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); diff --git a/src/nmod_mpoly/test/t-total_degree.c b/src/nmod_mpoly/test/t-total_degree.c index 8d93c7647a..df399af9dc 100644 --- a/src/nmod_mpoly/test/t-total_degree.c +++ b/src/nmod_mpoly/test/t-total_degree.c @@ -36,7 +36,7 @@ TEST_FUNCTION_START(nmod_mpoly_total_degree, state) fmpz_t fdeg, gdeg, hdeg; slong len1, len2; flint_bitcnt_t exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randbits(state, n_randint(state, FLINT_BITS)); modulus = FLINT_MAX(UWORD(2), modulus); @@ -96,7 +96,7 @@ TEST_FUNCTION_START(nmod_mpoly_total_degree, state) fmpz_t fdeg, gdeg, hdeg; slong len1, len2; flint_bitcnt_t exp_bits1, exp_bits2; - mp_limb_t modulus; + ulong modulus; modulus = n_randbits(state, n_randint(state, FLINT_BITS)); modulus = FLINT_MAX(UWORD(2), modulus); diff --git a/src/nmod_mpoly/test/t-univar.c b/src/nmod_mpoly/test/t-univar.c index 2c87deb8f0..4de934c271 100644 --- a/src/nmod_mpoly/test/t-univar.c +++ b/src/nmod_mpoly/test/t-univar.c @@ -24,7 +24,7 @@ TEST_FUNCTION_START(nmod_mpoly_univar, state) nmod_mpoly_univar_t fx, gx; slong len1, len2, n; flint_bitcnt_t exp_bits1, exp_bits2, bits; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, SMALL_FMPZ_BITCOUNT_MAX) + 2; modulus = n_randbits(state, modulus); diff --git a/src/nmod_mpoly/test/t-univar_resultant.c b/src/nmod_mpoly/test/t-univar_resultant.c index e2a10a3edb..3f426d3062 100644 --- a/src/nmod_mpoly/test/t-univar_resultant.c +++ b/src/nmod_mpoly/test/t-univar_resultant.c @@ -23,7 +23,7 @@ void test_resultant( nmod_mpoly_univar_t F, G; nmod_poly_t f, g; nmod_mpoly_t R; - mp_limb_t r; + ulong r; nmod_mpoly_univar_init(F, ctx); nmod_mpoly_univar_init(G, ctx); @@ -136,7 +136,7 @@ TEST_FUNCTION_START(nmod_mpoly_univar_resultant, state) { nmod_mpoly_ctx_t ctx; nmod_mpoly_t f, g, t; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); diff --git a/src/nmod_mpoly/to_from_nmod_poly.c b/src/nmod_mpoly/to_from_nmod_poly.c index cf3fa119fc..e082f52ff4 100644 --- a/src/nmod_mpoly/to_from_nmod_poly.c +++ b/src/nmod_mpoly/to_from_nmod_poly.c @@ -28,7 +28,7 @@ void _nmod_mpoly_to_nmod_poly_deflate( ulong mask; slong i, shift, off, N; slong len = B->length; - mp_limb_t * coeff = B->coeffs; + ulong * coeff = B->coeffs; ulong * exp = B->exps; ulong var_shift, var_stride; flint_bitcnt_t bits = B->bits; @@ -87,7 +87,7 @@ void _nmod_mpoly_from_nmod_poly_inflate( slong N; slong k; slong Alen; - mp_limb_t * Acoeff; + ulong * Acoeff; ulong * Aexp; ulong * shiftexp; ulong * strideexp; diff --git a/src/nmod_mpoly/univar.c b/src/nmod_mpoly/univar.c index e48609f1c3..8b9ed2f51f 100644 --- a/src/nmod_mpoly/univar.c +++ b/src/nmod_mpoly/univar.c @@ -229,7 +229,7 @@ void nmod_mpoly_to_univar(nmod_mpoly_univar_t A, const nmod_mpoly_t B, slong N = mpoly_words_per_exp(bits, ctx->minfo); slong shift, off; slong Blen = B->length; - const mp_limb_t * Bcoeff = B->coeffs; + const ulong * Bcoeff = B->coeffs; const ulong * Bexp = B->exps; slong i; int its_new; diff --git a/src/nmod_mpoly_factor.h b/src/nmod_mpoly_factor.h index 06cae03d6d..bba3257aa6 100644 --- a/src/nmod_mpoly_factor.h +++ b/src/nmod_mpoly_factor.h @@ -188,7 +188,7 @@ void _nmod_mpoly_set_lead0( slong _n_poly_vec_max_degree(const n_poly_struct * A, slong Alen); void _n_poly_vec_mul_nmod_intertible(n_poly_struct * A, - slong Alen, mp_limb_t c, nmod_t ctx); + slong Alen, ulong c, nmod_t ctx); void _n_poly_vec_mod_mul_poly(n_poly_struct * A, 
slong Alen, const n_poly_t g, const nmod_t ctx); @@ -311,7 +311,7 @@ int nmod_mpoly_factor_irred_lgprime_zippel(nmod_mpolyv_t Af, /*****************************************************************************/ void nmod_mpoly_compression_do(nmod_mpoly_t L, - const nmod_mpoly_ctx_t Lctx, mp_limb_t * Acoeffs, slong Alen, + const nmod_mpoly_ctx_t Lctx, ulong * Acoeffs, slong Alen, mpoly_compression_t M); void nmod_mpoly_compression_undo(nmod_mpoly_t A, @@ -357,7 +357,7 @@ typedef nmod_mpoly_pfrac_struct nmod_mpoly_pfrac_t[1]; int nmod_mpoly_pfrac_init(nmod_mpoly_pfrac_t I, flint_bitcnt_t bits, slong l, slong r, const nmod_mpoly_struct * betas, - const mp_limb_t * alpha, const nmod_mpoly_ctx_t ctx); + const ulong * alpha, const nmod_mpoly_ctx_t ctx); void nmod_mpoly_pfrac_clear(nmod_mpoly_pfrac_t I, const nmod_mpoly_ctx_t ctx); @@ -366,33 +366,33 @@ int nmod_mpoly_pfrac(slong r, nmod_mpoly_t t, const slong * deg, nmod_mpoly_pfrac_t I, const nmod_mpoly_ctx_t ctx); int nmod_mpoly_hlift(slong m, nmod_mpoly_struct * f, slong r, - const mp_limb_t * alpha, const nmod_mpoly_t A, const slong * degs, + const ulong * alpha, const nmod_mpoly_t A, const slong * degs, const nmod_mpoly_ctx_t ctx); int n_bpoly_mod_pfrac(slong r, n_bpoly_struct * C, slong * C_deg1_bound, n_bpoly_t A, n_bpoly_struct * B, nmod_t mod); int n_bpoly_mod_hlift2(n_bpoly_t A, n_bpoly_t B0, n_bpoly_t B1, - mp_limb_t alpha, slong degree_inner, nmod_t mod, + ulong alpha, slong degree_inner, nmod_t mod, n_poly_bpoly_stack_t St); int n_bpoly_mod_hlift2_cubic(n_bpoly_t A, n_bpoly_t B0, n_bpoly_t B1, - mp_limb_t alpha, slong degree_inner, nmod_t ctx, + ulong alpha, slong degree_inner, nmod_t ctx, nmod_eval_interp_t E, n_poly_bpoly_stack_t St); int n_bpoly_mod_hlift(slong r, n_bpoly_t A, n_bpoly_struct * B, - mp_limb_t alpha, slong degree_inner, nmod_t mod, + ulong alpha, slong degree_inner, nmod_t mod, n_poly_bpoly_stack_t St); int n_bpoly_mod_hlift_cubic(slong r, n_bpoly_t A, n_bpoly_struct * B, - mp_limb_t alpha, slong degree_inner, nmod_t mod, + ulong alpha, slong degree_inner, nmod_t mod, nmod_eval_interp_t E, n_poly_bpoly_stack_t St); int n_polyu3_mod_hlift(slong r, n_polyun_struct * BB, n_polyu_t A, - n_polyu_struct * B, mp_limb_t beta, slong degree_inner, nmod_t ctx); + n_polyu_struct * B, ulong beta, slong degree_inner, nmod_t ctx); int nmod_mpoly_hlift_zippel(slong m, nmod_mpoly_struct * B, slong r, - const mp_limb_t * alpha, const nmod_mpoly_t A, const slong * degs, + const ulong * alpha, const nmod_mpoly_t A, const slong * degs, const nmod_mpoly_ctx_t ctx, flint_rand_t state); int nmod_mpoly_factor_algo(nmod_mpoly_factor_t f, @@ -409,7 +409,7 @@ int nmod_mpoly_factor_zippel(nmod_mpoly_factor_t f, int _nmod_mpoly_evaluate_rest_n_poly(n_poly_struct * E, slong * starts, slong * ends, slong * stops, ulong * es, - const mp_limb_t * Acoeffs, const ulong * Aexps, slong Alen, slong var, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, slong var, const n_poly_struct * alphas, const slong * offsets, const slong * shifts, slong N, ulong mask, slong nvars, nmod_t ctx); diff --git a/src/nmod_mpoly_factor/compression.c b/src/nmod_mpoly_factor/compression.c index 639b9c1fe7..5c1369430c 100644 --- a/src/nmod_mpoly_factor/compression.c +++ b/src/nmod_mpoly_factor/compression.c @@ -15,7 +15,7 @@ void nmod_mpoly_compression_do( nmod_mpoly_t L, const nmod_mpoly_ctx_t Lctx, - mp_limb_t * Acoeffs, + ulong * Acoeffs, slong Alen, mpoly_compression_t M) { diff --git a/src/nmod_mpoly_factor/eval.c b/src/nmod_mpoly_factor/eval.c index 
b5e10a155b..e242a8a476 100644 --- a/src/nmod_mpoly_factor/eval.c +++ b/src/nmod_mpoly_factor/eval.c @@ -23,7 +23,7 @@ int _nmod_mpoly_evaluate_rest_n_poly( slong * ends, slong * stops, ulong * es, - const mp_limb_t * Acoeffs, + const ulong * Acoeffs, const ulong * Aexps, slong Alen, slong var, @@ -229,7 +229,7 @@ void _nmod_mpoly_set_n_bpoly_var1_zero( Alen = 0; for (i = Blen - 1; i >= 0; i--) { - mp_limb_t c = n_poly_get_coeff(B->coeffs + i, 0); + ulong c = n_poly_get_coeff(B->coeffs + i, 0); if (c == 0) continue; diff --git a/src/nmod_mpoly_factor/gcd_zippel.c b/src/nmod_mpoly_factor/gcd_zippel.c index e2cc597cb1..28369dce8a 100644 --- a/src/nmod_mpoly_factor/gcd_zippel.c +++ b/src/nmod_mpoly_factor/gcd_zippel.c @@ -65,7 +65,7 @@ static void n_polyu1n_mod_zip_eval_cur_inc_coeff( const nmod_t ctx) { slong i; - mp_limb_t c; + ulong c; FLINT_ASSERT(Acur->length > 0); FLINT_ASSERT(Acur->length == Ainc->length); @@ -93,7 +93,7 @@ static int n_poly_add_zip_must_match( slong Alen = A->length; ulong * Zexps = Z->exps; n_poly_struct * Zcoeffs = Z->coeffs; - mp_limb_t * Acoeffs = A->coeffs; + ulong * Acoeffs = A->coeffs; ai = Alen - 1; @@ -125,12 +125,12 @@ static int n_poly_add_zip_must_match( } -static mp_limb_t * nmod_mat_row_ref(nmod_mat_t M, slong i) +static ulong * nmod_mat_row_ref(nmod_mat_t M, slong i) { return M->rows[i]; } -static void _nmod_vec_mul(mp_limb_t * a, mp_limb_t * b, mp_limb_t * c, +static void _nmod_vec_mul(ulong * a, ulong * b, ulong * c, slong n, nmod_t ctx) { for (n--; n >= 0; n--) @@ -176,7 +176,7 @@ int nmod_mpolyl_gcds_zippel( n_polyun_t Aeh_inc, Aeh_cur, Aeh_coeff_mock; n_polyun_t Beh_inc, Beh_cur, Beh_coeff_mock; n_polyun_t HG, MG, ZG; - mp_limb_t * betas; + ulong * betas; n_poly_struct * beta_caches; nmod_mat_struct * ML; nmod_mat_t MF, Msol, MFtemp, Mwindow; @@ -201,7 +201,7 @@ int nmod_mpolyl_gcds_zippel( return Gmarks[1] - Gmarks[0] == 1; } - betas = FLINT_ARRAY_ALLOC(var, mp_limb_t); + betas = FLINT_ARRAY_ALLOC(var, ulong); beta_caches = FLINT_ARRAY_ALLOC(3*var, n_poly_struct); for (i = 0; i < var; i++) { @@ -325,7 +325,7 @@ int nmod_mpolyl_gcds_zippel( if (Gmarks[s + 1] - Gmarks[s] == 1) { /* monic case */ - mp_limb_t temp = 1; + ulong temp = 1; for (i = 0; i < l; i++) { @@ -592,7 +592,7 @@ int nmod_mpolyl_gcdp_zippel_smprime( slong Adeg, Bdeg, Alastdeg, Blastdeg, Gdeg; slong bound, Gdegbound, lastdeg, req_zip_images; int success, changed, have_enough; - mp_limb_t alpha, start_alpha, gammaeval, temp; + ulong alpha, start_alpha, gammaeval, temp; n_poly_t a, b, c, gamma, modulus, alphapow; nmod_mpoly_t Ac, Bc, Aeval, Beval, Geval, Abareval, Bbareval; nmod_mpolyn_t H, T; diff --git a/src/nmod_mpoly_factor/irred_smprime_wang.c b/src/nmod_mpoly_factor/irred_smprime_wang.c index 2029facd42..41c8b2795e 100644 --- a/src/nmod_mpoly_factor/irred_smprime_wang.c +++ b/src/nmod_mpoly_factor/irred_smprime_wang.c @@ -30,7 +30,7 @@ int nmod_mpoly_factor_irred_smprime_wang( int alphas_tries_remaining, alphabetas_tries_remaining, alphabetas_length; const slong n = ctx->minfo->nvars - 1; slong i, j, k, r; - mp_limb_t * alpha; + ulong * alpha; n_poly_struct * alphabetas; nmod_mpoly_struct * Aevals; slong * degs, * degeval; @@ -61,7 +61,7 @@ int nmod_mpoly_factor_irred_smprime_wang( degs = (slong *) flint_malloc((n + 1)*sizeof(slong)); degeval = (slong *) flint_malloc((n + 1)*sizeof(slong)); - alpha = (mp_limb_t *) flint_malloc(n*sizeof(mp_limb_t)); + alpha = (ulong *) flint_malloc(n*sizeof(ulong)); alphabetas = (n_poly_struct *) flint_malloc(n*sizeof(n_poly_struct)); Aevals = 
(nmod_mpoly_struct *) flint_malloc(n*sizeof(nmod_mpoly_struct)); for (i = 0; i < n; i++) @@ -223,7 +223,7 @@ int nmod_mpoly_factor_irred_smprime_wang( fac->length = r; for (i = 0; i < r; i++) { - mp_limb_t q; + ulong q; FLINT_ASSERT(nmod_mpoly_is_ui(new_lcs->coeffs + 0*r + i, ctx)); FLINT_ASSERT(nmod_mpoly_length(new_lcs->coeffs + 0*r + i, ctx) == 1); _nmod_mpoly_set_n_bpoly_var1_zero(fac->coeffs + i, newA->bits, diff --git a/src/nmod_mpoly_factor/irred_smprime_zassenhaus.c b/src/nmod_mpoly_factor/irred_smprime_zassenhaus.c index 06e67e4ef7..23a1014c73 100644 --- a/src/nmod_mpoly_factor/irred_smprime_zassenhaus.c +++ b/src/nmod_mpoly_factor/irred_smprime_zassenhaus.c @@ -26,7 +26,7 @@ static int _try_lift( const nmod_mpolyv_t pfac, const nmod_mpoly_t FLINT_UNUSED(p), slong m, - mp_limb_t * alpha, + ulong * alpha, slong n, const nmod_mpoly_ctx_t ctx) { @@ -135,7 +135,7 @@ int nmod_mpoly_factor_irred_smprime_zassenhaus( const slong n = ctx->minfo->nvars - 1; slong i, j, k, m, len; slong * subset; - mp_limb_t * alpha; + ulong * alpha; nmod_mpoly_struct * Aevals; slong * deg, * degeval; nmod_mpolyv_t qfac, pfac, tfac, dfac; @@ -150,7 +150,7 @@ int nmod_mpoly_factor_irred_smprime_zassenhaus( FLINT_ASSERT(A->bits <= FLINT_BITS); subset = (slong*) flint_malloc(4*sizeof(slong)); - alpha = (mp_limb_t *) flint_malloc(n*sizeof(mp_limb_t)); + alpha = (ulong *) flint_malloc(n*sizeof(ulong)); Aevals = (nmod_mpoly_struct *) flint_malloc(n*sizeof(nmod_mpoly_struct)); deg = (slong *) flint_malloc((n + 1)*sizeof(slong)); degeval = (slong *) flint_malloc((n + 1)*sizeof(slong)); diff --git a/src/nmod_mpoly_factor/irred_smprime_zippel.c b/src/nmod_mpoly_factor/irred_smprime_zippel.c index d4169d7c25..a384e4ddda 100644 --- a/src/nmod_mpoly_factor/irred_smprime_zippel.c +++ b/src/nmod_mpoly_factor/irred_smprime_zippel.c @@ -30,7 +30,7 @@ int nmod_mpoly_factor_irred_smprime_zippel( int alphas_tries_remaining, alphabetas_tries_remaining, alphabetas_length; const slong n = ctx->minfo->nvars - 1; slong i, j, k, r; - mp_limb_t * alpha; + ulong * alpha; n_poly_struct * alphabetas; nmod_mpoly_struct * Aevals; slong * degs, * degeval; @@ -64,7 +64,7 @@ int nmod_mpoly_factor_irred_smprime_zippel( degs = (slong *) flint_malloc((n + 1)*sizeof(slong)); degeval = (slong *) flint_malloc((n + 1)*sizeof(slong)); - alpha = (mp_limb_t *) flint_malloc(n*sizeof(mp_limb_t)); + alpha = (ulong *) flint_malloc(n*sizeof(ulong)); alphabetas = (n_poly_struct *) flint_malloc(n*sizeof(n_poly_struct)); Aevals = (nmod_mpoly_struct *) flint_malloc(n*sizeof(nmod_mpoly_struct)); for (i = 0; i < n; i++) @@ -245,7 +245,7 @@ int nmod_mpoly_factor_irred_smprime_zippel( fac->length = r; for (i = 0; i < r; i++) { - mp_limb_t q; + ulong q; FLINT_ASSERT(nmod_mpoly_is_ui(new_lcs->coeffs + 0*r + i, ctx)); FLINT_ASSERT(nmod_mpoly_length(new_lcs->coeffs + 0*r + i, ctx) == 1); _nmod_mpoly_set_n_bpoly_var1_zero(fac->coeffs + i, newA->bits, diff --git a/src/nmod_mpoly_factor/mpoly_hlift.c b/src/nmod_mpoly_factor/mpoly_hlift.c index c5dcaaa4b6..e196d72a43 100644 --- a/src/nmod_mpoly_factor/mpoly_hlift.c +++ b/src/nmod_mpoly_factor/mpoly_hlift.c @@ -15,7 +15,7 @@ static int _hlift_quartic2( slong m, nmod_mpoly_struct * f, slong r, - const mp_limb_t * alpha, + const ulong * alpha, const nmod_mpoly_t A, const slong * degs, const nmod_mpoly_ctx_t ctx) @@ -139,7 +139,7 @@ static int _hlift_quartic( slong m, nmod_mpoly_struct * f, slong r, - const mp_limb_t * alpha, + const ulong * alpha, const nmod_mpoly_t A, const slong * degs, const nmod_mpoly_ctx_t ctx) @@ -319,7 
+319,7 @@ static int _hlift_quintic( slong m, nmod_mpoly_struct * f, slong r, - const mp_limb_t * alpha, + const ulong * alpha, const nmod_mpoly_t A, const slong * degs, const nmod_mpoly_ctx_t ctx) @@ -424,7 +424,7 @@ int nmod_mpoly_hlift( slong m, nmod_mpoly_struct * f, /* length r */ slong r, - const mp_limb_t * alpha, + const ulong * alpha, const nmod_mpoly_t A, const slong * degs, const nmod_mpoly_ctx_t ctx) diff --git a/src/nmod_mpoly_factor/mpoly_hlift_zippel.c b/src/nmod_mpoly_factor/mpoly_hlift_zippel.c index 0c3e87d727..b9b79b7c8b 100644 --- a/src/nmod_mpoly_factor/mpoly_hlift_zippel.c +++ b/src/nmod_mpoly_factor/mpoly_hlift_zippel.c @@ -53,7 +53,7 @@ static int nmod_mpoly_from_zip( slong zvar = 1; ulong x, y, z; flint_bitcnt_t bits = B->bits; - mp_limb_t * Bcoeffs; + ulong * Bcoeffs; ulong * Bexps; slong N = mpoly_words_per_exp_sp(bits, ctx->minfo); ulong mask = (-UWORD(1)) >> (FLINT_BITS - bits); @@ -172,11 +172,11 @@ static void nmod_mpoly_set_eval_helper3( ulong y, x, z; slong yoff, xoff, zoff, * off; slong yshift, xshift, zshift, * shift; - mp_limb_t * p; + ulong * p; flint_bitcnt_t bits = A->bits; slong Alen = A->length; const ulong * Aexps = A->exps; - const mp_limb_t * Acoeffs = A->coeffs; + const ulong * Acoeffs = A->coeffs; slong N = mpoly_words_per_exp(bits, ctx->minfo); ulong mask = (-UWORD(1)) >> (FLINT_BITS - bits); ulong * ind; @@ -245,7 +245,7 @@ static void nmod_mpoly_set_eval_helper3( for (j = 0; j < n; j++) { slong Ai = ind[j]; - mp_limb_t meval = 1; + ulong meval = 1; for (k = 2; k < yvar; k++) { @@ -289,13 +289,13 @@ static slong nmod_mpoly_set_eval_helper_and_zip_form3( slong i, j, k, n; slong * off, * shift; ulong y, x, z; - mp_limb_t * p; + ulong * p; nmod_mpoly_struct * Hc; slong old_len, zip_length = 0; flint_bitcnt_t bits = B->bits; slong Blen = B->length; const ulong * Bexps = B->exps; - const mp_limb_t * Bcoeffs = B->coeffs; + const ulong * Bcoeffs = B->coeffs; slong N = mpoly_words_per_exp(bits, ctx->minfo); ulong mask = (-UWORD(1)) >> (FLINT_BITS - bits); ulong * ind; @@ -383,7 +383,7 @@ static slong nmod_mpoly_set_eval_helper_and_zip_form3( for (j = 0; j < n; j++) { slong Bi = ind[j]; - mp_limb_t meval = 1; + ulong meval = 1; for (k = 2; k < yvar; k++) { @@ -436,7 +436,7 @@ static slong nmod_mpoly_set_eval_helper_and_zip_form3( static void n_polyu_mod_eval_step(n_polyu_t E, n_polyun_t A, nmod_t ctx) { slong Ai, Ei, n; - mp_limb_t * p; + ulong * p; n_polyu_fit_length(E, A->length); @@ -587,7 +587,7 @@ int nmod_mpoly_hlift_zippel( slong m, nmod_mpoly_struct * B, slong r, - const mp_limb_t * alpha, + const ulong * alpha, const nmod_mpoly_t A, const slong * degs, const nmod_mpoly_ctx_t ctx, @@ -599,7 +599,7 @@ int nmod_mpoly_hlift_zippel( nmod_mpolyu_struct * H; n_polyun_struct M[1], Aeh[1], * Beh, * BBeval, * Z; n_polyu_struct Aeval[1], * Beval; - mp_limb_t * beta; + ulong * beta; n_poly_struct * caches; nmod_mpoly_t T1, T2; ulong * Bdegs; @@ -638,7 +638,7 @@ int nmod_mpoly_hlift_zippel( } #endif - beta = FLINT_ARRAY_ALLOC(ctx->minfo->nvars,mp_limb_t); + beta = FLINT_ARRAY_ALLOC(ctx->minfo->nvars,ulong); /* caches for powers of the betas */ caches = FLINT_ARRAY_ALLOC(3*ctx->minfo->nvars, n_poly_struct); diff --git a/src/nmod_mpoly_factor/mpoly_pfrac.c b/src/nmod_mpoly_factor/mpoly_pfrac.c index 03b26103fa..66e37d42f1 100644 --- a/src/nmod_mpoly_factor/mpoly_pfrac.c +++ b/src/nmod_mpoly_factor/mpoly_pfrac.c @@ -18,7 +18,7 @@ int nmod_mpoly_pfrac_init( slong r, slong w, const nmod_mpoly_struct * betas, - const mp_limb_t * alpha, + const ulong * alpha, 
const nmod_mpoly_ctx_t ctx) { int success = 1; diff --git a/src/nmod_mpoly_factor/n_bpoly_mod.c b/src/nmod_mpoly_factor/n_bpoly_mod.c index 9088295c12..ca3dfc8a55 100644 --- a/src/nmod_mpoly_factor/n_bpoly_mod.c +++ b/src/nmod_mpoly_factor/n_bpoly_mod.c @@ -56,7 +56,7 @@ void nmod_mpoly_set_bpoly( slong i, j; slong NA; slong Alen; - mp_limb_t * Acoeff; + ulong * Acoeff; ulong * Aexp; ulong * Aexps; TMP_INIT; @@ -103,7 +103,7 @@ void nmod_mpoly_set_bpoly( } void n_bpoly_mod_taylor_shift_gen1(n_bpoly_t A, const n_bpoly_t B, - mp_limb_t c, nmod_t ctx) + ulong c, nmod_t ctx) { slong i; @@ -195,7 +195,7 @@ void n_bpoly_mod_add( void n_bpoly_mod_make_primitive(n_poly_t g, n_bpoly_t A, nmod_t ctx) { - mp_limb_t c = 1; + ulong c = 1; slong Alen = A->length; slong i; n_poly_t q, r; @@ -529,7 +529,7 @@ int n_bpoly_mod_divides( { for (j = order - 1; j >= 0; j--) { - mp_limb_t qc = n_poly_get_coeff(q, order*i + j); + ulong qc = n_poly_get_coeff(q, order*i + j); if (qc == 0) continue; @@ -606,12 +606,12 @@ int n_bpoly_mod_divides( return divides; } -void n_bpoly_mod_taylor_shift_gen0(n_bpoly_t A, mp_limb_t alpha, nmod_t ctx) +void n_bpoly_mod_taylor_shift_gen0(n_bpoly_t A, ulong alpha, nmod_t ctx) { slong i, j; slong n = A->length; n_poly_struct * Acoeffs = A->coeffs; - mp_limb_t c; + ulong c; FLINT_ASSERT(alpha < ctx.n); diff --git a/src/nmod_mpoly_factor/n_bpoly_mod_factor_lgprime.c b/src/nmod_mpoly_factor/n_bpoly_mod_factor_lgprime.c index 23f4cb42d4..9fdf3dc349 100644 --- a/src/nmod_mpoly_factor/n_bpoly_mod_factor_lgprime.c +++ b/src/nmod_mpoly_factor/n_bpoly_mod_factor_lgprime.c @@ -642,11 +642,11 @@ static void _lattice( n_bpoly_struct * ld; nmod_mat_t M, T1, T2; int nlimbs; - mp_limb_t * trow; + ulong * trow; slong lift_order = lift_alpha_pow->length - 1; nlimbs = _nmod_vec_dot_bound_limbs(r, ctx); - trow = (mp_limb_t *) flint_malloc(r*sizeof(mp_limb_t)); + trow = (ulong *) flint_malloc(r*sizeof(ulong)); n_bpoly_init(Q); n_bpoly_init(R); n_bpoly_init(dg); diff --git a/src/nmod_mpoly_factor/n_bpoly_mod_factor_smprime.c b/src/nmod_mpoly_factor/n_bpoly_mod_factor_smprime.c index 03c12fc801..01b887fea6 100644 --- a/src/nmod_mpoly_factor/n_bpoly_mod_factor_smprime.c +++ b/src/nmod_mpoly_factor/n_bpoly_mod_factor_smprime.c @@ -60,7 +60,7 @@ static void n_bpoly_mod_make_monic_series( static void _n_bpoly_set_poly_gen0( n_bpoly_t A, - const mp_limb_t * Bcoeffs, slong Blength) + const ulong * Bcoeffs, slong Blength) { slong i; n_bpoly_fit_length(A, Blength); @@ -73,7 +73,7 @@ static void _n_bpoly_set_poly_gen0( static void n_bpoly_mod_eval( nmod_poly_t E, const n_bpoly_t A, - mp_limb_t alpha, + ulong alpha, nmod_t ctx) { slong i; @@ -1091,10 +1091,10 @@ static void _lattice( n_bpoly_struct * ld; nmod_mat_t M, T1, T2; int nlimbs; - mp_limb_t * trow; + ulong * trow; nlimbs = _nmod_vec_dot_bound_limbs(r, ctx); - trow = FLINT_ARRAY_ALLOC(r, mp_limb_t); + trow = FLINT_ARRAY_ALLOC(r, ulong); n_bpoly_init(Q); n_bpoly_init(R); n_bpoly_init(dg); @@ -1171,7 +1171,7 @@ static int _zassenhaus( const zassenhaus_prune_t zas, slong limit, n_tpoly_t F, - mp_limb_t malpha, + ulong malpha, const nmod_mat_t N, n_bpoly_struct * const * g, slong r, @@ -1348,7 +1348,7 @@ int n_bpoly_mod_factor_smprime( slong final_order, lift_order, lattice_order; slong * CLD; nmod_poly_t Aeval; - mp_limb_t alpha_best, alpha_tmp; + ulong alpha_best, alpha_tmp; nmod_poly_factor_t local_fac_best, local_fac_tmp; int local_fac_tries = 0; n_bpoly_t monicA; diff --git a/src/nmod_mpoly_factor/n_bpoly_mod_hlift.c 
b/src/nmod_mpoly_factor/n_bpoly_mod_hlift.c index 2b87672016..50c5c12621 100644 --- a/src/nmod_mpoly_factor/n_bpoly_mod_hlift.c +++ b/src/nmod_mpoly_factor/n_bpoly_mod_hlift.c @@ -17,7 +17,7 @@ int n_bpoly_mod_hlift2_cubic( n_bpoly_t A, /* clobbered (shifted by alpha) */ n_bpoly_t B0, n_bpoly_t B1, - mp_limb_t alpha, + ulong alpha, slong degree_inner, /* required degree in x */ nmod_t ctx, nmod_eval_interp_t E, @@ -201,7 +201,7 @@ int n_bpoly_mod_hlift2( n_bpoly_t A, /* clobbered (shifted by alpha) */ n_bpoly_t B0, n_bpoly_t B1, - mp_limb_t alpha, + ulong alpha, slong degree_inner, /* required degree in x */ nmod_t ctx, n_poly_bpoly_stack_t St) @@ -332,7 +332,7 @@ int n_bpoly_mod_hlift_cubic( slong r, n_bpoly_t A, /* clobbered (shifted by alpha) */ n_bpoly_struct * B, - mp_limb_t alpha, + ulong alpha, slong degree_inner, /* required degree in x */ nmod_t ctx, nmod_eval_interp_t E, @@ -575,7 +575,7 @@ int n_bpoly_mod_hlift( slong r, n_bpoly_t A, /* clobbered (shifted by alpha) */ n_bpoly_struct * B, - mp_limb_t alpha, + ulong alpha, slong degree_inner, /* required degree in x */ nmod_t ctx, n_poly_bpoly_stack_t St) diff --git a/src/nmod_mpoly_factor/n_bpoly_mod_pfrac.c b/src/nmod_mpoly_factor/n_bpoly_mod_pfrac.c index 2965ac65e4..f48691fc6e 100644 --- a/src/nmod_mpoly_factor/n_bpoly_mod_pfrac.c +++ b/src/nmod_mpoly_factor/n_bpoly_mod_pfrac.c @@ -23,7 +23,7 @@ int n_bpoly_mod_pfrac2( int success; slong A_deg1, B1_deg1, B2_deg1, C1_deg1, C2_deg1; slong bad_prime_count, bound; - mp_limb_t alpha, c; + ulong alpha, c; n_poly_t Aevalp, B1evalp, B2evalp, C1evalp, C2evalp; n_poly_t Aevalm, B1evalm, B2evalm, C1evalm, C2evalm; n_poly_t modulus, alphapow, t1, t2; @@ -224,7 +224,7 @@ int n_bpoly_mod_pfrac( { int success; slong i, j, bad_prime_count, bound; - mp_limb_t alpha, c; + ulong alpha, c; n_poly_struct Aevalp[1], * Bevalp, * Cevalp; n_poly_struct Aevalm[1], * Bevalm, * Cevalm; n_poly_t modulus, alphapow, t1, t2; diff --git a/src/nmod_mpoly_factor/n_poly_vec.c b/src/nmod_mpoly_factor/n_poly_vec.c index 88aa8f9c01..9f2d587947 100644 --- a/src/nmod_mpoly_factor/n_poly_vec.c +++ b/src/nmod_mpoly_factor/n_poly_vec.c @@ -28,7 +28,7 @@ slong _n_poly_vec_max_degree(const n_poly_struct * A, slong Alen) void _n_poly_vec_mul_nmod_intertible( n_poly_struct * A, slong Alen, - mp_limb_t c, + ulong c, nmod_t ctx) { slong i; diff --git a/src/nmod_mpoly_factor/nmod_mat_extras.c b/src/nmod_mpoly_factor/nmod_mat_extras.c index 665c1d7e92..3542a41525 100644 --- a/src/nmod_mpoly_factor/nmod_mat_extras.c +++ b/src/nmod_mpoly_factor/nmod_mat_extras.c @@ -83,7 +83,7 @@ void nmod_mat_init_nullspace_tr(nmod_mat_t X, nmod_mat_t tmp) { for (j = 0; j < rank; j++) { - mp_limb_t c = nmod_mat_entry(tmp, j, nonpivots[i]); + ulong c = nmod_mat_entry(tmp, j, nonpivots[i]); nmod_mat_entry(X, i, pivots[j]) = nmod_neg(c, tmp->mod); } diff --git a/src/nmod_mpoly_factor/polyu3_mod_hlift.c b/src/nmod_mpoly_factor/polyu3_mod_hlift.c index 9108252221..ba5694390f 100644 --- a/src/nmod_mpoly_factor/polyu3_mod_hlift.c +++ b/src/nmod_mpoly_factor/polyu3_mod_hlift.c @@ -42,7 +42,7 @@ static void n_polyu_sort_terms(n_polyu_t A) for (i = 1; i < A->length; i++) for (j = i; j > 0 && A->exps[j - 1] < A->exps[j]; j--) { - FLINT_SWAP(mp_limb_t, A->coeffs[j - 1], A->coeffs[j]); + FLINT_SWAP(ulong, A->coeffs[j - 1], A->coeffs[j]); FLINT_SWAP(ulong, A->exps[j - 1], A->exps[j]); } return; @@ -173,7 +173,7 @@ void n_polyu3_mod_interp_reduce_2sm_bpoly( slong i; slong cur0, cur1, e0, e1, e2; ulong tp0, tp1, tp2, tm0, tm1, tm2, p1, p0; - const mp_limb_t * 
Acoeffs = A->coeffs; + const ulong * Acoeffs = A->coeffs; const ulong * Aexps = A->exps; n_bpoly_zero(Ap); @@ -252,7 +252,7 @@ void n_polyu3n_mod_interp_lift_2sm_bpoly( n_polyun_t T, const n_bpoly_t A, const n_bpoly_t B, - mp_limb_t alpha, + ulong alpha, nmod_t mod) { slong lastlength = 0; @@ -263,8 +263,8 @@ void n_polyu3n_mod_interp_lift_2sm_bpoly( slong Ai, ai; n_poly_struct * Bcoeffs = B->coeffs; slong Bi, bi; - mp_limb_t u, v, Avalue, Bvalue; - mp_limb_t d0, d1; + ulong u, v, Avalue, Bvalue; + ulong d0, d1; FLINT_ASSERT(2*alpha < mod.n); @@ -401,9 +401,9 @@ int n_polyu3n_mod_interp_crt_2sm_bpoly( n_poly_struct * Bcoeffs = B->coeffs; slong Bi, bi; n_poly_struct * Fvalue; - mp_limb_t u, v, Avalue, Bvalue, FvalueA, FvalueB; + ulong u, v, Avalue, Bvalue, FvalueA, FvalueB; int texp_set, cmp; - mp_limb_t alpha = alphapow->coeffs[1]; + ulong alpha = alphapow->coeffs[1]; #if FLINT_WANT_ASSERT u = n_poly_mod_evaluate_nmod(modulus, alpha, mod); @@ -580,7 +580,7 @@ int n_polyu3_mod_hlift2( n_polyu_t A, n_polyu_t B0, n_polyu_t B1, - mp_limb_t beta, + ulong beta, slong degree_inner, /* required degree in x */ nmod_t ctx) { @@ -588,7 +588,7 @@ int n_polyu3_mod_hlift2( n_polyun_t T; n_bpoly_t Ap, Am, B0p, B0m, B1p, B1m; n_poly_t modulus, alphapow, t1, t2; - mp_limb_t alpha, c; + ulong alpha, c; slong ldegBB0, ldegBB1; slong Adegy, Adegz, Adegx; slong bad_primes_left; @@ -753,7 +753,7 @@ int n_polyu3_mod_hlift( n_polyun_struct * BB, n_polyu_t A, n_polyu_struct * B, - mp_limb_t beta, + ulong beta, slong degree_inner, /* required degree in x */ nmod_t ctx) { @@ -763,7 +763,7 @@ int n_polyu3_mod_hlift( n_bpoly_struct * Bp, * Bm; n_bpoly_t Ap, Am; n_poly_t modulus, alphapow, t1, t2; - mp_limb_t alpha, c; + ulong alpha, c; slong * BBdegZ; slong AdegY, AdegX, AdegZ; slong bad_primes_left; diff --git a/src/nmod_mpoly_factor/profile/p-factor.c b/src/nmod_mpoly_factor/profile/p-factor.c index 7ae5409889..595193ad9b 100644 --- a/src/nmod_mpoly_factor/profile/p-factor.c +++ b/src/nmod_mpoly_factor/profile/p-factor.c @@ -1184,7 +1184,7 @@ int main(int argc, char *argv[]) for (k = 0; k <= 4; k++) { - mp_limb_t ps[] = {2, 3, 11, 257, 43051}; + ulong ps[] = {2, 3, 11, 257, 43051}; flint_printf("\n------ 4 variables, characteristic %wu ------\n", ps[k]); total_time = 0; diff --git a/src/nmod_mpoly_factor/test/main.c b/src/nmod_mpoly_factor/test/main.c index 787540c8d8..b96f615578 100644 --- a/src/nmod_mpoly_factor/test/main.c +++ b/src/nmod_mpoly_factor/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-factor.c" diff --git a/src/nmod_mpoly_factor/test/t-factor.c b/src/nmod_mpoly_factor/test/t-factor.c index e78e8927a8..ed37813617 100644 --- a/src/nmod_mpoly_factor/test/t-factor.c +++ b/src/nmod_mpoly_factor/test/t-factor.c @@ -149,7 +149,7 @@ TEST_FUNCTION_START(nmod_mpoly_factor, state) nmod_mpoly_t a, t; slong nfacs, len; ulong expbound, powbound, pow, expbounds[2]; - mp_limb_t p; + ulong p; p = n_randint(state, (i % 2 == 0) ? 4 : FLINT_BITS - 1) + 1; p = n_randbits(state, p); @@ -195,7 +195,7 @@ TEST_FUNCTION_START(nmod_mpoly_factor, state) nmod_mpoly_t a, t; slong n, nfacs, len; ulong expbound, powbound, pow; - mp_limb_t p; + ulong p; p = n_randint(state, (i % 2 == 0) ? 
4 : FLINT_BITS - 1) + 1; p = n_randbits(state, p); diff --git a/src/nmod_mpoly_factor/test/t-factor_content.c b/src/nmod_mpoly_factor/test/t-factor_content.c index 0b4711ecd4..28ba633a2c 100644 --- a/src/nmod_mpoly_factor/test/t-factor_content.c +++ b/src/nmod_mpoly_factor/test/t-factor_content.c @@ -98,7 +98,7 @@ TEST_FUNCTION_START(nmod_mpoly_factor_content, state) nmod_mpoly_t a, t; slong n, nfacs, len; ulong * expbounds; - mp_limb_t p; + ulong p; p = n_randint(state, (i % 2 == 0) ? 4 : FLINT_BITS - 1) + 1; p = n_randbits(state, p); diff --git a/src/nmod_mpoly_factor/test/t-factor_squarefree.c b/src/nmod_mpoly_factor/test/t-factor_squarefree.c index 2052faeb06..3b70f16590 100644 --- a/src/nmod_mpoly_factor/test/t-factor_squarefree.c +++ b/src/nmod_mpoly_factor/test/t-factor_squarefree.c @@ -90,7 +90,7 @@ TEST_FUNCTION_START(nmod_mpoly_factor_squarefree, state) nmod_mpoly_t a, t; slong n, nfacs, len; ulong expbound, powbound, pow; - mp_limb_t p; + ulong p; p = n_randint(state, (i % 2 == 0) ? 4 : FLINT_BITS - 1) + 1; p = n_randbits(state, p); diff --git a/src/nmod_mpoly_factor/test/t-factor_wang.c b/src/nmod_mpoly_factor/test/t-factor_wang.c index f0e0ea4090..bdf9736a98 100644 --- a/src/nmod_mpoly_factor/test/t-factor_wang.c +++ b/src/nmod_mpoly_factor/test/t-factor_wang.c @@ -121,7 +121,7 @@ TEST_FUNCTION_START(nmod_mpoly_factor_wang, state) nmod_mpoly_t a, t; slong n, nfacs, len; ulong expbound, powbound, pow; - mp_limb_t p; + ulong p; p = n_randint(state, (i % 2 == 0) ? 4 : FLINT_BITS - 1) + 1; p = n_randbits(state, p); diff --git a/src/nmod_mpoly_factor/test/t-factor_zassenhaus.c b/src/nmod_mpoly_factor/test/t-factor_zassenhaus.c index 5e38650d2a..d489c0bc71 100644 --- a/src/nmod_mpoly_factor/test/t-factor_zassenhaus.c +++ b/src/nmod_mpoly_factor/test/t-factor_zassenhaus.c @@ -121,7 +121,7 @@ TEST_FUNCTION_START(nmod_mpoly_factor_zassenhaus, state) nmod_mpoly_t a, t; slong n, nfacs, len; ulong expbound, powbound, pow; - mp_limb_t p; + ulong p; p = n_randint(state, (i % 2 == 0) ? 4 : FLINT_BITS - 1) + 1; p = n_randbits(state, p); diff --git a/src/nmod_mpoly_factor/test/t-factor_zippel.c b/src/nmod_mpoly_factor/test/t-factor_zippel.c index 8d5dd2024e..801b5a32e0 100644 --- a/src/nmod_mpoly_factor/test/t-factor_zippel.c +++ b/src/nmod_mpoly_factor/test/t-factor_zippel.c @@ -121,7 +121,7 @@ TEST_FUNCTION_START(nmod_mpoly_factor_zippel, state) nmod_mpoly_t a, t; slong n, nfacs, len; ulong expbound, powbound, pow; - mp_limb_t p; + ulong p; p = n_randint(state, (i % 2 == 0) ? 4 : FLINT_BITS - 1) + 1; p = n_randbits(state, p); diff --git a/src/nmod_mpoly_factor/test/t-gcd_subresultant.c b/src/nmod_mpoly_factor/test/t-gcd_subresultant.c index 37ecf77d09..eaffa513fa 100644 --- a/src/nmod_mpoly_factor/test/t-gcd_subresultant.c +++ b/src/nmod_mpoly_factor/test/t-gcd_subresultant.c @@ -204,7 +204,7 @@ TEST_FUNCTION_START(nmod_mpoly_factor_gcd_subresultant, state) nmod_mpoly_t a, b, g, t; slong len, len1, len2; slong degbound; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); diff --git a/src/nmod_mpoly_factor/test/t-gcd_zippel.c b/src/nmod_mpoly_factor/test/t-gcd_zippel.c index 683ab25fbc..4fc9eb0282 100644 --- a/src/nmod_mpoly_factor/test/t-gcd_zippel.c +++ b/src/nmod_mpoly_factor/test/t-gcd_zippel.c @@ -422,7 +422,7 @@ TEST_FUNCTION_START(nmod_mpoly_factor_gcd_zippel, state) nmod_mpoly_t a, b, g, t; slong len, len1, len2; slong degbound; - mp_limb_t p; + ulong p; p = n_randint(state, (i % 2 == 0) ? 
10 : FLINT_BITS - 1) + 1; p = n_randbits(state, p); diff --git a/src/nmod_mpoly_factor/zip_helpers.c b/src/nmod_mpoly_factor/zip_helpers.c index 95e6b8ae4e..2955a2d817 100644 --- a/src/nmod_mpoly_factor/zip_helpers.c +++ b/src/nmod_mpoly_factor/zip_helpers.c @@ -33,7 +33,7 @@ void mpoly_monomial_evals_nmod( const nmod_t fpctx) { slong i, k; - mp_limb_t * p; + ulong * p; ulong mask = (-UWORD(1)) >> (FLINT_BITS - Abits); slong N = mpoly_words_per_exp_sp(Abits, mctx); slong * off, * shift; @@ -91,7 +91,7 @@ void mpoly1_monomial_evals_nmod( ulong mask = (-UWORD(1)) >> (FLINT_BITS - Abits); slong N = mpoly_words_per_exp_sp(Abits, mctx); slong * off, * shift; - mp_limb_t * p; + ulong * p; TMP_INIT; FLINT_ASSERT(1 < m && m <= mctx->nvars); @@ -163,7 +163,7 @@ void mpoly2_monomial_evals_nmod( ulong mask = (-UWORD(1)) >> (FLINT_BITS - Abits); slong N = mpoly_words_per_exp_sp(Abits, mctx); slong * off, * shift; - mp_limb_t * p; + ulong * p; TMP_INIT; FLINT_ASSERT(2 < m && m <= mctx->nvars); @@ -281,7 +281,7 @@ int n_polyun_zip_solve( { int success; slong Ai, i, n; - mp_limb_t * Acoeffs = A->coeffs; + ulong * Acoeffs = A->coeffs; n_poly_t t; n_poly_init(t); diff --git a/src/nmod_poly.h b/src/nmod_poly.h index 47d62980db..1cb6fe7faf 100644 --- a/src/nmod_poly.h +++ b/src/nmod_poly.h @@ -34,8 +34,8 @@ extern "C" { typedef struct { - mp_limb_t res; - mp_limb_t lc; + ulong res; + ulong lc; slong len0; slong len1; slong off; @@ -62,37 +62,16 @@ typedef struct } nmod_poly_compose_mod_precomp_preinv_arg_t; -/* zn_poly helper functions ************************************************ - -Copyright (C) 2007, 2008 David Harvey - -*/ - -NMOD_POLY_INLINE -int signed_mpn_sub_n(mp_ptr res, mp_srcptr op1, mp_srcptr op2, slong n) -{ - if (mpn_cmp(op1, op2, n) >= 0) - { - mpn_sub_n(res, op1, op2, n); - return 0; - } - else - { - mpn_sub_n(res, op2, op1, n); - return 1; - } -} - /* Memory management ********************************************************/ -void nmod_poly_init(nmod_poly_t poly, mp_limb_t n); +void nmod_poly_init(nmod_poly_t poly, ulong n); -void nmod_poly_init_preinv(nmod_poly_t poly, mp_limb_t n, mp_limb_t ninv); +void nmod_poly_init_preinv(nmod_poly_t poly, ulong n, ulong ninv); -void nmod_poly_init2(nmod_poly_t poly, mp_limb_t n, slong alloc); +void nmod_poly_init2(nmod_poly_t poly, ulong n, slong alloc); void nmod_poly_init2_preinv(nmod_poly_t poly, - mp_limb_t n, mp_limb_t ninv, slong alloc); + ulong n, ulong ninv, slong alloc); void nmod_poly_realloc(nmod_poly_t poly, slong alloc); @@ -143,7 +122,7 @@ slong nmod_poly_degree(const nmod_poly_t poly) } NMOD_POLY_INLINE -mp_limb_t nmod_poly_modulus(const nmod_poly_t poly) +ulong nmod_poly_modulus(const nmod_poly_t poly) { return poly->mod.n; } @@ -151,7 +130,7 @@ mp_limb_t nmod_poly_modulus(const nmod_poly_t poly) flint_bitcnt_t nmod_poly_max_bits(const nmod_poly_t poly); NMOD_POLY_INLINE -mp_ptr nmod_poly_lead(const nmod_poly_t poly) +nn_ptr nmod_poly_lead(const nmod_poly_t poly) { if (poly->length) return poly->coeffs + (poly->length - 1); @@ -191,7 +170,7 @@ void nmod_poly_truncate(nmod_poly_t poly, slong len) nmod_poly_set_trunc(poly, poly, len); } -void _nmod_poly_reverse(mp_ptr output, mp_srcptr input, slong len, slong m); +void _nmod_poly_reverse(nn_ptr output, nn_srcptr input, slong len, slong m); void nmod_poly_reverse(nmod_poly_t output, const nmod_poly_t input, slong m); /* Comparison ***************************************************************/ @@ -301,18 +280,18 @@ int nmod_poly_read(nmod_poly_t poly); /* Shifting 
*****************************************************************/ -void _nmod_poly_shift_left(mp_ptr res, mp_srcptr poly, slong len, slong k); +void _nmod_poly_shift_left(nn_ptr res, nn_srcptr poly, slong len, slong k); void nmod_poly_shift_left(nmod_poly_t res, const nmod_poly_t poly, slong k); -void _nmod_poly_shift_right(mp_ptr res, mp_srcptr poly, slong len, slong k); +void _nmod_poly_shift_right(nn_ptr res, nn_srcptr poly, slong len, slong k); void nmod_poly_shift_right(nmod_poly_t res, const nmod_poly_t poly, slong k); /* Addition and subtraction *************************************************/ -void _nmod_poly_add(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, nmod_t mod); +void _nmod_poly_add(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, nmod_t mod); void nmod_poly_add(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2); @@ -322,8 +301,8 @@ void nmod_poly_add_ui(nmod_poly_t res, const nmod_poly_t poly, ulong c); void nmod_poly_add_series(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2, slong n); -void _nmod_poly_sub(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, nmod_t mod); +void _nmod_poly_sub(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, nmod_t mod); void nmod_poly_sub(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2); @@ -338,59 +317,59 @@ void nmod_poly_neg(nmod_poly_t res, const nmod_poly_t poly1); /* Scalar multiplication and division ***************************************/ void nmod_poly_scalar_mul_nmod(nmod_poly_t res, - const nmod_poly_t poly, mp_limb_t c); + const nmod_poly_t poly, ulong c); void nmod_poly_scalar_addmul_nmod(nmod_poly_t res, const nmod_poly_t poly, ulong c); -void _nmod_poly_make_monic(mp_ptr output, - mp_srcptr input, slong len, nmod_t mod); +void _nmod_poly_make_monic(nn_ptr output, + nn_srcptr input, slong len, nmod_t mod); void nmod_poly_make_monic(nmod_poly_t output, const nmod_poly_t input); /* Bit packing and unpacking aand reduction **********************************/ -void _nmod_poly_KS2_pack1(mp_ptr res, mp_srcptr op, slong n, slong s, +void _nmod_poly_KS2_pack1(nn_ptr res, nn_srcptr op, slong n, slong s, ulong b, ulong k, slong r); -void _nmod_poly_KS2_pack(mp_ptr res, mp_srcptr op, slong n, slong s, +void _nmod_poly_KS2_pack(nn_ptr res, nn_srcptr op, slong n, slong s, ulong b, ulong k, slong r); -void _nmod_poly_KS2_unpack1(mp_ptr res, mp_srcptr op, slong n, ulong b, +void _nmod_poly_KS2_unpack1(nn_ptr res, nn_srcptr op, slong n, ulong b, ulong k); -void _nmod_poly_KS2_unpack2(mp_ptr res, mp_srcptr op, slong n, ulong b, +void _nmod_poly_KS2_unpack2(nn_ptr res, nn_srcptr op, slong n, ulong b, ulong k); -void _nmod_poly_KS2_unpack3(mp_ptr res, mp_srcptr op, slong n, ulong b, +void _nmod_poly_KS2_unpack3(nn_ptr res, nn_srcptr op, slong n, ulong b, ulong k); -void _nmod_poly_KS2_unpack(mp_ptr res, mp_srcptr op, slong n, ulong b, +void _nmod_poly_KS2_unpack(nn_ptr res, nn_srcptr op, slong n, ulong b, ulong k); -void _nmod_poly_KS2_reduce(mp_ptr res, slong s, mp_srcptr op, +void _nmod_poly_KS2_reduce(nn_ptr res, slong s, nn_srcptr op, slong n, ulong w, nmod_t mod); -void _nmod_poly_KS2_recover_reduce1(mp_ptr res, slong s, mp_srcptr op1, - mp_srcptr op2, slong n, ulong b, nmod_t mod); +void _nmod_poly_KS2_recover_reduce1(nn_ptr res, slong s, nn_srcptr op1, + nn_srcptr op2, slong n, ulong b, nmod_t mod); -void _nmod_poly_KS2_recover_reduce2(mp_ptr res, slong s, mp_srcptr op1, - mp_srcptr op2, 
slong n, ulong b, nmod_t mod); +void _nmod_poly_KS2_recover_reduce2(nn_ptr res, slong s, nn_srcptr op1, + nn_srcptr op2, slong n, ulong b, nmod_t mod); -void _nmod_poly_KS2_recover_reduce2b(mp_ptr res, slong s, mp_srcptr op1, - mp_srcptr op2, slong n, ulong FLINT_UNUSED(b), nmod_t mod); +void _nmod_poly_KS2_recover_reduce2b(nn_ptr res, slong s, nn_srcptr op1, + nn_srcptr op2, slong n, ulong FLINT_UNUSED(b), nmod_t mod); -void _nmod_poly_KS2_recover_reduce3(mp_ptr res, slong s, mp_srcptr op1, - mp_srcptr op2, slong n, ulong b, nmod_t mod); +void _nmod_poly_KS2_recover_reduce3(nn_ptr res, slong s, nn_srcptr op1, + nn_srcptr op2, slong n, ulong b, nmod_t mod); -void _nmod_poly_KS2_recover_reduce(mp_ptr res, slong s, mp_srcptr op1, - mp_srcptr op2, slong n, ulong b, nmod_t mod); +void _nmod_poly_KS2_recover_reduce(nn_ptr res, slong s, nn_srcptr op1, + nn_srcptr op2, slong n, ulong b, nmod_t mod); -void _nmod_poly_bit_pack(mp_ptr res, mp_srcptr poly, +void _nmod_poly_bit_pack(nn_ptr res, nn_srcptr poly, slong len, flint_bitcnt_t bits); -void _nmod_poly_bit_unpack(mp_ptr res, slong len, - mp_srcptr mpn, flint_bitcnt_t bits, nmod_t mod); +void _nmod_poly_bit_unpack(nn_ptr res, slong len, + nn_srcptr mpn, flint_bitcnt_t bits, nmod_t mod); void nmod_poly_bit_pack(fmpz_t f, const nmod_poly_t poly, flint_bitcnt_t bit_size); @@ -399,106 +378,106 @@ void nmod_poly_bit_unpack(nmod_poly_t poly, const fmpz_t f, flint_bitcnt_t bit_s /* Multiplication ***********************************************************/ -void _nmod_poly_mul_classical(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, nmod_t mod); +void _nmod_poly_mul_classical(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, nmod_t mod); void nmod_poly_mul_classical(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2); -void _nmod_poly_mullow_classical(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, slong trunc, nmod_t mod); +void _nmod_poly_mullow_classical(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, slong trunc, nmod_t mod); void nmod_poly_mullow_classical(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2, slong trunc); -void _nmod_poly_mulhigh_classical(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, slong start, nmod_t mod); +void _nmod_poly_mulhigh_classical(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, slong start, nmod_t mod); void nmod_poly_mulhigh_classical(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2, slong start); -void _nmod_poly_mul_KS(mp_ptr out, mp_srcptr in1, slong len1, - mp_srcptr in2, slong len2, flint_bitcnt_t bits, nmod_t mod); +void _nmod_poly_mul_KS(nn_ptr out, nn_srcptr in1, slong len1, + nn_srcptr in2, slong len2, flint_bitcnt_t bits, nmod_t mod); void nmod_poly_mul_KS(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2, flint_bitcnt_t bits); -void _nmod_poly_mul_KS2(mp_ptr res, mp_srcptr op1, slong n1, - mp_srcptr op2, slong n2, nmod_t mod); +void _nmod_poly_mul_KS2(nn_ptr res, nn_srcptr op1, slong n1, + nn_srcptr op2, slong n2, nmod_t mod); void nmod_poly_mul_KS2(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2); -void _nmod_poly_mul_KS4(mp_ptr res, mp_srcptr op1, slong n1, - mp_srcptr op2, slong n2, nmod_t mod); +void _nmod_poly_mul_KS4(nn_ptr res, nn_srcptr op1, slong n1, + nn_srcptr op2, slong n2, nmod_t mod); void nmod_poly_mul_KS4(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t 
poly2); -void _nmod_poly_mullow_KS(mp_ptr out, mp_srcptr in1, slong len1, - mp_srcptr in2, slong len2, flint_bitcnt_t bits, slong n, nmod_t mod); +void _nmod_poly_mullow_KS(nn_ptr out, nn_srcptr in1, slong len1, + nn_srcptr in2, slong len2, flint_bitcnt_t bits, slong n, nmod_t mod); void nmod_poly_mullow_KS(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2, flint_bitcnt_t bits, slong n); -void _nmod_poly_mul(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, nmod_t mod); +void _nmod_poly_mul(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, nmod_t mod); void nmod_poly_mul(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2); -void _nmod_poly_mullow(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, slong trunc, nmod_t mod); +void _nmod_poly_mullow(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, slong trunc, nmod_t mod); void nmod_poly_mullow(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2, slong trunc); -void _nmod_poly_mulhigh(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, slong n, nmod_t mod); +void _nmod_poly_mulhigh(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, slong n, nmod_t mod); void nmod_poly_mulhigh(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2, slong n); -void _nmod_poly_mulmod(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, mp_srcptr f, +void _nmod_poly_mulmod(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, nn_srcptr f, slong lenf, nmod_t mod); void nmod_poly_mulmod(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2, const nmod_poly_t f); -void _nmod_poly_mulmod_preinv(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, mp_srcptr f, - slong lenf, mp_srcptr finv, slong lenfinv, nmod_t mod); +void _nmod_poly_mulmod_preinv(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, nn_srcptr f, + slong lenf, nn_srcptr finv, slong lenfinv, nmod_t mod); void nmod_poly_mulmod_preinv(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2, const nmod_poly_t f, const nmod_poly_t finv); -int _nmod_poly_invmod(mp_limb_t *A, - const mp_limb_t *B, slong lenB, - const mp_limb_t *P, slong lenP, const nmod_t mod); +int _nmod_poly_invmod(ulong *A, + const ulong *B, slong lenB, + const ulong *P, slong lenP, const nmod_t mod); int nmod_poly_invmod(nmod_poly_t A, const nmod_poly_t B, const nmod_poly_t P); /* Powering *****************************************************************/ -void _nmod_poly_pow_binexp(mp_ptr res, - mp_srcptr poly, slong len, ulong e, nmod_t mod); +void _nmod_poly_pow_binexp(nn_ptr res, + nn_srcptr poly, slong len, ulong e, nmod_t mod); void nmod_poly_pow_binexp(nmod_poly_t res, const nmod_poly_t poly, ulong e); -void _nmod_poly_pow(mp_ptr res, mp_srcptr poly, slong len, ulong e, nmod_t mod); +void _nmod_poly_pow(nn_ptr res, nn_srcptr poly, slong len, ulong e, nmod_t mod); void nmod_poly_pow(nmod_poly_t res, const nmod_poly_t poly, ulong e); -void _nmod_poly_pow_trunc_binexp(mp_ptr res, mp_srcptr poly, +void _nmod_poly_pow_trunc_binexp(nn_ptr res, nn_srcptr poly, ulong e, slong trunc, nmod_t mod); void nmod_poly_pow_trunc_binexp(nmod_poly_t res, const nmod_poly_t poly, ulong e, slong trunc); -void _nmod_poly_pow_trunc(mp_ptr res, mp_srcptr poly, +void _nmod_poly_pow_trunc(nn_ptr res, nn_srcptr poly, ulong e, slong trunc, nmod_t mod); void 
nmod_poly_pow_trunc(nmod_poly_t res, @@ -508,199 +487,199 @@ void nmod_poly_powmod_ui_binexp(nmod_poly_t res, const nmod_poly_t poly, ulong e, const nmod_poly_t f); -void _nmod_poly_powmod_ui_binexp(mp_ptr res, mp_srcptr poly, - ulong e, mp_srcptr f, slong lenf, nmod_t mod); +void _nmod_poly_powmod_ui_binexp(nn_ptr res, nn_srcptr poly, + ulong e, nn_srcptr f, slong lenf, nmod_t mod); void nmod_poly_powmod_fmpz_binexp(nmod_poly_t res, const nmod_poly_t poly, fmpz_t e, const nmod_poly_t f); -void _nmod_poly_powmod_fmpz_binexp(mp_ptr res, mp_srcptr poly, - fmpz_t e, mp_srcptr f, slong lenf, nmod_t mod); +void _nmod_poly_powmod_fmpz_binexp(nn_ptr res, nn_srcptr poly, + fmpz_t e, nn_srcptr f, slong lenf, nmod_t mod); -void _nmod_poly_powmod_ui_binexp_preinv (mp_ptr res, mp_srcptr poly, - ulong e, mp_srcptr f, slong lenf, - mp_srcptr finv, slong lenfinv, nmod_t mod); +void _nmod_poly_powmod_ui_binexp_preinv (nn_ptr res, nn_srcptr poly, + ulong e, nn_srcptr f, slong lenf, + nn_srcptr finv, slong lenfinv, nmod_t mod); void nmod_poly_powmod_ui_binexp_preinv(nmod_poly_t res, const nmod_poly_t poly, ulong e, const nmod_poly_t f, const nmod_poly_t finv); -void _nmod_poly_powmod_fmpz_binexp_preinv (mp_ptr res, mp_srcptr poly, - fmpz_t e, mp_srcptr f, slong lenf, - mp_srcptr finv, slong lenfinv, nmod_t mod); +void _nmod_poly_powmod_fmpz_binexp_preinv (nn_ptr res, nn_srcptr poly, + fmpz_t e, nn_srcptr f, slong lenf, + nn_srcptr finv, slong lenfinv, nmod_t mod); void nmod_poly_powmod_fmpz_binexp_preinv(nmod_poly_t res, const nmod_poly_t poly, fmpz_t e, const nmod_poly_t f, const nmod_poly_t finv); -void _nmod_poly_powmod_x_ui_preinv (mp_ptr res, ulong e, mp_srcptr f, slong lenf, - mp_srcptr finv, slong lenfinv, nmod_t mod); +void _nmod_poly_powmod_x_ui_preinv (nn_ptr res, ulong e, nn_srcptr f, slong lenf, + nn_srcptr finv, slong lenfinv, nmod_t mod); void nmod_poly_powmod_x_ui_preinv(nmod_poly_t res, ulong e, const nmod_poly_t f, const nmod_poly_t finv); -void _nmod_poly_powmod_x_fmpz_preinv (mp_ptr res, fmpz_t e, mp_srcptr f, slong lenf, - mp_srcptr finv, slong lenfinv, nmod_t mod); +void _nmod_poly_powmod_x_fmpz_preinv (nn_ptr res, fmpz_t e, nn_srcptr f, slong lenf, + nn_srcptr finv, slong lenfinv, nmod_t mod); void nmod_poly_powmod_x_fmpz_preinv(nmod_poly_t res, fmpz_t e, const nmod_poly_t f, const nmod_poly_t finv); -void _nmod_poly_powers_mod_preinv_naive(mp_ptr * res, mp_srcptr f, - slong flen, slong n, mp_srcptr g, slong glen, mp_srcptr ginv, +void _nmod_poly_powers_mod_preinv_naive(nn_ptr * res, nn_srcptr f, + slong flen, slong n, nn_srcptr g, slong glen, nn_srcptr ginv, slong ginvlen, const nmod_t mod); void nmod_poly_powers_mod_naive(nmod_poly_struct * res, const nmod_poly_t f, slong n, const nmod_poly_t g); -void _nmod_poly_powers_mod_preinv_threaded_pool(mp_ptr * res, - mp_srcptr f, slong flen, slong n, mp_srcptr g, slong glen, - mp_srcptr ginv, slong ginvlen, const nmod_t mod, +void _nmod_poly_powers_mod_preinv_threaded_pool(nn_ptr * res, + nn_srcptr f, slong flen, slong n, nn_srcptr g, slong glen, + nn_srcptr ginv, slong ginvlen, const nmod_t mod, thread_pool_handle * threads, slong num_threads); void -_nmod_poly_powers_mod_preinv_threaded(mp_ptr * res, mp_srcptr f, - slong flen, slong n, mp_srcptr g, slong glen, - mp_srcptr ginv, slong ginvlen, const nmod_t mod); +_nmod_poly_powers_mod_preinv_threaded(nn_ptr * res, nn_srcptr f, + slong flen, slong n, nn_srcptr g, slong glen, + nn_srcptr ginv, slong ginvlen, const nmod_t mod); void nmod_poly_powers_mod_bsgs(nmod_poly_struct * res, const 
nmod_poly_t f, slong n, const nmod_poly_t g); /* Division *****************************************************************/ -void _nmod_poly_divrem_basecase_preinv1(mp_ptr Q, mp_ptr R, - mp_srcptr A, slong A_len, mp_srcptr B, slong B_len, mp_limb_t invB, nmod_t mod); +void _nmod_poly_divrem_basecase_preinv1(nn_ptr Q, nn_ptr R, + nn_srcptr A, slong A_len, nn_srcptr B, slong B_len, ulong invB, nmod_t mod); -void _nmod_poly_divrem_basecase(mp_ptr Q, mp_ptr R, - mp_srcptr A, slong A_len, mp_srcptr B, slong B_len, nmod_t mod); +void _nmod_poly_divrem_basecase(nn_ptr Q, nn_ptr R, + nn_srcptr A, slong A_len, nn_srcptr B, slong B_len, nmod_t mod); void nmod_poly_divrem_basecase(nmod_poly_t Q, nmod_poly_t R, const nmod_poly_t A, const nmod_poly_t B); -void _nmod_poly_divrem(mp_ptr Q, mp_ptr R, mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, nmod_t mod); +void _nmod_poly_divrem(nn_ptr Q, nn_ptr R, nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, nmod_t mod); void nmod_poly_divrem(nmod_poly_t Q, nmod_poly_t R, const nmod_poly_t A, const nmod_poly_t B); -void _nmod_poly_div(mp_ptr Q, mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, nmod_t mod); +void _nmod_poly_div(nn_ptr Q, nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, nmod_t mod); void nmod_poly_div(nmod_poly_t Q, const nmod_poly_t A, const nmod_poly_t B); -void _nmod_poly_rem(mp_ptr R, mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, nmod_t mod); +void _nmod_poly_rem(nn_ptr R, nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, nmod_t mod); void nmod_poly_rem(nmod_poly_t R, const nmod_poly_t A, const nmod_poly_t B); -void _nmod_poly_divexact(mp_ptr Q, mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, nmod_t mod); +void _nmod_poly_divexact(nn_ptr Q, nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, nmod_t mod); void nmod_poly_divexact(nmod_poly_t Q, const nmod_poly_t A, const nmod_poly_t B); -void _nmod_poly_inv_series_basecase(mp_ptr Qinv, - mp_srcptr Q, slong Qlen, slong n, nmod_t mod); +void _nmod_poly_inv_series_basecase(nn_ptr Qinv, + nn_srcptr Q, slong Qlen, slong n, nmod_t mod); void nmod_poly_inv_series_basecase(nmod_poly_t Qinv, const nmod_poly_t Q, slong n); -void _nmod_poly_inv_series_newton(mp_ptr Qinv, - mp_srcptr Q, slong Qlen, slong n, nmod_t mod); +void _nmod_poly_inv_series_newton(nn_ptr Qinv, + nn_srcptr Q, slong Qlen, slong n, nmod_t mod); void nmod_poly_inv_series_newton(nmod_poly_t Qinv, const nmod_poly_t Q, slong n); -void _nmod_poly_inv_series(mp_ptr Qinv, mp_srcptr Q, slong Qlen, slong n, nmod_t mod); +void _nmod_poly_inv_series(nn_ptr Qinv, nn_srcptr Q, slong Qlen, slong n, nmod_t mod); void nmod_poly_inv_series(nmod_poly_t Qinv, const nmod_poly_t Q, slong n); -void _nmod_poly_div_series_basecase(mp_ptr Q, mp_srcptr A, - slong Alen, mp_srcptr B, slong Blen, slong n, nmod_t mod); +void _nmod_poly_div_series_basecase(nn_ptr Q, nn_srcptr A, + slong Alen, nn_srcptr B, slong Blen, slong n, nmod_t mod); void nmod_poly_div_series_basecase(nmod_poly_t Q, const nmod_poly_t A, const nmod_poly_t B, slong n); -void _nmod_poly_div_series(mp_ptr Q, mp_srcptr A, slong Alen, - mp_srcptr B, slong Blen, slong n, nmod_t mod); +void _nmod_poly_div_series(nn_ptr Q, nn_srcptr A, slong Alen, + nn_srcptr B, slong Blen, slong n, nmod_t mod); void nmod_poly_div_series(nmod_poly_t Q, const nmod_poly_t A, const nmod_poly_t B, slong n); -void _nmod_poly_div_newton_n_preinv(mp_ptr Q, mp_srcptr A, slong lenA, - mp_srcptr FLINT_UNUSED(B), slong lenB, mp_srcptr Binv, +void _nmod_poly_div_newton_n_preinv(nn_ptr Q, nn_srcptr A, slong 
lenA, + nn_srcptr FLINT_UNUSED(B), slong lenB, nn_srcptr Binv, slong lenBinv, nmod_t mod); void nmod_poly_div_newton_n_preinv (nmod_poly_t Q, const nmod_poly_t A, const nmod_poly_t B, const nmod_poly_t Binv); -void _nmod_poly_divrem_newton_n_preinv (mp_ptr Q, - mp_ptr R, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, - mp_srcptr Binv, slong lenBinv, nmod_t mod); +void _nmod_poly_divrem_newton_n_preinv (nn_ptr Q, + nn_ptr R, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, + nn_srcptr Binv, slong lenBinv, nmod_t mod); void nmod_poly_divrem_newton_n_preinv(nmod_poly_t Q, nmod_poly_t R, const nmod_poly_t A, const nmod_poly_t B, const nmod_poly_t Binv); -mp_limb_t _nmod_poly_div_root(mp_ptr Q, - mp_srcptr A, slong len, mp_limb_t c, nmod_t mod); +ulong _nmod_poly_div_root(nn_ptr Q, + nn_srcptr A, slong len, ulong c, nmod_t mod); -mp_limb_t nmod_poly_div_root(nmod_poly_t Q, - const nmod_poly_t A, mp_limb_t c); +ulong nmod_poly_div_root(nmod_poly_t Q, + const nmod_poly_t A, ulong c); /* Divisibility testing *****************************************************/ -int _nmod_poly_divides_classical(mp_ptr Q, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod); +int _nmod_poly_divides_classical(nn_ptr Q, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod); int nmod_poly_divides_classical(nmod_poly_t Q, const nmod_poly_t A, const nmod_poly_t B); -int _nmod_poly_divides(mp_ptr Q, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod); +int _nmod_poly_divides(nn_ptr Q, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod); int nmod_poly_divides(nmod_poly_t Q, const nmod_poly_t A, const nmod_poly_t B); ulong nmod_poly_remove(nmod_poly_t f, const nmod_poly_t p); /* Derivative ***************************************************************/ -void _nmod_poly_derivative(mp_ptr x_prime, - mp_srcptr x, slong len, nmod_t mod); +void _nmod_poly_derivative(nn_ptr x_prime, + nn_srcptr x, slong len, nmod_t mod); void nmod_poly_derivative(nmod_poly_t x_prime, const nmod_poly_t x); -void _nmod_poly_integral(mp_ptr - x_int, mp_srcptr x, slong len, nmod_t mod); +void _nmod_poly_integral(nn_ptr + x_int, nn_srcptr x, slong len, nmod_t mod); void nmod_poly_integral(nmod_poly_t x_int, const nmod_poly_t x); /* Evaluation ***************************************************************/ -mp_limb_t _nmod_poly_evaluate_nmod(mp_srcptr poly, - slong len, mp_limb_t c, nmod_t mod); +ulong _nmod_poly_evaluate_nmod(nn_srcptr poly, + slong len, ulong c, nmod_t mod); -mp_limb_t nmod_poly_evaluate_nmod(const nmod_poly_t poly, - mp_limb_t c); +ulong nmod_poly_evaluate_nmod(const nmod_poly_t poly, + ulong c); -void _nmod_poly_evaluate_nmod_vec(mp_ptr ys, - mp_srcptr coeffs, slong len, mp_srcptr xs, slong n, nmod_t mod); +void _nmod_poly_evaluate_nmod_vec(nn_ptr ys, + nn_srcptr coeffs, slong len, nn_srcptr xs, slong n, nmod_t mod); -void nmod_poly_evaluate_nmod_vec(mp_ptr ys, - const nmod_poly_t poly, mp_srcptr xs, slong n); +void nmod_poly_evaluate_nmod_vec(nn_ptr ys, + const nmod_poly_t poly, nn_srcptr xs, slong n); -void _nmod_poly_evaluate_nmod_vec_iter(mp_ptr ys, - mp_srcptr coeffs, slong len, mp_srcptr xs, slong n, nmod_t mod); +void _nmod_poly_evaluate_nmod_vec_iter(nn_ptr ys, + nn_srcptr coeffs, slong len, nn_srcptr xs, slong n, nmod_t mod); -void nmod_poly_evaluate_nmod_vec_iter(mp_ptr ys, - const nmod_poly_t poly, mp_srcptr xs, slong n); +void nmod_poly_evaluate_nmod_vec_iter(nn_ptr ys, + const nmod_poly_t poly, nn_srcptr xs, slong n); -void _nmod_poly_evaluate_nmod_vec_fast_precomp(mp_ptr 
vs, - mp_srcptr poly, slong plen, const mp_ptr * tree, slong len, nmod_t mod); +void _nmod_poly_evaluate_nmod_vec_fast_precomp(nn_ptr vs, + nn_srcptr poly, slong plen, const nn_ptr * tree, slong len, nmod_t mod); -void _nmod_poly_evaluate_nmod_vec_fast(mp_ptr ys, - mp_srcptr coeffs, slong len, mp_srcptr xs, slong n, nmod_t mod); +void _nmod_poly_evaluate_nmod_vec_fast(nn_ptr ys, + nn_srcptr coeffs, slong len, nn_srcptr xs, slong n, nmod_t mod); -void nmod_poly_evaluate_nmod_vec_fast(mp_ptr ys, - const nmod_poly_t poly, mp_srcptr xs, slong n); +void nmod_poly_evaluate_nmod_vec_fast(nn_ptr ys, + const nmod_poly_t poly, nn_srcptr xs, slong n); void nmod_mat_one_addmul(nmod_mat_t dest, - const nmod_mat_t mat, mp_limb_t c); + const nmod_mat_t mat, ulong c); void nmod_poly_evaluate_mat_horner(nmod_mat_t dest, const nmod_poly_t poly, const nmod_mat_t c); @@ -724,55 +703,55 @@ void nmod_poly_evaluate_mat(nmod_mat_t dest, /* Subproduct tree **********************************************************/ -mp_ptr * _nmod_poly_tree_alloc(slong len); +nn_ptr * _nmod_poly_tree_alloc(slong len); -void _nmod_poly_tree_free(mp_ptr * tree, slong len); +void _nmod_poly_tree_free(nn_ptr * tree, slong len); -void _nmod_poly_tree_build(mp_ptr * tree, mp_srcptr roots, +void _nmod_poly_tree_build(nn_ptr * tree, nn_srcptr roots, slong len, nmod_t mod); /* Interpolation ************************************************************/ -void _nmod_poly_interpolate_nmod_vec_newton(mp_ptr poly, mp_srcptr xs, - mp_srcptr ys, slong n, nmod_t mod); +void _nmod_poly_interpolate_nmod_vec_newton(nn_ptr poly, nn_srcptr xs, + nn_srcptr ys, slong n, nmod_t mod); void nmod_poly_interpolate_nmod_vec_newton(nmod_poly_t poly, - mp_srcptr xs, mp_srcptr ys, slong n); + nn_srcptr xs, nn_srcptr ys, slong n); -void _nmod_poly_interpolate_nmod_vec_barycentric(mp_ptr poly, mp_srcptr xs, - mp_srcptr ys, slong n, nmod_t mod); +void _nmod_poly_interpolate_nmod_vec_barycentric(nn_ptr poly, nn_srcptr xs, + nn_srcptr ys, slong n, nmod_t mod); void nmod_poly_interpolate_nmod_vec_barycentric(nmod_poly_t poly, - mp_srcptr xs, mp_srcptr ys, slong n); + nn_srcptr xs, nn_srcptr ys, slong n); -void _nmod_poly_interpolate_nmod_vec(mp_ptr poly, mp_srcptr xs, - mp_srcptr ys, slong n, nmod_t mod); +void _nmod_poly_interpolate_nmod_vec(nn_ptr poly, nn_srcptr xs, + nn_srcptr ys, slong n, nmod_t mod); void nmod_poly_interpolate_nmod_vec(nmod_poly_t poly, - mp_srcptr xs, mp_srcptr ys, slong n); + nn_srcptr xs, nn_srcptr ys, slong n); void nmod_poly_interpolate_nmod_vec_fast(nmod_poly_t poly, - mp_srcptr xs, mp_srcptr ys, slong n); + nn_srcptr xs, nn_srcptr ys, slong n); -void _nmod_poly_interpolate_nmod_vec_fast(mp_ptr poly, - mp_srcptr xs, mp_srcptr ys, slong len, nmod_t mod); +void _nmod_poly_interpolate_nmod_vec_fast(nn_ptr poly, + nn_srcptr xs, nn_srcptr ys, slong len, nmod_t mod); -void _nmod_poly_interpolate_nmod_vec_fast_precomp(mp_ptr poly, mp_srcptr ys, - const mp_ptr * tree, mp_srcptr weights, slong len, nmod_t mod); +void _nmod_poly_interpolate_nmod_vec_fast_precomp(nn_ptr poly, nn_srcptr ys, + const nn_ptr * tree, nn_srcptr weights, slong len, nmod_t mod); -void _nmod_poly_interpolation_weights(mp_ptr w, const mp_ptr * tree, +void _nmod_poly_interpolation_weights(nn_ptr w, const nn_ptr * tree, slong len, nmod_t mod); /* Composition **************************************************************/ -void _nmod_poly_compose_horner(mp_ptr res, mp_srcptr poly1, - slong len1, mp_srcptr poly2, slong len2, nmod_t mod); +void _nmod_poly_compose_horner(nn_ptr res, 
nn_srcptr poly1, + slong len1, nn_srcptr poly2, slong len2, nmod_t mod); void nmod_poly_compose_horner(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2); -void _nmod_poly_compose(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, nmod_t mod); +void _nmod_poly_compose(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, nmod_t mod); void nmod_poly_compose(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2); @@ -783,26 +762,26 @@ void nmod_poly_compose(nmod_poly_t res, /* Taylor shift *************************************************************/ -void _nmod_poly_taylor_shift_horner(mp_ptr poly, mp_limb_t c, +void _nmod_poly_taylor_shift_horner(nn_ptr poly, ulong c, slong len, nmod_t mod); void nmod_poly_taylor_shift_horner(nmod_poly_t g, - const nmod_poly_t f, mp_limb_t c); + const nmod_poly_t f, ulong c); -void _nmod_poly_taylor_shift_convolution(mp_ptr poly, mp_limb_t c, +void _nmod_poly_taylor_shift_convolution(nn_ptr poly, ulong c, slong len, nmod_t mod); void nmod_poly_taylor_shift_convolution(nmod_poly_t g, - const nmod_poly_t f, mp_limb_t c); + const nmod_poly_t f, ulong c); -void _nmod_poly_taylor_shift(mp_ptr poly, mp_limb_t c, slong len, nmod_t mod); +void _nmod_poly_taylor_shift(nn_ptr poly, ulong c, slong len, nmod_t mod); -void nmod_poly_taylor_shift(nmod_poly_t g, const nmod_poly_t f, mp_limb_t c); +void nmod_poly_taylor_shift(nmod_poly_t g, const nmod_poly_t f, ulong c); /* Modular composition ******************************************************/ -void _nmod_poly_compose_mod_brent_kung(mp_ptr res, mp_srcptr f, slong lenf, - mp_srcptr g, mp_srcptr h, slong lenh, nmod_t mod); +void _nmod_poly_compose_mod_brent_kung(nn_ptr res, nn_srcptr f, slong lenf, + nn_srcptr g, nn_srcptr h, slong lenh, nmod_t mod); void nmod_poly_compose_mod_brent_kung(nmod_poly_t res, const nmod_poly_t f, const nmod_poly_t g, @@ -811,17 +790,17 @@ void nmod_poly_compose_mod_brent_kung(nmod_poly_t res, void _nmod_poly_reduce_matrix_mod_poly(nmod_mat_t A, const nmod_mat_t B, const nmod_poly_t f); -void _nmod_poly_precompute_matrix(nmod_mat_t A, mp_srcptr poly1, mp_srcptr poly2, - slong len2, mp_srcptr poly2inv, slong len2inv, nmod_t mod); +void _nmod_poly_precompute_matrix(nmod_mat_t A, nn_srcptr poly1, nn_srcptr poly2, + slong len2, nn_srcptr poly2inv, slong len2inv, nmod_t mod); void _nmod_poly_precompute_matrix_worker(void * arg_ptr); void nmod_poly_precompute_matrix(nmod_mat_t A, const nmod_poly_t poly1, const nmod_poly_t poly2, const nmod_poly_t poly2inv); -void _nmod_poly_compose_mod_brent_kung_precomp_preinv(mp_ptr res, mp_srcptr poly1, - slong len1, const nmod_mat_t A, mp_srcptr poly3, - slong len3, mp_srcptr poly3inv, slong len3inv, +void _nmod_poly_compose_mod_brent_kung_precomp_preinv(nn_ptr res, nn_srcptr poly1, + slong len1, const nmod_mat_t A, nn_srcptr poly3, + slong len3, nn_srcptr poly3inv, slong len3inv, nmod_t mod); void _nmod_poly_compose_mod_brent_kung_precomp_preinv_worker(void * arg_ptr); @@ -830,9 +809,9 @@ void nmod_poly_compose_mod_brent_kung_precomp_preinv(nmod_poly_t res, const nmod_poly_t poly1, const nmod_mat_t A, const nmod_poly_t poly3, const nmod_poly_t poly3inv); -void _nmod_poly_compose_mod_brent_kung_preinv(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, mp_srcptr poly3, slong len3, - mp_srcptr poly3inv, slong len3inv, nmod_t mod); +void _nmod_poly_compose_mod_brent_kung_preinv(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, nn_srcptr poly3, slong len3, + nn_srcptr poly3inv, 
slong len3inv, nmod_t mod); void nmod_poly_compose_mod_brent_kung_preinv(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2, @@ -841,8 +820,8 @@ void nmod_poly_compose_mod_brent_kung_preinv(nmod_poly_t res, void _nmod_poly_compose_mod_brent_kung_vec_preinv(nmod_poly_struct * res, const nmod_poly_struct * polys, slong FLINT_UNUSED(lenpolys), slong l, - mp_srcptr g, slong glen, mp_srcptr poly, slong len, - mp_srcptr polyinv, slong leninv, nmod_t mod); + nn_srcptr g, slong glen, nn_srcptr poly, slong len, + nn_srcptr polyinv, slong leninv, nmod_t mod); void nmod_poly_compose_mod_brent_kung_vec_preinv(nmod_poly_struct * res, const nmod_poly_struct * polys, slong len1, slong n, @@ -862,9 +841,9 @@ void _nmod_poly_compose_mod_brent_kung_vec_preinv_threaded_pool( nmod_poly_struct * res, const nmod_poly_struct * polys, slong FLINT_UNUSED(lenpolys), slong l, - mp_srcptr g, slong glen, - mp_srcptr poly, slong len, - mp_srcptr polyinv, slong leninv, + nn_srcptr g, slong glen, + nn_srcptr poly, slong len, + nn_srcptr polyinv, slong leninv, nmod_t mod, thread_pool_handle * threads, slong num_threads); @@ -876,16 +855,16 @@ void nmod_poly_compose_mod_brent_kung_vec_preinv_threaded(nmod_poly_struct * res const nmod_poly_t poly, const nmod_poly_t polyinv); -void _nmod_poly_compose_mod_horner(mp_ptr res, - mp_srcptr f, slong lenf, mp_srcptr g, mp_srcptr h, slong lenh, nmod_t mod); +void _nmod_poly_compose_mod_horner(nn_ptr res, + nn_srcptr f, slong lenf, nn_srcptr g, nn_srcptr h, slong lenh, nmod_t mod); void nmod_poly_compose_mod_horner(nmod_poly_t res, const nmod_poly_t f, const nmod_poly_t g, const nmod_poly_t h); -void _nmod_poly_compose_mod(mp_ptr res, mp_srcptr f, slong lenf, - mp_srcptr g, - mp_srcptr h, slong lenh, nmod_t mod); +void _nmod_poly_compose_mod(nn_ptr res, nn_srcptr f, slong lenf, + nn_srcptr g, + nn_srcptr h, slong lenh, nmod_t mod); void nmod_poly_compose_mod(nmod_poly_t res, const nmod_poly_t f, const nmod_poly_t g, @@ -893,18 +872,18 @@ void nmod_poly_compose_mod(nmod_poly_t res, /* Power series composition and reversion ************************************/ -void _nmod_poly_compose_series(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, slong n, nmod_t mod); +void _nmod_poly_compose_series(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, slong n, nmod_t mod); void nmod_poly_compose_series(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2, slong n); -void _nmod_poly_revert_series(mp_ptr Qinv, mp_srcptr Q, slong Qlen, slong n, nmod_t mod); +void _nmod_poly_revert_series(nn_ptr Qinv, nn_srcptr Q, slong Qlen, slong n, nmod_t mod); void nmod_poly_revert_series(nmod_poly_t Qinv, const nmod_poly_t Q, slong n); /* norms *********************************************************************/ -NMOD_POLY_INLINE slong _nmod_poly_hamming_weight(mp_srcptr a, slong len) +NMOD_POLY_INLINE slong _nmod_poly_hamming_weight(nn_srcptr a, slong len) { slong i, sum = 0; for (i = 0; i < len; i++) @@ -920,20 +899,20 @@ NMOD_POLY_INLINE slong nmod_poly_hamming_weight(const nmod_poly_t A) /* Greatest common divisor **************************************************/ -slong _nmod_poly_gcd_euclidean(mp_ptr G, - mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod); +slong _nmod_poly_gcd_euclidean(nn_ptr G, + nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod); void nmod_poly_gcd_euclidean(nmod_poly_t G, const nmod_poly_t A, const nmod_poly_t B); -slong _nmod_poly_hgcd_recursive(mp_ptr *M, slong *lenM, - mp_ptr A, 
slong *lenA, mp_ptr B, slong *lenB, - mp_srcptr a, slong lena, mp_srcptr b, slong lenb, - mp_ptr P, nmod_t mod, int flag, nmod_poly_res_t res); +slong _nmod_poly_hgcd_recursive(nn_ptr *M, slong *lenM, + nn_ptr A, slong *lenA, nn_ptr B, slong *lenB, + nn_srcptr a, slong lena, nn_srcptr b, slong lenb, + nn_ptr P, nmod_t mod, int flag, nmod_poly_res_t res); -slong _nmod_poly_hgcd(mp_ptr *M, slong *lenM, - mp_ptr A, slong *lenA, mp_ptr B, slong *lenB, - mp_srcptr a, slong lena, mp_srcptr b, slong lenb, +slong _nmod_poly_hgcd(nn_ptr *M, slong *lenM, + nn_ptr A, slong *lenA, nn_ptr B, slong *lenB, + nn_srcptr a, slong lena, nn_srcptr b, slong lenb, nmod_t mod); slong nmod_poly_hgcd_ref( @@ -944,54 +923,54 @@ slong nmod_poly_hgcd( nmod_poly_t m11, nmod_poly_t m12, nmod_poly_t m21, nmod_poly_t m22, nmod_poly_t A, nmod_poly_t B, const nmod_poly_t a, const nmod_poly_t b); -slong _nmod_poly_gcd_hgcd(mp_ptr G, mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, nmod_t mod); +slong _nmod_poly_gcd_hgcd(nn_ptr G, nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, nmod_t mod); void nmod_poly_gcd_hgcd(nmod_poly_t G, const nmod_poly_t A, const nmod_poly_t B); -slong _nmod_poly_gcd(mp_ptr G, mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, nmod_t mod); +slong _nmod_poly_gcd(nn_ptr G, nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, nmod_t mod); void nmod_poly_gcd(nmod_poly_t G, const nmod_poly_t A, const nmod_poly_t B); -slong _nmod_poly_xgcd_euclidean(mp_ptr res, mp_ptr s, mp_ptr t, - mp_srcptr poly1, slong len1, mp_srcptr poly2, slong len2, nmod_t mod); +slong _nmod_poly_xgcd_euclidean(nn_ptr res, nn_ptr s, nn_ptr t, + nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, nmod_t mod); void nmod_poly_xgcd_euclidean(nmod_poly_t G, nmod_poly_t S, nmod_poly_t T, const nmod_poly_t A, const nmod_poly_t B); -slong _nmod_poly_xgcd_hgcd(mp_ptr G, mp_ptr S, mp_ptr T, - mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, +slong _nmod_poly_xgcd_hgcd(nn_ptr G, nn_ptr S, nn_ptr T, + nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod); void nmod_poly_xgcd_hgcd(nmod_poly_t G, nmod_poly_t S, nmod_poly_t T, const nmod_poly_t A, const nmod_poly_t B); -slong _nmod_poly_xgcd(mp_ptr G, mp_ptr S, mp_ptr T, - mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod); +slong _nmod_poly_xgcd(nn_ptr G, nn_ptr S, nn_ptr T, + nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod); void nmod_poly_xgcd(nmod_poly_t G, nmod_poly_t S, nmod_poly_t T, const nmod_poly_t A, const nmod_poly_t B); -mp_limb_t _nmod_poly_resultant_euclidean(mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, nmod_t mod); +ulong _nmod_poly_resultant_euclidean(nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, nmod_t mod); -mp_limb_t nmod_poly_resultant_euclidean(const nmod_poly_t f, const nmod_poly_t g); +ulong nmod_poly_resultant_euclidean(const nmod_poly_t f, const nmod_poly_t g); -mp_limb_t _nmod_poly_resultant_hgcd(mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, nmod_t mod); +ulong _nmod_poly_resultant_hgcd(nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, nmod_t mod); -mp_limb_t nmod_poly_resultant_hgcd(const nmod_poly_t A, const nmod_poly_t B); +ulong nmod_poly_resultant_hgcd(const nmod_poly_t A, const nmod_poly_t B); -mp_limb_t _nmod_poly_resultant(mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, nmod_t mod); +ulong _nmod_poly_resultant(nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, nmod_t mod); -mp_limb_t nmod_poly_resultant(const nmod_poly_t A, const nmod_poly_t B); +ulong 
nmod_poly_resultant(const nmod_poly_t A, const nmod_poly_t B); -slong _nmod_poly_gcdinv(mp_limb_t *G, mp_limb_t *S, - const mp_limb_t *A, slong lenA, - const mp_limb_t *B, slong lenB, +slong _nmod_poly_gcdinv(ulong *G, ulong *S, + const ulong *A, slong lenA, + const ulong *B, slong lenB, const nmod_t mod); void nmod_poly_gcdinv(nmod_poly_t G, nmod_poly_t S, @@ -999,108 +978,108 @@ void nmod_poly_gcdinv(nmod_poly_t G, nmod_poly_t S, /* Discriminant **************************************************************/ -mp_limb_t _nmod_poly_discriminant(mp_srcptr poly, slong len, nmod_t mod); +ulong _nmod_poly_discriminant(nn_srcptr poly, slong len, nmod_t mod); -mp_limb_t nmod_poly_discriminant(const nmod_poly_t f); +ulong nmod_poly_discriminant(const nmod_poly_t f); /* Square roots **************************************************************/ -void _nmod_poly_invsqrt_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod); +void _nmod_poly_invsqrt_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod); void nmod_poly_invsqrt_series(nmod_poly_t g, const nmod_poly_t h, slong n); -void _nmod_poly_sqrt_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod); +void _nmod_poly_sqrt_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod); void nmod_poly_sqrt_series(nmod_poly_t g, const nmod_poly_t h, slong n); -int _nmod_poly_sqrt(mp_ptr s, mp_srcptr p, slong len, nmod_t mod); +int _nmod_poly_sqrt(nn_ptr s, nn_srcptr p, slong len, nmod_t mod); int nmod_poly_sqrt(nmod_poly_t b, const nmod_poly_t a); /* Power sums ****************************************************************/ -void _nmod_poly_power_sums_naive(mp_ptr res, mp_srcptr poly, slong len, slong n, nmod_t mod); +void _nmod_poly_power_sums_naive(nn_ptr res, nn_srcptr poly, slong len, slong n, nmod_t mod); void nmod_poly_power_sums_naive(nmod_poly_t res, const nmod_poly_t poly, slong n); -void _nmod_poly_power_sums_schoenhage(mp_ptr res, mp_srcptr poly, slong len, slong n, nmod_t mod); +void _nmod_poly_power_sums_schoenhage(nn_ptr res, nn_srcptr poly, slong len, slong n, nmod_t mod); void nmod_poly_power_sums_schoenhage(nmod_poly_t res, const nmod_poly_t poly, slong n); -void _nmod_poly_power_sums(mp_ptr res, mp_srcptr poly, slong len, slong n, nmod_t mod); +void _nmod_poly_power_sums(nn_ptr res, nn_srcptr poly, slong len, slong n, nmod_t mod); void nmod_poly_power_sums(nmod_poly_t res, const nmod_poly_t poly, slong n); -void _nmod_poly_power_sums_to_poly_naive(mp_ptr res, mp_srcptr poly, slong len, nmod_t mod); +void _nmod_poly_power_sums_to_poly_naive(nn_ptr res, nn_srcptr poly, slong len, nmod_t mod); void nmod_poly_power_sums_to_poly_naive(nmod_poly_t res, const nmod_poly_t Q); -void _nmod_poly_power_sums_to_poly_schoenhage(mp_ptr res, mp_srcptr poly, slong len, nmod_t mod); +void _nmod_poly_power_sums_to_poly_schoenhage(nn_ptr res, nn_srcptr poly, slong len, nmod_t mod); void nmod_poly_power_sums_to_poly_schoenhage(nmod_poly_t res, const nmod_poly_t Q); -void _nmod_poly_power_sums_to_poly(mp_ptr res, mp_srcptr poly, slong len, nmod_t mod); +void _nmod_poly_power_sums_to_poly(nn_ptr res, nn_srcptr poly, slong len, nmod_t mod); void nmod_poly_power_sums_to_poly(nmod_poly_t res, const nmod_poly_t Q); /* Transcendental functions **************************************************/ -void _nmod_poly_atan_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod); +void _nmod_poly_atan_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod); void nmod_poly_atan_series(nmod_poly_t g, const nmod_poly_t h, 
slong n); -void _nmod_poly_tan_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod); +void _nmod_poly_tan_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod); void nmod_poly_tan_series(nmod_poly_t g, const nmod_poly_t h, slong n); -void _nmod_poly_asin_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod); +void _nmod_poly_asin_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod); void nmod_poly_asin_series(nmod_poly_t g, const nmod_poly_t h, slong n); -void _nmod_poly_sin_series(mp_ptr g, mp_srcptr h, slong n, nmod_t mod); +void _nmod_poly_sin_series(nn_ptr g, nn_srcptr h, slong n, nmod_t mod); void nmod_poly_sin_series(nmod_poly_t g, const nmod_poly_t h, slong n); -void _nmod_poly_cos_series(mp_ptr g, mp_srcptr h, slong n, nmod_t mod); +void _nmod_poly_cos_series(nn_ptr g, nn_srcptr h, slong n, nmod_t mod); void nmod_poly_cos_series(nmod_poly_t g, const nmod_poly_t h, slong n); -void _nmod_poly_asinh_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod); +void _nmod_poly_asinh_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod); void nmod_poly_asinh_series(nmod_poly_t g, const nmod_poly_t h, slong n); -void _nmod_poly_atanh_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod); +void _nmod_poly_atanh_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod); void nmod_poly_atanh_series(nmod_poly_t g, const nmod_poly_t h, slong n); -void _nmod_poly_sinh_series(mp_ptr g, mp_srcptr h, slong n, nmod_t mod); +void _nmod_poly_sinh_series(nn_ptr g, nn_srcptr h, slong n, nmod_t mod); void nmod_poly_sinh_series(nmod_poly_t g, const nmod_poly_t h, slong n); -void _nmod_poly_cosh_series(mp_ptr g, mp_srcptr h, slong n, nmod_t mod); +void _nmod_poly_cosh_series(nn_ptr g, nn_srcptr h, slong n, nmod_t mod); void nmod_poly_cosh_series(nmod_poly_t g, const nmod_poly_t h, slong n); -void _nmod_poly_tanh_series(mp_ptr g, mp_srcptr h, slong n, nmod_t mod); +void _nmod_poly_tanh_series(nn_ptr g, nn_srcptr h, slong n, nmod_t mod); void nmod_poly_tanh_series(nmod_poly_t g, const nmod_poly_t h, slong n); -void _nmod_poly_log_series(mp_ptr res, mp_srcptr f, slong flen, slong n, nmod_t mod); +void _nmod_poly_log_series(nn_ptr res, nn_srcptr f, slong flen, slong n, nmod_t mod); void nmod_poly_log_series(nmod_poly_t res, const nmod_poly_t f, slong n); -void _nmod_poly_exp_expinv_series(mp_ptr f, mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod); -void _nmod_poly_exp_series(mp_ptr f, mp_srcptr h, slong hlen, slong n, nmod_t mod); +void _nmod_poly_exp_expinv_series(nn_ptr f, nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod); +void _nmod_poly_exp_series(nn_ptr f, nn_srcptr h, slong hlen, slong n, nmod_t mod); void nmod_poly_exp_series(nmod_poly_t f, const nmod_poly_t h, slong n); /* Special polynomials *******************************************************/ -int _nmod_poly_conway(mp_ptr op, ulong prime, slong deg); +int _nmod_poly_conway(nn_ptr op, ulong prime, slong deg); ulong _nmod_poly_conway_rand(slong * degree, flint_rand_t state, int type); /* Products *****************************************************************/ -void nmod_poly_product_roots_nmod_vec(nmod_poly_t poly, mp_srcptr xs, slong n); +void nmod_poly_product_roots_nmod_vec(nmod_poly_t poly, nn_srcptr xs, slong n); -void _nmod_poly_product_roots_nmod_vec(mp_ptr poly, - mp_srcptr xs, slong n, nmod_t mod); +void _nmod_poly_product_roots_nmod_vec(nn_ptr poly, + nn_srcptr xs, slong n, nmod_t mod); void _nmod_poly_split_rabin(nmod_poly_t a, nmod_poly_t b, const 
nmod_poly_t f, nmod_poly_t t, nmod_poly_t t2, flint_rand_t randstate); -int nmod_poly_find_distinct_nonzero_roots(mp_limb_t * roots, +int nmod_poly_find_distinct_nonzero_roots(ulong * roots, const nmod_poly_t P); @@ -1170,7 +1149,7 @@ void nmod_poly_inflate(nmod_poly_t result, const nmod_poly_t input, slong inflat /* Characteristic polynomial and minimal polynomial */ /* FIXME: These should be moved to nmod_mat.h. */ -void _nmod_mat_charpoly_berkowitz(mp_ptr p, const nmod_mat_t M, nmod_t mod); +void _nmod_mat_charpoly_berkowitz(nn_ptr p, const nmod_mat_t M, nmod_t mod); void nmod_mat_charpoly_berkowitz(nmod_poly_t p, const nmod_mat_t M); void nmod_mat_charpoly_danilevsky(nmod_poly_t p, const nmod_mat_t M); void nmod_mat_charpoly(nmod_poly_t p, const nmod_mat_t M); @@ -1192,7 +1171,7 @@ typedef nmod_berlekamp_massey_struct nmod_berlekamp_massey_t[1]; void nmod_berlekamp_massey_init( nmod_berlekamp_massey_t B, - mp_limb_t p); + ulong p); void nmod_berlekamp_massey_start_over( nmod_berlekamp_massey_t B); @@ -1202,14 +1181,14 @@ void nmod_berlekamp_massey_clear( void nmod_berlekamp_massey_set_prime( nmod_berlekamp_massey_t B, - mp_limb_t p); + ulong p); void nmod_berlekamp_massey_print( const nmod_berlekamp_massey_t B); void nmod_berlekamp_massey_add_points( nmod_berlekamp_massey_t B, - const mp_limb_t * a, + const ulong * a, slong count); void nmod_berlekamp_massey_add_zeros( @@ -1218,12 +1197,12 @@ void nmod_berlekamp_massey_add_zeros( void nmod_berlekamp_massey_add_point( nmod_berlekamp_massey_t B, - mp_limb_t a); + ulong a); int nmod_berlekamp_massey_reduce( nmod_berlekamp_massey_t B); -NMOD_POLY_INLINE const mp_limb_t * nmod_berlekamp_massey_points( +NMOD_POLY_INLINE const ulong * nmod_berlekamp_massey_points( const nmod_berlekamp_massey_t B) { return B->points->coeffs; diff --git a/src/nmod_poly/KS2_pack.c b/src/nmod_poly/KS2_pack.c index ac31809fc8..bcb1ed5cb5 100644 --- a/src/nmod_poly/KS2_pack.c +++ b/src/nmod_poly/KS2_pack.c @@ -16,14 +16,14 @@ Same as _nmod_poly_KS2_pack(), but requires b <= FLINT_BITS. 
*/ void -_nmod_poly_KS2_pack1(mp_ptr res, mp_srcptr op, slong n, slong s, +_nmod_poly_KS2_pack1(nn_ptr res, nn_srcptr op, slong n, slong s, ulong b, ulong k, slong r) { /* where to write the next limb */ - mp_ptr dest = res; + nn_ptr dest = res; /* limb currently being filled */ - mp_limb_t buf; + ulong buf; /* number of bits used in buf; always in [0, FLINT_BITS) */ ulong buf_b, buf_b_old; @@ -69,14 +69,14 @@ _nmod_poly_KS2_pack1(mp_ptr res, mp_srcptr op, slong n, slong s, } void -_nmod_poly_KS2_pack(mp_ptr res, mp_srcptr op, slong n, slong s, +_nmod_poly_KS2_pack(nn_ptr res, nn_srcptr op, slong n, slong s, ulong b, ulong k, slong r) { /* where to write the next limb */ - mp_ptr dest = res; + nn_ptr dest = res; /* limb currently being filled */ - mp_limb_t buf; + ulong buf; /* number of bits used in buf; always in [0, FLINT_BITS) */ ulong buf_b, buf_b_old; diff --git a/src/nmod_poly/KS2_reduce.c b/src/nmod_poly/KS2_reduce.c index 854e63f88b..1e72e56319 100644 --- a/src/nmod_poly/KS2_reduce.c +++ b/src/nmod_poly/KS2_reduce.c @@ -14,7 +14,7 @@ #include "nmod_poly.h" void -_nmod_poly_KS2_reduce(mp_ptr res, slong s, mp_srcptr op, slong n, ulong w, +_nmod_poly_KS2_reduce(nn_ptr res, slong s, nn_srcptr op, slong n, ulong w, nmod_t mod) { if (w == 1) @@ -38,8 +38,8 @@ _nmod_poly_KS2_reduce(mp_ptr res, slong s, mp_srcptr op, slong n, ulong w, Same as _nmod_poly_KS2_recover_reduce(), but requires 0 < 2 * b <= FLINT_BITS */ void -_nmod_poly_KS2_recover_reduce1(mp_ptr res, slong s, mp_srcptr op1, - mp_srcptr op2, slong n, ulong b, +_nmod_poly_KS2_recover_reduce1(nn_ptr res, slong s, nn_srcptr op1, + nn_srcptr op2, slong n, ulong b, nmod_t mod) { ulong mask = (UWORD(1) << b) - 1; @@ -76,8 +76,8 @@ _nmod_poly_KS2_recover_reduce1(mp_ptr res, slong s, mp_srcptr op1, FLINT_BITS < 2 * b < 2*FLINT_BITS */ void -_nmod_poly_KS2_recover_reduce2(mp_ptr res, slong s, mp_srcptr op1, - mp_srcptr op2, slong n, ulong b, +_nmod_poly_KS2_recover_reduce2(nn_ptr res, slong s, nn_srcptr op1, + nn_srcptr op2, slong n, ulong b, nmod_t mod) { /* @@ -118,8 +118,8 @@ _nmod_poly_KS2_recover_reduce2(mp_ptr res, slong s, mp_srcptr op1, Same as _nmod_poly_KS2_recover_reduce(), but requires b == FLINT_BITS */ void -_nmod_poly_KS2_recover_reduce2b(mp_ptr res, slong s, mp_srcptr op1, - mp_srcptr op2, slong n, ulong FLINT_UNUSED(b), +_nmod_poly_KS2_recover_reduce2b(nn_ptr res, slong s, nn_srcptr op1, + nn_srcptr op2, slong n, ulong FLINT_UNUSED(b), nmod_t mod) { /* @@ -156,8 +156,8 @@ _nmod_poly_KS2_recover_reduce2b(mp_ptr res, slong s, mp_srcptr op1, 2 * FLINT_BITS < 2 * b <= 3 * FLINT_BITS. */ void -_nmod_poly_KS2_recover_reduce3(mp_ptr res, slong s, mp_srcptr op1, - mp_srcptr op2, slong n, ulong b, +_nmod_poly_KS2_recover_reduce3(nn_ptr res, slong s, nn_srcptr op1, + nn_srcptr op2, slong n, ulong b, nmod_t mod) { /* @@ -213,8 +213,8 @@ _nmod_poly_KS2_recover_reduce3(mp_ptr res, slong s, mp_srcptr op1, Dispatches to one of the above routines depending on b. */ void -_nmod_poly_KS2_recover_reduce(mp_ptr res, slong s, mp_srcptr op1, - mp_srcptr op2, slong n, ulong b, +_nmod_poly_KS2_recover_reduce(nn_ptr res, slong s, nn_srcptr op1, + nn_srcptr op2, slong n, ulong b, nmod_t mod) { if (2 * b <= FLINT_BITS) diff --git a/src/nmod_poly/KS2_unpack.c b/src/nmod_poly/KS2_unpack.c index 66c328fe90..4ec063f379 100644 --- a/src/nmod_poly/KS2_unpack.c +++ b/src/nmod_poly/KS2_unpack.c @@ -17,11 +17,11 @@ (i.e. 
writes one word per coefficient) */ void -_nmod_poly_KS2_unpack1(mp_ptr res, mp_srcptr op, slong n, ulong b, +_nmod_poly_KS2_unpack1(nn_ptr res, nn_srcptr op, slong n, ulong b, ulong k) { /* limb we're currently extracting bits from */ - mp_limb_t buf = 0; + ulong buf = 0; /* number of bits currently in buf; always in [0, FLINT_BITS) */ ulong buf_b = 0; @@ -92,11 +92,11 @@ _nmod_poly_KS2_unpack1(mp_ptr res, mp_srcptr op, slong n, ulong b, (i.e. writes two words per coefficient) */ void -_nmod_poly_KS2_unpack2(mp_ptr res, mp_srcptr op, slong n, ulong b, +_nmod_poly_KS2_unpack2(nn_ptr res, nn_srcptr op, slong n, ulong b, ulong k) { /* limb we're currently extracting bits from */ - mp_limb_t buf = 0; + ulong buf = 0; /* number of bits currently in buf; always in [0, FLINT_BITS) */ ulong buf_b = 0; @@ -183,11 +183,11 @@ _nmod_poly_KS2_unpack2(mp_ptr res, mp_srcptr op, slong n, ulong b, (i.e. writes three words per coefficient) */ void -_nmod_poly_KS2_unpack3(mp_ptr res, mp_srcptr op, slong n, ulong b, +_nmod_poly_KS2_unpack3(nn_ptr res, nn_srcptr op, slong n, ulong b, ulong k) { /* limb we're currently extracting bits from */ - mp_limb_t buf = 0; + ulong buf = 0; /* number of bits currently in buf; always in [0, FLINT_BITS) */ ulong buf_b = 0, mask; @@ -251,7 +251,7 @@ _nmod_poly_KS2_unpack3(mp_ptr res, mp_srcptr op, slong n, ulong b, void -_nmod_poly_KS2_unpack(mp_ptr res, mp_srcptr op, slong n, ulong b, +_nmod_poly_KS2_unpack(nn_ptr res, nn_srcptr op, slong n, ulong b, ulong k) { if (b <= FLINT_BITS) diff --git a/src/nmod_poly/add.c b/src/nmod_poly/add.c index 12fac1934a..39d6f6c558 100644 --- a/src/nmod_poly/add.c +++ b/src/nmod_poly/add.c @@ -14,7 +14,7 @@ #include "nmod_poly.h" void -_nmod_poly_add(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, +_nmod_poly_add(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, nmod_t mod) { slong i, min = FLINT_MIN(len1, len2); diff --git a/src/nmod_poly/asin_series.c b/src/nmod_poly/asin_series.c index 832ebf3d7b..2bc6b6c93f 100644 --- a/src/nmod_poly/asin_series.c +++ b/src/nmod_poly/asin_series.c @@ -13,7 +13,7 @@ #include "gr_poly.h" void -_nmod_poly_asin_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod) +_nmod_poly_asin_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod) { gr_ctx_t ctx; _gr_ctx_init_nmod(ctx, &mod); diff --git a/src/nmod_poly/asinh_series.c b/src/nmod_poly/asinh_series.c index 55e1930af3..3773878a76 100644 --- a/src/nmod_poly/asinh_series.c +++ b/src/nmod_poly/asinh_series.c @@ -13,7 +13,7 @@ #include "gr_poly.h" void -_nmod_poly_asinh_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod) +_nmod_poly_asinh_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod) { gr_ctx_t ctx; _gr_ctx_init_nmod(ctx, &mod); diff --git a/src/nmod_poly/atan_series.c b/src/nmod_poly/atan_series.c index a47e78baaf..39d28bb12d 100644 --- a/src/nmod_poly/atan_series.c +++ b/src/nmod_poly/atan_series.c @@ -13,7 +13,7 @@ #include "gr_poly.h" void -_nmod_poly_atan_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod) +_nmod_poly_atan_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod) { gr_ctx_t ctx; _gr_ctx_init_nmod(ctx, &mod); diff --git a/src/nmod_poly/atanh_series.c b/src/nmod_poly/atanh_series.c index 0982ee8e97..3e2f1ba654 100644 --- a/src/nmod_poly/atanh_series.c +++ b/src/nmod_poly/atanh_series.c @@ -13,7 +13,7 @@ #include "gr_poly.h" void -_nmod_poly_atanh_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod) +_nmod_poly_atanh_series(nn_ptr g, nn_srcptr 
h, slong hlen, slong n, nmod_t mod) { gr_ctx_t ctx; _gr_ctx_init_nmod(ctx, &mod); diff --git a/src/nmod_poly/berlekamp_massey.c b/src/nmod_poly/berlekamp_massey.c index 9f831b46bb..4e46700eb5 100644 --- a/src/nmod_poly/berlekamp_massey.c +++ b/src/nmod_poly/berlekamp_massey.c @@ -57,7 +57,7 @@ typedef nmod_berlekamp_massey_struct nmod_berlekamp_massey_t[1]; */ void nmod_berlekamp_massey_init( nmod_berlekamp_massey_t B, - mp_limb_t p) + ulong p) { nmod_t fpctx; nmod_init(&fpctx, p); @@ -101,7 +101,7 @@ void nmod_berlekamp_massey_clear( /* setting the prime also starts over */ void nmod_berlekamp_massey_set_prime( nmod_berlekamp_massey_t B, - mp_limb_t p) + ulong p) { nmod_t fpctx; nmod_init(&fpctx, p); @@ -129,7 +129,7 @@ void nmod_berlekamp_massey_print( void nmod_berlekamp_massey_add_points( nmod_berlekamp_massey_t B, - const mp_limb_t * a, + const ulong * a, slong count) { slong i; @@ -158,7 +158,7 @@ void nmod_berlekamp_massey_add_zeros( void nmod_berlekamp_massey_add_point( nmod_berlekamp_massey_t B, - mp_limb_t a) + ulong a) { slong old_length = B->points->length; nmod_poly_fit_length(B->points, old_length + 1); diff --git a/src/nmod_poly/bit_pack.c b/src/nmod_poly/bit_pack.c index 0f9ecc003e..c29bc9bc1a 100644 --- a/src/nmod_poly/bit_pack.c +++ b/src/nmod_poly/bit_pack.c @@ -16,12 +16,12 @@ /* Assumes length > 0, bits > 0. */ void -_nmod_poly_bit_pack(mp_ptr res, mp_srcptr poly, slong len, flint_bitcnt_t bits) +_nmod_poly_bit_pack(nn_ptr res, nn_srcptr poly, slong len, flint_bitcnt_t bits) { slong i; ulong current_bit = 0, current_limb = 0; ulong total_limbs = (len * bits - 1) / FLINT_BITS + 1; - mp_limb_t temp_lower, temp_upper; + ulong temp_lower, temp_upper; res[0] = WORD(0); diff --git a/src/nmod_poly/bit_unpack.c b/src/nmod_poly/bit_unpack.c index 8147c2b40b..e79384cf20 100644 --- a/src/nmod_poly/bit_unpack.c +++ b/src/nmod_poly/bit_unpack.c @@ -17,18 +17,18 @@ /* Assumes len > 0, bits > 0. 
*/ void -_nmod_poly_bit_unpack(mp_ptr res, slong len, mp_srcptr mpn, flint_bitcnt_t bits, +_nmod_poly_bit_unpack(nn_ptr res, slong len, nn_srcptr mpn, flint_bitcnt_t bits, nmod_t mod) { slong i; ulong current_bit = 0, current_limb = 0; - mp_limb_t temp_lower, temp_upper, temp_upper2; + ulong temp_lower, temp_upper, temp_upper2; if (bits < FLINT_BITS) { ulong boundary_limit_bit = FLINT_BITS - bits; - mp_limb_t mask = (WORD(1) << bits) - WORD(1); + ulong mask = (WORD(1) << bits) - WORD(1); for (i = 0; i < len; i++) { @@ -78,7 +78,7 @@ _nmod_poly_bit_unpack(mp_ptr res, slong len, mp_srcptr mpn, flint_bitcnt_t bits, { ulong double_boundary_limit_bit = 2 * FLINT_BITS - bits; - mp_limb_t mask = (WORD(1) << (bits - FLINT_BITS)) - WORD(1); + ulong mask = (WORD(1) << (bits - FLINT_BITS)) - WORD(1); for (i = 0; i < len; i++) { @@ -134,7 +134,7 @@ _nmod_poly_bit_unpack(mp_ptr res, slong len, mp_srcptr mpn, flint_bitcnt_t bits, { ulong double_boundary_limit_bit = 3 * FLINT_BITS - bits; - mp_limb_t mask = (WORD(1) << (bits - 2 * FLINT_BITS)) - WORD(1); + ulong mask = (WORD(1) << (bits - 2 * FLINT_BITS)) - WORD(1); for (i = 0; i < len; i++) { diff --git a/src/nmod_poly/compose.c b/src/nmod_poly/compose.c index 9e87d412b7..17f27bec72 100644 --- a/src/nmod_poly/compose.c +++ b/src/nmod_poly/compose.c @@ -15,8 +15,8 @@ #include "gr_poly.h" void -_nmod_poly_compose(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, nmod_t mod) +_nmod_poly_compose(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, nmod_t mod) { if (len1 == 1) res[0] = poly1[0]; diff --git a/src/nmod_poly/compose_horner.c b/src/nmod_poly/compose_horner.c index 37183fe186..1800c85b1f 100644 --- a/src/nmod_poly/compose_horner.c +++ b/src/nmod_poly/compose_horner.c @@ -15,8 +15,8 @@ #include "nmod_poly.h" void -_nmod_poly_compose_horner(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, nmod_t mod) +_nmod_poly_compose_horner(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, nmod_t mod) { if (len1 == 1) { @@ -35,11 +35,11 @@ _nmod_poly_compose_horner(mp_ptr res, mp_srcptr poly1, slong len1, { const slong alloc = (len1 - 1) * (len2 - 1) + 1; slong i = len1 - 1, lenr = len2; - mp_ptr t, t1, t2; + nn_ptr t, t1, t2; TMP_INIT; TMP_START; - t = TMP_ALLOC(alloc * sizeof(mp_limb_t)); + t = TMP_ALLOC(alloc * sizeof(ulong)); if (len1 % 2 == 0) { @@ -63,7 +63,7 @@ _nmod_poly_compose_horner(mp_ptr res, mp_srcptr poly1, slong len1, { _nmod_poly_mul(t2, t1, lenr, poly2, len2, mod); lenr += len2 - 1; - FLINT_SWAP(mp_ptr, t1, t2); + FLINT_SWAP(nn_ptr, t1, t2); t1[0] = n_addmod(t1[0], poly1[i], mod.n); } diff --git a/src/nmod_poly/compose_mod.c b/src/nmod_poly/compose_mod.c index 2fbaedfd5d..4c5cc911de 100644 --- a/src/nmod_poly/compose_mod.c +++ b/src/nmod_poly/compose_mod.c @@ -14,8 +14,8 @@ #include "nmod_poly.h" void -_nmod_poly_compose_mod(mp_ptr res, - mp_srcptr f, slong lenf, mp_srcptr g, mp_srcptr h, slong lenh, nmod_t mod) +_nmod_poly_compose_mod(nn_ptr res, + nn_srcptr f, slong lenf, nn_srcptr g, nn_srcptr h, slong lenh, nmod_t mod) { if (lenh < 8 || lenf >= lenh) _nmod_poly_compose_mod_horner(res, f, lenf, g, h, lenh, mod); @@ -33,7 +33,7 @@ nmod_poly_compose_mod(nmod_poly_t res, slong len3 = poly3->length; slong len = len3 - 1; - mp_ptr ptr2; + nn_ptr ptr2; if (len3 == 0) { diff --git a/src/nmod_poly/compose_mod_brent_kung.c b/src/nmod_poly/compose_mod_brent_kung.c index 68d526d4f2..13b1f67382 100644 --- a/src/nmod_poly/compose_mod_brent_kung.c +++ 
b/src/nmod_poly/compose_mod_brent_kung.c @@ -16,12 +16,12 @@ #include "nmod_mat.h" void -_nmod_poly_compose_mod_brent_kung(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, - mp_srcptr poly3, slong len3, nmod_t mod) +_nmod_poly_compose_mod_brent_kung(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, + nn_srcptr poly3, slong len3, nmod_t mod) { nmod_mat_t A, B, C; - mp_ptr t, h; + nn_ptr t, h; slong i, n, m; n = len3 - 1; @@ -93,7 +93,7 @@ nmod_poly_compose_mod_brent_kung(nmod_poly_t res, slong len3 = poly3->length; slong len = len3 - 1; - mp_ptr ptr2; + nn_ptr ptr2; if (len3 == 0) { diff --git a/src/nmod_poly/compose_mod_brent_kung_precomp_preinv.c b/src/nmod_poly/compose_mod_brent_kung_precomp_preinv.c index 07da8d57e5..3bf1cdc8f1 100644 --- a/src/nmod_poly/compose_mod_brent_kung_precomp_preinv.c +++ b/src/nmod_poly/compose_mod_brent_kung_precomp_preinv.c @@ -21,7 +21,7 @@ void _nmod_poly_reduce_matrix_mod_poly(nmod_mat_t A, const nmod_mat_t B, const nmod_poly_t f) { - mp_ptr tmp1; + nn_ptr tmp1; slong n = f->length - 1; slong i, m = n_sqrt(n) + 1; @@ -39,8 +39,8 @@ _nmod_poly_reduce_matrix_mod_poly(nmod_mat_t A, const nmod_mat_t B, } void -_nmod_poly_precompute_matrix(nmod_mat_t A, mp_srcptr poly1, mp_srcptr poly2, - slong len2, mp_srcptr poly2inv, slong len2inv, nmod_t mod) +_nmod_poly_precompute_matrix(nmod_mat_t A, nn_srcptr poly1, nn_srcptr poly2, + slong len2, nn_srcptr poly2inv, slong len2inv, nmod_t mod) { /* Set rows of A to powers of poly1 */ slong n, m; @@ -62,7 +62,7 @@ nmod_poly_precompute_matrix(nmod_mat_t A, const nmod_poly_t poly1, slong len = len2 - 1; slong m = n_sqrt(len) + 1; - mp_ptr ptr1; + nn_ptr ptr1; if (len2 == 0) { @@ -98,12 +98,12 @@ nmod_poly_precompute_matrix(nmod_mat_t A, const nmod_poly_t poly1, } void -_nmod_poly_compose_mod_brent_kung_precomp_preinv(mp_ptr res, mp_srcptr poly1, - slong len1, const nmod_mat_t A, mp_srcptr poly3, slong len3, - mp_srcptr poly3inv, slong len3inv, nmod_t mod) +_nmod_poly_compose_mod_brent_kung_precomp_preinv(nn_ptr res, nn_srcptr poly1, + slong len1, const nmod_mat_t A, nn_srcptr poly3, slong len3, + nn_srcptr poly3inv, slong len3inv, nmod_t mod) { nmod_mat_t B, C; - mp_ptr t, h; + nn_ptr t, h; slong i, n, m; n = len3 - 1; diff --git a/src/nmod_poly/compose_mod_brent_kung_preinv.c b/src/nmod_poly/compose_mod_brent_kung_preinv.c index f230ddb8ab..f4086dac0c 100644 --- a/src/nmod_poly/compose_mod_brent_kung_preinv.c +++ b/src/nmod_poly/compose_mod_brent_kung_preinv.c @@ -18,12 +18,12 @@ #include "nmod_mat.h" void -_nmod_poly_compose_mod_brent_kung_preinv(mp_ptr res, mp_srcptr poly1, - slong len1, mp_srcptr poly2, mp_srcptr poly3, slong len3, - mp_srcptr poly3inv, slong len3inv, nmod_t mod) +_nmod_poly_compose_mod_brent_kung_preinv(nn_ptr res, nn_srcptr poly1, + slong len1, nn_srcptr poly2, nn_srcptr poly3, slong len3, + nn_srcptr poly3inv, slong len3inv, nmod_t mod) { nmod_mat_t A, B, C; - mp_ptr t, h; + nn_ptr t, h; slong i, n, m; n = len3 - 1; @@ -94,7 +94,7 @@ nmod_poly_compose_mod_brent_kung_preinv(nmod_poly_t res, slong len3 = poly3->length; slong len = len3 - 1; - mp_ptr ptr2; + nn_ptr ptr2; if (len3 == 0) { diff --git a/src/nmod_poly/compose_mod_brent_kung_vec_preinv.c b/src/nmod_poly/compose_mod_brent_kung_vec_preinv.c index c60bb44c34..286d51e208 100644 --- a/src/nmod_poly/compose_mod_brent_kung_vec_preinv.c +++ b/src/nmod_poly/compose_mod_brent_kung_vec_preinv.c @@ -20,11 +20,11 @@ void _nmod_poly_compose_mod_brent_kung_vec_preinv(nmod_poly_struct * res, const nmod_poly_struct * polys, slong 
FLINT_UNUSED(lenpolys), slong l, - mp_srcptr g, slong glen, mp_srcptr poly, slong len, - mp_srcptr polyinv, slong leninv, nmod_t mod) + nn_srcptr g, slong glen, nn_srcptr poly, slong len, + nn_srcptr polyinv, slong leninv, nmod_t mod) { nmod_mat_t A, B, C; - mp_ptr t, h; + nn_ptr t, h; slong i, j, k, n, m, len2 = l, len1; n = len - 1; diff --git a/src/nmod_poly/compose_mod_brent_kung_vec_preinv_threaded.c b/src/nmod_poly/compose_mod_brent_kung_vec_preinv_threaded.c index fda5bdf159..60a94b0889 100644 --- a/src/nmod_poly/compose_mod_brent_kung_vec_preinv_threaded.c +++ b/src/nmod_poly/compose_mod_brent_kung_vec_preinv_threaded.c @@ -23,11 +23,11 @@ typedef struct { nmod_poly_struct * res; nmod_mat_struct * C; - mp_srcptr h; - mp_srcptr poly; - mp_srcptr polyinv; + nn_srcptr h; + nn_srcptr poly; + nn_srcptr polyinv; nmod_t p; - mp_ptr t; + nn_ptr t; volatile slong * j; slong k; slong m; @@ -45,10 +45,10 @@ _nmod_poly_compose_mod_brent_kung_vec_preinv_worker(void * arg_ptr) compose_vec_arg_t arg = *((compose_vec_arg_t *) arg_ptr); slong i, j, k = arg.k, n = arg.len - 1; slong len = arg.len, leninv = arg.leninv; - mp_ptr t = arg.t; - mp_srcptr h = arg.h; - mp_srcptr poly = arg.poly; - mp_srcptr polyinv = arg.polyinv; + nn_ptr t = arg.t; + nn_srcptr h = arg.h; + nn_srcptr poly = arg.poly; + nn_srcptr polyinv = arg.polyinv; nmod_poly_struct * res = arg.res; nmod_mat_struct * C = arg.C; nmod_t p = arg.p; @@ -94,16 +94,16 @@ void _nmod_poly_compose_mod_brent_kung_vec_preinv_threaded_pool( nmod_poly_struct * res, const nmod_poly_struct * polys, slong FLINT_UNUSED(lenpolys), slong l, - mp_srcptr g, slong glen, - mp_srcptr poly, slong len, - mp_srcptr polyinv, slong leninv, + nn_srcptr g, slong glen, + nn_srcptr poly, slong len, + nn_srcptr polyinv, slong leninv, nmod_t mod, thread_pool_handle * threads, slong num_threads) { nmod_mat_t A, B, C; slong i, j, n, m, k, len2 = l, len1, shared_j = 0; - mp_ptr h; + nn_ptr h; compose_vec_arg_t * args; #if FLINT_USES_PTHREAD pthread_mutex_t mutex; diff --git a/src/nmod_poly/compose_mod_horner.c b/src/nmod_poly/compose_mod_horner.c index ec2fbf0d1c..7501e8b919 100644 --- a/src/nmod_poly/compose_mod_horner.c +++ b/src/nmod_poly/compose_mod_horner.c @@ -15,11 +15,11 @@ #include "nmod_poly.h" void -_nmod_poly_compose_mod_horner(mp_ptr res, - mp_srcptr f, slong lenf, mp_srcptr g, mp_srcptr h, slong lenh, nmod_t mod) +_nmod_poly_compose_mod_horner(nn_ptr res, + nn_srcptr f, slong lenf, nn_srcptr g, nn_srcptr h, slong lenh, nmod_t mod) { slong i, len; - mp_ptr t; + nn_ptr t; if (lenh == 1) return; @@ -65,7 +65,7 @@ nmod_poly_compose_mod_horner(nmod_poly_t res, slong len3 = poly3->length; slong len = len3 - 1; - mp_ptr ptr2; + nn_ptr ptr2; if (len3 == 0) { diff --git a/src/nmod_poly/compose_series.c b/src/nmod_poly/compose_series.c index be8a27d4b4..3e6bc4bcf8 100644 --- a/src/nmod_poly/compose_series.c +++ b/src/nmod_poly/compose_series.c @@ -14,8 +14,8 @@ #include "gr_poly.h" void -_nmod_poly_compose_series(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, slong n, nmod_t mod) +_nmod_poly_compose_series(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, slong n, nmod_t mod) { gr_ctx_t ctx; _gr_ctx_init_nmod(ctx, &mod); diff --git a/src/nmod_poly/conway.c b/src/nmod_poly/conway.c index 55aaf9f2d2..68ab537e57 100644 --- a/src/nmod_poly/conway.c +++ b/src/nmod_poly/conway.c @@ -23,7 +23,7 @@ extern uint8_t __nmod_poly_ntcoeffs0[]; #define num_nontrivialcoeffs __nmod_poly_numntcoeffs0 #define nontrivialcoeffs 
__nmod_poly_ntcoeffs0 static int -conway_polynomial_lt_260(mp_ptr op, ulong prime, ulong deg) +conway_polynomial_lt_260(nn_ptr op, ulong prime, ulong deg) { ulong ix, jx, kx; ulong numnt, sum; @@ -113,7 +113,7 @@ extern uint16_t __nmod_poly_cp_md_coeffs1[]; #define small_coeffs __nmod_poly_cp_sm_coeffs1 #define big_coeffs __nmod_poly_cp_md_coeffs1 static int -conway_polynomial_lt_300(mp_ptr op, ulong prime, ulong deg) +conway_polynomial_lt_300(nn_ptr op, ulong prime, ulong deg) { ulong ix = 0; const uint8_t * ap; @@ -227,7 +227,7 @@ extern uint16_t __nmod_poly_cp_md_coeffs2[]; #define small_coeffs __nmod_poly_cp_sm_coeffs2 #define big_coeffs __nmod_poly_cp_md_coeffs2 static int -conway_polynomial_lt_1000(mp_ptr op, ulong prime, ulong deg) +conway_polynomial_lt_1000(nn_ptr op, ulong prime, ulong deg) { ulong ix = 0; const uint8_t * ap; @@ -317,7 +317,7 @@ extern uint16_t __nmod_poly_cp_md_coeffs3[]; #define small_coeffs __nmod_poly_cp_sm_coeffs3 #define big_coeffs __nmod_poly_cp_md_coeffs3 static int -conway_polynomial_lt_3371(mp_ptr op, ulong prime, ulong deg) +conway_polynomial_lt_3371(nn_ptr op, ulong prime, ulong deg) { ulong ix = 0; const uint8_t * ap; @@ -404,7 +404,7 @@ extern uint16_t __nmod_poly_cp_md_coeffs4[]; #define small_coeffs __nmod_poly_cp_sm_coeffs4 #define big_coeffs __nmod_poly_cp_md_coeffs4 static int -conway_polynomial_lt_11000(mp_ptr op, ulong prime, ulong deg) +conway_polynomial_lt_11000(nn_ptr op, ulong prime, ulong deg) { ulong ix = 0; const uint8_t * ap; @@ -477,7 +477,7 @@ extern uint16_t __nmod_poly_cp_md_coeffs5[]; #define small_coeffs __nmod_poly_cp_sm_coeffs5 #define big_coeffs __nmod_poly_cp_md_coeffs5 static int -conway_polynomial_lt_65536(mp_ptr op, ulong prime, ulong deg) +conway_polynomial_lt_65536(nn_ptr op, ulong prime, ulong deg) { ulong ix = 0; const uint8_t * ap; @@ -539,7 +539,7 @@ extern uint32_t __nmod_poly_cp_lg_coeffs6[]; #define small_coeffs __nmod_poly_cp_sm_coeffs6 #define big_coeffs __nmod_poly_cp_lg_coeffs6 static int -conway_polynomial_lt_109988(mp_ptr op, ulong prime, ulong deg) +conway_polynomial_lt_109988(nn_ptr op, ulong prime, ulong deg) { ulong ix = 0; const uint8_t * ap; @@ -577,7 +577,7 @@ conway_polynomial_lt_109988(mp_ptr op, ulong prime, ulong deg) #undef big_coeffs int -_nmod_poly_conway(mp_ptr op, ulong prime, slong deg) +_nmod_poly_conway(nn_ptr op, ulong prime, slong deg) { if (deg <= 0) return 0; diff --git a/src/nmod_poly/cos_series.c b/src/nmod_poly/cos_series.c index 08a74716bb..3de5a1cc6d 100644 --- a/src/nmod_poly/cos_series.c +++ b/src/nmod_poly/cos_series.c @@ -15,9 +15,9 @@ #include "nmod_poly.h" void -_nmod_poly_cos_series(mp_ptr g, mp_srcptr h, slong n, nmod_t mod) +_nmod_poly_cos_series(nn_ptr g, nn_srcptr h, slong n, nmod_t mod) { - mp_ptr t, u; + nn_ptr t, u; t = _nmod_vec_init(n); u = _nmod_vec_init(n); @@ -37,7 +37,7 @@ _nmod_poly_cos_series(mp_ptr g, mp_srcptr h, slong n, nmod_t mod) void nmod_poly_cos_series(nmod_poly_t g, const nmod_poly_t h, slong n) { - mp_ptr h_coeffs; + nn_ptr h_coeffs; slong h_len = h->length; if (h_len > 0 && h->coeffs[0] != UWORD(0)) diff --git a/src/nmod_poly/cosh_series.c b/src/nmod_poly/cosh_series.c index 969e4f5420..cb6946a3c7 100644 --- a/src/nmod_poly/cosh_series.c +++ b/src/nmod_poly/cosh_series.c @@ -15,9 +15,9 @@ #include "nmod_poly.h" void -_nmod_poly_cosh_series(mp_ptr f, mp_srcptr h, slong n, nmod_t mod) +_nmod_poly_cosh_series(nn_ptr f, nn_srcptr h, slong n, nmod_t mod) { - mp_ptr g = _nmod_vec_init(n); + nn_ptr g = _nmod_vec_init(n); _nmod_poly_exp_expinv_series(f, 
g, h, n, n, mod); _nmod_vec_add(f, f, g, n, mod); _nmod_vec_scalar_mul_nmod(f, f, n, n_invmod(UWORD(2), mod.n), mod); @@ -27,7 +27,7 @@ _nmod_poly_cosh_series(mp_ptr f, mp_srcptr h, slong n, nmod_t mod) void nmod_poly_cosh_series(nmod_poly_t g, const nmod_poly_t h, slong n) { - mp_ptr g_coeffs, h_coeffs; + nn_ptr g_coeffs, h_coeffs; nmod_poly_t t1; slong h_len; diff --git a/src/nmod_poly/derivative.c b/src/nmod_poly/derivative.c index 2f89e9594d..1c64e86757 100644 --- a/src/nmod_poly/derivative.c +++ b/src/nmod_poly/derivative.c @@ -12,10 +12,10 @@ #include "ulong_extras.h" #include "nmod_poly.h" -void _nmod_poly_derivative(mp_ptr x_prime, mp_srcptr x, slong len, nmod_t mod) +void _nmod_poly_derivative(nn_ptr x_prime, nn_srcptr x, slong len, nmod_t mod) { slong j; - mp_limb_t k = 1; + ulong k = 1; for (j = 1; j < len; j++) { diff --git a/src/nmod_poly/discriminant.c b/src/nmod_poly/discriminant.c index 2ff1a931dd..ffc29843b0 100644 --- a/src/nmod_poly/discriminant.c +++ b/src/nmod_poly/discriminant.c @@ -13,12 +13,12 @@ #include "nmod_vec.h" #include "nmod_poly.h" -mp_limb_t -_nmod_poly_discriminant(mp_srcptr poly, slong len, nmod_t mod) +ulong +_nmod_poly_discriminant(nn_srcptr poly, slong len, nmod_t mod) { - mp_ptr der = _nmod_vec_init(len - 1); + nn_ptr der = _nmod_vec_init(len - 1); slong dlen = len - 1; - mp_limb_t res, pow; + ulong res, pow; _nmod_poly_derivative(der, poly, len, mod); NMOD_VEC_NORM(der, dlen); @@ -41,7 +41,7 @@ _nmod_poly_discriminant(mp_srcptr poly, slong len, nmod_t mod) return res; } -mp_limb_t +ulong nmod_poly_discriminant(const nmod_poly_t f) { const slong len = f->length; diff --git a/src/nmod_poly/div.c b/src/nmod_poly/div.c index e1a3f846ab..686e39f113 100644 --- a/src/nmod_poly/div.c +++ b/src/nmod_poly/div.c @@ -17,7 +17,7 @@ #include "gr_poly.h" void -_nmod_poly_div(mp_ptr Q, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod) +_nmod_poly_div(nn_ptr Q, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod) { if (lenA == lenB) { @@ -44,7 +44,7 @@ nmod_poly_div(nmod_poly_t Q, const nmod_poly_t A, const nmod_poly_t B) { nmod_poly_t tQ; - mp_ptr q; + nn_ptr q; slong A_len, B_len; B_len = B->length; diff --git a/src/nmod_poly/div_newton_n_preinv.c b/src/nmod_poly/div_newton_n_preinv.c index 385b920439..c479d90521 100644 --- a/src/nmod_poly/div_newton_n_preinv.c +++ b/src/nmod_poly/div_newton_n_preinv.c @@ -15,12 +15,12 @@ #include "nmod_poly.h" #include "ulong_extras.h" -void _nmod_poly_div_newton_n_preinv(mp_ptr Q, mp_srcptr A, slong lenA, - mp_srcptr FLINT_UNUSED(B), slong lenB, mp_srcptr Binv, +void _nmod_poly_div_newton_n_preinv(nn_ptr Q, nn_srcptr A, slong lenA, + nn_srcptr FLINT_UNUSED(B), slong lenB, nn_srcptr Binv, slong lenBinv, nmod_t mod) { const slong lenQ = lenA - lenB + 1; - mp_ptr Arev; + nn_ptr Arev; Arev = _nmod_vec_init(lenQ); _nmod_poly_reverse(Arev, A + (lenA - lenQ), lenQ, lenQ); @@ -38,7 +38,7 @@ void nmod_poly_div_newton_n_preinv(nmod_poly_t Q, const nmod_poly_t A, const slong lenA = A->length, lenB = B->length, lenQ = lenA - lenB + 1, lenBinv = Binv->length; - mp_ptr q; + nn_ptr q; if (lenB == 0) { @@ -65,7 +65,7 @@ void nmod_poly_div_newton_n_preinv(nmod_poly_t Q, const nmod_poly_t A, if (Q == A || Q == B || Q == Binv) { - q = (mp_ptr) flint_malloc(lenQ * sizeof(mp_limb_t)); + q = (nn_ptr) flint_malloc(lenQ * sizeof(ulong)); } else { diff --git a/src/nmod_poly/div_root.c b/src/nmod_poly/div_root.c index bdc0f8628f..5f82b77f5b 100644 --- a/src/nmod_poly/div_root.c +++ b/src/nmod_poly/div_root.c @@ -12,10 +12,10 @@ 
#include "nmod.h" #include "nmod_poly.h" -mp_limb_t -_nmod_poly_div_root(mp_ptr Q, mp_srcptr A, slong len, mp_limb_t c, nmod_t mod) +ulong +_nmod_poly_div_root(nn_ptr Q, nn_srcptr A, slong len, ulong c, nmod_t mod) { - mp_limb_t r, t; + ulong r, t; slong i; if (len < 2) @@ -35,11 +35,11 @@ _nmod_poly_div_root(mp_ptr Q, mp_srcptr A, slong len, mp_limb_t c, nmod_t mod) return r; } -mp_limb_t +ulong nmod_poly_div_root(nmod_poly_t Q, - const nmod_poly_t A, mp_limb_t c) + const nmod_poly_t A, ulong c) { - mp_limb_t rem; + ulong rem; slong len = A->length; diff --git a/src/nmod_poly/div_series.c b/src/nmod_poly/div_series.c index 15736606a1..fdaec0500c 100644 --- a/src/nmod_poly/div_series.c +++ b/src/nmod_poly/div_series.c @@ -16,12 +16,12 @@ #include "gr_poly.h" void -_nmod_poly_div_series_basecase_preinv1(mp_ptr Qinv, mp_srcptr P, slong Plen, - mp_srcptr Q, slong Qlen, slong n, mp_limb_t q, nmod_t mod) +_nmod_poly_div_series_basecase_preinv1(nn_ptr Qinv, nn_srcptr P, slong Plen, + nn_srcptr Q, slong Qlen, slong n, ulong q, nmod_t mod) { slong i, j, l; int nlimbs; - mp_limb_t s; + ulong s; Plen = FLINT_MIN(Plen, n); Qlen = FLINT_MIN(Qlen, n); @@ -57,10 +57,10 @@ _nmod_poly_div_series_basecase_preinv1(mp_ptr Qinv, mp_srcptr P, slong Plen, } void -_nmod_poly_div_series_basecase(mp_ptr Qinv, mp_srcptr P, slong Plen, - mp_srcptr Q, slong Qlen, slong n, nmod_t mod) +_nmod_poly_div_series_basecase(nn_ptr Qinv, nn_srcptr P, slong Plen, + nn_srcptr Q, slong Qlen, slong n, nmod_t mod) { - mp_limb_t q; + ulong q; q = Q[0]; if (q != 1) @@ -109,8 +109,8 @@ nmod_poly_div_series_basecase(nmod_poly_t Q, const nmod_poly_t A, } void -_nmod_poly_div_series(mp_ptr Q, mp_srcptr A, slong Alen, - mp_srcptr B, slong Blen, slong n, nmod_t mod) +_nmod_poly_div_series(nn_ptr Q, nn_srcptr A, slong Alen, + nn_srcptr B, slong Blen, slong n, nmod_t mod) { Blen = FLINT_MIN(Blen, n); diff --git a/src/nmod_poly/divexact.c b/src/nmod_poly/divexact.c index 60872d91d1..8c42aa7668 100644 --- a/src/nmod_poly/divexact.c +++ b/src/nmod_poly/divexact.c @@ -17,7 +17,7 @@ #include "gr_poly.h" void -_nmod_poly_divexact(mp_ptr Q, mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod) +_nmod_poly_divexact(nn_ptr Q, nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod) { if (lenA == lenB) { @@ -40,7 +40,7 @@ nmod_poly_divexact(nmod_poly_t Q, const nmod_poly_t A, const nmod_poly_t B) { nmod_poly_t tQ; - mp_ptr q; + nn_ptr q; slong A_len, B_len; B_len = B->length; diff --git a/src/nmod_poly/divides.c b/src/nmod_poly/divides.c index c9733c59e9..d7771d6570 100644 --- a/src/nmod_poly/divides.c +++ b/src/nmod_poly/divides.c @@ -13,10 +13,10 @@ #include "nmod_vec.h" #include "nmod_poly.h" -int _nmod_poly_divides(mp_ptr Q, mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, nmod_t mod) +int _nmod_poly_divides(nn_ptr Q, nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, nmod_t mod) { - mp_ptr R; + nn_ptr R; slong i, lenQ = lenA - lenB + 1; int res = 1; @@ -28,9 +28,9 @@ int _nmod_poly_divides(mp_ptr Q, mp_srcptr A, slong lenA, if (lenA < 2*lenB - 1) { slong offset = 0; - mp_ptr P; + nn_ptr P; - P = (mp_ptr) _nmod_vec_init(2*lenQ - 1); + P = (nn_ptr) _nmod_vec_init(2*lenQ - 1); _nmod_vec_zero(R, lenB - 1); @@ -86,7 +86,7 @@ int _nmod_poly_divides(mp_ptr Q, mp_srcptr A, slong lenA, int nmod_poly_divides(nmod_poly_t Q, const nmod_poly_t A, const nmod_poly_t B) { nmod_poly_t tQ; - mp_ptr q; + nn_ptr q; slong lenA, lenB; int res; @@ -126,11 +126,11 @@ int nmod_poly_divides(nmod_poly_t Q, const nmod_poly_t A, const nmod_poly_t B) /* check 
if (p, n) = mullow(poly1, len1, poly2, n, n) where len1 > 0, n >= 0 */ static int -_nmod_poly_mullow_classical_check(mp_srcptr p, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong n, nmod_t mod) +_nmod_poly_mullow_classical_check(nn_srcptr p, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong n, nmod_t mod) { slong i, j, bits, log_len, nlimbs, n1; - mp_limb_t c; + ulong c; len1 = FLINT_MIN(len1, n); @@ -181,8 +181,8 @@ _nmod_poly_mullow_classical_check(mp_srcptr p, mp_srcptr poly1, slong len1, return 1; } -int _nmod_poly_divides_classical(mp_ptr Q, mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, nmod_t mod) +int _nmod_poly_divides_classical(nn_ptr Q, nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, nmod_t mod) { slong lenQ = lenA - lenB + 1; int res; @@ -202,7 +202,7 @@ int nmod_poly_divides_classical(nmod_poly_t Q, const nmod_poly_t A, const nmod_poly_t B) { nmod_poly_t tQ; - mp_ptr q; + nn_ptr q; slong lenA, lenB; int res; diff --git a/src/nmod_poly/divrem.c b/src/nmod_poly/divrem.c index f6b5519eb4..5ce99bd6bc 100644 --- a/src/nmod_poly/divrem.c +++ b/src/nmod_poly/divrem.c @@ -16,10 +16,10 @@ #include "gr_poly.h" void -_nmod_poly_divrem(mp_ptr Q, mp_ptr R, mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, nmod_t mod) +_nmod_poly_divrem(nn_ptr Q, nn_ptr R, nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, nmod_t mod) { - mp_limb_t invB; + ulong invB; if (lenA <= 20 || lenB <= 8 || lenA - lenB <= 6 || (NMOD_BITS(mod) <= 61 && lenA <= 40) || @@ -50,7 +50,7 @@ void nmod_poly_divrem(nmod_poly_t Q, nmod_poly_t R, { const slong lenA = A->length, lenB = B->length; nmod_poly_t tQ, tR; - mp_ptr q, r; + nn_ptr q, r; if (lenB == 0) { diff --git a/src/nmod_poly/divrem_basecase.c b/src/nmod_poly/divrem_basecase.c index 6457c9cb99..1789d33099 100644 --- a/src/nmod_poly/divrem_basecase.c +++ b/src/nmod_poly/divrem_basecase.c @@ -31,8 +31,8 @@ slong NMOD_DIVREM_BC_ITCH(slong lenA, slong lenB, nmod_t mod) } -void _nmod_poly_divrem_q0_preinv1(mp_ptr Q, mp_ptr R, - mp_srcptr A, mp_srcptr B, slong lenA, mp_limb_t invL, nmod_t mod) +void _nmod_poly_divrem_q0_preinv1(nn_ptr Q, nn_ptr R, + nn_srcptr A, nn_srcptr B, slong lenA, ulong invL, nmod_t mod) { if (lenA == 1) { @@ -54,9 +54,9 @@ void _nmod_poly_divrem_q0_preinv1(mp_ptr Q, mp_ptr R, } } -void _nmod_poly_divrem_q1_preinv1(mp_ptr Q, mp_ptr R, - mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, - mp_limb_t invL, nmod_t mod) +void _nmod_poly_divrem_q1_preinv1(nn_ptr Q, nn_ptr R, + nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, + ulong invL, nmod_t mod) { if (lenB == 1) { @@ -64,7 +64,7 @@ void _nmod_poly_divrem_q1_preinv1(mp_ptr Q, mp_ptr R, } else { - mp_limb_t q0, q1, t, t0, t1, t2, s0, s1; + ulong q0, q1, t, t0, t1, t2, s0, s1; slong i; q1 = nmod_mul(A[lenA-1], invL, mod); @@ -119,14 +119,14 @@ void _nmod_poly_divrem_q1_preinv1(mp_ptr Q, mp_ptr R, } void -_nmod_poly_divrem_basecase_preinv1_1(mp_ptr Q, mp_ptr R, mp_ptr W, - mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, - mp_limb_t invL, +_nmod_poly_divrem_basecase_preinv1_1(nn_ptr Q, nn_ptr R, nn_ptr W, + nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, + ulong invL, nmod_t mod) { slong iR; - mp_ptr ptrQ = Q - lenB + 1; - mp_ptr R1 = W; + nn_ptr ptrQ = Q - lenB + 1; + nn_ptr R1 = W; flint_mpn_copyi(R1, A, lenA); @@ -142,7 +142,7 @@ _nmod_poly_divrem_basecase_preinv1_1(mp_ptr Q, mp_ptr R, mp_ptr W, if (lenB > 1) { - const mp_limb_t c = n_negmod(ptrQ[iR], mod.n); + const ulong c = n_negmod(ptrQ[iR], mod.n); mpn_addmul_1(R1 + iR - lenB + 1, B, lenB - 1, c); } } @@ -153,13 +153,13 
@@ _nmod_poly_divrem_basecase_preinv1_1(mp_ptr Q, mp_ptr R, mp_ptr W, } void -_nmod_poly_divrem_basecase_preinv1_2(mp_ptr Q, mp_ptr R, mp_ptr W, - mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, - mp_limb_t invL, +_nmod_poly_divrem_basecase_preinv1_2(nn_ptr Q, nn_ptr R, nn_ptr W, + nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, + ulong invL, nmod_t mod) { slong iR, i; - mp_ptr B2 = W, R2 = W + 2*(lenB - 1), ptrQ = Q - lenB + 1; + nn_ptr B2 = W, R2 = W + 2*(lenB - 1), ptrQ = Q - lenB + 1; for (i = 0; i < lenB - 1; i++) { @@ -174,7 +174,7 @@ _nmod_poly_divrem_basecase_preinv1_2(mp_ptr Q, mp_ptr R, mp_ptr W, for (iR = lenA - 1; iR >= lenB - 1; ) { - mp_limb_t r = + ulong r = n_ll_mod_preinv(R2[2 * iR + 1], R2[2 * iR], mod.n, mod.ninv); while ((iR + 1 >= lenB) && (r == WORD(0))) @@ -191,7 +191,7 @@ _nmod_poly_divrem_basecase_preinv1_2(mp_ptr Q, mp_ptr R, mp_ptr W, if (lenB > 1) { - const mp_limb_t c = n_negmod(ptrQ[iR], mod.n); + const ulong c = n_negmod(ptrQ[iR], mod.n); mpn_addmul_1(R2 + 2 * (iR - lenB + 1), B2, 2 * lenB - 2, c); } iR--; @@ -203,13 +203,13 @@ _nmod_poly_divrem_basecase_preinv1_2(mp_ptr Q, mp_ptr R, mp_ptr W, } void -_nmod_poly_divrem_basecase_preinv1_3(mp_ptr Q, mp_ptr R, mp_ptr W, - mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, - mp_limb_t invL, +_nmod_poly_divrem_basecase_preinv1_3(nn_ptr Q, nn_ptr R, nn_ptr W, + nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, + ulong invL, nmod_t mod) { slong iR, i; - mp_ptr B3 = W, R3 = W + 3*(lenB - 1), ptrQ = Q - lenB + 1; + nn_ptr B3 = W, R3 = W + 3*(lenB - 1), ptrQ = Q - lenB + 1; for (i = 0; i < lenB - 1; i++) { @@ -226,7 +226,7 @@ _nmod_poly_divrem_basecase_preinv1_3(mp_ptr Q, mp_ptr R, mp_ptr W, for (iR = lenA - 1; iR >= lenB - 1; ) { - mp_limb_t r = + ulong r = n_lll_mod_preinv(R3[3 * iR + 2], R3[3 * iR + 1], R3[3 * iR], mod.n, mod.ninv); @@ -244,7 +244,7 @@ _nmod_poly_divrem_basecase_preinv1_3(mp_ptr Q, mp_ptr R, mp_ptr W, if (lenB > 1) { - const mp_limb_t c = n_negmod(ptrQ[iR], mod.n); + const ulong c = n_negmod(ptrQ[iR], mod.n); mpn_addmul_1(R3 + 3 * (iR - lenB + 1), B3, 3 * lenB - 3, c); } iR--; @@ -257,9 +257,9 @@ _nmod_poly_divrem_basecase_preinv1_3(mp_ptr Q, mp_ptr R, mp_ptr W, } void -_nmod_poly_divrem_basecase_preinv1(mp_ptr Q, mp_ptr R, - mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, - mp_limb_t invB, +_nmod_poly_divrem_basecase_preinv1(nn_ptr Q, nn_ptr R, + nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, + ulong invB, nmod_t mod) { if (lenA == lenB + 1) @@ -276,12 +276,12 @@ _nmod_poly_divrem_basecase_preinv1(mp_ptr Q, mp_ptr R, } else { - mp_ptr W; + nn_ptr W; TMP_INIT; slong bits = 2 * (FLINT_BITS - mod.norm) + FLINT_BIT_COUNT(lenA - lenB + 1); TMP_START; - W = TMP_ALLOC(NMOD_DIVREM_BC_ITCH(lenA, lenB, mod)*sizeof(mp_limb_t)); + W = TMP_ALLOC(NMOD_DIVREM_BC_ITCH(lenA, lenB, mod)*sizeof(ulong)); if (bits <= FLINT_BITS) _nmod_poly_divrem_basecase_preinv1_1(Q, R, W, A, lenA, B, lenB, invB, mod); @@ -295,10 +295,10 @@ _nmod_poly_divrem_basecase_preinv1(mp_ptr Q, mp_ptr R, } void -_nmod_poly_divrem_basecase(mp_ptr Q, mp_ptr R, mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, nmod_t mod) +_nmod_poly_divrem_basecase(nn_ptr Q, nn_ptr R, nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, nmod_t mod) { - mp_limb_t invB; + ulong invB; invB = (B[lenB - 1] == 1) ? 
1 : n_invmod(B[lenB - 1], mod.n); _nmod_poly_divrem_basecase_preinv1(Q, R, A, lenA, B, lenB, invB, mod); @@ -309,7 +309,7 @@ void nmod_poly_divrem_basecase(nmod_poly_t Q, nmod_poly_t R, { const slong lenA = A->length, lenB = B->length; nmod_poly_t tQ, tR; - mp_ptr q, r; + nn_ptr q, r; if (lenB == 0) { diff --git a/src/nmod_poly/divrem_newton_n_preinv.c b/src/nmod_poly/divrem_newton_n_preinv.c index bbf0c0d03d..fa5ee901f4 100644 --- a/src/nmod_poly/divrem_newton_n_preinv.c +++ b/src/nmod_poly/divrem_newton_n_preinv.c @@ -13,9 +13,9 @@ #include "nmod_vec.h" #include "nmod_poly.h" -void _nmod_poly_divrem_newton_n_preinv(mp_ptr Q, mp_ptr R, mp_srcptr A, - slong lenA, mp_srcptr B, slong lenB, - mp_srcptr Binv, slong lenBinv, nmod_t mod) +void _nmod_poly_divrem_newton_n_preinv(nn_ptr Q, nn_ptr R, nn_srcptr A, + slong lenA, nn_srcptr B, slong lenB, + nn_srcptr Binv, slong lenBinv, nmod_t mod) { const slong lenQ = lenA - lenB + 1; @@ -44,7 +44,7 @@ void nmod_poly_divrem_newton_n_preinv(nmod_poly_t Q, nmod_poly_t R, const nmod_poly_t Binv) { const slong lenA = A->length, lenB = B->length, lenBinv = Binv->length; - mp_ptr q, r; + nn_ptr q, r; if (lenB == 0) { diff --git a/src/nmod_poly/evaluate_mat.c b/src/nmod_poly/evaluate_mat.c index 089bac8a90..1b93acee74 100644 --- a/src/nmod_poly/evaluate_mat.c +++ b/src/nmod_poly/evaluate_mat.c @@ -14,7 +14,7 @@ #include "nmod_mat.h" void -nmod_mat_one_addmul(nmod_mat_t dest, const nmod_mat_t mat, mp_limb_t c) +nmod_mat_one_addmul(nmod_mat_t dest, const nmod_mat_t mat, ulong c) { slong i, j; @@ -40,7 +40,7 @@ nmod_mat_one_addmul(nmod_mat_t dest, const nmod_mat_t mat, mp_limb_t c) } void -_nmod_poly_evaluate_mat_horner(nmod_mat_t dest, mp_srcptr poly, slong len, const nmod_mat_t c) +_nmod_poly_evaluate_mat_horner(nmod_mat_t dest, nn_srcptr poly, slong len, const nmod_mat_t c) { slong m = len-1; nmod_mat_t temp; diff --git a/src/nmod_poly/evaluate_nmod.c b/src/nmod_poly/evaluate_nmod.c index 730f8d222b..edb32bcbc6 100644 --- a/src/nmod_poly/evaluate_nmod.c +++ b/src/nmod_poly/evaluate_nmod.c @@ -12,11 +12,11 @@ #include "ulong_extras.h" #include "nmod_poly.h" -mp_limb_t -_nmod_poly_evaluate_nmod(mp_srcptr poly, slong len, mp_limb_t c, nmod_t mod) +ulong +_nmod_poly_evaluate_nmod(nn_srcptr poly, slong len, ulong c, nmod_t mod) { slong m; - mp_limb_t val; + ulong val; if (len == 0) return 0; @@ -38,8 +38,8 @@ _nmod_poly_evaluate_nmod(mp_srcptr poly, slong len, mp_limb_t c, nmod_t mod) return val; } -mp_limb_t -nmod_poly_evaluate_nmod(const nmod_poly_t poly, mp_limb_t c) +ulong +nmod_poly_evaluate_nmod(const nmod_poly_t poly, ulong c) { return _nmod_poly_evaluate_nmod(poly->coeffs, poly->length, c, poly->mod); } diff --git a/src/nmod_poly/evaluate_nmod_vec.c b/src/nmod_poly/evaluate_nmod_vec.c index 62da2692cb..58ff01ddea 100644 --- a/src/nmod_poly/evaluate_nmod_vec.c +++ b/src/nmod_poly/evaluate_nmod_vec.c @@ -14,8 +14,8 @@ #include "nmod_poly.h" void -_nmod_poly_evaluate_nmod_vec(mp_ptr ys, mp_srcptr coeffs, slong len, - mp_srcptr xs, slong n, nmod_t mod) +_nmod_poly_evaluate_nmod_vec(nn_ptr ys, nn_srcptr coeffs, slong len, + nn_srcptr xs, slong n, nmod_t mod) { if (len < 32) _nmod_poly_evaluate_nmod_vec_iter(ys, coeffs, len, xs, n, mod); @@ -24,16 +24,16 @@ _nmod_poly_evaluate_nmod_vec(mp_ptr ys, mp_srcptr coeffs, slong len, } void -nmod_poly_evaluate_nmod_vec(mp_ptr ys, - const nmod_poly_t poly, mp_srcptr xs, slong n) +nmod_poly_evaluate_nmod_vec(nn_ptr ys, + const nmod_poly_t poly, nn_srcptr xs, slong n) { _nmod_poly_evaluate_nmod_vec(ys, poly->coeffs, 
poly->length, xs, n, poly->mod); } /* This gives some speedup for small lengths. */ -static inline void _nmod_poly_rem_2(mp_ptr r, mp_srcptr a, slong al, - mp_srcptr b, slong bl, nmod_t mod) +static inline void _nmod_poly_rem_2(nn_ptr r, nn_srcptr a, slong al, + nn_srcptr b, slong bl, nmod_t mod) { if (al == 2) r[0] = nmod_sub(a[0], nmod_mul(a[1], b[0], mod), mod); @@ -42,13 +42,13 @@ static inline void _nmod_poly_rem_2(mp_ptr r, mp_srcptr a, slong al, } void -_nmod_poly_evaluate_nmod_vec_fast_precomp(mp_ptr vs, mp_srcptr poly, - slong plen, const mp_ptr * tree, slong len, nmod_t mod) +_nmod_poly_evaluate_nmod_vec_fast_precomp(nn_ptr vs, nn_srcptr poly, + slong plen, const nn_ptr * tree, slong len, nmod_t mod) { slong height, i, j, pow, left; slong tree_height; slong tlen; - mp_ptr t, u, swap, pa, pb, pc; + nn_ptr t, u, swap, pa, pb, pc; /* avoid worrying about some degenerate cases */ if (len < 2 || plen < 2) @@ -120,10 +120,10 @@ _nmod_poly_evaluate_nmod_vec_fast_precomp(mp_ptr vs, mp_srcptr poly, _nmod_vec_clear(u); } -void _nmod_poly_evaluate_nmod_vec_fast(mp_ptr ys, mp_srcptr poly, slong plen, - mp_srcptr xs, slong n, nmod_t mod) +void _nmod_poly_evaluate_nmod_vec_fast(nn_ptr ys, nn_srcptr poly, slong plen, + nn_srcptr xs, slong n, nmod_t mod) { - mp_ptr * tree; + nn_ptr * tree; tree = _nmod_poly_tree_alloc(n); _nmod_poly_tree_build(tree, xs, n, mod); @@ -132,16 +132,16 @@ void _nmod_poly_evaluate_nmod_vec_fast(mp_ptr ys, mp_srcptr poly, slong plen, } void -nmod_poly_evaluate_nmod_vec_fast(mp_ptr ys, - const nmod_poly_t poly, mp_srcptr xs, slong n) +nmod_poly_evaluate_nmod_vec_fast(nn_ptr ys, + const nmod_poly_t poly, nn_srcptr xs, slong n) { _nmod_poly_evaluate_nmod_vec_fast(ys, poly->coeffs, poly->length, xs, n, poly->mod); } void -_nmod_poly_evaluate_nmod_vec_iter(mp_ptr ys, mp_srcptr coeffs, slong len, - mp_srcptr xs, slong n, nmod_t mod) +_nmod_poly_evaluate_nmod_vec_iter(nn_ptr ys, nn_srcptr coeffs, slong len, + nn_srcptr xs, slong n, nmod_t mod) { slong i; for (i = 0; i < n; i++) @@ -149,8 +149,8 @@ _nmod_poly_evaluate_nmod_vec_iter(mp_ptr ys, mp_srcptr coeffs, slong len, } void -nmod_poly_evaluate_nmod_vec_iter(mp_ptr ys, - const nmod_poly_t poly, mp_srcptr xs, slong n) +nmod_poly_evaluate_nmod_vec_iter(nn_ptr ys, + const nmod_poly_t poly, nn_srcptr xs, slong n) { _nmod_poly_evaluate_nmod_vec_iter(ys, poly->coeffs, poly->length, xs, n, poly->mod); diff --git a/src/nmod_poly/exp_series.c b/src/nmod_poly/exp_series.c index cdd6518b30..0c98bc8a08 100644 --- a/src/nmod_poly/exp_series.c +++ b/src/nmod_poly/exp_series.c @@ -14,7 +14,7 @@ #include "gr_poly.h" void -_nmod_poly_exp_series(mp_ptr f, mp_srcptr h, slong hlen, slong n, nmod_t mod) +_nmod_poly_exp_series(nn_ptr f, nn_srcptr h, slong hlen, slong n, nmod_t mod) { gr_ctx_t ctx; _gr_ctx_init_nmod(ctx, &mod); @@ -23,7 +23,7 @@ _nmod_poly_exp_series(mp_ptr f, mp_srcptr h, slong hlen, slong n, nmod_t mod) /* todo: gr version */ void -_nmod_poly_exp_expinv_series(mp_ptr f, mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod) +_nmod_poly_exp_expinv_series(nn_ptr f, nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod) { _nmod_poly_exp_series(f, h, hlen, n, mod); _nmod_poly_inv_series(g, f, n, n, mod); diff --git a/src/nmod_poly/find_distinct_nonzero_roots.c b/src/nmod_poly/find_distinct_nonzero_roots.c index 2da99c5e3b..a135de5c3e 100644 --- a/src/nmod_poly/find_distinct_nonzero_roots.c +++ b/src/nmod_poly/find_distinct_nonzero_roots.c @@ -57,10 +57,10 @@ void _nmod_poly_split_rabin( The modulus of P is assumed to be prime. 
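    As an illustration only (a minimal sketch; the prime 17 and the quadratic
    below are made-up values, not part of this change), usage with the new
    ulong-based signature might look like:

        ulong roots[2];
        nmod_poly_t P;

        nmod_poly_init(P, 17);              /* the modulus must be prime */
        nmod_poly_set_coeff_ui(P, 2, 1);    /* P = x^2 - 7x + 10 = (x - 2)(x - 5) */
        nmod_poly_set_coeff_ui(P, 1, 10);   /* -7 = 10 (mod 17) */
        nmod_poly_set_coeff_ui(P, 0, 10);
        if (nmod_poly_find_distinct_nonzero_roots(roots, P))
        {
            /* success: roots[] holds 2 and 5 in some order */
        }
        nmod_poly_clear(P);

    Only the limb types change in this hunk; the semantics are untouched.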
*/ int nmod_poly_find_distinct_nonzero_roots( - mp_limb_t * roots, + ulong * roots, const nmod_poly_t P) { - mp_limb_t a0, a1; + ulong a0, a1; int success; slong i, roots_idx, sp; nmod_poly_struct * a , * b; diff --git a/src/nmod_poly/gcd.c b/src/nmod_poly/gcd.c index 33af5a3c74..d9c886fb60 100644 --- a/src/nmod_poly/gcd.c +++ b/src/nmod_poly/gcd.c @@ -17,8 +17,8 @@ #include "nmod_poly.h" #include "gr_poly.h" -slong _nmod_poly_gcd(mp_ptr G, mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, nmod_t mod) +slong _nmod_poly_gcd(nn_ptr G, nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, nmod_t mod) { slong cutoff = NMOD_BITS(mod) <= 8 ? NMOD_POLY_SMALL_GCD_CUTOFF : NMOD_POLY_GCD_CUTOFF; @@ -39,7 +39,7 @@ void nmod_poly_gcd(nmod_poly_t G, { slong lenA = A->length, lenB = B->length, lenG; nmod_poly_t tG; - mp_ptr g; + nn_ptr g; if (lenA == 0) /* lenA = lenB = 0 */ { @@ -80,13 +80,13 @@ void nmod_poly_gcd(nmod_poly_t G, } } -slong _nmod_poly_gcd_euclidean(mp_ptr G, mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, nmod_t mod) +slong _nmod_poly_gcd_euclidean(nn_ptr G, nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, nmod_t mod) { slong steps; slong lenR1, lenR2 = 0, lenG = 0; - mp_ptr F, R1, R2, R3 = G, T; + nn_ptr F, R1, R2, R3 = G, T; if (lenB == 1) { @@ -160,7 +160,7 @@ void nmod_poly_gcd_euclidean(nmod_poly_t G, { slong lenA = A->length, lenB = B->length, lenG; nmod_poly_t tG; - mp_ptr g; + nn_ptr g; if (lenA == 0) /* lenA = lenB = 0 */ { @@ -201,8 +201,8 @@ void nmod_poly_gcd_euclidean(nmod_poly_t G, } } -slong _nmod_poly_gcd_hgcd(mp_ptr G, mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, nmod_t mod) +slong _nmod_poly_gcd_hgcd(nn_ptr G, nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, nmod_t mod) { slong cutoff = NMOD_BITS(mod) <= 8 ? NMOD_POLY_SMALL_GCD_CUTOFF : NMOD_POLY_GCD_CUTOFF; slong lenG = 0; @@ -223,7 +223,7 @@ void nmod_poly_gcd_hgcd(nmod_poly_t G, { slong lenA = A->length, lenB = B->length, lenG; nmod_poly_t tG; - mp_ptr g; + nn_ptr g; if (lenA == 0) /* lenA = lenB = 0 */ { diff --git a/src/nmod_poly/gcdinv.c b/src/nmod_poly/gcdinv.c index cc11c92c46..99b0171885 100644 --- a/src/nmod_poly/gcdinv.c +++ b/src/nmod_poly/gcdinv.c @@ -15,12 +15,12 @@ #include "nmod_vec.h" #include "nmod_poly.h" -slong _nmod_poly_gcdinv(mp_limb_t *G, mp_limb_t *S, - const mp_limb_t *A, slong lenA, - const mp_limb_t *B, slong lenB, +slong _nmod_poly_gcdinv(ulong *G, ulong *S, + const ulong *A, slong lenA, + const ulong *B, slong lenB, const nmod_t mod) { - mp_limb_t *T; + ulong *T; slong ans; T = _nmod_vec_init(lenA - 1); @@ -59,7 +59,7 @@ void nmod_poly_gcdinv(nmod_poly_t G, nmod_poly_t S, } else { - mp_limb_t *g, *s; + ulong *g, *s; slong lenG; if (G == A || G == B) @@ -105,7 +105,7 @@ void nmod_poly_gcdinv(nmod_poly_t G, nmod_poly_t S, if (nmod_poly_lead(G)[0] != WORD(1)) { - mp_limb_t inv; + ulong inv; inv = n_invmod(nmod_poly_lead(G)[0], A->mod.n); nmod_poly_scalar_mul_nmod(G, G, inv); diff --git a/src/nmod_poly/hgcd.c b/src/nmod_poly/hgcd.c index 22510746bf..bdb4326e26 100644 --- a/src/nmod_poly/hgcd.c +++ b/src/nmod_poly/hgcd.c @@ -18,9 +18,9 @@ XXX: Currently supports aliasing between {A,a} and {B,b}. 
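    For orientation (paraphrasing the existing nmod_poly documentation, not
    something introduced by this patch): the half-gcd computes a sign sigma
    and a 2x2 polynomial matrix M such that

        (A, B)^t = sigma * M^(-1) * (a, b)^t,

    with the usual half-gcd degree condition deg(A) >= ceil(deg(a)/2) > deg(B).
    The change below only swaps mp_ptr/mp_srcptr for nn_ptr/nn_srcptr.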
*/ -slong _nmod_poly_hgcd(mp_ptr *M, slong *lenM, - mp_ptr A, slong *lenA, mp_ptr B, slong *lenB, - mp_srcptr a, slong lena, mp_srcptr b, slong lenb, +slong _nmod_poly_hgcd(nn_ptr *M, slong *lenM, + nn_ptr A, slong *lenA, nn_ptr B, slong *lenB, + nn_srcptr a, slong lena, nn_srcptr b, slong lenb, nmod_t mod) { slong sgnM; @@ -113,7 +113,7 @@ slong nmod_poly_hgcd( nmod_poly_t A, nmod_poly_t B, const nmod_poly_t a, const nmod_poly_t b) { - mp_limb_t * M[4]; + ulong * M[4]; slong lenM[4]; slong sgnM; diff --git a/src/nmod_poly/inflate.c b/src/nmod_poly/inflate.c index 774bbfb316..2556e5b7ab 100644 --- a/src/nmod_poly/inflate.c +++ b/src/nmod_poly/inflate.c @@ -21,7 +21,7 @@ nmod_poly_inflate(nmod_poly_t result, const nmod_poly_t input, slong inflation) } else if (inflation == 0) { - mp_limb_t v = nmod_poly_evaluate_nmod(input, 1); + ulong v = nmod_poly_evaluate_nmod(input, 1); nmod_poly_zero(result); nmod_poly_set_coeff_ui(result, 0, v); } diff --git a/src/nmod_poly/init.c b/src/nmod_poly/init.c index 12d1d85608..e13f228733 100644 --- a/src/nmod_poly/init.c +++ b/src/nmod_poly/init.c @@ -14,7 +14,7 @@ #include "nmod_poly.h" void -nmod_poly_init_preinv(nmod_poly_t poly, mp_limb_t n, mp_limb_t ninv) +nmod_poly_init_preinv(nmod_poly_t poly, ulong n, ulong ninv) { poly->coeffs = NULL; @@ -27,17 +27,17 @@ nmod_poly_init_preinv(nmod_poly_t poly, mp_limb_t n, mp_limb_t ninv) } void -nmod_poly_init(nmod_poly_t poly, mp_limb_t n) +nmod_poly_init(nmod_poly_t poly, ulong n) { nmod_poly_init_preinv(poly, n, n_preinvert_limb(n)); } void nmod_poly_init2_preinv(nmod_poly_t poly, - mp_limb_t n, mp_limb_t ninv, slong alloc) + ulong n, ulong ninv, slong alloc) { if (alloc) - poly->coeffs = (mp_ptr) flint_malloc(alloc * sizeof(mp_limb_t)); + poly->coeffs = (nn_ptr) flint_malloc(alloc * sizeof(ulong)); else poly->coeffs = NULL; @@ -51,7 +51,7 @@ nmod_poly_init2_preinv(nmod_poly_t poly, } void -nmod_poly_init2(nmod_poly_t poly, mp_limb_t n, slong alloc) +nmod_poly_init2(nmod_poly_t poly, ulong n, slong alloc) { nmod_poly_init2_preinv(poly, n, n_preinvert_limb(n), alloc); } diff --git a/src/nmod_poly/integral.c b/src/nmod_poly/integral.c index 0af07e553a..4c8e18317c 100644 --- a/src/nmod_poly/integral.c +++ b/src/nmod_poly/integral.c @@ -12,12 +12,12 @@ #include "ulong_extras.h" #include "nmod_poly.h" -void _nmod_poly_integral(mp_ptr res, mp_srcptr poly, slong len, nmod_t mod) +void _nmod_poly_integral(nn_ptr res, nn_srcptr poly, slong len, nmod_t mod) { if (len > 2) { slong k; - mp_limb_t t, u; + ulong t, u; res[len - 1] = poly[len - 2]; t = len - 1; diff --git a/src/nmod_poly/interpolate_nmod_vec.c b/src/nmod_poly/interpolate_nmod_vec.c index 1e4c834d7f..8b551c39e9 100644 --- a/src/nmod_poly/interpolate_nmod_vec.c +++ b/src/nmod_poly/interpolate_nmod_vec.c @@ -14,8 +14,8 @@ #include "nmod_poly.h" void -_nmod_poly_interpolate_nmod_vec(mp_ptr poly, - mp_srcptr xs, mp_srcptr ys, slong n, nmod_t mod) +_nmod_poly_interpolate_nmod_vec(nn_ptr poly, + nn_srcptr xs, nn_srcptr ys, slong n, nmod_t mod) { if (n < 6) _nmod_poly_interpolate_nmod_vec_newton(poly, xs, ys, n, mod); @@ -27,7 +27,7 @@ _nmod_poly_interpolate_nmod_vec(mp_ptr poly, void nmod_poly_interpolate_nmod_vec(nmod_poly_t poly, - mp_srcptr xs, mp_srcptr ys, slong n) + nn_srcptr xs, nn_srcptr ys, slong n) { if (n == 0) { @@ -44,10 +44,10 @@ nmod_poly_interpolate_nmod_vec(nmod_poly_t poly, } void -_nmod_poly_interpolate_nmod_vec_barycentric(mp_ptr poly, - mp_srcptr xs, mp_srcptr ys, slong n, nmod_t mod) +_nmod_poly_interpolate_nmod_vec_barycentric(nn_ptr poly, + 
nn_srcptr xs, nn_srcptr ys, slong n, nmod_t mod) { - mp_ptr P, Q, w; + nn_ptr P, Q, w; slong i, j; if (n == 1) @@ -89,7 +89,7 @@ _nmod_poly_interpolate_nmod_vec_barycentric(mp_ptr poly, void nmod_poly_interpolate_nmod_vec_barycentric(nmod_poly_t poly, - mp_srcptr xs, mp_srcptr ys, slong n) + nn_srcptr xs, nn_srcptr ys, slong n) { if (n == 0) { @@ -106,9 +106,9 @@ nmod_poly_interpolate_nmod_vec_barycentric(nmod_poly_t poly, } void -_nmod_poly_interpolation_weights(mp_ptr w, const mp_ptr * tree, slong len, nmod_t mod) +_nmod_poly_interpolation_weights(nn_ptr w, const nn_ptr * tree, slong len, nmod_t mod) { - mp_ptr tmp; + nn_ptr tmp; slong i, n, height; if (len == 0) @@ -137,10 +137,10 @@ _nmod_poly_interpolation_weights(mp_ptr w, const mp_ptr * tree, slong len, nmod_ } void -_nmod_poly_interpolate_nmod_vec_fast_precomp(mp_ptr poly, mp_srcptr ys, - const mp_ptr * tree, mp_srcptr weights, slong len, nmod_t mod) +_nmod_poly_interpolate_nmod_vec_fast_precomp(nn_ptr poly, nn_srcptr ys, + const nn_ptr * tree, nn_srcptr weights, slong len, nmod_t mod) { - mp_ptr t, u, pa, pb; + nn_ptr t, u, pa, pb; slong i, pow, left; if (len == 0) @@ -184,11 +184,11 @@ _nmod_poly_interpolate_nmod_vec_fast_precomp(mp_ptr poly, mp_srcptr ys, void -_nmod_poly_interpolate_nmod_vec_fast(mp_ptr poly, - mp_srcptr xs, mp_srcptr ys, slong len, nmod_t mod) +_nmod_poly_interpolate_nmod_vec_fast(nn_ptr poly, + nn_srcptr xs, nn_srcptr ys, slong len, nmod_t mod) { - mp_ptr * tree; - mp_ptr w; + nn_ptr * tree; + nn_ptr w; tree = _nmod_poly_tree_alloc(len); _nmod_poly_tree_build(tree, xs, len, mod); @@ -204,7 +204,7 @@ _nmod_poly_interpolate_nmod_vec_fast(mp_ptr poly, void nmod_poly_interpolate_nmod_vec_fast(nmod_poly_t poly, - mp_srcptr xs, mp_srcptr ys, slong n) + nn_srcptr xs, nn_srcptr ys, slong n) { if (n == 0) { @@ -221,9 +221,9 @@ nmod_poly_interpolate_nmod_vec_fast(nmod_poly_t poly, } static void -_interpolate_newton(mp_ptr ys, mp_srcptr xs, slong n, nmod_t mod) +_interpolate_newton(nn_ptr ys, nn_srcptr xs, slong n, nmod_t mod) { - mp_limb_t p, q, t; + ulong p, q, t; slong i, j; for (i = 1; i < n; i++) @@ -242,9 +242,9 @@ _interpolate_newton(mp_ptr ys, mp_srcptr xs, slong n, nmod_t mod) } static void -_newton_to_monomial(mp_ptr ys, mp_srcptr xs, slong n, nmod_t mod) +_newton_to_monomial(nn_ptr ys, nn_srcptr xs, slong n, nmod_t mod) { - mp_limb_t t; + ulong t; slong i, j; for (i = n - 2; i >= 0; i--) @@ -266,8 +266,8 @@ _newton_to_monomial(mp_ptr ys, mp_srcptr xs, slong n, nmod_t mod) } void -_nmod_poly_interpolate_nmod_vec_newton(mp_ptr poly, mp_srcptr xs, - mp_srcptr ys, slong n, nmod_t mod) +_nmod_poly_interpolate_nmod_vec_newton(nn_ptr poly, nn_srcptr xs, + nn_srcptr ys, slong n, nmod_t mod) { if (n == 1) { @@ -284,7 +284,7 @@ _nmod_poly_interpolate_nmod_vec_newton(mp_ptr poly, mp_srcptr xs, void nmod_poly_interpolate_nmod_vec_newton(nmod_poly_t poly, - mp_srcptr xs, mp_srcptr ys, slong n) + nn_srcptr xs, nn_srcptr ys, slong n) { if (n == 0) { diff --git a/src/nmod_poly/inv_series.c b/src/nmod_poly/inv_series.c index 69691b1fe7..edd460a2be 100644 --- a/src/nmod_poly/inv_series.c +++ b/src/nmod_poly/inv_series.c @@ -17,7 +17,7 @@ #include "gr_poly.h" void -_nmod_poly_inv_series_basecase_preinv1(mp_ptr Qinv, mp_srcptr Q, slong Qlen, slong n, mp_limb_t q, nmod_t mod) +_nmod_poly_inv_series_basecase_preinv1(nn_ptr Qinv, nn_srcptr Q, slong Qlen, slong n, ulong q, nmod_t mod) { Qlen = FLINT_MIN(Qlen, n); @@ -31,7 +31,7 @@ _nmod_poly_inv_series_basecase_preinv1(mp_ptr Qinv, mp_srcptr Q, slong Qlen, slo { slong i, j, l; int 
nlimbs; - mp_limb_t s; + ulong s; nlimbs = _nmod_vec_dot_bound_limbs(FLINT_MIN(n, Qlen), mod); @@ -49,9 +49,9 @@ _nmod_poly_inv_series_basecase_preinv1(mp_ptr Qinv, mp_srcptr Q, slong Qlen, slo } void -_nmod_poly_inv_series_basecase(mp_ptr Qinv, mp_srcptr Q, slong Qlen, slong n, nmod_t mod) +_nmod_poly_inv_series_basecase(nn_ptr Qinv, nn_srcptr Q, slong Qlen, slong n, nmod_t mod) { - mp_limb_t q; + ulong q; q = Q[0]; if (q != 1) @@ -61,7 +61,7 @@ _nmod_poly_inv_series_basecase(mp_ptr Qinv, mp_srcptr Q, slong Qlen, slong n, nm } void -_nmod_poly_inv_series(mp_ptr Qinv, mp_srcptr Q, slong Qlen, slong n, nmod_t mod) +_nmod_poly_inv_series(nn_ptr Qinv, nn_srcptr Q, slong Qlen, slong n, nmod_t mod) { Qlen = FLINT_MIN(Qlen, n); @@ -138,7 +138,7 @@ nmod_poly_inv_series_basecase(nmod_poly_t Qinv, const nmod_poly_t Q, slong n) } void -_nmod_poly_inv_series_newton(mp_ptr Qinv, mp_srcptr Q, slong Qlen, slong n, nmod_t mod) +_nmod_poly_inv_series_newton(nn_ptr Qinv, nn_srcptr Q, slong Qlen, slong n, nmod_t mod) { _nmod_poly_inv_series(Qinv, Q, Qlen, n, mod); } diff --git a/src/nmod_poly/invmod.c b/src/nmod_poly/invmod.c index 128e0dea9c..9e34b0b279 100644 --- a/src/nmod_poly/invmod.c +++ b/src/nmod_poly/invmod.c @@ -15,11 +15,11 @@ #include "nmod_vec.h" #include "nmod_poly.h" -int _nmod_poly_invmod(mp_limb_t *A, - const mp_limb_t *B, slong lenB, - const mp_limb_t *P, slong lenP, const nmod_t mod) +int _nmod_poly_invmod(ulong *A, + const ulong *B, slong lenB, + const ulong *P, slong lenP, const nmod_t mod) { - mp_limb_t *G; + ulong *G; slong lenG; NMOD_VEC_NORM(B, lenB); @@ -30,7 +30,7 @@ int _nmod_poly_invmod(mp_limb_t *A, if (lenG == 1 && G[0] != WORD(1)) { - mp_limb_t invG; + ulong invG; invG = n_invmod(G[0], mod.n); _nmod_vec_scalar_mul_nmod(A, A, lenP - 1, invG, mod); @@ -45,7 +45,7 @@ int nmod_poly_invmod(nmod_poly_t A, const nmod_poly_t B, const nmod_poly_t P) { const slong lenB = B->length, lenP = P->length; - mp_limb_t *t; + ulong *t; int ans; if (lenP < 2) diff --git a/src/nmod_poly/invsqrt_series.c b/src/nmod_poly/invsqrt_series.c index b1f10b1105..36444923a8 100644 --- a/src/nmod_poly/invsqrt_series.c +++ b/src/nmod_poly/invsqrt_series.c @@ -14,7 +14,7 @@ #include "gr_poly.h" void -_nmod_poly_invsqrt_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod) +_nmod_poly_invsqrt_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod) { gr_ctx_t ctx; _gr_ctx_init_nmod(ctx, &mod); diff --git a/src/nmod_poly/io.c b/src/nmod_poly/io.c index e6ab031761..93c3fbe300 100644 --- a/src/nmod_poly/io.c +++ b/src/nmod_poly/io.c @@ -118,7 +118,7 @@ int nmod_poly_print_pretty(const nmod_poly_t a, const char * x) { return nmod_po int nmod_poly_fread(FILE * f, nmod_poly_t poly) { slong i, length; - mp_limb_t n; + ulong n; if (flint_fscanf(f, "%wd %wu", &length, &n) != 2) return 0; diff --git a/src/nmod_poly/log_series.c b/src/nmod_poly/log_series.c index 35b4ef4603..e4e421d1a5 100644 --- a/src/nmod_poly/log_series.c +++ b/src/nmod_poly/log_series.c @@ -13,7 +13,7 @@ #include "gr_poly.h" void -_nmod_poly_log_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod) +_nmod_poly_log_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod) { gr_ctx_t ctx; _gr_ctx_init_nmod(ctx, &mod); diff --git a/src/nmod_poly/make_monic.c b/src/nmod_poly/make_monic.c index 4b1de19144..9988683483 100644 --- a/src/nmod_poly/make_monic.c +++ b/src/nmod_poly/make_monic.c @@ -14,10 +14,10 @@ #include "nmod_vec.h" #include "nmod_poly.h" -void _nmod_poly_make_monic(mp_ptr output, - mp_srcptr input, slong len, 
nmod_t mod) +void _nmod_poly_make_monic(nn_ptr output, + nn_srcptr input, slong len, nmod_t mod) { - mp_limb_t inv; + ulong inv; inv = n_invmod(input[len - 1], mod.n); _nmod_vec_scalar_mul_nmod(output, input, len, inv, mod); diff --git a/src/nmod_poly/mul.c b/src/nmod_poly/mul.c index 78829197de..e9ce0cc028 100644 --- a/src/nmod_poly/mul.c +++ b/src/nmod_poly/mul.c @@ -33,8 +33,8 @@ static const short fft_sqr_tab[] = {1420, 1420, 1353, 964, 689, 569, 407, 353, 3 #endif -void _nmod_poly_mul(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, nmod_t mod) +void _nmod_poly_mul(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, nmod_t mod) { slong bits, cutoff_len; diff --git a/src/nmod_poly/mul_KS.c b/src/nmod_poly/mul_KS.c index f0924f7657..2801c1b67c 100644 --- a/src/nmod_poly/mul_KS.c +++ b/src/nmod_poly/mul_KS.c @@ -14,11 +14,11 @@ #include "nmod_poly.h" void -_nmod_poly_mul_KS(mp_ptr out, mp_srcptr in1, slong len1, - mp_srcptr in2, slong len2, flint_bitcnt_t bits, nmod_t mod) +_nmod_poly_mul_KS(nn_ptr out, nn_srcptr in1, slong len1, + nn_srcptr in2, slong len2, flint_bitcnt_t bits, nmod_t mod) { slong len_out = len1 + len2 - 1, limbs1, limbs2; - mp_ptr tmp, mpn1, mpn2, res; + nn_ptr tmp, mpn1, mpn2, res; int squaring; TMP_INIT; @@ -46,7 +46,7 @@ _nmod_poly_mul_KS(mp_ptr out, mp_srcptr in1, slong len1, limbs2 = (len2 * bits - 1) / FLINT_BITS + 1; TMP_START; - tmp = TMP_ALLOC(sizeof(mp_limb_t) * (limbs1 + limbs2 + limbs1 + (squaring ? 0 : limbs2))); + tmp = TMP_ALLOC(sizeof(ulong) * (limbs1 + limbs2 + limbs1 + (squaring ? 0 : limbs2))); res = tmp; mpn1 = tmp + limbs1 + limbs2; mpn2 = squaring ? mpn1 : (mpn1 + limbs1); diff --git a/src/nmod_poly/mul_KS2.c b/src/nmod_poly/mul_KS2.c index a286023b80..ad192d08fc 100644 --- a/src/nmod_poly/mul_KS2.c +++ b/src/nmod_poly/mul_KS2.c @@ -18,15 +18,15 @@ Multiplication/squaring using Kronecker substitution at 2^b and -2^b. 
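    A toy illustration of the substitution (plain C over the integers, not
    the FLINT internals; the choice B = 2^8 and the inputs are arbitrary):

        /* multiply f = 1 + 2x and g = 3 + x by evaluating at B = 2^8 */
        unsigned long long fB = 1 + 2 * 256;   /* f(B) = 513    */
        unsigned long long gB = 3 + 1 * 256;   /* g(B) = 259    */
        unsigned long long hB = fB * gB;       /* h(B) = 132867 */
        /* the base-2^8 digits of h(B) are 3, 7, 2, so h = 3 + 7x + 2x^2 */

    Evaluating additionally at -2^b allows the even and odd halves of the
    product to be recovered from the sum and difference of the two values,
    which is roughly what the f1e/f1o splitting further down implements.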
*/ void -_nmod_poly_mul_KS2(mp_ptr res, mp_srcptr op1, slong n1, - mp_srcptr op2, slong n2, nmod_t mod) +_nmod_poly_mul_KS2(nn_ptr res, nn_srcptr op1, slong n1, + nn_srcptr op2, slong n2, nmod_t mod) { int sqr, v3m_neg; ulong bits, b, w; slong n1o, n1e, n2o, n2e, n3o, n3e, n3, k1, k2, k3; - mp_ptr v1_buf0, v2_buf0, v1_buf1, v2_buf1, v1_buf2, v2_buf2; - mp_ptr v1o, v1e, v1p, v1m, v2o, v2e, v2p, v2m, v3o, v3e, v3p, v3m; - mp_ptr z; + nn_ptr v1_buf0, v2_buf0, v1_buf1, v2_buf1, v1_buf2, v2_buf2; + nn_ptr v1o, v1e, v1p, v1m, v2o, v2e, v2p, v2m, v3o, v3e, v3p, v3m; + nn_ptr z; TMP_INIT; if (n2 == 1) @@ -77,7 +77,7 @@ _nmod_poly_mul_KS2(mp_ptr res, mp_srcptr op1, slong n1, k3 = k1 + k2; /* allocate space */ - v1_buf0 = TMP_ALLOC(sizeof(mp_limb_t) * 3 * k3); /* k1 limbs */ + v1_buf0 = TMP_ALLOC(sizeof(ulong) * 3 * k3); /* k1 limbs */ v2_buf0 = v1_buf0 + k1; /* k2 limbs */ v1_buf1 = v2_buf0 + k2; /* k1 limbs */ v2_buf1 = v1_buf1 + k1; /* k2 limbs */ @@ -101,7 +101,7 @@ _nmod_poly_mul_KS2(mp_ptr res, mp_srcptr op1, slong n1, v3e = v1_buf2; v3o = v1_buf0; - z = TMP_ALLOC(sizeof(mp_limb_t) * w * n3e); + z = TMP_ALLOC(sizeof(ulong) * w * n3e); if (!sqr) { @@ -126,8 +126,8 @@ _nmod_poly_mul_KS2(mp_ptr res, mp_srcptr op1, slong n1, compute |f1(-B)| = |f1e(B^2) - B * f1o(B^2)| and |f2(-B)| = |f2e(B^2) - B * f2o(B^2)| */ - v3m_neg = signed_mpn_sub_n(v1m, v1e, v1o, k1); - v3m_neg ^= signed_mpn_sub_n(v2m, v2e, v2o, k2); + v3m_neg = flint_mpn_signed_sub_n(v1m, v1e, v1o, k1); + v3m_neg ^= flint_mpn_signed_sub_n(v2m, v2e, v2o, k2); /* compute h(B) = f1(B) * f2(B) @@ -149,7 +149,7 @@ _nmod_poly_mul_KS2(mp_ptr res, mp_srcptr op1, slong n1, mpn_add_n(v1p, v1e, v1o, k1); /* compute |f1(-B)| = |f1e(B^2) - B * f1o(B^2)| */ - signed_mpn_sub_n(v1m, v1e, v1o, k1); + flint_mpn_signed_sub_n(v1m, v1e, v1o, k1); /* compute h(B) = f1(B)^2 diff --git a/src/nmod_poly/mul_KS4.c b/src/nmod_poly/mul_KS4.c index a92eb36d5a..30131a7a38 100644 --- a/src/nmod_poly/mul_KS4.c +++ b/src/nmod_poly/mul_KS4.c @@ -19,16 +19,16 @@ 2^(-b) and -2^(-b). 
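    The reciprocal evaluation points come down to coefficient reversal: for a
    polynomial f of length n, B^(n-1) * f(1/B) equals rev(f)(B). For example
    (illustrative values only), with f = 3 + 7x + 2x^2 one has
    B^2 * f(1/B) = 2 + 7B + 3B^2 = rev(f)(B), so the reciprocal evaluations
    reduce to packing the same coefficients in reverse order.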
*/ void -_nmod_poly_mul_KS4(mp_ptr res, mp_srcptr op1, slong n1, - mp_srcptr op2, slong n2, nmod_t mod) +_nmod_poly_mul_KS4(nn_ptr res, nn_srcptr op1, slong n1, + nn_srcptr op2, slong n2, nmod_t mod) { int sqr, v3m_neg; ulong bits, b, w, a1, a2, a3; slong n1o, n1e, n2o, n2e, n3o, n3e, n3, k1, k2, k3; - mp_ptr v1_buf0, v2_buf0, v1_buf1, v2_buf1, v1_buf2, v2_buf2, v1_buf3, v2_buf3, v1_buf4, v2_buf4; - mp_ptr v1on, v1en, v1pn, v1mn, v2on, v2en, v2pn, v2mn, v3on, v3en, v3pn, v3mn; - mp_ptr v1or, v1er, v1pr, v1mr, v2or, v2er, v2pr, v2mr, v3or, v3er, v3pr, v3mr; - mp_ptr z, zn, zr; + nn_ptr v1_buf0, v2_buf0, v1_buf1, v2_buf1, v1_buf2, v2_buf2, v1_buf3, v2_buf3, v1_buf4, v2_buf4; + nn_ptr v1on, v1en, v1pn, v1mn, v2on, v2en, v2pn, v2mn, v3on, v3en, v3pn, v3mn; + nn_ptr v1or, v1er, v1pr, v1mr, v2or, v2er, v2pr, v2mr, v3or, v3er, v3pr, v3mr; + nn_ptr z, zn, zr; TMP_INIT; if (n2 == 1) @@ -82,7 +82,7 @@ _nmod_poly_mul_KS4(mp_ptr res, mp_srcptr op1, slong n1, k3 = k1 + k2; /* allocate space */ - v1_buf0 = TMP_ALLOC(sizeof(mp_limb_t) * 5 * k3); /* k1 limbs */ + v1_buf0 = TMP_ALLOC(sizeof(ulong) * 5 * k3); /* k1 limbs */ v2_buf0 = v1_buf0 + k1; /* k2 limbs */ v1_buf1 = v2_buf0 + k2; /* k1 limbs */ v2_buf1 = v1_buf1 + k1; /* k2 limbs */ @@ -124,7 +124,7 @@ _nmod_poly_mul_KS4(mp_ptr res, mp_srcptr op1, slong n1, v3er = v1_buf2; v3or = v1_buf3; - z = TMP_ALLOC(sizeof(mp_limb_t) * 2*w*(n3e + 1)); + z = TMP_ALLOC(sizeof(ulong) * 2*w*(n3e + 1)); zn = z; zr = z + w*(n3e + 1); @@ -150,7 +150,7 @@ _nmod_poly_mul_KS4(mp_ptr res, mp_srcptr op1, slong n1, and |f1(-B)| = |f1e(B^2) - B * f1o(B^2)| */ mpn_add_n (v1pn, v1en, v1on, k1); - v3m_neg = signed_mpn_sub_n(v1mn, v1en, v1on, k1); + v3m_neg = flint_mpn_signed_sub_n(v1mn, v1en, v1on, k1); /* evaluate f2e(B^2) and B * f2o(B^2) */ _nmod_poly_KS2_pack(v2en, op2, n2e, 2, 2 * b, 0, k2); @@ -161,7 +161,7 @@ _nmod_poly_mul_KS4(mp_ptr res, mp_srcptr op1, slong n1, and |f2(-B)| = |f2e(B^2) - B * f2o(B^2)| */ mpn_add_n(v2pn, v2en, v2on, k2); - v3m_neg ^= signed_mpn_sub_n(v2mn, v2en, v2on, k2); + v3m_neg ^= flint_mpn_signed_sub_n(v2mn, v2en, v2on, k2); /* compute h(B) = f1(B) * f2(B) @@ -184,7 +184,7 @@ _nmod_poly_mul_KS4(mp_ptr res, mp_srcptr op1, slong n1, and |f1(-B)| = |f1e(B^2) - B * f1o(B^2)| */ mpn_add_n (v1pn, v1en, v1on, k1); - signed_mpn_sub_n(v1mn, v1en, v1on, k1); + flint_mpn_signed_sub_n(v1mn, v1en, v1on, k1); /* compute h(B) = f1(B)^2 @@ -246,7 +246,7 @@ _nmod_poly_mul_KS4(mp_ptr res, mp_srcptr op1, slong n1, |B^(n1-1) * f1e(1/B^2) - B^(n1-2) * f1o(1/B^2)| */ mpn_add_n(v1pr, v1er, v1or, k1); - v3m_neg = signed_mpn_sub_n(v1mr, v1er, v1or, k1); + v3m_neg = flint_mpn_signed_sub_n(v1mr, v1er, v1or, k1); /* evaluate B^(n2-1) * f2e(1/B^2) and B^(n2-2) * f2o(1/B^2) */ _nmod_poly_KS2_pack(v2er, op2 + 2*(n2e - 1), n2e, -2, 2 * b, a2, k2); @@ -259,7 +259,7 @@ _nmod_poly_mul_KS4(mp_ptr res, mp_srcptr op1, slong n1, |B^(n2-1) * f2e(1/B^2) - B^(n2-2) * f2o(1/B^2)| */ mpn_add_n (v2pr, v2er, v2or, k2); - v3m_neg ^= signed_mpn_sub_n(v2mr, v2er, v2or, k2); + v3m_neg ^= flint_mpn_signed_sub_n(v2mr, v2er, v2or, k2); /* compute B^(n3-1) * h(1/B) = @@ -286,7 +286,7 @@ _nmod_poly_mul_KS4(mp_ptr res, mp_srcptr op1, slong n1, |B^(n1-1) * f1e(1/B^2) - B^(n1-2) * f1o(1/B^2)| */ mpn_add_n(v1pr, v1er, v1or, k1); - signed_mpn_sub_n(v1mr, v1er, v1or, k1); + flint_mpn_signed_sub_n(v1mr, v1er, v1or, k1); /* compute B^(n3-1) * h(1/B) = (B^(n1-1) * f1(1/B))^2 diff --git a/src/nmod_poly/mul_classical.c b/src/nmod_poly/mul_classical.c index 1f5e5d70e5..eddb86f38a 100644 --- 
a/src/nmod_poly/mul_classical.c +++ b/src/nmod_poly/mul_classical.c @@ -17,12 +17,12 @@ /* Assumes poly1 and poly2 are not length 0. */ void -_nmod_poly_mul_classical(mp_ptr res, mp_srcptr poly1, - slong len1, mp_srcptr poly2, slong len2, nmod_t mod) +_nmod_poly_mul_classical(nn_ptr res, nn_srcptr poly1, + slong len1, nn_srcptr poly2, slong len2, nmod_t mod) { slong i, j, bits, log_len, nlimbs, n1, n2; int squaring; - mp_limb_t c; + ulong c; if (len1 == 1) { @@ -63,7 +63,7 @@ _nmod_poly_mul_classical(mp_ptr res, mp_srcptr poly1, { for (i = 0; i < len1; i++) { - mp_limb_t c = poly1[i]; + ulong c = poly1[i]; for (j = 0; j < len2; j++) res[i + j] += c * poly2[j]; diff --git a/src/nmod_poly/mulhigh.c b/src/nmod_poly/mulhigh.c index 6ea063ebc0..a25a1d3ae0 100644 --- a/src/nmod_poly/mulhigh.c +++ b/src/nmod_poly/mulhigh.c @@ -11,8 +11,8 @@ #include "nmod_poly.h" -void _nmod_poly_mulhigh(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, slong n, nmod_t mod) +void _nmod_poly_mulhigh(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, slong n, nmod_t mod) { slong bits, bits2; diff --git a/src/nmod_poly/mulhigh_classical.c b/src/nmod_poly/mulhigh_classical.c index e161d1a318..cd0d510f3e 100644 --- a/src/nmod_poly/mulhigh_classical.c +++ b/src/nmod_poly/mulhigh_classical.c @@ -9,14 +9,15 @@ (at your option) any later version. See . */ +#include #include "ulong_extras.h" #include "nmod_vec.h" #include "nmod_poly.h" /* Assumes poly1 and poly2 are not length 0. */ void -_nmod_poly_mulhigh_classical(mp_ptr res, mp_srcptr poly1, - slong len1, mp_srcptr poly2, slong len2, slong start, +_nmod_poly_mulhigh_classical(nn_ptr res, nn_srcptr poly1, + slong len1, nn_srcptr poly2, slong len2, slong start, nmod_t mod) { slong m, n; diff --git a/src/nmod_poly/mullow.c b/src/nmod_poly/mullow.c index 44d92a1027..2026b3889e 100644 --- a/src/nmod_poly/mullow.c +++ b/src/nmod_poly/mullow.c @@ -29,8 +29,8 @@ static const short fft_mullow_tab[] = {1115, 1115, 597, 569, 407, 321, 306, 279, #endif -void _nmod_poly_mullow(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, slong n, nmod_t mod) +void _nmod_poly_mullow(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, slong n, nmod_t mod) { slong bits; diff --git a/src/nmod_poly/mullow_KS.c b/src/nmod_poly/mullow_KS.c index 29227441de..40a3054c88 100644 --- a/src/nmod_poly/mullow_KS.c +++ b/src/nmod_poly/mullow_KS.c @@ -14,11 +14,11 @@ #include "nmod_poly.h" void -_nmod_poly_mullow_KS(mp_ptr out, mp_srcptr in1, slong len1, - mp_srcptr in2, slong len2, flint_bitcnt_t bits, slong n, nmod_t mod) +_nmod_poly_mullow_KS(nn_ptr out, nn_srcptr in1, slong len1, + nn_srcptr in2, slong len2, flint_bitcnt_t bits, slong n, nmod_t mod) { slong limbs1, limbs2; - mp_ptr tmp, mpn1, mpn2, res; + nn_ptr tmp, mpn1, mpn2, res; int squaring; TMP_INIT; @@ -48,7 +48,7 @@ _nmod_poly_mullow_KS(mp_ptr out, mp_srcptr in1, slong len1, limbs2 = (len2 * bits - 1) / FLINT_BITS + 1; TMP_START; - tmp = TMP_ALLOC(sizeof(mp_limb_t) * (limbs1 + limbs2 + limbs1 + (squaring ? 0 : limbs2))); + tmp = TMP_ALLOC(sizeof(ulong) * (limbs1 + limbs2 + limbs1 + (squaring ? 0 : limbs2))); res = tmp; mpn1 = tmp + limbs1 + limbs2; mpn2 = squaring ? 
mpn1 : (mpn1 + limbs1); diff --git a/src/nmod_poly/mullow_classical.c b/src/nmod_poly/mullow_classical.c index f850299dfc..fe4b5fee2c 100644 --- a/src/nmod_poly/mullow_classical.c +++ b/src/nmod_poly/mullow_classical.c @@ -17,12 +17,12 @@ /* Assumes poly1 and poly2 are not length 0 and 0 < n <= len1 + len2 - 1 */ void -_nmod_poly_mullow_classical(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, slong n, nmod_t mod) +_nmod_poly_mullow_classical(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, slong n, nmod_t mod) { slong i, j, bits, log_len, nlimbs, n1, n2; int squaring; - mp_limb_t c; + ulong c; len1 = FLINT_MIN(len1, n); len2 = FLINT_MIN(len2, n); @@ -68,7 +68,7 @@ _nmod_poly_mullow_classical(mp_ptr res, mp_srcptr poly1, slong len1, { for (i = 0; i < len1; i++) { - mp_limb_t c = poly1[i]; + ulong c = poly1[i]; for (j = 0; j < FLINT_MIN(len2, n - i); j++) res[i + j] += c * poly2[j]; diff --git a/src/nmod_poly/mulmod.c b/src/nmod_poly/mulmod.c index ad3dd82913..3f228e824c 100644 --- a/src/nmod_poly/mulmod.c +++ b/src/nmod_poly/mulmod.c @@ -12,11 +12,11 @@ #include "nmod_vec.h" #include "nmod_poly.h" -void _nmod_poly_mulmod(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, mp_srcptr f, +void _nmod_poly_mulmod(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, nn_srcptr f, slong lenf, nmod_t mod) { - mp_ptr T, Q; + nn_ptr T, Q; slong lenT, lenQ; lenT = len1 + len2 - 1; @@ -39,7 +39,7 @@ nmod_poly_mulmod(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t poly2, const nmod_poly_t f) { slong len1, len2, lenf; - mp_ptr fcoeffs; + nn_ptr fcoeffs; lenf = f->length; len1 = poly1->length; @@ -60,7 +60,7 @@ nmod_poly_mulmod(nmod_poly_t res, { if (f == res) { - fcoeffs = flint_malloc(sizeof(mp_limb_t) * lenf); + fcoeffs = flint_malloc(sizeof(ulong) * lenf); _nmod_vec_set(fcoeffs, f->coeffs, lenf); } else diff --git a/src/nmod_poly/mulmod_preinv.c b/src/nmod_poly/mulmod_preinv.c index ddd7a1ac52..a94aa22b68 100644 --- a/src/nmod_poly/mulmod_preinv.c +++ b/src/nmod_poly/mulmod_preinv.c @@ -13,11 +13,11 @@ #include "nmod_vec.h" #include "nmod_poly.h" -void _nmod_poly_mulmod_preinv(mp_ptr res, mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, mp_srcptr f, - slong lenf, mp_srcptr finv, slong lenfinv, nmod_t mod) +void _nmod_poly_mulmod_preinv(nn_ptr res, nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, nn_srcptr f, + slong lenf, nn_srcptr finv, slong lenfinv, nmod_t mod) { - mp_ptr T, Q; + nn_ptr T, Q; slong lenT, lenQ; lenT = len1 + len2 - 1; @@ -42,7 +42,7 @@ nmod_poly_mulmod_preinv(nmod_poly_t res, const nmod_poly_t poly1, const nmod_poly_t finv) { slong len1, len2, lenf; - mp_ptr fcoeffs; + nn_ptr fcoeffs; lenf = f->length; len1 = poly1->length; @@ -68,7 +68,7 @@ nmod_poly_mulmod_preinv(nmod_poly_t res, const nmod_poly_t poly1, { if (f == res) { - fcoeffs = (mp_ptr) flint_malloc(sizeof(mp_limb_t) * lenf); + fcoeffs = (nn_ptr) flint_malloc(sizeof(ulong) * lenf); _nmod_vec_set(fcoeffs, f->coeffs, lenf); } else fcoeffs = f->coeffs; diff --git a/src/nmod_poly/pow.c b/src/nmod_poly/pow.c index 2f38030e32..ea27453347 100644 --- a/src/nmod_poly/pow.c +++ b/src/nmod_poly/pow.c @@ -14,7 +14,7 @@ #include "nmod_poly.h" void -_nmod_poly_pow(mp_ptr res, mp_srcptr poly, slong len, ulong e, nmod_t mod) +_nmod_poly_pow(nn_ptr res, nn_srcptr poly, slong len, ulong e, nmod_t mod) { _nmod_poly_pow_binexp(res, poly, len, e, mod); } diff --git a/src/nmod_poly/pow_binexp.c b/src/nmod_poly/pow_binexp.c index 
0d7d944e20..8aa868c3d2 100644 --- a/src/nmod_poly/pow_binexp.c +++ b/src/nmod_poly/pow_binexp.c @@ -15,13 +15,13 @@ #include "nmod_poly.h" void -_nmod_poly_pow_binexp(mp_ptr res, mp_srcptr poly, slong len, ulong e, nmod_t mod) +_nmod_poly_pow_binexp(nn_ptr res, nn_srcptr poly, slong len, ulong e, nmod_t mod) { ulong bit = ~((~UWORD(0)) >> 1); slong rlen; slong alloc = (slong) e * (len - 1) + 1; - mp_ptr v = _nmod_vec_init(alloc); - mp_ptr R, S, T; + nn_ptr v = _nmod_vec_init(alloc); + nn_ptr R, S, T; /* Set bits to the bitmask with a 1 one place lower than the msb of e diff --git a/src/nmod_poly/pow_trunc.c b/src/nmod_poly/pow_trunc.c index 32cc664916..add57838cb 100644 --- a/src/nmod_poly/pow_trunc.c +++ b/src/nmod_poly/pow_trunc.c @@ -16,7 +16,7 @@ #include "nmod_poly.h" void -_nmod_poly_pow_trunc(mp_ptr res, mp_srcptr poly, +_nmod_poly_pow_trunc(nn_ptr res, nn_srcptr poly, ulong e, slong trunc, nmod_t mod) { _nmod_poly_pow_trunc_binexp(res, poly, e, trunc, mod); @@ -27,7 +27,7 @@ nmod_poly_pow_trunc(nmod_poly_t res, const nmod_poly_t poly, ulong e, slong trunc) { const slong len = poly->length; - mp_ptr p; + nn_ptr p; int pcopy = 0; if (len < 2 || e < UWORD(3) || trunc == 0) @@ -90,12 +90,12 @@ nmod_poly_pow_trunc(nmod_poly_t res, } void -_nmod_poly_pow_trunc_binexp(mp_ptr res, mp_srcptr poly, +_nmod_poly_pow_trunc_binexp(nn_ptr res, nn_srcptr poly, ulong e, slong trunc, nmod_t mod) { ulong bit = ~((~UWORD(0)) >> 1); - mp_ptr v = _nmod_vec_init(trunc); - mp_ptr R, S, T; + nn_ptr v = _nmod_vec_init(trunc); + nn_ptr R, S, T; /* Set bits to the bitmask with a 1 one place lower than the msb of e @@ -169,7 +169,7 @@ nmod_poly_pow_trunc_binexp(nmod_poly_t res, const nmod_poly_t poly, ulong e, slong trunc) { const slong len = poly->length; - mp_ptr p; + nn_ptr p; int pcopy = 0; if (len < 2 || e < UWORD(3) || trunc == 0) diff --git a/src/nmod_poly/power_sums.c b/src/nmod_poly/power_sums.c index f9c6e67713..1f0f65fceb 100644 --- a/src/nmod_poly/power_sums.c +++ b/src/nmod_poly/power_sums.c @@ -13,7 +13,7 @@ #include "nmod_poly.h" void -_nmod_poly_power_sums(mp_ptr res, mp_srcptr poly, slong len, slong n, +_nmod_poly_power_sums(nn_ptr res, nn_srcptr poly, slong len, slong n, nmod_t mod) { if (10 * n >= len + 75) @@ -80,7 +80,7 @@ nmod_poly_power_sums(nmod_poly_t res, const nmod_poly_t poly, slong n) /* todo: should use dot products */ void -_nmod_poly_power_sums_naive(mp_ptr res, mp_srcptr poly, slong len, slong n, +_nmod_poly_power_sums_naive(nn_ptr res, nn_srcptr poly, slong len, slong n, nmod_t mod) { slong i, k; @@ -151,12 +151,12 @@ nmod_poly_power_sums_naive(nmod_poly_t res, const nmod_poly_t poly, slong n) } void -_nmod_poly_power_sums_schoenhage(mp_ptr res, mp_srcptr poly, slong len, +_nmod_poly_power_sums_schoenhage(nn_ptr res, nn_srcptr poly, slong len, slong n, nmod_t mod) { - mp_ptr a, b; + nn_ptr a, b; - a = (mp_ptr) flint_malloc((2 * len - 1) * sizeof(mp_limb_t)); + a = (nn_ptr) flint_malloc((2 * len - 1) * sizeof(ulong)); b = a + len; _nmod_poly_reverse(a, poly, len, len); diff --git a/src/nmod_poly/power_sums_to_poly.c b/src/nmod_poly/power_sums_to_poly.c index f036357688..507d9f69b3 100644 --- a/src/nmod_poly/power_sums_to_poly.c +++ b/src/nmod_poly/power_sums_to_poly.c @@ -14,7 +14,7 @@ #include "nmod_poly.h" void -_nmod_poly_power_sums_to_poly(mp_ptr res, mp_srcptr poly, slong len, +_nmod_poly_power_sums_to_poly(nn_ptr res, nn_srcptr poly, slong len, nmod_t mod) { if (mod.n <= 12 || poly[0] <= 10) @@ -58,7 +58,7 @@ nmod_poly_power_sums_to_poly(nmod_poly_t res, const nmod_poly_t 
Q) /* todo: should use dot products */ void -_nmod_poly_power_sums_to_poly_naive(mp_ptr res, mp_srcptr poly, slong len, +_nmod_poly_power_sums_to_poly_naive(nn_ptr res, nn_srcptr poly, slong len, nmod_t mod) { slong i, k; @@ -121,16 +121,16 @@ nmod_poly_power_sums_to_poly_naive(nmod_poly_t res, const nmod_poly_t Q) } void -_nmod_poly_power_sums_to_poly_schoenhage(mp_ptr res, mp_srcptr poly, slong len, +_nmod_poly_power_sums_to_poly_schoenhage(nn_ptr res, nn_srcptr poly, slong len, nmod_t mod) { - mp_ptr t; + nn_ptr t; slong d = poly[0]; if (len >= d + 1) len = d + 1; - t = flint_malloc(len * sizeof(mp_limb_t)); + t = flint_malloc(len * sizeof(ulong)); _nmod_vec_neg(t, poly + 1, len - 1, mod); _nmod_poly_integral(t, t, len, mod); diff --git a/src/nmod_poly/powers_mod.c b/src/nmod_poly/powers_mod.c index 0413f058e2..2706da8dcb 100644 --- a/src/nmod_poly/powers_mod.c +++ b/src/nmod_poly/powers_mod.c @@ -23,9 +23,9 @@ typedef struct slong n; slong glen; slong ginvlen; - mp_srcptr g; - mp_srcptr ginv; - mp_ptr * res; + nn_srcptr g; + nn_srcptr ginv; + nn_ptr * res; nmod_t mod; #if FLINT_USES_PTHREAD pthread_mutex_t * mutex; @@ -38,8 +38,8 @@ _nmod_poly_powers_mod_preinv_worker(void * arg_ptr) powers_preinv_arg_t arg = *((powers_preinv_arg_t *) arg_ptr); slong i, j, k = arg.k, n = arg.n; slong glen = arg.glen, ginvlen = arg.ginvlen; - mp_ptr * res = arg.res; - mp_srcptr g = arg.g, ginv = arg.ginv; + nn_ptr * res = arg.res; + nn_srcptr g = arg.g, ginv = arg.ginv; const nmod_t mod = arg.mod; while (1) @@ -78,9 +78,9 @@ _nmod_poly_powers_mod_preinv_worker(void * arg_ptr) {ginv, ginvlen} must be set to the power series inverse of the reverse of g */ void -_nmod_poly_powers_mod_preinv_threaded_pool(mp_ptr * res, mp_srcptr f, - slong flen, slong n, mp_srcptr g, slong glen, - mp_srcptr ginv, slong ginvlen, const nmod_t mod, +_nmod_poly_powers_mod_preinv_threaded_pool(nn_ptr * res, nn_srcptr f, + slong flen, slong n, nn_srcptr g, slong glen, + nn_srcptr ginv, slong ginvlen, const nmod_t mod, thread_pool_handle * threads, slong num_threads) { slong i, k, shared_j = 0; @@ -165,9 +165,9 @@ _nmod_poly_powers_mod_preinv_threaded_pool(mp_ptr * res, mp_srcptr f, } void -_nmod_poly_powers_mod_preinv_threaded(mp_ptr * res, mp_srcptr f, - slong flen, slong n, mp_srcptr g, slong glen, - mp_srcptr ginv, slong ginvlen, const nmod_t mod) +_nmod_poly_powers_mod_preinv_threaded(nn_ptr * res, nn_srcptr f, + slong flen, slong n, nn_srcptr g, slong glen, + nn_srcptr ginv, slong ginvlen, const nmod_t mod) { thread_pool_handle * threads; slong num_threads = flint_request_threads(&threads, flint_get_num_threads()); @@ -185,7 +185,7 @@ nmod_poly_powers_mod_bsgs(nmod_poly_struct * res, slong i; nmod_poly_t ginv; - mp_ptr * res_arr; + nn_ptr * res_arr; if (nmod_poly_length(g) == 0) { @@ -219,7 +219,7 @@ nmod_poly_powers_mod_bsgs(nmod_poly_struct * res, return; } - res_arr = (mp_ptr *) flint_malloc(n*sizeof(mp_ptr)); + res_arr = (nn_ptr *) flint_malloc(n*sizeof(nn_ptr)); nmod_poly_init_mod(ginv, g->mod); for (i = 0; i < n; i++) @@ -250,8 +250,8 @@ nmod_poly_powers_mod_bsgs(nmod_poly_struct * res, {ginv, ginvlen} must be set to the power series inverse of the reverse of g */ void -_nmod_poly_powers_mod_preinv_naive(mp_ptr * res, mp_srcptr f, slong flen, slong n, - mp_srcptr g, slong glen, mp_srcptr ginv, slong ginvlen, const nmod_t mod) +_nmod_poly_powers_mod_preinv_naive(nn_ptr * res, nn_srcptr f, slong flen, slong n, + nn_srcptr g, slong glen, nn_srcptr ginv, slong ginvlen, const nmod_t mod) { slong i; @@ -296,7 +296,7 @@ 
nmod_poly_powers_mod_naive(nmod_poly_struct * res, const nmod_poly_t f, slong i; nmod_poly_t ginv; - mp_ptr * res_arr; + nn_ptr * res_arr; if (nmod_poly_length(g) == 0) { @@ -330,7 +330,7 @@ nmod_poly_powers_mod_naive(nmod_poly_struct * res, const nmod_poly_t f, return; } - res_arr = (mp_ptr *) flint_malloc(n*sizeof(mp_ptr)); + res_arr = (nn_ptr *) flint_malloc(n*sizeof(nn_ptr)); nmod_poly_init_mod(ginv, g->mod); for (i = 0; i < n; i++) diff --git a/src/nmod_poly/powmod_binexp.c b/src/nmod_poly/powmod_binexp.c index 4014f38e72..f27fc3c2c0 100644 --- a/src/nmod_poly/powmod_binexp.c +++ b/src/nmod_poly/powmod_binexp.c @@ -18,10 +18,10 @@ #include "fmpz.h" void -_nmod_poly_powmod_fmpz_binexp(mp_ptr res, mp_srcptr poly, - fmpz_t e, mp_srcptr f, slong lenf, nmod_t mod) +_nmod_poly_powmod_fmpz_binexp(nn_ptr res, nn_srcptr poly, + fmpz_t e, nn_srcptr f, slong lenf, nmod_t mod) { - mp_ptr T, Q; + nn_ptr T, Q; slong lenT, lenQ; slong bits, i; @@ -80,7 +80,7 @@ void nmod_poly_powmod_fmpz_binexp(nmod_poly_t res, const nmod_poly_t poly, fmpz_t e, const nmod_poly_t f) { - mp_ptr p; + nn_ptr p; slong len = poly->length; slong lenf = f->length; slong trunc = lenf - 1; @@ -174,10 +174,10 @@ nmod_poly_powmod_fmpz_binexp(nmod_poly_t res, } void -_nmod_poly_powmod_ui_binexp(mp_ptr res, mp_srcptr poly, - ulong e, mp_srcptr f, slong lenf, nmod_t mod) +_nmod_poly_powmod_ui_binexp(nn_ptr res, nn_srcptr poly, + ulong e, nn_srcptr f, slong lenf, nmod_t mod) { - mp_ptr T, Q; + nn_ptr T, Q; slong lenT, lenQ; int i; @@ -216,7 +216,7 @@ nmod_poly_powmod_ui_binexp(nmod_poly_t res, const nmod_poly_t poly, ulong e, const nmod_poly_t f) { - mp_ptr p; + nn_ptr p; slong len = poly->length; slong lenf = f->length; slong trunc = lenf - 1; diff --git a/src/nmod_poly/powmod_binexp_preinv.c b/src/nmod_poly/powmod_binexp_preinv.c index 4a0047c3b5..55bd2d1c9f 100644 --- a/src/nmod_poly/powmod_binexp_preinv.c +++ b/src/nmod_poly/powmod_binexp_preinv.c @@ -19,10 +19,10 @@ #include "fmpz.h" void -_nmod_poly_powmod_fmpz_binexp_preinv (mp_ptr res, mp_srcptr poly, fmpz_t e, - mp_srcptr f, slong lenf, mp_srcptr finv, slong lenfinv, nmod_t mod) +_nmod_poly_powmod_fmpz_binexp_preinv (nn_ptr res, nn_srcptr poly, fmpz_t e, + nn_srcptr f, slong lenf, nn_srcptr finv, slong lenfinv, nmod_t mod) { - mp_ptr T, Q; + nn_ptr T, Q; slong lenT, lenQ; slong i, bits; @@ -83,7 +83,7 @@ void nmod_poly_powmod_fmpz_binexp_preinv(nmod_poly_t res, const nmod_poly_t poly, fmpz_t e, const nmod_poly_t f, const nmod_poly_t finv) { - mp_ptr p; + nn_ptr p; slong len = poly->length; slong lenf = f->length; slong trunc = lenf - 1; @@ -178,10 +178,10 @@ nmod_poly_powmod_fmpz_binexp_preinv(nmod_poly_t res, const nmod_poly_t poly, } void -_nmod_poly_powmod_ui_binexp_preinv(mp_ptr res, mp_srcptr poly, ulong e, - mp_srcptr f, slong lenf, mp_srcptr finv, slong lenfinv, nmod_t mod) +_nmod_poly_powmod_ui_binexp_preinv(nn_ptr res, nn_srcptr poly, ulong e, + nn_srcptr f, slong lenf, nn_srcptr finv, slong lenfinv, nmod_t mod) { - mp_ptr T, Q; + nn_ptr T, Q; slong lenT, lenQ, i; if (lenf == 2) @@ -219,7 +219,7 @@ void nmod_poly_powmod_ui_binexp_preinv(nmod_poly_t res, const nmod_poly_t poly, ulong e, const nmod_poly_t f, const nmod_poly_t finv) { - mp_ptr p; + nn_ptr p; slong len = poly->length; slong lenf = f->length; slong trunc = lenf - 1; diff --git a/src/nmod_poly/powmod_x_preinv.c b/src/nmod_poly/powmod_x_preinv.c index 23bbc861c1..b12266ac72 100644 --- a/src/nmod_poly/powmod_x_preinv.c +++ b/src/nmod_poly/powmod_x_preinv.c @@ -19,10 +19,10 @@ #include "fmpz.h" void 
-_nmod_poly_powmod_x_fmpz_preinv (mp_ptr res, fmpz_t e, mp_srcptr f, slong lenf, - mp_srcptr finv, slong lenfinv, nmod_t mod) +_nmod_poly_powmod_x_fmpz_preinv (nn_ptr res, fmpz_t e, nn_srcptr f, slong lenf, + nn_srcptr finv, slong lenfinv, nmod_t mod) { - mp_ptr T, Q; + nn_ptr T, Q; slong lenT, lenQ, window; slong i, l, c; @@ -190,10 +190,10 @@ nmod_poly_powmod_x_fmpz_preinv(nmod_poly_t res, fmpz_t e, const nmod_poly_t f, } void -_nmod_poly_powmod_x_ui_preinv(mp_ptr res, ulong e, mp_srcptr f, slong lenf, - mp_srcptr finv, slong lenfinv, nmod_t mod) +_nmod_poly_powmod_x_ui_preinv(nn_ptr res, ulong e, nn_srcptr f, slong lenf, + nn_srcptr finv, slong lenfinv, nmod_t mod) { - mp_ptr T, Q; + nn_ptr T, Q; slong lenT, lenQ, window; int i, l, c; diff --git a/src/nmod_poly/product_roots_nmod_vec.c b/src/nmod_poly/product_roots_nmod_vec.c index 835924458f..11c1dd3618 100644 --- a/src/nmod_poly/product_roots_nmod_vec.c +++ b/src/nmod_poly/product_roots_nmod_vec.c @@ -14,7 +14,7 @@ #include "nmod_poly.h" void -_nmod_poly_product_roots_nmod_vec(mp_ptr poly, mp_srcptr xs, slong n, nmod_t mod) +_nmod_poly_product_roots_nmod_vec(nn_ptr poly, nn_srcptr xs, slong n, nmod_t mod) { if (n == 0) { @@ -45,7 +45,7 @@ _nmod_poly_product_roots_nmod_vec(mp_ptr poly, mp_srcptr xs, slong n, nmod_t mod else { const slong m = (n + 1) / 2; - mp_ptr tmp; + nn_ptr tmp; tmp = _nmod_vec_init(n + 2); @@ -58,7 +58,7 @@ _nmod_poly_product_roots_nmod_vec(mp_ptr poly, mp_srcptr xs, slong n, nmod_t mod } void -nmod_poly_product_roots_nmod_vec(nmod_poly_t poly, mp_srcptr xs, slong n) +nmod_poly_product_roots_nmod_vec(nmod_poly_t poly, nn_srcptr xs, slong n) { nmod_poly_fit_length(poly, n + 1); _nmod_poly_product_roots_nmod_vec(poly->coeffs, xs, n, poly->mod); diff --git a/src/nmod_poly/profile/p-evaluate_mat.c b/src/nmod_poly/profile/p-evaluate_mat.c index 48415d2bb9..9c2d3e0f55 100644 --- a/src/nmod_poly/profile/p-evaluate_mat.c +++ b/src/nmod_poly/profile/p-evaluate_mat.c @@ -29,7 +29,7 @@ main(void) int result; nmod_mat_t A, B, C; nmod_poly_t poly; - mp_limb_t n; + ulong n; clock_t horner_begin, paterson_begin; double horner_time, paterson_time; FLINT_TEST_INIT(state); diff --git a/src/nmod_poly/profile/p-gcd.c b/src/nmod_poly/profile/p-gcd.c index 6380b151ab..5942725465 100644 --- a/src/nmod_poly/profile/p-gcd.c +++ b/src/nmod_poly/profile/p-gcd.c @@ -28,7 +28,7 @@ int main(void) { - mp_limb_t p[] = {17ul, 2147483659ul, 9223372036854775837ul}; + ulong p[] = {17ul, 2147483659ul, 9223372036854775837ul}; const slong degs[] = { 20, 40, 60, 80, 100, 120, 140, 160, 180, 200, 220, 240, 260, 280, 300, 320, 340, 360, 380, 400, 420, 440, 460, 480, 500, 520, 540, 560, 580, 600, diff --git a/src/nmod_poly/profile/p-mul.c b/src/nmod_poly/profile/p-mul.c index 2397a0b0f2..820cdb06f1 100644 --- a/src/nmod_poly/profile/p-mul.c +++ b/src/nmod_poly/profile/p-mul.c @@ -26,7 +26,7 @@ void sample(void * arg, ulong count) slong scale; nmod_poly_t a, b, c; - mp_limb_t m; + ulong m; FLINT_TEST_INIT(state); diff --git a/src/nmod_poly/profile/p-mulmod.c b/src/nmod_poly/profile/p-mulmod.c index 3f045f0268..5ffce8e522 100644 --- a/src/nmod_poly/profile/p-mulmod.c +++ b/src/nmod_poly/profile/p-mulmod.c @@ -29,7 +29,7 @@ void sample(void * arg, ulong count) slong scale; nmod_poly_t a, b, c, d, dinv; - mp_limb_t m; + ulong m; FLINT_TEST_INIT(state); diff --git a/src/nmod_poly/realloc.c b/src/nmod_poly/realloc.c index 7e60c25b90..93e2d21291 100644 --- a/src/nmod_poly/realloc.c +++ b/src/nmod_poly/realloc.c @@ -25,7 +25,7 @@ nmod_poly_realloc(nmod_poly_t poly, 
slong alloc) return; } - poly->coeffs = (mp_ptr) flint_realloc(poly->coeffs, alloc * sizeof(mp_limb_t)); + poly->coeffs = (nn_ptr) flint_realloc(poly->coeffs, alloc * sizeof(ulong)); poly->alloc = alloc; diff --git a/src/nmod_poly/rem.c b/src/nmod_poly/rem.c index cbc9baa5fa..395e82b06d 100644 --- a/src/nmod_poly/rem.c +++ b/src/nmod_poly/rem.c @@ -13,12 +13,12 @@ #include "nmod.h" #include "nmod_poly.h" -void _nmod_poly_rem_q1(mp_ptr R, - mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, +void _nmod_poly_rem_q1(nn_ptr R, + nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod) { slong i; - mp_limb_t invL, t, q0, q1, t2, t1, t0, s1, s0; + ulong invL, t, q0, q1, t2, t1, t0, s1, s0; FLINT_ASSERT(lenA == lenB + 1); invL = (B[lenB-1] == 1) ? 1 : n_invmod(B[lenB-1], mod.n); @@ -76,8 +76,8 @@ void _nmod_poly_rem_q1(mp_ptr R, } } -void _nmod_poly_rem(mp_ptr R, mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, nmod_t mod) +void _nmod_poly_rem(nn_ptr R, nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, nmod_t mod) { if (lenA - lenB == 1) { @@ -85,11 +85,11 @@ void _nmod_poly_rem(mp_ptr R, mp_srcptr A, slong lenA, } else if (lenB >= 2) { - mp_ptr Q; + nn_ptr Q; TMP_INIT; TMP_START; - Q = TMP_ALLOC((lenA - lenB + 1) * sizeof(mp_limb_t)); + Q = TMP_ALLOC((lenA - lenB + 1) * sizeof(ulong)); _nmod_poly_divrem(Q, R, A, lenA, B, lenB, mod); TMP_END; } @@ -99,7 +99,7 @@ void nmod_poly_rem(nmod_poly_t R, const nmod_poly_t A, const nmod_poly_t B) { const slong lenA = A->length, lenB = B->length; nmod_poly_t tR; - mp_ptr r; + nn_ptr r; if (lenB == 0) { diff --git a/src/nmod_poly/resultant.c b/src/nmod_poly/resultant.c index 78e7f5d030..d8fbe4a210 100644 --- a/src/nmod_poly/resultant.c +++ b/src/nmod_poly/resultant.c @@ -17,9 +17,9 @@ #include "nmod_poly.h" #include "gr_poly.h" -mp_limb_t -_nmod_poly_resultant(mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, nmod_t mod) +ulong +_nmod_poly_resultant(nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, nmod_t mod) { if (poly1 == poly2) { @@ -32,7 +32,7 @@ _nmod_poly_resultant(mp_srcptr poly1, slong len1, else { slong cutoff = NMOD_BITS(mod) <= 8 ? 
NMOD_POLY_SMALL_GCD_CUTOFF : NMOD_POLY_GCD_CUTOFF; - mp_limb_t res; + ulong res; gr_ctx_t ctx; _gr_ctx_init_nmod(ctx, &mod); @@ -49,12 +49,12 @@ _nmod_poly_resultant(mp_srcptr poly1, slong len1, } } -mp_limb_t +ulong nmod_poly_resultant(const nmod_poly_t f, const nmod_poly_t g) { const slong len1 = f->length; const slong len2 = g->length; - mp_limb_t r; + ulong r; if (len1 == 0 || len2 == 0) { @@ -77,9 +77,9 @@ nmod_poly_resultant(const nmod_poly_t f, const nmod_poly_t g) return r; } -mp_limb_t -_nmod_poly_resultant_euclidean(mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, nmod_t mod) +ulong +_nmod_poly_resultant_euclidean(nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, nmod_t mod) { if (poly1 == poly2) { @@ -102,11 +102,11 @@ _nmod_poly_resultant_euclidean(mp_srcptr poly1, slong len1, } else /* len1 >= len2 >= 2 */ { - mp_limb_t res = 1; + ulong res = 1; - mp_ptr u, v, r, t, w; + nn_ptr u, v, r, t, w; slong l0, l1, l2; - mp_limb_t lc; + ulong lc; w = _nmod_vec_init(3 * len1); u = w; @@ -165,12 +165,12 @@ _nmod_poly_resultant_euclidean(mp_srcptr poly1, slong len1, } } -mp_limb_t +ulong nmod_poly_resultant_euclidean(const nmod_poly_t f, const nmod_poly_t g) { const slong len1 = f->length; const slong len2 = g->length; - mp_limb_t r; + ulong r; if (len1 == 0 || len2 == 0) { @@ -196,13 +196,13 @@ nmod_poly_resultant_euclidean(const nmod_poly_t f, const nmod_poly_t g) return r; } -mp_limb_t -_nmod_poly_resultant_hgcd(mp_srcptr poly1, slong len1, - mp_srcptr poly2, slong len2, nmod_t mod) +ulong +_nmod_poly_resultant_hgcd(nn_srcptr poly1, slong len1, + nn_srcptr poly2, slong len2, nmod_t mod) { gr_ctx_t ctx; slong cutoff = NMOD_BITS(mod) <= 8 ? NMOD_POLY_SMALL_GCD_CUTOFF : NMOD_POLY_GCD_CUTOFF; - mp_limb_t res; + ulong res; _gr_ctx_init_nmod(ctx, &mod); GR_MUST_SUCCEED(_gr_poly_resultant_hgcd(&res, poly1, len1, poly2, len2, NMOD_POLY_HGCD_CUTOFF, cutoff, ctx)); @@ -210,12 +210,12 @@ _nmod_poly_resultant_hgcd(mp_srcptr poly1, slong len1, return res; } -mp_limb_t +ulong nmod_poly_resultant_hgcd(const nmod_poly_t f, const nmod_poly_t g) { const slong len1 = f->length; const slong len2 = g->length; - mp_limb_t r; + ulong r; if (len1 == 0 || len2 == 0) { diff --git a/src/nmod_poly/reverse.c b/src/nmod_poly/reverse.c index 1cc30c22a5..43fb9ad5c9 100644 --- a/src/nmod_poly/reverse.c +++ b/src/nmod_poly/reverse.c @@ -11,10 +11,10 @@ #include "nmod_poly.h" -void _nmod_poly_reverse(mp_ptr output, mp_srcptr input, slong len, slong m) +void _nmod_poly_reverse(nn_ptr output, nn_srcptr input, slong len, slong m) { slong i, min; - mp_limb_t temp; + ulong temp; if (input != output) { diff --git a/src/nmod_poly/revert_series.c b/src/nmod_poly/revert_series.c index d1184b22b7..c1a4427190 100644 --- a/src/nmod_poly/revert_series.c +++ b/src/nmod_poly/revert_series.c @@ -17,7 +17,7 @@ #include "gr_poly.h" void -_nmod_poly_revert_series(mp_ptr Qinv, mp_srcptr Q, slong Qlen, slong n, nmod_t mod) +_nmod_poly_revert_series(nn_ptr Qinv, nn_srcptr Q, slong Qlen, slong n, nmod_t mod) { gr_ctx_t ctx; _gr_ctx_init_nmod(ctx, &mod); diff --git a/src/nmod_poly/scalar.c b/src/nmod_poly/scalar.c index 81f315cec7..3b8ae7c2ec 100644 --- a/src/nmod_poly/scalar.c +++ b/src/nmod_poly/scalar.c @@ -32,7 +32,7 @@ void nmod_poly_scalar_addmul_nmod(nmod_poly_t A, const nmod_poly_t B, ulong x) } void -nmod_poly_scalar_mul_nmod(nmod_poly_t res, const nmod_poly_t poly1, mp_limb_t c) +nmod_poly_scalar_mul_nmod(nmod_poly_t res, const nmod_poly_t poly1, ulong c) { if ((poly1->length == 0) || (c == 0)) { diff --git 
a/src/nmod_poly/set_str.c b/src/nmod_poly/set_str.c index 5180b215da..d972691a8d 100644 --- a/src/nmod_poly/set_str.c +++ b/src/nmod_poly/set_str.c @@ -17,7 +17,7 @@ int nmod_poly_set_str(nmod_poly_t poly, const char * s) { const char * whitespace = " \t\n\r"; slong i, length; - mp_limb_t n; + ulong n; if (flint_sscanf(s, "%wd %wu", &length, &n) != 2) return 0; diff --git a/src/nmod_poly/shift_left_right.c b/src/nmod_poly/shift_left_right.c index 907006177c..5cf60e6cfd 100644 --- a/src/nmod_poly/shift_left_right.c +++ b/src/nmod_poly/shift_left_right.c @@ -12,7 +12,7 @@ #include "mpn_extras.h" #include "nmod_poly.h" -void _nmod_poly_shift_left(mp_ptr res, mp_srcptr poly, slong len, slong k) +void _nmod_poly_shift_left(nn_ptr res, nn_srcptr poly, slong len, slong k) { flint_mpn_copyd(res + k, poly, len); flint_mpn_zero(res, k); @@ -33,7 +33,7 @@ void nmod_poly_shift_left(nmod_poly_t res, const nmod_poly_t poly, slong k) res->length = poly->length + k; } -void _nmod_poly_shift_right(mp_ptr res, mp_srcptr poly, slong len, slong k) +void _nmod_poly_shift_right(nn_ptr res, nn_srcptr poly, slong len, slong k) { flint_mpn_copyi(res, poly + k, len); } diff --git a/src/nmod_poly/sin_series.c b/src/nmod_poly/sin_series.c index edaaf7cd67..8ec417a6ab 100644 --- a/src/nmod_poly/sin_series.c +++ b/src/nmod_poly/sin_series.c @@ -15,9 +15,9 @@ #include "nmod_poly.h" void -_nmod_poly_sin_series(mp_ptr g, mp_srcptr h, slong n, nmod_t mod) +_nmod_poly_sin_series(nn_ptr g, nn_srcptr h, slong n, nmod_t mod) { - mp_ptr t, u; + nn_ptr t, u; t = _nmod_vec_init(n); u = _nmod_vec_init(n); @@ -36,7 +36,7 @@ _nmod_poly_sin_series(mp_ptr g, mp_srcptr h, slong n, nmod_t mod) void nmod_poly_sin_series(nmod_poly_t g, const nmod_poly_t h, slong n) { - mp_ptr h_coeffs; + nn_ptr h_coeffs; slong h_len = h->length; if (h_len > 0 && h->coeffs[0] != UWORD(0)) diff --git a/src/nmod_poly/sinh_series.c b/src/nmod_poly/sinh_series.c index db0e396c87..b9f3c4a66f 100644 --- a/src/nmod_poly/sinh_series.c +++ b/src/nmod_poly/sinh_series.c @@ -15,9 +15,9 @@ #include "nmod_poly.h" void -_nmod_poly_sinh_series(mp_ptr f, mp_srcptr h, slong n, nmod_t mod) +_nmod_poly_sinh_series(nn_ptr f, nn_srcptr h, slong n, nmod_t mod) { - mp_ptr g = _nmod_vec_init(n); + nn_ptr g = _nmod_vec_init(n); _nmod_poly_exp_expinv_series(f, g, h, n, n, mod); _nmod_vec_sub(f, f, g, n, mod); _nmod_vec_scalar_mul_nmod(f, f, n, n_invmod(UWORD(2), mod.n), mod); @@ -27,7 +27,7 @@ _nmod_poly_sinh_series(mp_ptr f, mp_srcptr h, slong n, nmod_t mod) void nmod_poly_sinh_series(nmod_poly_t g, const nmod_poly_t h, slong n) { - mp_ptr g_coeffs, h_coeffs; + nn_ptr g_coeffs, h_coeffs; nmod_poly_t t1; slong h_len; diff --git a/src/nmod_poly/sqrt.c b/src/nmod_poly/sqrt.c index eddb158749..44fb8ac97f 100644 --- a/src/nmod_poly/sqrt.c +++ b/src/nmod_poly/sqrt.c @@ -14,7 +14,7 @@ #include "nmod_poly.h" static inline -int _nmod_poly_sqrt_2(mp_ptr s, mp_srcptr p, slong len) +int _nmod_poly_sqrt_2(nn_ptr s, nn_srcptr p, slong len) { slong i; @@ -29,12 +29,12 @@ int _nmod_poly_sqrt_2(mp_ptr s, mp_srcptr p, slong len) } int -_nmod_poly_sqrt(mp_ptr s, mp_srcptr p, slong len, nmod_t mod) +_nmod_poly_sqrt(nn_ptr s, nn_srcptr p, slong len, nmod_t mod) { slong slen; int result; - mp_ptr t; - mp_limb_t c, d; + nn_ptr t; + ulong c, d; if (len % 2 == 0) return len == 0; diff --git a/src/nmod_poly/sqrt_series.c b/src/nmod_poly/sqrt_series.c index f3e899e45f..cf4d116eeb 100644 --- a/src/nmod_poly/sqrt_series.c +++ b/src/nmod_poly/sqrt_series.c @@ -14,7 +14,7 @@ #include "gr_poly.h" void 
-_nmod_poly_sqrt_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod) +_nmod_poly_sqrt_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod) { gr_ctx_t ctx; _gr_ctx_init_nmod(ctx, &mod); diff --git a/src/nmod_poly/sub.c b/src/nmod_poly/sub.c index a79a2cb0c7..b5920aa2f7 100644 --- a/src/nmod_poly/sub.c +++ b/src/nmod_poly/sub.c @@ -15,7 +15,7 @@ #include "nmod_poly.h" void -_nmod_poly_sub(mp_ptr res, mp_srcptr poly1, slong len1, mp_srcptr poly2, +_nmod_poly_sub(nn_ptr res, nn_srcptr poly1, slong len1, nn_srcptr poly2, slong len2, nmod_t mod) { slong i, min = FLINT_MIN(len1, len2); diff --git a/src/nmod_poly/tan_series.c b/src/nmod_poly/tan_series.c index a737e5e86c..0e94d42de5 100644 --- a/src/nmod_poly/tan_series.c +++ b/src/nmod_poly/tan_series.c @@ -13,7 +13,7 @@ #include "gr_poly.h" void -_nmod_poly_tan_series(mp_ptr g, mp_srcptr h, slong hlen, slong n, nmod_t mod) +_nmod_poly_tan_series(nn_ptr g, nn_srcptr h, slong hlen, slong n, nmod_t mod) { gr_ctx_t ctx; _gr_ctx_init_nmod(ctx, &mod); diff --git a/src/nmod_poly/tanh_series.c b/src/nmod_poly/tanh_series.c index 12d7e61554..667b5b3083 100644 --- a/src/nmod_poly/tanh_series.c +++ b/src/nmod_poly/tanh_series.c @@ -14,9 +14,9 @@ #include "nmod_poly.h" void -_nmod_poly_tanh_series(mp_ptr f, mp_srcptr h, slong n, nmod_t mod) +_nmod_poly_tanh_series(nn_ptr f, nn_srcptr h, slong n, nmod_t mod) { - mp_ptr t, u; + nn_ptr t, u; t = _nmod_vec_init(n); u = _nmod_vec_init(n); @@ -35,7 +35,7 @@ _nmod_poly_tanh_series(mp_ptr f, mp_srcptr h, slong n, nmod_t mod) void nmod_poly_tanh_series(nmod_poly_t g, const nmod_poly_t h, slong n) { - mp_ptr h_coeffs; + nn_ptr h_coeffs; slong h_len = h->length; if (h_len > 0 && h->coeffs[0] != UWORD(0)) diff --git a/src/nmod_poly/taylor_shift.c b/src/nmod_poly/taylor_shift.c index 9cd7bf9391..a2a048b829 100644 --- a/src/nmod_poly/taylor_shift.c +++ b/src/nmod_poly/taylor_shift.c @@ -14,7 +14,7 @@ #include "nmod_poly.h" void -_nmod_poly_taylor_shift(mp_ptr poly, mp_limb_t c, slong len, nmod_t mod) +_nmod_poly_taylor_shift(nn_ptr poly, ulong c, slong len, nmod_t mod) { if (len < 100 || (ulong) len > mod.n) _nmod_poly_taylor_shift_horner(poly, c, len, mod); @@ -25,7 +25,7 @@ _nmod_poly_taylor_shift(mp_ptr poly, mp_limb_t c, slong len, nmod_t mod) } void -nmod_poly_taylor_shift(nmod_poly_t g, const nmod_poly_t f, mp_limb_t c) +nmod_poly_taylor_shift(nmod_poly_t g, const nmod_poly_t f, ulong c) { if (f != g) nmod_poly_set(g, f); @@ -34,11 +34,11 @@ nmod_poly_taylor_shift(nmod_poly_t g, const nmod_poly_t f, mp_limb_t c) } void -_nmod_poly_taylor_shift_convolution(mp_ptr p, mp_limb_t c, slong len, nmod_t mod) +_nmod_poly_taylor_shift_convolution(nn_ptr p, ulong c, slong len, nmod_t mod) { slong i, n = len - 1; - mp_limb_t f, d; - mp_ptr t, u; + ulong f, d; + nn_ptr t, u; if (c == 0 || len <= 1) return; @@ -92,7 +92,7 @@ _nmod_poly_taylor_shift_convolution(mp_ptr p, mp_limb_t c, slong len, nmod_t mod void nmod_poly_taylor_shift_convolution(nmod_poly_t g, const nmod_poly_t f, - mp_limb_t c) + ulong c) { if (f != g) nmod_poly_set(g, f); @@ -101,7 +101,7 @@ nmod_poly_taylor_shift_convolution(nmod_poly_t g, const nmod_poly_t f, } void -_nmod_poly_taylor_shift_horner(mp_ptr poly, mp_limb_t c, slong n, nmod_t mod) +_nmod_poly_taylor_shift_horner(nn_ptr poly, ulong c, slong n, nmod_t mod) { slong i, j; @@ -126,7 +126,7 @@ _nmod_poly_taylor_shift_horner(mp_ptr poly, mp_limb_t c, slong n, nmod_t mod) } void -nmod_poly_taylor_shift_horner(nmod_poly_t g, const nmod_poly_t f, mp_limb_t c) 
+nmod_poly_taylor_shift_horner(nmod_poly_t g, const nmod_poly_t f, ulong c) { if (f != g) nmod_poly_set(g, f); diff --git a/src/nmod_poly/test/main.c b/src/nmod_poly/test/main.c index cf8945cfb6..7146e0b8db 100644 --- a/src/nmod_poly/test/main.c +++ b/src/nmod_poly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/nmod_poly/test/t-add.c b/src/nmod_poly/test/t-add.c index 7b07322e31..8891ce2d3c 100644 --- a/src/nmod_poly/test/t-add.c +++ b/src/nmod_poly/test/t-add.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_add, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -52,7 +52,7 @@ TEST_FUNCTION_START(nmod_poly_add, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); diff --git a/src/nmod_poly/test/t-asin_series.c b/src/nmod_poly/test/t-asin_series.c index 2c6c5bf571..ca84a46c42 100644 --- a/src/nmod_poly/test/t-asin_series.c +++ b/src/nmod_poly/test/t-asin_series.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_asin_series, state) { nmod_poly_t A, B, asinA, atanB; slong n; - mp_limb_t mod; + ulong mod; do { mod = n_randtest_prime(state, 0); } while (mod == 2); n = 1 + n_randtest(state) % 100; @@ -71,7 +71,7 @@ TEST_FUNCTION_START(nmod_poly_asin_series, state) { nmod_poly_t A, B; slong n; - mp_limb_t mod; + ulong mod; do { mod = n_randtest_prime(state, 0); } while (mod == 2); n = n_randtest(state) % 50; n = FLINT_MIN(n, mod); diff --git a/src/nmod_poly/test/t-asinh_series.c b/src/nmod_poly/test/t-asinh_series.c index a81e6aa066..7749747b4d 100644 --- a/src/nmod_poly/test/t-asinh_series.c +++ b/src/nmod_poly/test/t-asinh_series.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_asinh_series, state) { nmod_poly_t A, B, asinhA, atanhB; slong n; - mp_limb_t mod; + ulong mod; do { mod = n_randtest_prime(state, 0); } while (mod == 2); n = 1 + n_randtest(state) % 100; @@ -70,7 +70,7 @@ TEST_FUNCTION_START(nmod_poly_asinh_series, state) { nmod_poly_t A, B; slong n; - mp_limb_t mod; + ulong mod; do { mod = n_randtest_prime(state, 0); } while (mod == 2); n = n_randtest(state) % 50; n = FLINT_MIN(n, mod); diff --git a/src/nmod_poly/test/t-atan_series.c b/src/nmod_poly/test/t-atan_series.c index 77db2ad352..dc764da642 100644 --- a/src/nmod_poly/test/t-atan_series.c +++ b/src/nmod_poly/test/t-atan_series.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_atan_series, state) { nmod_poly_t A, B, atanA, atanB; slong n; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); n = 1 + n_randtest(state) % 100; @@ -72,7 +72,7 @@ TEST_FUNCTION_START(nmod_poly_atan_series, state) { nmod_poly_t A, B; slong n; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); n = n_randtest(state) % 50; n = FLINT_MIN(n, mod); diff --git a/src/nmod_poly/test/t-atanh_series.c b/src/nmod_poly/test/t-atanh_series.c index 99452cdcb9..8964f73809 100644 --- a/src/nmod_poly/test/t-atanh_series.c +++ b/src/nmod_poly/test/t-atanh_series.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_atanh_series, state) { nmod_poly_t A, B, atanhA, atanhB; slong n; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); n = 1 + n_randtest(state) % 100; 
@@ -71,7 +71,7 @@ TEST_FUNCTION_START(nmod_poly_atanh_series, state) { nmod_poly_t A, B; slong n; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); n = n_randtest(state) % 50; n = FLINT_MIN(n, mod); diff --git a/src/nmod_poly/test/t-berlekamp_massey.c b/src/nmod_poly/test/t-berlekamp_massey.c index e3eff150c3..b810d0c668 100644 --- a/src/nmod_poly/test/t-berlekamp_massey.c +++ b/src/nmod_poly/test/t-berlekamp_massey.c @@ -57,7 +57,7 @@ TEST_FUNCTION_START(nmod_poly_berlekamp_massey, state) nmod_berlekamp_massey_init(B2, 2); for (j = 0; j < 10; j++) { - mp_limb_t p; + ulong p; p = n_randtest_prime(state, 1); diff --git a/src/nmod_poly/test/t-bit_pack.c b/src/nmod_poly/test/t-bit_pack.c index 46326dd3a7..5dfd5108c7 100644 --- a/src/nmod_poly/test/t-bit_pack.c +++ b/src/nmod_poly/test/t-bit_pack.c @@ -22,9 +22,9 @@ TEST_FUNCTION_START(nmod_poly_bit_pack, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b; - mp_limb_t n; + ulong n; ulong bits; - mp_ptr mpn; + nn_ptr mpn; do { @@ -40,7 +40,7 @@ TEST_FUNCTION_START(nmod_poly_bit_pack, state) } while (a->length == 0); mpn = - flint_malloc(sizeof(mp_limb_t) * + flint_malloc(sizeof(ulong) * ((bits * a->length - 1) / FLINT_BITS + 1)); _nmod_poly_bit_pack(mpn, a->coeffs, a->length, bits); @@ -68,7 +68,7 @@ TEST_FUNCTION_START(nmod_poly_bit_pack, state) fmpz_t f; nmod_poly_t A, B; slong b; - mp_limb_t n; + ulong n; do { diff --git a/src/nmod_poly/test/t-compose.c b/src/nmod_poly/test/t-compose.c index b49f83bc3f..5c0e2b186f 100644 --- a/src/nmod_poly/test/t-compose.c +++ b/src/nmod_poly/test/t-compose.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_compose, state) for (i = 0; i < 500 * flint_test_multiplier(); i++) { nmod_poly_t a, r, xp1, xm1; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(r, n); @@ -58,7 +58,7 @@ TEST_FUNCTION_START(nmod_poly_compose, state) for (i = 0; i < 500 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, r1, r2; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -99,7 +99,7 @@ TEST_FUNCTION_START(nmod_poly_compose, state) for (i = 0; i < 500 * flint_test_multiplier(); i++) { nmod_poly_t a, b, r1; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -130,7 +130,7 @@ TEST_FUNCTION_START(nmod_poly_compose, state) for (i = 0; i < 500 * flint_test_multiplier(); i++) { nmod_poly_t a, b, r1; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); diff --git a/src/nmod_poly/test/t-compose_horner.c b/src/nmod_poly/test/t-compose_horner.c index df5a3673f5..def9c39883 100644 --- a/src/nmod_poly/test/t-compose_horner.c +++ b/src/nmod_poly/test/t-compose_horner.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_compose_horner, state) for (i = 0; i < 500 * flint_test_multiplier(); i++) { nmod_poly_t a, r, xp1, xm1; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(r, n); @@ -58,7 +58,7 @@ TEST_FUNCTION_START(nmod_poly_compose_horner, state) for (i = 0; i < 500 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, r1, r2; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); diff --git a/src/nmod_poly/test/t-compose_mod.c 
b/src/nmod_poly/test/t-compose_mod.c index 8b55b0949f..25da6ae491 100644 --- a/src/nmod_poly/test/t-compose_mod.c +++ b/src/nmod_poly/test/t-compose_mod.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, d, e; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); @@ -59,7 +59,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, d; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); @@ -94,7 +94,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, d; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); @@ -129,7 +129,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, d; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); diff --git a/src/nmod_poly/test/t-compose_mod_brent_kung.c b/src/nmod_poly/test/t-compose_mod_brent_kung.c index d7effe8e38..abf0761b99 100644 --- a/src/nmod_poly/test/t-compose_mod_brent_kung.c +++ b/src/nmod_poly/test/t-compose_mod_brent_kung.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_brent_kung, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, d, e; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); @@ -60,7 +60,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_brent_kung, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, d; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); @@ -96,7 +96,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_brent_kung, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, d; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); @@ -132,7 +132,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_brent_kung, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, d; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); diff --git a/src/nmod_poly/test/t-compose_mod_brent_kung_precomp_preinv.c b/src/nmod_poly/test/t-compose_mod_brent_kung_precomp_preinv.c index bad7f22fac..a81a3a306b 100644 --- a/src/nmod_poly/test/t-compose_mod_brent_kung_precomp_preinv.c +++ b/src/nmod_poly/test/t-compose_mod_brent_kung_precomp_preinv.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_brent_kung_precomp_preinv, state) { nmod_poly_t a, b, c, cinv, d, e; nmod_mat_t B; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); @@ -72,7 +72,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_brent_kung_precomp_preinv, state) { nmod_poly_t a, b, c, cinv, d; nmod_mat_t B; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); @@ -117,7 +117,7 @@ 
TEST_FUNCTION_START(nmod_poly_compose_mod_brent_kung_precomp_preinv, state) { nmod_poly_t a, b, c, cinv, d; nmod_mat_t B; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); @@ -162,7 +162,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_brent_kung_precomp_preinv, state) { nmod_poly_t a, b, c, cinv, d; nmod_mat_t B; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); diff --git a/src/nmod_poly/test/t-compose_mod_brent_kung_precomp_preinv_threaded.c b/src/nmod_poly/test/t-compose_mod_brent_kung_precomp_preinv_threaded.c index 2b4a051fed..1dc15f1033 100644 --- a/src/nmod_poly/test/t-compose_mod_brent_kung_precomp_preinv_threaded.c +++ b/src/nmod_poly/test/t-compose_mod_brent_kung_precomp_preinv_threaded.c @@ -32,7 +32,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_brent_kung_precomp_preinv_threaded, st nmod_poly_struct * tmp; nmod_mat_t B; nmod_mat_struct * C; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); slong j, num_threads; nmod_poly_matrix_precompute_arg_t * args1; thread_pool_handle * threads; @@ -140,7 +140,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_brent_kung_precomp_preinv_threaded, st nmod_poly_t a, b, c, cinv, d; nmod_poly_struct * res; nmod_mat_t B; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); slong j, num_threads; nmod_poly_compose_mod_precomp_preinv_arg_t * args1; thread_pool_handle * threads; diff --git a/src/nmod_poly/test/t-compose_mod_brent_kung_preinv.c b/src/nmod_poly/test/t-compose_mod_brent_kung_preinv.c index 5224af634c..4dd5799a8e 100644 --- a/src/nmod_poly/test/t-compose_mod_brent_kung_preinv.c +++ b/src/nmod_poly/test/t-compose_mod_brent_kung_preinv.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_brent_kung_preinv, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, cinv, d, e; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); @@ -66,7 +66,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_brent_kung_preinv, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, cinv, d; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); @@ -107,7 +107,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_brent_kung_preinv, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, cinv, d; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); @@ -148,7 +148,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_brent_kung_preinv, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, cinv, d; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); @@ -189,7 +189,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_brent_kung_preinv, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, cinv, d; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); diff --git a/src/nmod_poly/test/t-compose_mod_brent_kung_vec_preinv.c b/src/nmod_poly/test/t-compose_mod_brent_kung_vec_preinv.c index 03ee105361..7a278fde36 100644 --- 
a/src/nmod_poly/test/t-compose_mod_brent_kung_vec_preinv.c +++ b/src/nmod_poly/test/t-compose_mod_brent_kung_vec_preinv.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_brent_kung_vec_preinv, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, ainv, b, c; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); slong j, k, l; nmod_poly_struct * pow, * res; diff --git a/src/nmod_poly/test/t-compose_mod_brent_kung_vec_preinv_threaded.c b/src/nmod_poly/test/t-compose_mod_brent_kung_vec_preinv_threaded.c index c9a6d3fe4d..0a3c555353 100644 --- a/src/nmod_poly/test/t-compose_mod_brent_kung_vec_preinv_threaded.c +++ b/src/nmod_poly/test/t-compose_mod_brent_kung_vec_preinv_threaded.c @@ -25,7 +25,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_brent_kung_vec_preinv_threaded, state) for (i = 0; i < 20 * flint_test_multiplier(); i++) { nmod_poly_t a, ainv, b, c; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); slong j, k, l; nmod_poly_struct * pow, * res; diff --git a/src/nmod_poly/test/t-compose_mod_horner.c b/src/nmod_poly/test/t-compose_mod_horner.c index c7710b1cf4..ad17c46f6d 100644 --- a/src/nmod_poly/test/t-compose_mod_horner.c +++ b/src/nmod_poly/test/t-compose_mod_horner.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_horner, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, d, e; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); @@ -59,7 +59,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_horner, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, d; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); @@ -94,7 +94,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_horner, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, d; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); @@ -129,7 +129,7 @@ TEST_FUNCTION_START(nmod_poly_compose_mod_horner, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, d; - mp_limb_t m = n_randtest_prime(state, 0); + ulong m = n_randtest_prime(state, 0); nmod_poly_init(a, m); nmod_poly_init(b, m); diff --git a/src/nmod_poly/test/t-compose_series.c b/src/nmod_poly/test/t-compose_series.c index 638fce9168..c2ffc21c07 100644 --- a/src/nmod_poly/test/t-compose_series.c +++ b/src/nmod_poly/test/t-compose_series.c @@ -24,7 +24,7 @@ TEST_FUNCTION_START(nmod_poly_compose_series, state) for (i = 0; i < 10 * flint_test_multiplier(); i++) { nmod_poly_t f, g, h; - mp_limb_t m; + ulong m; slong n; m = n_randtest_prime(state, 0); @@ -58,7 +58,7 @@ TEST_FUNCTION_START(nmod_poly_compose_series, state) for (i = 0; i < 10 * flint_test_multiplier(); i++) { nmod_poly_t f, g, h; - mp_limb_t m; + ulong m; slong n; m = n_randtest_prime(state, 0); @@ -92,7 +92,7 @@ TEST_FUNCTION_START(nmod_poly_compose_series, state) for (i = 0; i < 10 * flint_test_multiplier(); i++) { nmod_poly_t f, g, h, s, t; - mp_limb_t m; + ulong m; slong n; m = n_randtest_prime(state, 0); diff --git a/src/nmod_poly/test/t-cos_series.c b/src/nmod_poly/test/t-cos_series.c index bb06ffc016..b5a5c8555c 100644 --- a/src/nmod_poly/test/t-cos_series.c +++ b/src/nmod_poly/test/t-cos_series.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_cos_series, state) { 
nmod_poly_t A, cosA, sinA, B, C, one; slong n; - mp_limb_t mod; + ulong mod; do { mod = n_randtest_prime(state, 0); } while (mod == 2); n = 1 + n_randtest(state) % 100; @@ -73,7 +73,7 @@ TEST_FUNCTION_START(nmod_poly_cos_series, state) { nmod_poly_t A, B; slong n; - mp_limb_t mod; + ulong mod; do { mod = n_randtest_prime(state, 0); } while (mod == 2); n = n_randtest(state) % 50; n = FLINT_MIN(n, mod); diff --git a/src/nmod_poly/test/t-cosh_series.c b/src/nmod_poly/test/t-cosh_series.c index 7288f960df..2bb9fab6e9 100644 --- a/src/nmod_poly/test/t-cosh_series.c +++ b/src/nmod_poly/test/t-cosh_series.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_cosh_series, state) { nmod_poly_t A, coshA, sinhA, B, C, one; slong n; - mp_limb_t mod; + ulong mod; do { mod = n_randtest_prime(state, 0); } while (mod == 2); n = 1 + n_randtest(state) % 100; @@ -73,7 +73,7 @@ TEST_FUNCTION_START(nmod_poly_cosh_series, state) { nmod_poly_t A, B; slong n; - mp_limb_t mod; + ulong mod; do { mod = n_randtest_prime(state, 0); } while (mod == 2); n = n_randtest(state) % 50; n = FLINT_MIN(n, mod); diff --git a/src/nmod_poly/test/t-deflate.c b/src/nmod_poly/test/t-deflate.c index 7ed2bf61d3..8f3bec88d6 100644 --- a/src/nmod_poly/test/t-deflate.c +++ b/src/nmod_poly/test/t-deflate.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_poly_deflate, state) for (iter = 0; iter < 100 * flint_test_multiplier(); iter++) { nmod_poly_t poly1, poly2, poly3; - mp_limb_t modulus; + ulong modulus; ulong infl1, infl, deflation; modulus = n_randtest_prime(state, 0); diff --git a/src/nmod_poly/test/t-derivative.c b/src/nmod_poly/test/t-derivative.c index 22cd3161d0..d41b0975af 100644 --- a/src/nmod_poly/test/t-derivative.c +++ b/src/nmod_poly/test/t-derivative.c @@ -25,7 +25,7 @@ TEST_FUNCTION_START(nmod_poly_derivative, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -66,7 +66,7 @@ TEST_FUNCTION_START(nmod_poly_derivative, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); diff --git a/src/nmod_poly/test/t-discriminant.c b/src/nmod_poly/test/t-discriminant.c index 083ce418d2..687a7e141f 100644 --- a/src/nmod_poly/test/t-discriminant.c +++ b/src/nmod_poly/test/t-discriminant.c @@ -22,8 +22,8 @@ TEST_FUNCTION_START(nmod_poly_discriminant, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t f, g, h; - mp_limb_t x, y, z, r; - mp_limb_t n; + ulong x, y, z, r; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -72,8 +72,8 @@ TEST_FUNCTION_START(nmod_poly_discriminant, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t f; - mp_limb_t y; - mp_limb_t n; + ulong y; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-div.c b/src/nmod_poly/test/t-div.c index 9d3b3a0874..7febb10e66 100644 --- a/src/nmod_poly/test/t-div.c +++ b/src/nmod_poly/test/t-div.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_div, state) { nmod_poly_t a, b, q, r, q2; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -64,7 +64,7 @@ TEST_FUNCTION_START(nmod_poly_div, state) { nmod_poly_t a, b, q; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -100,7 +100,7 @@ 
TEST_FUNCTION_START(nmod_poly_div, state) { nmod_poly_t a, b, q; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-div_newton_n_preinv.c b/src/nmod_poly/test/t-div_newton_n_preinv.c index a7f8eb3451..cf990ae1a5 100644 --- a/src/nmod_poly/test/t-div_newton_n_preinv.c +++ b/src/nmod_poly/test/t-div_newton_n_preinv.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_div_newton_n_preinv, state) { nmod_poly_t a, b, binv, q, r, test; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -72,7 +72,7 @@ TEST_FUNCTION_START(nmod_poly_div_newton_n_preinv, state) { nmod_poly_t a, b, binv, q; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -116,7 +116,7 @@ TEST_FUNCTION_START(nmod_poly_div_newton_n_preinv, state) { nmod_poly_t a, b, binv, q; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -160,7 +160,7 @@ TEST_FUNCTION_START(nmod_poly_div_newton_n_preinv, state) { nmod_poly_t a, b, binv, q; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-div_root.c b/src/nmod_poly/test/t-div_root.c index 3a6cc62723..75c252d475 100644 --- a/src/nmod_poly/test/t-div_root.c +++ b/src/nmod_poly/test/t-div_root.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_div_root, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t P, Q, D, DQ, DR; - mp_limb_t mod, r, rem; + ulong mod, r, rem; slong n; mod = n_randtest_prime(state, 0); @@ -69,7 +69,7 @@ TEST_FUNCTION_START(nmod_poly_div_root, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t P, Q1, Q2; - mp_limb_t mod, r, rem1, rem2; + ulong mod, r, rem1, rem2; slong n; mod = n_randtest_prime(state, 0); diff --git a/src/nmod_poly/test/t-div_series.c b/src/nmod_poly/test/t-div_series.c index e08a136a5e..8d48736e72 100644 --- a/src/nmod_poly/test/t-div_series.c +++ b/src/nmod_poly/test/t-div_series.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_div_series, state) nmod_poly_t q, a, b, prod; slong m; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -67,7 +67,7 @@ TEST_FUNCTION_START(nmod_poly_div_series, state) nmod_poly_t q, a, b; slong m; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -107,7 +107,7 @@ TEST_FUNCTION_START(nmod_poly_div_series, state) nmod_poly_t q, a, b; slong m; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-div_series_basecase.c b/src/nmod_poly/test/t-div_series_basecase.c index aa7a8b5366..6ca63145c1 100644 --- a/src/nmod_poly/test/t-div_series_basecase.c +++ b/src/nmod_poly/test/t-div_series_basecase.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_div_series_basecase, state) nmod_poly_t q, a, b, prod; slong m; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -67,7 +67,7 @@ TEST_FUNCTION_START(nmod_poly_div_series_basecase, state) nmod_poly_t q, a, b; slong m; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -107,7 +107,7 @@ TEST_FUNCTION_START(nmod_poly_div_series_basecase, state) nmod_poly_t q, a, b; slong m; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-divexact.c b/src/nmod_poly/test/t-divexact.c index 419ba2a4b4..0244571340 100644 --- a/src/nmod_poly/test/t-divexact.c +++ 
b/src/nmod_poly/test/t-divexact.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_divexact, state) nmod_poly_t a, b, ab, q; int aliasing; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-divides.c b/src/nmod_poly/test/t-divides.c index 6e1aa65730..2bb457461f 100644 --- a/src/nmod_poly/test/t-divides.c +++ b/src/nmod_poly/test/t-divides.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_divides, state) { nmod_poly_t a, b, q, prod; int divides; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -64,7 +64,7 @@ TEST_FUNCTION_START(nmod_poly_divides, state) { nmod_poly_t a, b, q, prod; int divides; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -106,7 +106,7 @@ TEST_FUNCTION_START(nmod_poly_divides, state) { nmod_poly_t a, b, q; int divides1, divides2; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -144,7 +144,7 @@ TEST_FUNCTION_START(nmod_poly_divides, state) { nmod_poly_t a, b, q; int divides1, divides2; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-divides_classical.c b/src/nmod_poly/test/t-divides_classical.c index 95ed910bd0..8f74d74e35 100644 --- a/src/nmod_poly/test/t-divides_classical.c +++ b/src/nmod_poly/test/t-divides_classical.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_divides_classical, state) { nmod_poly_t a, b, q, prod; int divides; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -64,7 +64,7 @@ TEST_FUNCTION_START(nmod_poly_divides_classical, state) { nmod_poly_t a, b, q, prod; int divides; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -106,7 +106,7 @@ TEST_FUNCTION_START(nmod_poly_divides_classical, state) { nmod_poly_t a, b, q; int divides1, divides2; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -144,7 +144,7 @@ TEST_FUNCTION_START(nmod_poly_divides_classical, state) { nmod_poly_t a, b, q; int divides1, divides2; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-divrem.c b/src/nmod_poly/test/t-divrem.c index 0c9e30c65c..ec0c92f1e0 100644 --- a/src/nmod_poly/test/t-divrem.c +++ b/src/nmod_poly/test/t-divrem.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_divrem, state) { nmod_poly_t a, b, q, r, prod; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -65,7 +65,7 @@ TEST_FUNCTION_START(nmod_poly_divrem, state) { nmod_poly_t a, b, q, r; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -104,7 +104,7 @@ TEST_FUNCTION_START(nmod_poly_divrem, state) { nmod_poly_t a, b, q, r; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -143,7 +143,7 @@ TEST_FUNCTION_START(nmod_poly_divrem, state) { nmod_poly_t a, b, q, r; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -182,7 +182,7 @@ TEST_FUNCTION_START(nmod_poly_divrem, state) { nmod_poly_t a, b, q, r; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -221,7 +221,7 @@ TEST_FUNCTION_START(nmod_poly_divrem, state) { nmod_poly_t a, b, q, r, prod; - mp_limb_t n = n_randprime(state, n_randint(state,FLINT_BITS-1)+2, 0); + ulong n = n_randprime(state, 
n_randint(state,FLINT_BITS-1)+2, 0); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -265,7 +265,7 @@ TEST_FUNCTION_START(nmod_poly_divrem, state) { nmod_poly_t a, b, q, r, prod; - mp_limb_t n = n_randprime(state, n_randint(state,FLINT_BITS-1)+2, 0); + ulong n = n_randprime(state, n_randint(state,FLINT_BITS-1)+2, 0); nmod_poly_init(a, n); nmod_poly_init(b, n); diff --git a/src/nmod_poly/test/t-divrem_basecase.c b/src/nmod_poly/test/t-divrem_basecase.c index 7323eecdff..f194d1263c 100644 --- a/src/nmod_poly/test/t-divrem_basecase.c +++ b/src/nmod_poly/test/t-divrem_basecase.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_divrem_basecase, state) { nmod_poly_t a, b, q, r, prod; - mp_limb_t n; + ulong n; do { n = n_randtest_not_zero(state); @@ -69,7 +69,7 @@ TEST_FUNCTION_START(nmod_poly_divrem_basecase, state) { nmod_poly_t a, b, q, r; - mp_limb_t n; + ulong n; do { n = n_randtest(state); @@ -112,7 +112,7 @@ TEST_FUNCTION_START(nmod_poly_divrem_basecase, state) { nmod_poly_t a, b, q, r; - mp_limb_t n; + ulong n; do { n = n_randtest(state); @@ -155,7 +155,7 @@ TEST_FUNCTION_START(nmod_poly_divrem_basecase, state) { nmod_poly_t a, b, q, r; - mp_limb_t n; + ulong n; do { n = n_randtest(state); @@ -198,7 +198,7 @@ TEST_FUNCTION_START(nmod_poly_divrem_basecase, state) { nmod_poly_t a, b, q, r; - mp_limb_t n; + ulong n; do { n = n_randtest(state); diff --git a/src/nmod_poly/test/t-divrem_newton_n_preinv.c b/src/nmod_poly/test/t-divrem_newton_n_preinv.c index 85c40dfc6e..354537680a 100644 --- a/src/nmod_poly/test/t-divrem_newton_n_preinv.c +++ b/src/nmod_poly/test/t-divrem_newton_n_preinv.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_divrem_newton_n_preinv, state) { nmod_poly_t a, b, binv, q, r, test; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -74,7 +74,7 @@ TEST_FUNCTION_START(nmod_poly_divrem_newton_n_preinv, state) { nmod_poly_t a, b, binv, q, r; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -121,7 +121,7 @@ TEST_FUNCTION_START(nmod_poly_divrem_newton_n_preinv, state) { nmod_poly_t a, b, binv, q, r; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -168,7 +168,7 @@ TEST_FUNCTION_START(nmod_poly_divrem_newton_n_preinv, state) { nmod_poly_t a, b, binv, q, r; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -216,7 +216,7 @@ TEST_FUNCTION_START(nmod_poly_divrem_newton_n_preinv, state) { nmod_poly_t a, b, binv, q, r; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -263,7 +263,7 @@ TEST_FUNCTION_START(nmod_poly_divrem_newton_n_preinv, state) { nmod_poly_t a, b, binv, q, r; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -310,7 +310,7 @@ TEST_FUNCTION_START(nmod_poly_divrem_newton_n_preinv, state) { nmod_poly_t a, b, binv, q, r; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-evaluate_mat_horner.c b/src/nmod_poly/test/t-evaluate_mat_horner.c index e1481c2b4b..860b71737d 100644 --- a/src/nmod_poly/test/t-evaluate_mat_horner.c +++ b/src/nmod_poly/test/t-evaluate_mat_horner.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_evaluate_mat_horner, state) { nmod_poly_t a; nmod_mat_t A, B; - mp_limb_t sum, n = n_randtest_not_zero(state); + ulong sum, n = n_randtest_not_zero(state); slong m, k; nmod_poly_init(a, n); @@ -64,7 +64,7 @@ TEST_FUNCTION_START(nmod_poly_evaluate_mat_horner, state) { nmod_poly_t a, b; 
nmod_mat_t A, B, C; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong m; nmod_poly_init(a, n); diff --git a/src/nmod_poly/test/t-evaluate_mat_paterson_stockmeyer.c b/src/nmod_poly/test/t-evaluate_mat_paterson_stockmeyer.c index 7e69a19284..d9a7ff61ea 100644 --- a/src/nmod_poly/test/t-evaluate_mat_paterson_stockmeyer.c +++ b/src/nmod_poly/test/t-evaluate_mat_paterson_stockmeyer.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_evaluate_mat_paterson_stockmeyer, state) { nmod_poly_t a; nmod_mat_t A, B; - mp_limb_t sum, n = n_randtest_not_zero(state); + ulong sum, n = n_randtest_not_zero(state); slong m, k; nmod_poly_init(a, n); @@ -64,7 +64,7 @@ TEST_FUNCTION_START(nmod_poly_evaluate_mat_paterson_stockmeyer, state) { nmod_poly_t a, b; nmod_mat_t A, B, C; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong m; nmod_poly_init(a, n); diff --git a/src/nmod_poly/test/t-evaluate_nmod.c b/src/nmod_poly/test/t-evaluate_nmod.c index cca757c4e9..83eb70606f 100644 --- a/src/nmod_poly/test/t-evaluate_nmod.c +++ b/src/nmod_poly/test/t-evaluate_nmod.c @@ -21,8 +21,8 @@ TEST_FUNCTION_START(nmod_poly_evaluate_nmod, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a; - mp_limb_t n = n_randtest_not_zero(state); - mp_limb_t sum, eval; + ulong n = n_randtest_not_zero(state); + ulong sum, eval; nmod_poly_init(a, n); nmod_poly_randtest(a, state, n_randint(state, 100)); @@ -51,8 +51,8 @@ TEST_FUNCTION_START(nmod_poly_evaluate_nmod, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b; - mp_limb_t n = n_randtest_not_zero(state); - mp_limb_t eval1, eval2, c; + ulong n = n_randtest_not_zero(state); + ulong eval1, eval2, c; nmod_poly_init(a, n); nmod_poly_init(b, n); diff --git a/src/nmod_poly/test/t-evaluate_nmod_vec_fast.c b/src/nmod_poly/test/t-evaluate_nmod_vec_fast.c index 66931a1237..7b5c2c4210 100644 --- a/src/nmod_poly/test/t-evaluate_nmod_vec_fast.c +++ b/src/nmod_poly/test/t-evaluate_nmod_vec_fast.c @@ -22,8 +22,8 @@ TEST_FUNCTION_START(nmod_poly_evaluate_nmod_vec_fast, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t P, Q; - mp_ptr x, y, z; - mp_limb_t mod; + nn_ptr x, y, z; + ulong mod; slong j, n, npoints; mod = n_randtest_prime(state, 0); diff --git a/src/nmod_poly/test/t-exp_series.c b/src/nmod_poly/test/t-exp_series.c index 12bf9af0c4..b55c652640 100644 --- a/src/nmod_poly/test/t-exp_series.c +++ b/src/nmod_poly/test/t-exp_series.c @@ -24,7 +24,7 @@ TEST_FUNCTION_START(nmod_poly_exp_series, state) nmod_poly_t A, B, AB, expA, expB, expAB, S; slong n; slong N = 100; - mp_limb_t mod; + ulong mod; /* Make sure to workout the Newton code */ if (n_randint(state, 100) == 1) @@ -92,7 +92,7 @@ TEST_FUNCTION_START(nmod_poly_exp_series, state) { nmod_poly_t A, B; slong n; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); if (i < 30) diff --git a/src/nmod_poly/test/t-find_distinct_nonzero_roots.c b/src/nmod_poly/test/t-find_distinct_nonzero_roots.c index 7f4db93f82..728afd96a5 100644 --- a/src/nmod_poly/test/t-find_distinct_nonzero_roots.c +++ b/src/nmod_poly/test/t-find_distinct_nonzero_roots.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_find_distinct_nonzero_roots, state) { int highdegreefactor; nmod_poly_t a, b, r; - mp_limb_t p; + ulong p; p = n_randtest_prime(state, 1); diff --git a/src/nmod_poly/test/t-fread_print.c b/src/nmod_poly/test/t-fread_print.c index 84bd86e9da..4997362d10 100644 --- a/src/nmod_poly/test/t-fread_print.c +++ 
b/src/nmod_poly/test/t-fread_print.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_fread_print, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, b; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); FILE * f = fopen("nmod_poly_test", "w+"); if (!f) diff --git a/src/nmod_poly/test/t-gcd.c b/src/nmod_poly/test/t-gcd.c index acd5dbf036..27c16d74a4 100644 --- a/src/nmod_poly/test/t-gcd.c +++ b/src/nmod_poly/test/t-gcd.c @@ -26,7 +26,7 @@ TEST_FUNCTION_START(nmod_poly_gcd, state) { nmod_poly_t a, b, c, g; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -75,7 +75,7 @@ TEST_FUNCTION_START(nmod_poly_gcd, state) { nmod_poly_t a, b, g; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -110,7 +110,7 @@ TEST_FUNCTION_START(nmod_poly_gcd, state) { nmod_poly_t a, b, g; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-gcd_euclidean.c b/src/nmod_poly/test/t-gcd_euclidean.c index 112f56afbc..229c9aa1d2 100644 --- a/src/nmod_poly/test/t-gcd_euclidean.c +++ b/src/nmod_poly/test/t-gcd_euclidean.c @@ -25,7 +25,7 @@ TEST_FUNCTION_START(nmod_poly_gcd_euclidean, state) { nmod_poly_t a, b, c, g; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -74,7 +74,7 @@ TEST_FUNCTION_START(nmod_poly_gcd_euclidean, state) { nmod_poly_t a, b, g; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -109,7 +109,7 @@ TEST_FUNCTION_START(nmod_poly_gcd_euclidean, state) { nmod_poly_t a, b, g; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-gcd_hgcd.c b/src/nmod_poly/test/t-gcd_hgcd.c index 0e2c0c2615..32e5a8ebe3 100644 --- a/src/nmod_poly/test/t-gcd_hgcd.c +++ b/src/nmod_poly/test/t-gcd_hgcd.c @@ -26,7 +26,7 @@ TEST_FUNCTION_START(nmod_poly_gcd_hgcd, state) { nmod_poly_t a, b, c, g; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -75,7 +75,7 @@ TEST_FUNCTION_START(nmod_poly_gcd_hgcd, state) { nmod_poly_t a, b, g; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -110,7 +110,7 @@ TEST_FUNCTION_START(nmod_poly_gcd_hgcd, state) { nmod_poly_t a, b, g; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-gcdinv.c b/src/nmod_poly/test/t-gcdinv.c index 0b0eb78237..a8d89778ac 100644 --- a/src/nmod_poly/test/t-gcdinv.c +++ b/src/nmod_poly/test/t-gcdinv.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_gcdinv, state) /* Compare with result from XGCD */ for (i = 0; i < 1000; i++) { - mp_limb_t p; + ulong p; nmod_poly_t a, b, d, g, s, t, u; p = n_randtest_prime(state, 0); @@ -73,7 +73,7 @@ TEST_FUNCTION_START(nmod_poly_gcdinv, state) /* Compare with result from XGCD */ for (i = 0; i < 1000; i++) { - mp_limb_t p; + ulong p; nmod_poly_t a, b, d, f, g, s, t, u; p = n_randtest_prime(state, 0); diff --git a/src/nmod_poly/test/t-get_set_coeff_ui.c b/src/nmod_poly/test/t-get_set_coeff_ui.c index 3ff4013072..a17fcc4dca 100644 --- a/src/nmod_poly/test/t-get_set_coeff_ui.c +++ b/src/nmod_poly/test/t-get_set_coeff_ui.c @@ -22,8 +22,8 @@ TEST_FUNCTION_START(nmod_poly_get_set_coeff_ui, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a; - mp_limb_t n = n_randtest_not_zero(state); - mp_limb_t c1 = n_randtest(state), c2; + ulong n = 
n_randtest_not_zero(state); + ulong c1 = n_randtest(state), c2; j = n_randint(state, 100); diff --git a/src/nmod_poly/test/t-get_set_str.c b/src/nmod_poly/test/t-get_set_str.c index 65aff3332e..31a57348f2 100644 --- a/src/nmod_poly/test/t-get_set_str.c +++ b/src/nmod_poly/test/t-get_set_str.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_get_set_str, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); char * str; nmod_poly_init(a, n); diff --git a/src/nmod_poly/test/t-hgcd.c b/src/nmod_poly/test/t-hgcd.c index 3313953e4d..42004a992b 100644 --- a/src/nmod_poly/test/t-hgcd.c +++ b/src/nmod_poly/test/t-hgcd.c @@ -41,11 +41,11 @@ TEST_FUNCTION_START(nmod_poly_hgcd, state) { nmod_poly_t a, b, c, d, c1, d1, s, t; - mp_ptr M[4]; + nn_ptr M[4]; slong lenM[4]; slong sgnM; - mp_limb_t n = n_randprime(state, FLINT_BITS, 0); + ulong n = n_randprime(state, FLINT_BITS, 0); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -132,7 +132,7 @@ TEST_FUNCTION_START(nmod_poly_hgcd, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { - mp_limb_t p; + ulong p; slong sgnM, sgnMr; nmod_poly_t m11, m12, m21, m22, A, B, a, b; nmod_poly_t m11r, m12r, m21r, m22r, Ar, Br; diff --git a/src/nmod_poly/test/t-inflate.c b/src/nmod_poly/test/t-inflate.c index 3602fabe6b..5d397da5a4 100644 --- a/src/nmod_poly/test/t-inflate.c +++ b/src/nmod_poly/test/t-inflate.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_poly_inflate, state) for (iter = 0; iter < 100 * flint_test_multiplier(); iter++) { nmod_poly_t poly1, poly2, poly3, xp; - mp_limb_t modulus; + ulong modulus; ulong inflation; modulus = n_randtest_prime(state, 0); diff --git a/src/nmod_poly/test/t-init_realloc_clear.c b/src/nmod_poly/test/t-init_realloc_clear.c index a5ad1c84fd..66b8e33905 100644 --- a/src/nmod_poly/test/t-init_realloc_clear.c +++ b/src/nmod_poly/test/t-init_realloc_clear.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_poly_init_realloc_clear, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init2(a, n, n_randint(state, 100)); nmod_poly_clear(a); @@ -29,7 +29,7 @@ TEST_FUNCTION_START(nmod_poly_init_realloc_clear, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init2(a, n, n_randint(state, 100)); nmod_poly_realloc(a, n_randint(state, 100)); @@ -40,7 +40,7 @@ TEST_FUNCTION_START(nmod_poly_init_realloc_clear, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_randtest(a, state, n_randint(state, 100)); diff --git a/src/nmod_poly/test/t-integral.c b/src/nmod_poly/test/t-integral.c index 57ec74b69d..33c2f2817d 100644 --- a/src/nmod_poly/test/t-integral.c +++ b/src/nmod_poly/test/t-integral.c @@ -23,8 +23,8 @@ TEST_FUNCTION_START(nmod_poly_integral, state) { nmod_poly_t a, b, c; ulong len; - mp_limb_t c0; - mp_limb_t n = n_randtest_prime(state, 0); + ulong c0; + ulong n = n_randtest_prime(state, 0); len = n_randint(state, 100); len = FLINT_MIN(len, n - 1); @@ -61,7 +61,7 @@ TEST_FUNCTION_START(nmod_poly_integral, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b; - mp_limb_t n = n_randtest_prime(state, 0); + ulong n = n_randtest_prime(state, 0); 
nmod_poly_init(a, n); nmod_poly_init(b, n); diff --git a/src/nmod_poly/test/t-interpolate_nmod_vec.c b/src/nmod_poly/test/t-interpolate_nmod_vec.c index c599233c63..8ff5db134e 100644 --- a/src/nmod_poly/test/t-interpolate_nmod_vec.c +++ b/src/nmod_poly/test/t-interpolate_nmod_vec.c @@ -22,8 +22,8 @@ TEST_FUNCTION_START(nmod_poly_interpolate_nmod_vec, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t P, Q; - mp_ptr x, y; - mp_limb_t mod; + nn_ptr x, y; + ulong mod; slong j, n, npoints; mod = n_randtest_prime(state, 0); diff --git a/src/nmod_poly/test/t-interpolate_nmod_vec_barycentric.c b/src/nmod_poly/test/t-interpolate_nmod_vec_barycentric.c index 0ffb717088..26c7e000e2 100644 --- a/src/nmod_poly/test/t-interpolate_nmod_vec_barycentric.c +++ b/src/nmod_poly/test/t-interpolate_nmod_vec_barycentric.c @@ -22,8 +22,8 @@ TEST_FUNCTION_START(nmod_poly_interpolate_nmod_vec_barycentric, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t P, Q; - mp_ptr x, y; - mp_limb_t mod; + nn_ptr x, y; + ulong mod; slong j, n, npoints; mod = n_randtest_prime(state, 0); diff --git a/src/nmod_poly/test/t-interpolate_nmod_vec_fast.c b/src/nmod_poly/test/t-interpolate_nmod_vec_fast.c index b5643457f5..fdebd9ab7b 100644 --- a/src/nmod_poly/test/t-interpolate_nmod_vec_fast.c +++ b/src/nmod_poly/test/t-interpolate_nmod_vec_fast.c @@ -22,8 +22,8 @@ TEST_FUNCTION_START(nmod_poly_interpolate_nmod_vec_fast, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t P, Q; - mp_ptr x, y; - mp_limb_t mod; + nn_ptr x, y; + ulong mod; slong j, n, npoints; mod = n_randtest_prime(state, 0); diff --git a/src/nmod_poly/test/t-interpolate_nmod_vec_newton.c b/src/nmod_poly/test/t-interpolate_nmod_vec_newton.c index accef48c56..2981f9ee6a 100644 --- a/src/nmod_poly/test/t-interpolate_nmod_vec_newton.c +++ b/src/nmod_poly/test/t-interpolate_nmod_vec_newton.c @@ -22,8 +22,8 @@ TEST_FUNCTION_START(nmod_poly_interpolate_nmod_vec_newton, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t P, Q; - mp_ptr x, y; - mp_limb_t mod; + nn_ptr x, y; + ulong mod; slong j, n, npoints; mod = n_randtest_prime(state, 0); diff --git a/src/nmod_poly/test/t-inv_series_basecase.c b/src/nmod_poly/test/t-inv_series_basecase.c index 185d605c71..5384f0b8a4 100644 --- a/src/nmod_poly/test/t-inv_series_basecase.c +++ b/src/nmod_poly/test/t-inv_series_basecase.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_inv_series_basecase, state) nmod_poly_t q, qinv, prod; slong m; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -64,7 +64,7 @@ TEST_FUNCTION_START(nmod_poly_inv_series_basecase, state) nmod_poly_t q, qinv; slong m; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-inv_series_newton.c b/src/nmod_poly/test/t-inv_series_newton.c index 1b9a6adbbd..2d3071b179 100644 --- a/src/nmod_poly/test/t-inv_series_newton.c +++ b/src/nmod_poly/test/t-inv_series_newton.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_inv_series_newton, state) nmod_poly_t q, qinv, prod; slong m; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -64,7 +64,7 @@ TEST_FUNCTION_START(nmod_poly_inv_series_newton, state) nmod_poly_t q, qinv; slong m; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-invmod.c b/src/nmod_poly/test/t-invmod.c index 33b30e5212..ddba72016c 100644 --- 
a/src/nmod_poly/test/t-invmod.c +++ b/src/nmod_poly/test/t-invmod.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_invmod, state) /* Aliasing c and a */ for (i = 0; i < 500; i++) { - mp_limb_t p; + ulong p; nmod_poly_t a, b, c; int ans1, ans2; @@ -61,7 +61,7 @@ TEST_FUNCTION_START(nmod_poly_invmod, state) /* Aliasing c and b */ for (i = 0; i < 500; i++) { - mp_limb_t p; + ulong p; nmod_poly_t a, b, c; int ans1, ans2; @@ -100,7 +100,7 @@ TEST_FUNCTION_START(nmod_poly_invmod, state) /* Compare with result from XGCD */ for (i = 0; i < 1000; i++) { - mp_limb_t p; + ulong p; nmod_poly_t a, b, g, s, t, u; int ans; @@ -152,7 +152,7 @@ TEST_FUNCTION_START(nmod_poly_invmod, state) /* Check correctness */ for (i = 0; i < 1000; i++) { - mp_limb_t p; + ulong p; nmod_poly_t a, b, f, u; int ans; diff --git a/src/nmod_poly/test/t-invsqrt_series.c b/src/nmod_poly/test/t-invsqrt_series.c index 9a19970144..bb8a5e80c1 100644 --- a/src/nmod_poly/test/t-invsqrt_series.c +++ b/src/nmod_poly/test/t-invsqrt_series.c @@ -24,7 +24,7 @@ TEST_FUNCTION_START(nmod_poly_invsqrt_series, state) nmod_poly_t h, g, r; slong m; - mp_limb_t n; + ulong n; do n = n_randtest_prime(state, 0); while (n == UWORD(2)); @@ -67,7 +67,7 @@ TEST_FUNCTION_START(nmod_poly_invsqrt_series, state) nmod_poly_t g, h; slong m; - mp_limb_t n; + ulong n; do n = n_randtest_prime(state, 0); while (n == UWORD(2)); diff --git a/src/nmod_poly/test/t-log_series.c b/src/nmod_poly/test/t-log_series.c index e24c7943f6..65247770fc 100644 --- a/src/nmod_poly/test/t-log_series.c +++ b/src/nmod_poly/test/t-log_series.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_log_series, state) { nmod_poly_t A, B, AB, logA, logB, logAB, S; slong n; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); n = n_randtest(state) % 100; @@ -86,7 +86,7 @@ TEST_FUNCTION_START(nmod_poly_log_series, state) { nmod_poly_t A, B; slong n; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); n = n_randtest(state) % 50; n = FLINT_MIN(n, mod); diff --git a/src/nmod_poly/test/t-make_monic.c b/src/nmod_poly/test/t-make_monic.c index 19adb92846..6bfc67cb20 100644 --- a/src/nmod_poly/test/t-make_monic.c +++ b/src/nmod_poly/test/t-make_monic.c @@ -21,8 +21,8 @@ TEST_FUNCTION_START(nmod_poly_make_monic, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b; - mp_limb_t n = n_randtest_not_zero(state); - mp_limb_t l; + ulong n = n_randtest_not_zero(state); + ulong l; nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -56,8 +56,8 @@ TEST_FUNCTION_START(nmod_poly_make_monic, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a; - mp_limb_t n = n_randtest_not_zero(state); - mp_limb_t l; + ulong n = n_randtest_not_zero(state); + ulong l; nmod_poly_init(a, n); diff --git a/src/nmod_poly/test/t-mul.c b/src/nmod_poly/test/t-mul.c index caa09c9868..d65c37716f 100644 --- a/src/nmod_poly/test/t-mul.c +++ b/src/nmod_poly/test/t-mul.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_mul, state) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -53,7 +53,7 @@ TEST_FUNCTION_START(nmod_poly_mul, state) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -84,7 +84,7 @@ TEST_FUNCTION_START(nmod_poly_mul, state) { nmod_poly_t a1, a2, b, c, d; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a1, n); 
nmod_poly_init(a2, n); diff --git a/src/nmod_poly/test/t-mul_KS.c b/src/nmod_poly/test/t-mul_KS.c index 8b22c12c39..902bb737ec 100644 --- a/src/nmod_poly/test/t-mul_KS.c +++ b/src/nmod_poly/test/t-mul_KS.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_mul_KS, state) for (i = 0; i < 200 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -51,7 +51,7 @@ TEST_FUNCTION_START(nmod_poly_mul_KS, state) for (i = 0; i < 200 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -81,7 +81,7 @@ TEST_FUNCTION_START(nmod_poly_mul_KS, state) for (i = 0; i < 200 * flint_test_multiplier(); i++) { nmod_poly_t a1, a2, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a1, n); nmod_poly_init(a2, n); diff --git a/src/nmod_poly/test/t-mul_KS2.c b/src/nmod_poly/test/t-mul_KS2.c index 2ac5bb3226..8a2c12218f 100644 --- a/src/nmod_poly/test/t-mul_KS2.c +++ b/src/nmod_poly/test/t-mul_KS2.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_mul_KS2, state) for (i = 0; i < 200 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -53,7 +53,7 @@ TEST_FUNCTION_START(nmod_poly_mul_KS2, state) for (i = 0; i < 200 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -83,7 +83,7 @@ TEST_FUNCTION_START(nmod_poly_mul_KS2, state) for (i = 0; i < 200 * flint_test_multiplier(); i++) { nmod_poly_t a1, a2, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a1, n); nmod_poly_init(a2, n); @@ -115,7 +115,7 @@ TEST_FUNCTION_START(nmod_poly_mul_KS2, state) #if FLINT64 { nmod_poly_t a, b, c, d; - mp_limb_t mod = UWORD(2289285083314003039); + ulong mod = UWORD(2289285083314003039); nmod_poly_init(a, mod); nmod_poly_init(b, mod); diff --git a/src/nmod_poly/test/t-mul_KS4.c b/src/nmod_poly/test/t-mul_KS4.c index d118150b78..1c10ffa28e 100644 --- a/src/nmod_poly/test/t-mul_KS4.c +++ b/src/nmod_poly/test/t-mul_KS4.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_mul_KS4, state) for (i = 0; i < 200 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -53,7 +53,7 @@ TEST_FUNCTION_START(nmod_poly_mul_KS4, state) for (i = 0; i < 200 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -83,7 +83,7 @@ TEST_FUNCTION_START(nmod_poly_mul_KS4, state) for (i = 0; i < 200 * flint_test_multiplier(); i++) { nmod_poly_t a1, a2, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a1, n); nmod_poly_init(a2, n); @@ -115,7 +115,7 @@ TEST_FUNCTION_START(nmod_poly_mul_KS4, state) #if FLINT64 { nmod_poly_t a, b, c, d; - mp_limb_t mod = UWORD(2289285083314003039); + ulong mod = UWORD(2289285083314003039); nmod_poly_init(a, mod); nmod_poly_init(b, mod); diff --git a/src/nmod_poly/test/t-mul_classical.c 
b/src/nmod_poly/test/t-mul_classical.c index 23207bedc7..516e0f879e 100644 --- a/src/nmod_poly/test/t-mul_classical.c +++ b/src/nmod_poly/test/t-mul_classical.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_mul_classical, state) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -53,7 +53,7 @@ TEST_FUNCTION_START(nmod_poly_mul_classical, state) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -84,7 +84,7 @@ TEST_FUNCTION_START(nmod_poly_mul_classical, state) { nmod_poly_t a1, a2, b, c, d; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a1, n); nmod_poly_init(a2, n); @@ -124,7 +124,7 @@ TEST_FUNCTION_START(nmod_poly_mul_classical, state) { nmod_poly_t a1, a2, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a1, n); nmod_poly_init(a2, n); diff --git a/src/nmod_poly/test/t-mulhigh.c b/src/nmod_poly/test/t-mulhigh.c index 691d7f7236..4c064f963f 100644 --- a/src/nmod_poly/test/t-mulhigh.c +++ b/src/nmod_poly/test/t-mulhigh.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_mulhigh, state) nmod_poly_t a, b, c; slong j, n; - mp_limb_t m = n_randtest_not_zero(state); + ulong m = n_randtest_not_zero(state); nmod_poly_init(a, m); nmod_poly_init(b, m); diff --git a/src/nmod_poly/test/t-mulhigh_classical.c b/src/nmod_poly/test/t-mulhigh_classical.c index 12d4fd7bf2..9bccc1552a 100644 --- a/src/nmod_poly/test/t-mulhigh_classical.c +++ b/src/nmod_poly/test/t-mulhigh_classical.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_mulhigh_classical, state) nmod_poly_t a, b, c; slong j, start; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -65,7 +65,7 @@ TEST_FUNCTION_START(nmod_poly_mulhigh_classical, state) { nmod_poly_t a, b, c; slong j, start; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -107,7 +107,7 @@ TEST_FUNCTION_START(nmod_poly_mulhigh_classical, state) { nmod_poly_t a, b, c, d; slong j, start; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); diff --git a/src/nmod_poly/test/t-mullow.c b/src/nmod_poly/test/t-mullow.c index 0dce581b4c..c4862ba9cc 100644 --- a/src/nmod_poly/test/t-mullow.c +++ b/src/nmod_poly/test/t-mullow.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_mullow, state) { nmod_poly_t a, b, c; slong trunc; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); diff --git a/src/nmod_poly/test/t-mullow_KS.c b/src/nmod_poly/test/t-mullow_KS.c index 12ce9e6987..37e943e778 100644 --- a/src/nmod_poly/test/t-mullow_KS.c +++ b/src/nmod_poly/test/t-mullow_KS.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_mullow_KS, state) for (i = 0; i < 200 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong trunc = 0; nmod_poly_init(a, n); @@ -55,7 +55,7 @@ TEST_FUNCTION_START(nmod_poly_mullow_KS, state) for (i = 0; i < 200 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong trunc = 0; 
nmod_poly_init(a, n); @@ -89,7 +89,7 @@ TEST_FUNCTION_START(nmod_poly_mullow_KS, state) for (i = 0; i < 200 * flint_test_multiplier(); i++) { nmod_poly_t a1, a2, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong trunc = 0; nmod_poly_init(a1, n); diff --git a/src/nmod_poly/test/t-mullow_classical.c b/src/nmod_poly/test/t-mullow_classical.c index 3be557a501..a2dd653375 100644 --- a/src/nmod_poly/test/t-mullow_classical.c +++ b/src/nmod_poly/test/t-mullow_classical.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_mullow_classical, state) { nmod_poly_t a, b, c; slong len, trunc; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -59,7 +59,7 @@ TEST_FUNCTION_START(nmod_poly_mullow_classical, state) { nmod_poly_t a, b, c; slong len, trunc; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -96,7 +96,7 @@ TEST_FUNCTION_START(nmod_poly_mullow_classical, state) { nmod_poly_t a, b, c, d; slong len, trunc; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); diff --git a/src/nmod_poly/test/t-mulmod.c b/src/nmod_poly/test/t-mulmod.c index f69dfa2c42..ea5634ddfe 100644 --- a/src/nmod_poly/test/t-mulmod.c +++ b/src/nmod_poly/test/t-mulmod.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_mulmod, state) { nmod_poly_t a, b, res, t, f; - mp_limb_t n = n_randtest_prime(state, 0); + ulong n = n_randtest_prime(state, 0); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -64,7 +64,7 @@ TEST_FUNCTION_START(nmod_poly_mulmod, state) { nmod_poly_t a, b, res, t, f; - mp_limb_t n = n_randtest_prime(state, 0); + ulong n = n_randtest_prime(state, 0); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -105,7 +105,7 @@ TEST_FUNCTION_START(nmod_poly_mulmod, state) { nmod_poly_t a, b, res, t, f; - mp_limb_t n = n_randtest_prime(state, 0); + ulong n = n_randtest_prime(state, 0); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -146,7 +146,7 @@ TEST_FUNCTION_START(nmod_poly_mulmod, state) { nmod_poly_t a, b, res1, res2, t, f; - mp_limb_t n = n_randtest_prime(state, 0); + ulong n = n_randtest_prime(state, 0); nmod_poly_init(a, n); nmod_poly_init(b, n); diff --git a/src/nmod_poly/test/t-mulmod_preinv.c b/src/nmod_poly/test/t-mulmod_preinv.c index e95597c3ac..38e7f29dd4 100644 --- a/src/nmod_poly/test/t-mulmod_preinv.c +++ b/src/nmod_poly/test/t-mulmod_preinv.c @@ -24,7 +24,7 @@ TEST_FUNCTION_START(nmod_poly_mulmod_preinv, state) { nmod_poly_t a, b, res, t, f, finv; - mp_limb_t n = n_randtest_prime(state, 0); + ulong n = n_randtest_prime(state, 0); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -74,7 +74,7 @@ TEST_FUNCTION_START(nmod_poly_mulmod_preinv, state) { nmod_poly_t a, b, res, t, f, finv; - mp_limb_t n = n_randtest_prime(state, 0); + ulong n = n_randtest_prime(state, 0); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -124,7 +124,7 @@ TEST_FUNCTION_START(nmod_poly_mulmod_preinv, state) { nmod_poly_t a, b, res, t, f, finv; - mp_limb_t n = n_randtest_prime(state, 0); + ulong n = n_randtest_prime(state, 0); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -174,7 +174,7 @@ TEST_FUNCTION_START(nmod_poly_mulmod_preinv, state) { nmod_poly_t a, b, res, t, f, finv; - mp_limb_t n = n_randtest_prime(state, 0); + ulong n = n_randtest_prime(state, 0); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -225,7 +225,7 @@ TEST_FUNCTION_START(nmod_poly_mulmod_preinv, state) { 
nmod_poly_t a, b, res1, res2, t, f, finv; - mp_limb_t n = n_randtest_prime(state, 0); + ulong n = n_randtest_prime(state, 0); nmod_poly_init(a, n); nmod_poly_init(b, n); diff --git a/src/nmod_poly/test/t-multi_crt.c b/src/nmod_poly/test/t-multi_crt.c index 311160122f..23f9013b68 100644 --- a/src/nmod_poly/test/t-multi_crt.c +++ b/src/nmod_poly/test/t-multi_crt.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_poly_multi_crt, state) { nmod_poly_multi_crt_t P; nmod_poly_struct ** moduli, ** inputs, * outputs; - mp_limb_t modulus = 1009; + ulong modulus = 1009; slong moduli_count = 1000; moduli = (nmod_poly_struct **) flint_malloc(moduli_count*sizeof(nmod_poly_struct *)); @@ -84,7 +84,7 @@ TEST_FUNCTION_START(nmod_poly_multi_crt, state) slong total_degree, moduli_length, moduli_count; nmod_poly_struct ** moduli, ** inputs; nmod_poly_t output; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -208,7 +208,7 @@ TEST_FUNCTION_START(nmod_poly_multi_crt, state) slong total_degree, moduli_length, moduli_count; nmod_poly_struct * moduli, * inputs; nmod_poly_t output; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); @@ -323,7 +323,7 @@ TEST_FUNCTION_START(nmod_poly_multi_crt, state) slong total_degree, moduli_length, moduli_count; nmod_poly_struct * moduli, * inputs; nmod_poly_t output; - mp_limb_t modulus; + ulong modulus; modulus = n_randint(state, FLINT_BITS - 1) + 1; modulus = n_randbits(state, modulus); diff --git a/src/nmod_poly/test/t-neg.c b/src/nmod_poly/test/t-neg.c index cda06a75bc..5a0886d534 100644 --- a/src/nmod_poly/test/t-neg.c +++ b/src/nmod_poly/test/t-neg.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_neg, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); diff --git a/src/nmod_poly/test/t-pow.c b/src/nmod_poly/test/t-pow.c index 1f71f58fc7..c1e213f3cb 100644 --- a/src/nmod_poly/test/t-pow.c +++ b/src/nmod_poly/test/t-pow.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_pow, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong e; nmod_poly_init(a, n); @@ -57,7 +57,7 @@ TEST_FUNCTION_START(nmod_poly_pow, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong e; nmod_poly_init(a, n); diff --git a/src/nmod_poly/test/t-pow_binexp.c b/src/nmod_poly/test/t-pow_binexp.c index dac11a18d7..ae50a46a1b 100644 --- a/src/nmod_poly/test/t-pow_binexp.c +++ b/src/nmod_poly/test/t-pow_binexp.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_pow_binexp, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong e; nmod_poly_init(a, n); @@ -58,7 +58,7 @@ TEST_FUNCTION_START(nmod_poly_pow_binexp, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong e; nmod_poly_init(a, n); diff --git a/src/nmod_poly/test/t-pow_trunc.c b/src/nmod_poly/test/t-pow_trunc.c index 15a2133dbd..d2809a34f9 100644 --- a/src/nmod_poly/test/t-pow_trunc.c +++ 
b/src/nmod_poly/test/t-pow_trunc.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_pow_trunc, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong e, trunc; nmod_poly_init(a, n); @@ -59,7 +59,7 @@ TEST_FUNCTION_START(nmod_poly_pow_trunc, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong e, trunc; nmod_poly_init(a, n); diff --git a/src/nmod_poly/test/t-pow_trunc_binexp.c b/src/nmod_poly/test/t-pow_trunc_binexp.c index 777a3be016..d391823deb 100644 --- a/src/nmod_poly/test/t-pow_trunc_binexp.c +++ b/src/nmod_poly/test/t-pow_trunc_binexp.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_pow_trunc_binexp, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong e, trunc; nmod_poly_init(a, n); @@ -59,7 +59,7 @@ TEST_FUNCTION_START(nmod_poly_pow_trunc_binexp, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong e, trunc; nmod_poly_init(a, n); diff --git a/src/nmod_poly/test/t-power_sums.c b/src/nmod_poly/test/t-power_sums.c index aeae26e869..73279a2085 100644 --- a/src/nmod_poly/test/t-power_sums.c +++ b/src/nmod_poly/test/t-power_sums.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_poly_power_sums, state) /* Check that the different version coincide and aliasing in nmod_poly_power_sums */ for (i = 0; i < 100 * flint_test_multiplier(); i++) { - mp_limb_t n; + ulong n; nmod_poly_t a, b, c, d, e; do{ diff --git a/src/nmod_poly/test/t-power_sums_naive.c b/src/nmod_poly/test/t-power_sums_naive.c index 1299daf47c..1f48e22536 100644 --- a/src/nmod_poly/test/t-power_sums_naive.c +++ b/src/nmod_poly/test/t-power_sums_naive.c @@ -16,7 +16,7 @@ TEST_FUNCTION_START(nmod_poly_power_sums_naive, state) { int l, result; - mp_limb_t i, j, k, tot; + ulong i, j, k, tot; /* Check that it is valid in degree 3 with integer roots, ie */ /* for polynomials of the form (x-i)(x-j)(x-k) */ @@ -24,7 +24,7 @@ TEST_FUNCTION_START(nmod_poly_power_sums_naive, state) for (j = 0; j < 4; j++) for (k = 0; k < 4; k++) { - mp_limb_t n; + ulong n; nmod_t mod; nmod_poly_t a, b, c, d; @@ -103,7 +103,7 @@ TEST_FUNCTION_START(nmod_poly_power_sums_naive, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, d; - mp_limb_t n; + ulong n; do{ n = n_randtest_prime(state, 1); @@ -149,7 +149,7 @@ TEST_FUNCTION_START(nmod_poly_power_sums_naive, state) for (i = 0; i < 20 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, d; - mp_limb_t n; + ulong n; do{ n = n_randtest_prime(state, 1); diff --git a/src/nmod_poly/test/t-power_sums_schoenhage.c b/src/nmod_poly/test/t-power_sums_schoenhage.c index f58dc07553..1cf25b271e 100644 --- a/src/nmod_poly/test/t-power_sums_schoenhage.c +++ b/src/nmod_poly/test/t-power_sums_schoenhage.c @@ -16,7 +16,7 @@ TEST_FUNCTION_START(nmod_poly_power_sums_schoenhage, state) { int l, result; - mp_limb_t i, j, k, tot; + ulong i, j, k, tot; /* Check that it is valid in degree 3 with integer roots, ie */ /* for polynomials of the form (x-i)(x-j)(x-k) */ @@ -24,7 +24,7 @@ TEST_FUNCTION_START(nmod_poly_power_sums_schoenhage, state) for (j = 0; j < 4; j++) for (k = 0; k < 4; k++) { - mp_limb_t n; + ulong n; nmod_t mod; 
nmod_poly_t a, b, c, d; @@ -101,7 +101,7 @@ TEST_FUNCTION_START(nmod_poly_power_sums_schoenhage, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, d; - mp_limb_t n; + ulong n; do{ n = n_randtest_prime(state, 1); @@ -149,7 +149,7 @@ TEST_FUNCTION_START(nmod_poly_power_sums_schoenhage, state) for (i = 0; i < 20 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, d; - mp_limb_t n; + ulong n; do{ n = n_randtest_prime(state, 1); diff --git a/src/nmod_poly/test/t-powers_mod_bsgs.c b/src/nmod_poly/test/t-powers_mod_bsgs.c index 698c82e085..5dd7a7936e 100644 --- a/src/nmod_poly/test/t-powers_mod_bsgs.c +++ b/src/nmod_poly/test/t-powers_mod_bsgs.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_powers_mod_bsgs, state) { nmod_poly_t f, g, pow; nmod_poly_struct * res; - mp_limb_t n; + ulong n; ulong exp; slong j; diff --git a/src/nmod_poly/test/t-powers_mod_naive.c b/src/nmod_poly/test/t-powers_mod_naive.c index 3d8727af94..2283bc1520 100644 --- a/src/nmod_poly/test/t-powers_mod_naive.c +++ b/src/nmod_poly/test/t-powers_mod_naive.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_powers_mod_naive, state) { nmod_poly_t f, g, pow; nmod_poly_struct * res; - mp_limb_t n; + ulong n; ulong exp; slong j; diff --git a/src/nmod_poly/test/t-powmod_fmpz_binexp.c b/src/nmod_poly/test/t-powmod_fmpz_binexp.c index a2507103e5..f6fddfbafc 100644 --- a/src/nmod_poly/test/t-powmod_fmpz_binexp.c +++ b/src/nmod_poly/test/t-powmod_fmpz_binexp.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_fmpz_binexp, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t a, res1, t, f; - mp_limb_t n; + ulong n; fmpz_t exp; fmpz_init(exp); @@ -67,7 +67,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_fmpz_binexp, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t a, res1, t, f; - mp_limb_t n; + ulong n; fmpz_t exp; fmpz_init(exp); @@ -111,7 +111,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_fmpz_binexp, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, res1, res2, t, f; - mp_limb_t n; + ulong n; fmpz_t exp; int j; diff --git a/src/nmod_poly/test/t-powmod_fmpz_binexp_preinv.c b/src/nmod_poly/test/t-powmod_fmpz_binexp_preinv.c index f2bfe252b5..d8e2abbede 100644 --- a/src/nmod_poly/test/t-powmod_fmpz_binexp_preinv.c +++ b/src/nmod_poly/test/t-powmod_fmpz_binexp_preinv.c @@ -24,7 +24,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_fmpz_binexp_preinv, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t a, res1, t, f, finv; - mp_limb_t n; + ulong n; fmpz_t exp; fmpz_init(exp); @@ -73,7 +73,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_fmpz_binexp_preinv, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t a, res1, t, f, finv; - mp_limb_t n; + ulong n; fmpz_t exp; fmpz_init(exp); @@ -122,7 +122,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_fmpz_binexp_preinv, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t a, res1, t, f, finv; - mp_limb_t n; + ulong n; fmpz_t exp; fmpz_init(exp); @@ -172,7 +172,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_fmpz_binexp_preinv, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, res1, res2, t, f, finv; - mp_limb_t n; + ulong n; fmpz_t exp; fmpz_init(exp); diff --git a/src/nmod_poly/test/t-powmod_ui_binexp.c b/src/nmod_poly/test/t-powmod_ui_binexp.c index ca8a87d935..cf0d5b2ca2 100644 --- a/src/nmod_poly/test/t-powmod_ui_binexp.c +++ b/src/nmod_poly/test/t-powmod_ui_binexp.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_ui_binexp, state) for 
(i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t a, res1, t, f; - mp_limb_t n; + ulong n; ulong exp; n = n_randtest_prime(state, 0); @@ -63,7 +63,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_ui_binexp, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t a, res1, t, f; - mp_limb_t n; + ulong n; ulong exp; n = n_randtest_prime(state, 0); @@ -104,7 +104,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_ui_binexp, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, res1, res2, t, f; - mp_limb_t n; + ulong n; ulong exp; int j; diff --git a/src/nmod_poly/test/t-powmod_ui_binexp_preinv.c b/src/nmod_poly/test/t-powmod_ui_binexp_preinv.c index 58dbbd73be..9ffef73df2 100644 --- a/src/nmod_poly/test/t-powmod_ui_binexp_preinv.c +++ b/src/nmod_poly/test/t-powmod_ui_binexp_preinv.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_ui_binexp_preinv, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t a, res1, t, f, finv; - mp_limb_t n; + ulong n; ulong exp; n = n_randtest_prime(state, 0); @@ -69,7 +69,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_ui_binexp_preinv, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t a, res1, t, f, finv; - mp_limb_t n; + ulong n; ulong exp; n = n_randtest_prime(state, 0); @@ -115,7 +115,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_ui_binexp_preinv, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t a, res1, t, f, finv; - mp_limb_t n; + ulong n; ulong exp; n = n_randtest_prime(state, 0); @@ -162,7 +162,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_ui_binexp_preinv, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, res1, res2, t, f, finv; - mp_limb_t n; + ulong n; ulong exp; int j; diff --git a/src/nmod_poly/test/t-powmod_x_fmpz_preinv.c b/src/nmod_poly/test/t-powmod_x_fmpz_preinv.c index 73631c39bc..43d025b03e 100644 --- a/src/nmod_poly/test/t-powmod_x_fmpz_preinv.c +++ b/src/nmod_poly/test/t-powmod_x_fmpz_preinv.c @@ -24,7 +24,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_x_fmpz_preinv, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, res1, res2, t, f, finv; - mp_limb_t n; + ulong n; fmpz_t exp; fmpz_init(exp); @@ -77,7 +77,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_x_fmpz_preinv, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t res1, t, f, finv; - mp_limb_t n; + ulong n; fmpz_t exp; fmpz_init(exp); @@ -122,7 +122,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_x_fmpz_preinv, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t res1, t, f, finv; - mp_limb_t n; + ulong n; fmpz_t exp; fmpz_init(exp); diff --git a/src/nmod_poly/test/t-powmod_x_ui_preinv.c b/src/nmod_poly/test/t-powmod_x_ui_preinv.c index 23d1bfe2cc..ab416232cf 100644 --- a/src/nmod_poly/test/t-powmod_x_ui_preinv.c +++ b/src/nmod_poly/test/t-powmod_x_ui_preinv.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_x_ui_preinv, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t a, res1, res2, t, f, finv; - mp_limb_t n; + ulong n; ulong exp; n = n_randtest_prime(state, 0); @@ -73,7 +73,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_x_ui_preinv, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t res1, t, f, finv; - mp_limb_t n; + ulong n; ulong exp; n = n_randtest_prime(state, 0); @@ -115,7 +115,7 @@ TEST_FUNCTION_START(nmod_poly_powmod_x_ui_preinv, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t res1, t, f, finv; - mp_limb_t n; + ulong n; ulong exp; n = 
n_randtest_prime(state, 0); diff --git a/src/nmod_poly/test/t-product_roots_nmod_vec.c b/src/nmod_poly/test/t-product_roots_nmod_vec.c index 00a4601e1a..0375addd87 100644 --- a/src/nmod_poly/test/t-product_roots_nmod_vec.c +++ b/src/nmod_poly/test/t-product_roots_nmod_vec.c @@ -21,8 +21,8 @@ TEST_FUNCTION_START(nmod_poly_product_roots_nmod_vec, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t P, Q, tmp; - mp_ptr x; - mp_limb_t mod; + nn_ptr x; + ulong mod; slong j, n; n = n_randint(state, 100); diff --git a/src/nmod_poly/test/t-rem.c b/src/nmod_poly/test/t-rem.c index 9c62062956..cada87a69e 100644 --- a/src/nmod_poly/test/t-rem.c +++ b/src/nmod_poly/test/t-rem.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_rem, state) { nmod_poly_t a, b, q, r, prod; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -66,7 +66,7 @@ TEST_FUNCTION_START(nmod_poly_rem, state) { nmod_poly_t a, b, r; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -102,7 +102,7 @@ TEST_FUNCTION_START(nmod_poly_rem, state) { nmod_poly_t a, b, r; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -138,7 +138,7 @@ TEST_FUNCTION_START(nmod_poly_rem, state) { nmod_poly_t a, b, q0, r0, r; - mp_limb_t n = n_randprime(state, n_randint(state,FLINT_BITS-1)+2, 0); + ulong n = n_randprime(state, n_randint(state,FLINT_BITS-1)+2, 0); nmod_poly_init(a, n); nmod_poly_init(b, n); diff --git a/src/nmod_poly/test/t-resultant.c b/src/nmod_poly/test/t-resultant.c index 7973226d68..99f69a6a1a 100644 --- a/src/nmod_poly/test/t-resultant.c +++ b/src/nmod_poly/test/t-resultant.c @@ -21,8 +21,8 @@ TEST_FUNCTION_START(nmod_poly_resultant, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t f, g; - mp_limb_t x, y; - mp_limb_t n; + ulong x, y; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -60,8 +60,8 @@ TEST_FUNCTION_START(nmod_poly_resultant, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t f, g, h; - mp_limb_t x, y, z; - mp_limb_t n; + ulong x, y, z; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-resultant_euclidean.c b/src/nmod_poly/test/t-resultant_euclidean.c index b5dfe49161..fb85a2b8ff 100644 --- a/src/nmod_poly/test/t-resultant_euclidean.c +++ b/src/nmod_poly/test/t-resultant_euclidean.c @@ -21,8 +21,8 @@ TEST_FUNCTION_START(nmod_poly_resultant_euclidean, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t f, g; - mp_limb_t x, y; - mp_limb_t n; + ulong x, y; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -60,8 +60,8 @@ TEST_FUNCTION_START(nmod_poly_resultant_euclidean, state) for (i = 0; i < 50 * flint_test_multiplier(); i++) { nmod_poly_t f, g, h; - mp_limb_t x, y, z; - mp_limb_t n; + ulong x, y, z; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-resultant_hgcd.c b/src/nmod_poly/test/t-resultant_hgcd.c index b9f68b5a9d..06b1efd26b 100644 --- a/src/nmod_poly/test/t-resultant_hgcd.c +++ b/src/nmod_poly/test/t-resultant_hgcd.c @@ -21,8 +21,8 @@ TEST_FUNCTION_START(nmod_poly_resultant_hgcd, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t f, g; - mp_limb_t x, y; - mp_limb_t n; + ulong x, y; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -60,8 +60,8 @@ TEST_FUNCTION_START(nmod_poly_resultant_hgcd, state) for 
(i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t f, g, h; - mp_limb_t x, y, z; - mp_limb_t n; + ulong x, y, z; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-reverse.c b/src/nmod_poly/test/t-reverse.c index e8e4f5eda4..3be94a9d6b 100644 --- a/src/nmod_poly/test/t-reverse.c +++ b/src/nmod_poly/test/t-reverse.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_reverse, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong len; nmod_poly_init(a, n); @@ -52,7 +52,7 @@ TEST_FUNCTION_START(nmod_poly_reverse, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong m = n_randint(state, 100) + 1; slong len = n_randint(state, m); diff --git a/src/nmod_poly/test/t-revert_series.c b/src/nmod_poly/test/t-revert_series.c index 6d6803fee5..3b85823302 100644 --- a/src/nmod_poly/test/t-revert_series.c +++ b/src/nmod_poly/test/t-revert_series.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_revert_series, state) for (i = 0; i < 10 * flint_test_multiplier(); i++) { nmod_poly_t f, g; - mp_limb_t m; + ulong m; slong n; m = n_randtest_prime(state, 0); @@ -58,7 +58,7 @@ TEST_FUNCTION_START(nmod_poly_revert_series, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { nmod_poly_t f, g, h; - mp_limb_t m; + ulong m; slong n; m = n_randtest_prime(state, 0); diff --git a/src/nmod_poly/test/t-scalar_addmul_nmod.c b/src/nmod_poly/test/t-scalar_addmul_nmod.c index 88da46da97..16a407bfff 100644 --- a/src/nmod_poly/test/t-scalar_addmul_nmod.c +++ b/src/nmod_poly/test/t-scalar_addmul_nmod.c @@ -19,8 +19,8 @@ TEST_FUNCTION_START(nmod_poly_scalar_addmul_nmod, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, d; - mp_limb_t n = n_randtest_not_zero(state); - mp_limb_t x = n_randint(state, n); + ulong n = n_randtest_not_zero(state); + ulong x = n_randint(state, n); nmod_poly_init(a, n); nmod_poly_init(b, n); diff --git a/src/nmod_poly/test/t-scalar_mul_nmod.c b/src/nmod_poly/test/t-scalar_mul_nmod.c index 83f2ab8f9f..aee7c38d96 100644 --- a/src/nmod_poly/test/t-scalar_mul_nmod.c +++ b/src/nmod_poly/test/t-scalar_mul_nmod.c @@ -20,8 +20,8 @@ TEST_FUNCTION_START(nmod_poly_scalar_mul_nmod, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b; - mp_limb_t n = n_randtest_not_zero(state); - mp_limb_t c = n_randint(state, n); + ulong n = n_randtest_not_zero(state); + ulong c = n_randint(state, n); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -48,8 +48,8 @@ TEST_FUNCTION_START(nmod_poly_scalar_mul_nmod, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b, d1, d2; - mp_limb_t n = n_randtest_not_zero(state); - mp_limb_t c = n_randint(state, n); + ulong n = n_randtest_not_zero(state); + ulong c = n_randint(state, n); nmod_poly_init(a, n); nmod_poly_init(b, n); diff --git a/src/nmod_poly/test/t-shift_left_right.c b/src/nmod_poly/test/t-shift_left_right.c index 2a87b846b5..aa7b1c1da9 100644 --- a/src/nmod_poly/test/t-shift_left_right.c +++ b/src/nmod_poly/test/t-shift_left_right.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_poly_shift_left_right, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong shift = n_randint(state, 100); 
nmod_poly_init(a, n); @@ -50,7 +50,7 @@ TEST_FUNCTION_START(nmod_poly_shift_left_right, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong shift = n_randint(state, 100); nmod_poly_init(a, n); diff --git a/src/nmod_poly/test/t-sin_series.c b/src/nmod_poly/test/t-sin_series.c index 827d0ea40b..33c8318772 100644 --- a/src/nmod_poly/test/t-sin_series.c +++ b/src/nmod_poly/test/t-sin_series.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_sin_series, state) { nmod_poly_t A, sinA, B; slong n; - mp_limb_t mod; + ulong mod; do { mod = n_randtest_prime(state, 0); } while (mod == 2); n = 1 + n_randtest(state) % 100; @@ -64,7 +64,7 @@ TEST_FUNCTION_START(nmod_poly_sin_series, state) { nmod_poly_t A, B; slong n; - mp_limb_t mod; + ulong mod; do { mod = n_randtest_prime(state, 0); } while (mod == 2); n = n_randtest(state) % 50; n = FLINT_MIN(n, mod); diff --git a/src/nmod_poly/test/t-sinh_series.c b/src/nmod_poly/test/t-sinh_series.c index 7a30d6c8ae..a2c65d86fb 100644 --- a/src/nmod_poly/test/t-sinh_series.c +++ b/src/nmod_poly/test/t-sinh_series.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_sinh_series, state) { nmod_poly_t A, sinhA, B; slong n; - mp_limb_t mod; + ulong mod; do { mod = n_randtest_prime(state, 0); } while (mod == 2); n = 1 + n_randtest(state) % 100; @@ -64,7 +64,7 @@ TEST_FUNCTION_START(nmod_poly_sinh_series, state) { nmod_poly_t A, B; slong n; - mp_limb_t mod; + ulong mod; do { mod = n_randtest_prime(state, 0); } while (mod == 2); n = n_randtest(state) % 50; n = FLINT_MIN(n, mod); diff --git a/src/nmod_poly/test/t-sqrt.c b/src/nmod_poly/test/t-sqrt.c index ebe3397179..3b53585462 100644 --- a/src/nmod_poly/test/t-sqrt.c +++ b/src/nmod_poly/test/t-sqrt.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_sqrt, state) { nmod_poly_t a, b; int square1, square2; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); nmod_poly_init(a, mod); @@ -55,7 +55,7 @@ TEST_FUNCTION_START(nmod_poly_sqrt, state) { nmod_poly_t a, b, c; int square; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); nmod_poly_init(a, mod); @@ -98,7 +98,7 @@ TEST_FUNCTION_START(nmod_poly_sqrt, state) nmod_poly_t a, b, c; slong j; int square; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); nmod_poly_init(a, mod); diff --git a/src/nmod_poly/test/t-sqrt_series.c b/src/nmod_poly/test/t-sqrt_series.c index 6e4ed97864..66cb4bf8f5 100644 --- a/src/nmod_poly/test/t-sqrt_series.c +++ b/src/nmod_poly/test/t-sqrt_series.c @@ -24,7 +24,7 @@ TEST_FUNCTION_START(nmod_poly_sqrt_series, state) nmod_poly_t h, g, r; slong m; - mp_limb_t n; + ulong n; do n = n_randtest_prime(state, 0); while (n == UWORD(2)); @@ -65,7 +65,7 @@ TEST_FUNCTION_START(nmod_poly_sqrt_series, state) nmod_poly_t g, h; slong m; - mp_limb_t n; + ulong n; do n = n_randtest_prime(state, 0); while (n == UWORD(2)); diff --git a/src/nmod_poly/test/t-sub.c b/src/nmod_poly/test/t-sub.c index 54363eb0fe..f52e1264d2 100644 --- a/src/nmod_poly/test/t-sub.c +++ b/src/nmod_poly/test/t-sub.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_sub, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c, d; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -56,7 +56,7 @@ TEST_FUNCTION_START(nmod_poly_sub, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = 
n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); @@ -87,7 +87,7 @@ TEST_FUNCTION_START(nmod_poly_sub, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_poly_init(a, n); nmod_poly_init(b, n); diff --git a/src/nmod_poly/test/t-tan_series.c b/src/nmod_poly/test/t-tan_series.c index acea09c528..5a4c3081d0 100644 --- a/src/nmod_poly/test/t-tan_series.c +++ b/src/nmod_poly/test/t-tan_series.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_tan_series, state) { nmod_poly_t A, tanA, B; slong n; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); n = 1 + n_randtest(state) % 100; @@ -64,7 +64,7 @@ TEST_FUNCTION_START(nmod_poly_tan_series, state) { nmod_poly_t A, B; slong n; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); n = n_randtest(state) % 50; n = FLINT_MIN(n, mod); diff --git a/src/nmod_poly/test/t-tanh_series.c b/src/nmod_poly/test/t-tanh_series.c index 348ce73dca..b4b469768f 100644 --- a/src/nmod_poly/test/t-tanh_series.c +++ b/src/nmod_poly/test/t-tanh_series.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_tanh_series, state) { nmod_poly_t A, tanhA, B; slong n; - mp_limb_t mod; + ulong mod; do { mod = n_randtest_prime(state, 0); } while (mod == 2); n = 1 + n_randtest(state) % 100; @@ -64,7 +64,7 @@ TEST_FUNCTION_START(nmod_poly_tanh_series, state) { nmod_poly_t A, B; slong n; - mp_limb_t mod; + ulong mod; do { mod = n_randtest_prime(state, 0); } while (mod == 2); n = n_randtest(state) % 50; n = FLINT_MIN(n, mod); diff --git a/src/nmod_poly/test/t-taylor_shift.c b/src/nmod_poly/test/t-taylor_shift.c index 9488d7ca33..925e429d2a 100644 --- a/src/nmod_poly/test/t-taylor_shift.c +++ b/src/nmod_poly/test/t-taylor_shift.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_taylor_shift, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t f, g; - mp_limb_t c, mod; + ulong c, mod; mod = n_randtest_prime(state, 0); @@ -51,7 +51,7 @@ TEST_FUNCTION_START(nmod_poly_taylor_shift, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t f, g, h1, h2; - mp_limb_t mod, c; + ulong mod, c; mod = n_randtest_prime(state, 0); @@ -90,7 +90,7 @@ TEST_FUNCTION_START(nmod_poly_taylor_shift, state) for (i = 0; i < 10 * flint_test_multiplier(); i++) { nmod_poly_t f, g, h1, h2; - mp_limb_t mod, c; + ulong mod, c; mod = n_randtest_prime(state, 0); diff --git a/src/nmod_poly/test/t-taylor_shift_convolution.c b/src/nmod_poly/test/t-taylor_shift_convolution.c index 7831166134..a6675dd7c7 100644 --- a/src/nmod_poly/test/t-taylor_shift_convolution.c +++ b/src/nmod_poly/test/t-taylor_shift_convolution.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_taylor_shift_convolution, state) { nmod_poly_t f, g; slong n; - mp_limb_t c, mod; + ulong c, mod; n = n_randint(state, 100); do { @@ -55,7 +55,7 @@ TEST_FUNCTION_START(nmod_poly_taylor_shift_convolution, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t f, g, h1, h2; - mp_limb_t mod, c; + ulong mod, c; slong n; n = n_randint(state, 100); diff --git a/src/nmod_poly/test/t-taylor_shift_horner.c b/src/nmod_poly/test/t-taylor_shift_horner.c index ce43489a37..0de7382056 100644 --- a/src/nmod_poly/test/t-taylor_shift_horner.c +++ b/src/nmod_poly/test/t-taylor_shift_horner.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_taylor_shift_horner, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t 
f, g; - mp_limb_t c, mod; + ulong c, mod; mod = n_randtest_prime(state, 0); @@ -51,7 +51,7 @@ TEST_FUNCTION_START(nmod_poly_taylor_shift_horner, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_t f, g, h1, h2; - mp_limb_t mod, c; + ulong mod, c; mod = n_randtest_prime(state, 0); diff --git a/src/nmod_poly/test/t-xgcd.c b/src/nmod_poly/test/t-xgcd.c index 226fbb7dd9..3558353d13 100644 --- a/src/nmod_poly/test/t-xgcd.c +++ b/src/nmod_poly/test/t-xgcd.c @@ -25,7 +25,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd, state) { nmod_poly_t a, b, c, g1, g2, s, t, sum, temp; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -87,7 +87,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd, state) { nmod_poly_t a, b, g, s, t; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -126,7 +126,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd, state) { nmod_poly_t a, b, g, s, t; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -165,7 +165,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd, state) { nmod_poly_t a, b, g, s, t; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -203,7 +203,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd, state) { nmod_poly_t a, b, g, s, t; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -241,7 +241,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd, state) { nmod_poly_t a, b, g, s, t; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -279,7 +279,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd, state) { nmod_poly_t a, b, g, s, t; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-xgcd_euclidean.c b/src/nmod_poly/test/t-xgcd_euclidean.c index 96d33d82e8..3a279bbb73 100644 --- a/src/nmod_poly/test/t-xgcd_euclidean.c +++ b/src/nmod_poly/test/t-xgcd_euclidean.c @@ -24,7 +24,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd_euclidean, state) { nmod_poly_t a, b, c, g1, s, t, g2; - mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -81,7 +81,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd_euclidean, state) { nmod_poly_t a, b, g, s, t; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -120,7 +120,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd_euclidean, state) { nmod_poly_t a, b, g, s, t; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -159,7 +159,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd_euclidean, state) { nmod_poly_t a, b, g, s, t; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -197,7 +197,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd_euclidean, state) { nmod_poly_t a, b, g, s, t; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -235,7 +235,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd_euclidean, state) { nmod_poly_t a, b, g, s, t; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -273,7 +273,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd_euclidean, state) { nmod_poly_t a, b, g, s, t; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/test/t-xgcd_hgcd.c b/src/nmod_poly/test/t-xgcd_hgcd.c index c1f57ac6f9..9b356da7bd 100644 --- a/src/nmod_poly/test/t-xgcd_hgcd.c +++ b/src/nmod_poly/test/t-xgcd_hgcd.c @@ -24,7 +24,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd_hgcd, state) { nmod_poly_t a, b, c, g1, g2, s, t, sum, temp; - 
mp_limb_t n; + ulong n; do n = n_randtest_not_zero(state); while (!n_is_probabprime(n)); @@ -86,7 +86,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd_hgcd, state) { nmod_poly_t a, b, g, s, t; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -125,7 +125,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd_hgcd, state) { nmod_poly_t a, b, g, s, t; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -164,7 +164,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd_hgcd, state) { nmod_poly_t a, b, g, s, t; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -202,7 +202,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd_hgcd, state) { nmod_poly_t a, b, g, s, t; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -240,7 +240,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd_hgcd, state) { nmod_poly_t a, b, g, s, t; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); @@ -278,7 +278,7 @@ TEST_FUNCTION_START(nmod_poly_xgcd_hgcd, state) { nmod_poly_t a, b, g, s, t; - mp_limb_t n; + ulong n; do n = n_randtest(state); while (!n_is_probabprime(n)); diff --git a/src/nmod_poly/tree.c b/src/nmod_poly/tree.c index a00f456867..35748685f5 100644 --- a/src/nmod_poly/tree.c +++ b/src/nmod_poly/tree.c @@ -13,15 +13,15 @@ #include "nmod_vec.h" #include "nmod_poly.h" -mp_ptr * _nmod_poly_tree_alloc(slong len) +nn_ptr * _nmod_poly_tree_alloc(slong len) { - mp_ptr * tree = NULL; + nn_ptr * tree = NULL; if (len) { slong i, height = FLINT_CLOG2(len); - tree = flint_malloc(sizeof(mp_ptr) * (height + 1)); + tree = flint_malloc(sizeof(nn_ptr) * (height + 1)); for (i = 0; i <= height; i++) tree[i] = _nmod_vec_init(len + (len >> i) + 1); } @@ -29,7 +29,7 @@ mp_ptr * _nmod_poly_tree_alloc(slong len) return tree; } -void _nmod_poly_tree_free(mp_ptr * tree, slong len) +void _nmod_poly_tree_free(nn_ptr * tree, slong len) { if (len) { @@ -43,10 +43,10 @@ void _nmod_poly_tree_free(mp_ptr * tree, slong len) } void -_nmod_poly_tree_build(mp_ptr * tree, mp_srcptr roots, slong len, nmod_t mod) +_nmod_poly_tree_build(nn_ptr * tree, nn_srcptr roots, slong len, nmod_t mod) { slong height, pow, left, i; - mp_ptr pa, pb; + nn_ptr pa, pb; if (len == 0) return; @@ -67,7 +67,7 @@ _nmod_poly_tree_build(mp_ptr * tree, mp_srcptr roots, slong len, nmod_t mod) for (i = 0; i < len / 2; i++) { - mp_limb_t a, b; + ulong a, b; a = roots[2 * i]; b = roots[2 * i + 1]; diff --git a/src/nmod_poly/xgcd.c b/src/nmod_poly/xgcd.c index 12deee853d..10e41f150d 100644 --- a/src/nmod_poly/xgcd.c +++ b/src/nmod_poly/xgcd.c @@ -17,8 +17,8 @@ #include "nmod_poly.h" #include "gr_poly.h" -slong _nmod_poly_xgcd(mp_ptr G, mp_ptr S, mp_ptr T, - mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod) +slong _nmod_poly_xgcd(nn_ptr G, nn_ptr S, nn_ptr T, + nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod) { slong cutoff = NMOD_BITS(mod) <= 8 ? 
NMOD_POLY_SMALL_GCD_CUTOFF : NMOD_POLY_GCD_CUTOFF; @@ -47,7 +47,7 @@ nmod_poly_xgcd(nmod_poly_t G, nmod_poly_t S, nmod_poly_t T, else /* lenA >= lenB >= 0 */ { const slong lenA = A->length, lenB = B->length; - mp_limb_t inv; + ulong inv; if (lenA == 0) /* lenA = lenB = 0 */ { @@ -73,7 +73,7 @@ nmod_poly_xgcd(nmod_poly_t G, nmod_poly_t S, nmod_poly_t T, } else /* lenA >= lenB >= 2 */ { - mp_ptr g, s, t; + nn_ptr g, s, t; slong lenG; if (G == A || G == B) @@ -147,9 +147,9 @@ nmod_poly_xgcd(nmod_poly_t G, nmod_poly_t S, nmod_poly_t T, } } -slong _nmod_poly_xgcd_euclidean(mp_ptr G, mp_ptr S, mp_ptr T, - mp_srcptr A, slong lenA, - mp_srcptr B, slong lenB, nmod_t mod) +slong _nmod_poly_xgcd_euclidean(nn_ptr G, nn_ptr S, nn_ptr T, + nn_srcptr A, slong lenA, + nn_srcptr B, slong lenB, nmod_t mod) { flint_mpn_zero(G, lenB); flint_mpn_zero(S, lenB - 1); @@ -163,7 +163,7 @@ slong _nmod_poly_xgcd_euclidean(mp_ptr G, mp_ptr S, mp_ptr T, } else { - mp_ptr Q, R; + nn_ptr Q, R; slong lenQ, lenR, lenG; Q = _nmod_vec_init(2 * lenA); @@ -181,7 +181,7 @@ slong _nmod_poly_xgcd_euclidean(mp_ptr G, mp_ptr S, mp_ptr T, } else { - mp_ptr D, U, V1, V3, W; + nn_ptr D, U, V1, V3, W; slong lenD, lenU, lenV1, lenV3, lenW; W = _nmod_vec_init(FLINT_MAX(5 * lenB, lenA + lenB)); @@ -216,7 +216,7 @@ slong _nmod_poly_xgcd_euclidean(mp_ptr G, mp_ptr S, mp_ptr T, MPN_SWAP(U, lenU, V1, lenV1); { - mp_ptr __t; + nn_ptr __t; slong __tn; __t = D; @@ -264,7 +264,7 @@ nmod_poly_xgcd_euclidean(nmod_poly_t G, nmod_poly_t S, nmod_poly_t T, else /* lenA >= lenB >= 0 */ { const slong lenA = A->length, lenB = B->length; - mp_limb_t inv; + ulong inv; if (lenA == 0) /* lenA = lenB = 0 */ { @@ -290,7 +290,7 @@ nmod_poly_xgcd_euclidean(nmod_poly_t G, nmod_poly_t S, nmod_poly_t T, } else /* lenA >= lenB >= 2 */ { - mp_ptr g, s, t; + nn_ptr g, s, t; slong lenG; if (G == A || G == B) @@ -364,8 +364,8 @@ nmod_poly_xgcd_euclidean(nmod_poly_t G, nmod_poly_t S, nmod_poly_t T, } } -slong _nmod_poly_xgcd_hgcd(mp_ptr G, mp_ptr S, mp_ptr T, - mp_srcptr A, slong lenA, mp_srcptr B, slong lenB, nmod_t mod) +slong _nmod_poly_xgcd_hgcd(nn_ptr G, nn_ptr S, nn_ptr T, + nn_srcptr A, slong lenA, nn_srcptr B, slong lenB, nmod_t mod) { slong cutoff = NMOD_BITS(mod) <= 8 ? 
NMOD_POLY_SMALL_GCD_CUTOFF : NMOD_POLY_GCD_CUTOFF; slong lenG = 0; @@ -386,7 +386,7 @@ nmod_poly_xgcd_hgcd(nmod_poly_t G, nmod_poly_t S, nmod_poly_t T, else /* lenA >= lenB >= 0 */ { const slong lenA = A->length, lenB = B->length; - mp_limb_t inv; + ulong inv; if (lenA == 0) /* lenA = lenB = 0 */ { @@ -412,7 +412,7 @@ nmod_poly_xgcd_hgcd(nmod_poly_t G, nmod_poly_t S, nmod_poly_t T, } else /* lenA >= lenB >= 2 */ { - mp_ptr g, s, t; + nn_ptr g, s, t; slong lenG; if (G == A || G == B) diff --git a/src/nmod_poly_factor.h b/src/nmod_poly_factor.h index 08a4e1db9b..ca2a61e421 100644 --- a/src/nmod_poly_factor.h +++ b/src/nmod_poly_factor.h @@ -36,7 +36,7 @@ typedef struct nmod_poly_struct * H; nmod_poly_struct * v; nmod_poly_struct * vinv; - mp_ptr tmp; + nn_ptr tmp; slong m; } nmod_poly_interval_poly_arg_t; @@ -94,7 +94,7 @@ int nmod_poly_is_irreducible_rabin(const nmod_poly_t f); int nmod_poly_is_irreducible_ddf(const nmod_poly_t f); -int _nmod_poly_is_squarefree(mp_srcptr f, slong len, nmod_t mod); +int _nmod_poly_is_squarefree(nn_srcptr f, slong len, nmod_t mod); int nmod_poly_is_squarefree(const nmod_poly_t f); @@ -109,16 +109,16 @@ void nmod_poly_factor_kaltofen_shoup(nmod_poly_factor_t res, void nmod_poly_factor_squarefree(nmod_poly_factor_t res, const nmod_poly_t f); -mp_limb_t nmod_poly_factor_with_berlekamp(nmod_poly_factor_t result, +ulong nmod_poly_factor_with_berlekamp(nmod_poly_factor_t result, const nmod_poly_t input); -mp_limb_t nmod_poly_factor_with_cantor_zassenhaus(nmod_poly_factor_t result, +ulong nmod_poly_factor_with_cantor_zassenhaus(nmod_poly_factor_t result, const nmod_poly_t input); -mp_limb_t nmod_poly_factor_with_kaltofen_shoup(nmod_poly_factor_t result, +ulong nmod_poly_factor_with_kaltofen_shoup(nmod_poly_factor_t result, const nmod_poly_t input); -mp_limb_t nmod_poly_factor(nmod_poly_factor_t result, +ulong nmod_poly_factor(nmod_poly_factor_t result, const nmod_poly_t input); void _nmod_poly_interval_poly_worker(void* arg_ptr); diff --git a/src/nmod_poly_factor/factor.c b/src/nmod_poly_factor/factor.c index 15874b9dea..9346fc859a 100644 --- a/src/nmod_poly_factor/factor.c +++ b/src/nmod_poly_factor/factor.c @@ -31,13 +31,13 @@ __nmod_poly_factor1(nmod_poly_factor_t res, const nmod_poly_t f, int algorithm) nmod_poly_factor_berlekamp(res, f); } -mp_limb_t +ulong __nmod_poly_factor(nmod_poly_factor_t result, const nmod_poly_t input, int algorithm) { nmod_poly_t monic_input; nmod_poly_factor_t sqfree_factors, factors; - mp_limb_t leading_coeff; + ulong leading_coeff; slong i, len; len = input->length; @@ -82,7 +82,7 @@ __nmod_poly_factor(nmod_poly_factor_t result, return leading_coeff; } -mp_limb_t +ulong __nmod_poly_factor_deflation(nmod_poly_factor_t result, const nmod_poly_t input, int algorithm) { @@ -106,7 +106,7 @@ __nmod_poly_factor_deflation(nmod_poly_factor_t result, { nmod_poly_factor_t def_res; nmod_poly_t def; - mp_limb_t leading_coeff; + ulong leading_coeff; nmod_poly_init_mod(def, input->mod); @@ -150,31 +150,31 @@ __nmod_poly_factor_deflation(nmod_poly_factor_t result, } } -mp_limb_t +ulong nmod_poly_factor_with_berlekamp(nmod_poly_factor_t result, const nmod_poly_t input) { return __nmod_poly_factor_deflation(result, input, BERLEKAMP); } -mp_limb_t +ulong nmod_poly_factor_with_cantor_zassenhaus(nmod_poly_factor_t result, const nmod_poly_t input) { return __nmod_poly_factor_deflation(result, input, ZASSENHAUS); } -mp_limb_t +ulong nmod_poly_factor_with_kaltofen_shoup(nmod_poly_factor_t result, const nmod_poly_t input) { return 
__nmod_poly_factor_deflation(result, input, KALTOFEN); } -mp_limb_t +ulong nmod_poly_factor(nmod_poly_factor_t result, const nmod_poly_t input) { - mp_limb_t p = input->mod.n; + ulong p = input->mod.n; unsigned int bits = FLINT_BIT_COUNT (p); slong n = nmod_poly_degree(input); diff --git a/src/nmod_poly_factor/factor_berlekamp.c b/src/nmod_poly_factor/factor_berlekamp.c index b49d2ed512..92ac0a7afd 100644 --- a/src/nmod_poly_factor/factor_berlekamp.c +++ b/src/nmod_poly_factor/factor_berlekamp.c @@ -56,7 +56,7 @@ static void __nmod_poly_factor_berlekamp(nmod_poly_factor_t factors, flint_rand_t state, const nmod_poly_t f) { - const mp_limb_t p = nmod_poly_modulus(f); + const ulong p = nmod_poly_modulus(f); const slong n = nmod_poly_degree(f); nmod_poly_factor_t fac1, fac2; @@ -64,7 +64,7 @@ __nmod_poly_factor_berlekamp(nmod_poly_factor_t factors, nmod_poly_t x_pi, x_pi2; nmod_poly_t Q; nmod_mat_t matrix; - mp_limb_t coeff; + ulong coeff; slong i, nullity, col, row, *shift; nmod_poly_t *basis; diff --git a/src/nmod_poly_factor/factor_distinct_deg_threaded.c b/src/nmod_poly_factor/factor_distinct_deg_threaded.c index d4e80a62f4..eca9be1fd9 100644 --- a/src/nmod_poly_factor/factor_distinct_deg_threaded.c +++ b/src/nmod_poly_factor/factor_distinct_deg_threaded.c @@ -63,7 +63,7 @@ _nmod_poly_compose_mod_brent_kung_precomp_preinv_worker(void * arg_ptr) nmod_poly_compose_mod_precomp_preinv_arg_t arg = *((nmod_poly_compose_mod_precomp_preinv_arg_t*) arg_ptr); nmod_mat_t B, C; - mp_ptr t, h; + nn_ptr t, h; slong i, n, m; nmod_poly_struct * res = arg.res; nmod_poly_struct * poly1 = arg.poly1; @@ -136,7 +136,7 @@ _nmod_poly_interval_poly_worker(void * arg_ptr) nmod_poly_struct * vinv = arg.vinv; nmod_poly_struct * baby = arg.baby; nmod_t mod = v->mod; - mp_ptr tmp = arg.tmp; + nn_ptr tmp = arg.tmp; res->coeffs[0] = 1; diff --git a/src/nmod_poly_factor/factor_squarefree.c b/src/nmod_poly_factor/factor_squarefree.c index 125456e0a8..5b5657c615 100644 --- a/src/nmod_poly_factor/factor_squarefree.c +++ b/src/nmod_poly_factor/factor_squarefree.c @@ -19,7 +19,7 @@ void nmod_poly_factor_squarefree(nmod_poly_factor_t res, const nmod_poly_t f) { nmod_poly_t f_d, g, g_1; - mp_limb_t p; + ulong p; slong deg, i; if (f->length <= 1) diff --git a/src/nmod_poly_factor/is_irreducible.c b/src/nmod_poly_factor/is_irreducible.c index 11e523dadd..37ae6ddeca 100644 --- a/src/nmod_poly_factor/is_irreducible.c +++ b/src/nmod_poly_factor/is_irreducible.c @@ -173,7 +173,7 @@ nmod_poly_is_irreducible_rabin(const nmod_poly_t f) { if (nmod_poly_length(f) > 2) { - const mp_limb_t p = nmod_poly_modulus(f); + const ulong p = nmod_poly_modulus(f); const slong n = nmod_poly_degree(f); nmod_poly_t a, x, x_p; diff --git a/src/nmod_poly_factor/is_squarefree.c b/src/nmod_poly_factor/is_squarefree.c index 86dd7323d5..684def44ee 100644 --- a/src/nmod_poly_factor/is_squarefree.c +++ b/src/nmod_poly_factor/is_squarefree.c @@ -14,16 +14,16 @@ #include "nmod_poly_factor.h" int -_nmod_poly_is_squarefree(mp_srcptr f, slong len, nmod_t mod) +_nmod_poly_is_squarefree(nn_srcptr f, slong len, nmod_t mod) { - mp_ptr fd, g; + nn_ptr fd, g; slong dlen; int res; if (len <= 2) return len != 0; - fd = flint_malloc(sizeof(mp_limb_t) * 2 * (len - 1)); + fd = flint_malloc(sizeof(ulong) * 2 * (len - 1)); g = fd + len - 1; _nmod_poly_derivative(fd, f, len, mod); diff --git a/src/nmod_poly_factor/profile/p-factor.c b/src/nmod_poly_factor/profile/p-factor.c index ad241b1e93..078d5f8ed9 100644 --- a/src/nmod_poly_factor/profile/p-factor.c +++ 
b/src/nmod_poly_factor/profile/p-factor.c @@ -33,7 +33,7 @@ int main(void) { nmod_poly_t f, g; nmod_poly_factor_t res; - mp_limb_t modulus; + ulong modulus; int i, j, k, n, num; double t, T1, T2, T3, T4; diff --git a/src/nmod_poly_factor/test/main.c b/src/nmod_poly_factor/test/main.c index c0bad1312d..2aac1a9b8e 100644 --- a/src/nmod_poly_factor/test/main.c +++ b/src/nmod_poly_factor/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-factor_berlekamp.c" diff --git a/src/nmod_poly_factor/test/t-factor.c b/src/nmod_poly_factor/test/t-factor.c index 954387f0e0..81c278aada 100644 --- a/src/nmod_poly_factor/test/t-factor.c +++ b/src/nmod_poly_factor/test/t-factor.c @@ -27,7 +27,7 @@ TEST_FUNCTION_START(nmod_poly_factor, state) int result = 1; nmod_poly_t pol1, poly, quot, rem, product; nmod_poly_factor_t res; - mp_limb_t modulus, lead = 1; + ulong modulus, lead = 1; slong length, num, i, j; ulong exp[5]; @@ -131,7 +131,7 @@ TEST_FUNCTION_START(nmod_poly_factor, state) { nmod_poly_t pol1, poly, quot, rem; nmod_poly_factor_t res, res2; - mp_limb_t modulus; + ulong modulus; slong length, num, i, j; slong exp[5]; ulong inflation; diff --git a/src/nmod_poly_factor/test/t-factor_berlekamp.c b/src/nmod_poly_factor/test/t-factor_berlekamp.c index 6deb9c5621..94182c8b4a 100644 --- a/src/nmod_poly_factor/test/t-factor_berlekamp.c +++ b/src/nmod_poly_factor/test/t-factor_berlekamp.c @@ -26,7 +26,7 @@ TEST_FUNCTION_START(nmod_poly_factor_berlekamp, state) int result = 1; nmod_poly_t pol1, poly, quot, rem; nmod_poly_factor_t res; - mp_limb_t modulus; + ulong modulus; slong i, length, num; modulus = n_randtest_prime(state, 0); diff --git a/src/nmod_poly_factor/test/t-factor_cantor_zassenhaus.c b/src/nmod_poly_factor/test/t-factor_cantor_zassenhaus.c index 85c6b3e29e..295f053f8d 100644 --- a/src/nmod_poly_factor/test/t-factor_cantor_zassenhaus.c +++ b/src/nmod_poly_factor/test/t-factor_cantor_zassenhaus.c @@ -27,7 +27,7 @@ TEST_FUNCTION_START(nmod_poly_factor_cantor_zassenhaus, state) nmod_poly_t pol1, poly, quot, rem; nmod_poly_t product; nmod_poly_factor_t res; - mp_limb_t modulus, lead; + ulong modulus, lead; slong i, j, length, num; slong exp[5]; diff --git a/src/nmod_poly_factor/test/t-factor_distinct_deg.c b/src/nmod_poly_factor/test/t-factor_distinct_deg.c index 40bb999b83..2c5e9b3d4b 100644 --- a/src/nmod_poly_factor/test/t-factor_distinct_deg.c +++ b/src/nmod_poly_factor/test/t-factor_distinct_deg.c @@ -28,7 +28,7 @@ TEST_FUNCTION_START(nmod_poly_factor_distinct_deg, state) { nmod_poly_t poly1, poly, q, r, product; nmod_poly_factor_t res; - mp_limb_t modulus, lead; + ulong modulus, lead; slong i, length, num; slong *degs; slong num_of_deg[MAX_DEG + 1]; diff --git a/src/nmod_poly_factor/test/t-factor_distinct_deg_threaded.c b/src/nmod_poly_factor/test/t-factor_distinct_deg_threaded.c index 4ea0f2907b..60dbb698a5 100644 --- a/src/nmod_poly_factor/test/t-factor_distinct_deg_threaded.c +++ b/src/nmod_poly_factor/test/t-factor_distinct_deg_threaded.c @@ -29,7 +29,7 @@ TEST_FUNCTION_START(nmod_poly_factor_distinct_deg_threaded, state) { nmod_poly_t poly1, poly, q, r, product; nmod_poly_factor_t res; - mp_limb_t modulus, lead; + ulong modulus, lead; slong i, length, num; slong *degs; slong num_of_deg[MAX_DEG + 1]; diff --git a/src/nmod_poly_factor/test/t-factor_kaltofen_shoup.c b/src/nmod_poly_factor/test/t-factor_kaltofen_shoup.c index 6aa9bd2526..913f74adeb 100644 --- 
a/src/nmod_poly_factor/test/t-factor_kaltofen_shoup.c +++ b/src/nmod_poly_factor/test/t-factor_kaltofen_shoup.c @@ -25,7 +25,7 @@ TEST_FUNCTION_START(nmod_poly_factor_kaltofen_shoup, state) { nmod_poly_t poly1, poly, q, r, product; nmod_poly_factor_t res; - mp_limb_t modulus, lead; + ulong modulus, lead; slong i, j, length, num; slong exp[5]; diff --git a/src/nmod_poly_factor/test/t-factor_squarefree.c b/src/nmod_poly_factor/test/t-factor_squarefree.c index 787b024a8b..0844db0aed 100644 --- a/src/nmod_poly_factor/test/t-factor_squarefree.c +++ b/src/nmod_poly_factor/test/t-factor_squarefree.c @@ -25,7 +25,7 @@ TEST_FUNCTION_START(nmod_poly_factor_squarefree, state) int result = 1; nmod_poly_t pol1, poly, quot, rem; nmod_poly_factor_t res; - mp_limb_t modulus; + ulong modulus; slong exp[5], prod1; slong length, i, j, num; diff --git a/src/nmod_poly_factor/test/t-interval_threaded.c b/src/nmod_poly_factor/test/t-interval_threaded.c index ae32c34589..353c248c96 100644 --- a/src/nmod_poly_factor/test/t-interval_threaded.c +++ b/src/nmod_poly_factor/test/t-interval_threaded.c @@ -28,7 +28,7 @@ TEST_FUNCTION_START(nmod_poly_factor_interval_threaded, state) nmod_poly_t a, b, c, cinv, d; nmod_poly_struct * tmp; nmod_poly_struct * e; - mp_limb_t modulus; + ulong modulus; slong j, num_threads, l; nmod_poly_interval_poly_arg_t * args1; thread_pool_handle * threads; diff --git a/src/nmod_poly_factor/test/t-is_irreducible.c b/src/nmod_poly_factor/test/t-is_irreducible.c index c0f8b6e531..a614db47f5 100644 --- a/src/nmod_poly_factor/test/t-is_irreducible.c +++ b/src/nmod_poly_factor/test/t-is_irreducible.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_factor_is_irreducible, state) { nmod_poly_t poly, poly2, poly3; nmod_poly_factor_t factors; - mp_limb_t modulus; + ulong modulus; slong length, length2; int result = 1; diff --git a/src/nmod_poly_factor/test/t-is_irreducible_ddf.c b/src/nmod_poly_factor/test/t-is_irreducible_ddf.c index d2932b851c..5f202bee25 100644 --- a/src/nmod_poly_factor/test/t-is_irreducible_ddf.c +++ b/src/nmod_poly_factor/test/t-is_irreducible_ddf.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_factor_is_irreducible_ddf, state) for (iter = 0; iter < 200 * flint_test_multiplier(); iter++) { nmod_poly_t poly, poly2, poly3; - mp_limb_t modulus; + ulong modulus; slong length, length2; int result = 1; diff --git a/src/nmod_poly_factor/test/t-is_irreducible_rabin.c b/src/nmod_poly_factor/test/t-is_irreducible_rabin.c index 9063412fb7..8dbd3270c5 100644 --- a/src/nmod_poly_factor/test/t-is_irreducible_rabin.c +++ b/src/nmod_poly_factor/test/t-is_irreducible_rabin.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_factor_is_irreducible_rabin, state) { nmod_poly_t poly, poly2, poly3; nmod_poly_factor_t factors; - mp_limb_t modulus; + ulong modulus; slong length, length2; int result = 1; diff --git a/src/nmod_poly_factor/test/t-is_squarefree.c b/src/nmod_poly_factor/test/t-is_squarefree.c index 3137371843..e890889543 100644 --- a/src/nmod_poly_factor/test/t-is_squarefree.c +++ b/src/nmod_poly_factor/test/t-is_squarefree.c @@ -20,7 +20,7 @@ TEST_FUNCTION_START(nmod_poly_factor_is_squarefree, state) for (iter = 0; iter < 200 * flint_test_multiplier(); iter++) { nmod_poly_t poly, Q, R, t; - mp_limb_t modulus; + ulong modulus; slong i, num_factors, exp, max_exp; int v, result; diff --git a/src/nmod_poly_factor/test/t-roots.c b/src/nmod_poly_factor/test/t-roots.c index e36e3b31f2..fd8262d08e 100644 --- a/src/nmod_poly_factor/test/t-roots.c +++ b/src/nmod_poly_factor/test/t-roots.c @@ -87,7 
+87,7 @@ TEST_FUNCTION_START(nmod_poly_factor_roots, state) for (i = 0; i < 20 * flint_test_multiplier(); i++) { - mp_limb_t p; + ulong p; nmod_poly_t f; nmod_poly_factor_t r; diff --git a/src/nmod_poly_factor/test/t-roots_factored.c b/src/nmod_poly_factor/test/t-roots_factored.c index d1f3746ea5..414edc7de4 100644 --- a/src/nmod_poly_factor/test/t-roots_factored.c +++ b/src/nmod_poly_factor/test/t-roots_factored.c @@ -122,8 +122,8 @@ TEST_FUNCTION_START(nmod_poly_factor_roots_factored, state) { nmod_poly_t f; nmod_poly_factor_t roots; - mp_limb_t a, n; - mp_limb_t * sqrt; + ulong a, n; + ulong * sqrt; n_factor_t nfac; n = n_randtest_bits(state, n_randint(state, FLINT_BITS) + 1); diff --git a/src/nmod_poly_mat.h b/src/nmod_poly_mat.h index 388d8530b2..5570734c9f 100644 --- a/src/nmod_poly_mat.h +++ b/src/nmod_poly_mat.h @@ -44,7 +44,7 @@ nmod_poly_mat_ncols(const nmod_poly_mat_t mat) /* Memory management *********************************************************/ -void nmod_poly_mat_init(nmod_poly_mat_t mat, slong rows, slong cols, mp_limb_t n); +void nmod_poly_mat_init(nmod_poly_mat_t mat, slong rows, slong cols, ulong n); void nmod_poly_mat_init_set(nmod_poly_mat_t mat, const nmod_poly_mat_t src); @@ -92,7 +92,7 @@ void nmod_poly_mat_shift_right(nmod_poly_mat_t res, /* Basic properties **********************************************************/ -NMOD_POLY_MAT_INLINE mp_limb_t +NMOD_POLY_MAT_INLINE ulong nmod_poly_mat_modulus(const nmod_poly_mat_t mat) { return mat->modulus; @@ -179,7 +179,7 @@ void nmod_poly_mat_scalar_mul_nmod_poly(nmod_poly_mat_t B, const nmod_poly_mat_t A, const nmod_poly_t c); void nmod_poly_mat_scalar_mul_nmod(nmod_poly_mat_t B, - const nmod_poly_mat_t A, mp_limb_t c); + const nmod_poly_mat_t A, ulong c); /* Matrix arithmetic *********************************************************/ @@ -215,7 +215,7 @@ void nmod_poly_mat_pow(nmod_poly_mat_t B, const nmod_poly_mat_t A, ulong exp); /* Evaluation ****************************************************************/ -void nmod_poly_mat_evaluate_nmod(nmod_mat_t B, const nmod_poly_mat_t A, mp_limb_t x); +void nmod_poly_mat_evaluate_nmod(nmod_mat_t B, const nmod_poly_mat_t A, ulong x); /* Row reduction *************************************************************/ diff --git a/src/nmod_poly_mat/det.c b/src/nmod_poly_mat/det.c index 0f54177a9b..c9851287ab 100644 --- a/src/nmod_poly_mat/det.c +++ b/src/nmod_poly_mat/det.c @@ -44,7 +44,7 @@ nmod_poly_mat_det_interpolate(nmod_poly_t det, const nmod_poly_mat_t A) slong i, l, n, len; nmod_mat_t X; - mp_ptr x, d; + nn_ptr x, d; n = A->r; diff --git a/src/nmod_poly_mat/evaluate_nmod.c b/src/nmod_poly_mat/evaluate_nmod.c index 282ef1c1e5..c63ebb968f 100644 --- a/src/nmod_poly_mat/evaluate_nmod.c +++ b/src/nmod_poly_mat/evaluate_nmod.c @@ -15,7 +15,7 @@ #include "nmod_poly_mat.h" void -nmod_poly_mat_evaluate_nmod(nmod_mat_t B, const nmod_poly_mat_t A, mp_limb_t x) +nmod_poly_mat_evaluate_nmod(nmod_mat_t B, const nmod_poly_mat_t A, ulong x) { slong i, j; diff --git a/src/nmod_poly_mat/init.c b/src/nmod_poly_mat/init.c index 0ea8b1bae0..518b9fda3f 100644 --- a/src/nmod_poly_mat/init.c +++ b/src/nmod_poly_mat/init.c @@ -14,7 +14,7 @@ #include "nmod_poly_mat.h" void -nmod_poly_mat_init(nmod_poly_mat_t A, slong rows, slong cols, mp_limb_t n) +nmod_poly_mat_init(nmod_poly_mat_t A, slong rows, slong cols, ulong n) { slong i; diff --git a/src/nmod_poly_mat/mul.c b/src/nmod_poly_mat/mul.c index 5f28ce5656..4ba5ee6a42 100644 --- a/src/nmod_poly_mat/mul.c +++ b/src/nmod_poly_mat/mul.c @@ -37,7 
+37,7 @@ nmod_poly_mat_mul(nmod_poly_mat_t C, const nmod_poly_mat_t A, else { slong Alen, Blen; - mp_limb_t mod = nmod_poly_mat_modulus(A); + ulong mod = nmod_poly_mat_modulus(A); Alen = nmod_poly_mat_max_length(A); Blen = nmod_poly_mat_max_length(B); diff --git a/src/nmod_poly_mat/mul_interpolate.c b/src/nmod_poly_mat/mul_interpolate.c index c575d12314..d44bc82d37 100644 --- a/src/nmod_poly_mat/mul_interpolate.c +++ b/src/nmod_poly_mat/mul_interpolate.c @@ -24,10 +24,10 @@ nmod_poly_mat_mul_interpolate(nmod_poly_mat_t C, const nmod_poly_mat_t A, nmod_mat_t *C_mod, *A_mod, *B_mod; - mp_ptr xs; - mp_ptr tt, uu; - mp_ptr * tree; - mp_ptr weights; + nn_ptr xs; + nn_ptr tt, uu; + nn_ptr * tree; + nn_ptr weights; nmod_t mod; if (B->r == 0) diff --git a/src/nmod_poly_mat/scalar.c b/src/nmod_poly_mat/scalar.c index c8b71dcf81..c5cb3abef6 100644 --- a/src/nmod_poly_mat/scalar.c +++ b/src/nmod_poly_mat/scalar.c @@ -14,7 +14,7 @@ void nmod_poly_mat_scalar_mul_nmod(nmod_poly_mat_t B, const nmod_poly_mat_t A, - mp_limb_t c) + ulong c) { slong i, j; diff --git a/src/nmod_poly_mat/sqr.c b/src/nmod_poly_mat/sqr.c index d646d1482f..5a1ce9f070 100644 --- a/src/nmod_poly_mat/sqr.c +++ b/src/nmod_poly_mat/sqr.c @@ -32,7 +32,7 @@ nmod_poly_mat_sqr(nmod_poly_mat_t C, const nmod_poly_mat_t A) else { ulong Alen; - mp_limb_t mod = nmod_poly_mat_modulus(A); + ulong mod = nmod_poly_mat_modulus(A); Alen = nmod_poly_mat_max_length(A); @@ -105,10 +105,10 @@ nmod_poly_mat_sqr_interpolate(nmod_poly_mat_t C, const nmod_poly_mat_t A) nmod_mat_t *C_mod, *A_mod; - mp_ptr xs; - mp_ptr tt, uu; - mp_ptr * tree; - mp_ptr weights; + nn_ptr xs; + nn_ptr tt, uu; + nn_ptr * tree; + nn_ptr weights; nmod_t mod; if (A->c == 0) diff --git a/src/nmod_poly_mat/test/main.c b/src/nmod_poly_mat/test/main.c index e903255b6f..b0fa12aadf 100644 --- a/src/nmod_poly_mat/test/main.c +++ b/src/nmod_poly_mat/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/nmod_poly_mat/test/t-add.c b/src/nmod_poly_mat/test/t-add.c index bc182275d4..c440d3771f 100644 --- a/src/nmod_poly_mat/test/t-add.c +++ b/src/nmod_poly_mat/test/t-add.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_mat_add, state) { nmod_poly_mat_t A, B, C; nmod_mat_t a, b, c, d; - mp_limb_t mod, x; + ulong mod, x; slong m, n, deg; m = n_randint(state, 20); @@ -80,7 +80,7 @@ TEST_FUNCTION_START(nmod_poly_mat_add, state) { nmod_poly_mat_t A, B, C; slong m, n, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 20); @@ -121,7 +121,7 @@ TEST_FUNCTION_START(nmod_poly_mat_add, state) { nmod_poly_mat_t A, B, C; slong m, n, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 20); diff --git a/src/nmod_poly_mat/test/t-concat_horizontal.c b/src/nmod_poly_mat/test/t-concat_horizontal.c index fb9a2b44c3..4ca1e3d5e0 100644 --- a/src/nmod_poly_mat/test/t-concat_horizontal.c +++ b/src/nmod_poly_mat/test/t-concat_horizontal.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_mat_concat_horizontal, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { slong c1, c2, r1; - mp_limb_t mod; + ulong mod; c1 = n_randint(state, 10); c2 = n_randint(state, 10); diff --git a/src/nmod_poly_mat/test/t-concat_vertical.c b/src/nmod_poly_mat/test/t-concat_vertical.c index 91ae86ba5c..2833b7ffc0 100644 --- a/src/nmod_poly_mat/test/t-concat_vertical.c +++ b/src/nmod_poly_mat/test/t-concat_vertical.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_mat_concat_vertical, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { slong r1, r2, c1; - mp_limb_t mod; + ulong mod; r1 = n_randint(state, 10); r2 = n_randint(state, 10); diff --git a/src/nmod_poly_mat/test/t-det.c b/src/nmod_poly_mat/test/t-det.c index fb85afa253..424b5599f2 100644 --- a/src/nmod_poly_mat/test/t-det.c +++ b/src/nmod_poly_mat/test/t-det.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_mat_det, state) nmod_poly_mat_t A, B, C; nmod_poly_t a, b, ab, c; slong n, deg; - mp_limb_t mod; + ulong mod; float density; mod = n_randtest_prime(state, 0); diff --git a/src/nmod_poly_mat/test/t-det_interpolate.c b/src/nmod_poly_mat/test/t-det_interpolate.c index 321b0bbde3..59520bae56 100644 --- a/src/nmod_poly_mat/test/t-det_interpolate.c +++ b/src/nmod_poly_mat/test/t-det_interpolate.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_mat_det_interpolate, state) nmod_poly_mat_t A; nmod_poly_t a, b; slong n, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); n = n_randint(state, 10); diff --git a/src/nmod_poly_mat/test/t-get_set_coeff_mat.c b/src/nmod_poly_mat/test/t-get_set_coeff_mat.c index 2a9834be26..aedfc3822d 100644 --- a/src/nmod_poly_mat/test/t-get_set_coeff_mat.c +++ b/src/nmod_poly_mat/test/t-get_set_coeff_mat.c @@ -24,7 +24,7 @@ TEST_FUNCTION_START(nmod_poly_mat_get_set_coeff_mat, state) nmod_poly_mat_t pmat2; nmod_mat_t cmat1; nmod_mat_t cmat2; - mp_limb_t mod; + ulong mod; slong m, n, deg; int jx; diff --git a/src/nmod_poly_mat/test/t-init_clear.c b/src/nmod_poly_mat/test/t-init_clear.c index c942ae8960..3344c07c5d 100644 --- a/src/nmod_poly_mat/test/t-init_clear.c +++ b/src/nmod_poly_mat/test/t-init_clear.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_mat_init_clear, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_mat_t a; - mp_limb_t mod; + ulong mod; slong j, k; slong rows = 
n_randint(state, 100); slong cols = n_randint(state, 100); diff --git a/src/nmod_poly_mat/test/t-inv.c b/src/nmod_poly_mat/test/t-inv.c index 9997fcb721..78665354c6 100644 --- a/src/nmod_poly_mat/test/t-inv.c +++ b/src/nmod_poly_mat/test/t-inv.c @@ -25,7 +25,7 @@ TEST_FUNCTION_START(nmod_poly_mat_inv, state) slong n, deg; float density; int ns1, ns2, result; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); n = n_randint(state, 8); @@ -73,7 +73,7 @@ TEST_FUNCTION_START(nmod_poly_mat_inv, state) slong n, deg; float density; int nonsingular; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); n = n_randint(state, 10); diff --git a/src/nmod_poly_mat/test/t-mul.c b/src/nmod_poly_mat/test/t-mul.c index ca5c86b131..5a31e87005 100644 --- a/src/nmod_poly_mat/test/t-mul.c +++ b/src/nmod_poly_mat/test/t-mul.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_mat_mul, state) { nmod_poly_mat_t A, B, C; nmod_mat_t a, b, c, d; - mp_limb_t mod, x; + ulong mod, x; slong m, n, k, deg; mod = n_randtest_prime(state, 0); @@ -83,7 +83,7 @@ TEST_FUNCTION_START(nmod_poly_mat_mul, state) { nmod_poly_mat_t A, B, C; slong m, n, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 20); @@ -125,7 +125,7 @@ TEST_FUNCTION_START(nmod_poly_mat_mul, state) { nmod_poly_mat_t A, B, C; slong m, n, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 20); diff --git a/src/nmod_poly_mat/test/t-mul_KS.c b/src/nmod_poly_mat/test/t-mul_KS.c index 9bc316b56f..0bcf894a27 100644 --- a/src/nmod_poly_mat/test/t-mul_KS.c +++ b/src/nmod_poly_mat/test/t-mul_KS.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_mat_mul_KS, state) { nmod_poly_mat_t A, B, C, D; slong m, n, k, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 15); @@ -69,7 +69,7 @@ TEST_FUNCTION_START(nmod_poly_mat_mul_KS, state) { nmod_poly_mat_t A, B, C; slong m, n, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 20); @@ -111,7 +111,7 @@ TEST_FUNCTION_START(nmod_poly_mat_mul_KS, state) { nmod_poly_mat_t A, B, C; slong m, n, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 20); diff --git a/src/nmod_poly_mat/test/t-mul_interpolate.c b/src/nmod_poly_mat/test/t-mul_interpolate.c index babefadb14..ef958d48ef 100644 --- a/src/nmod_poly_mat/test/t-mul_interpolate.c +++ b/src/nmod_poly_mat/test/t-mul_interpolate.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_mat_mul_interpolate, state) { nmod_poly_mat_t A, B, C; nmod_mat_t a, b, c, d; - mp_limb_t mod, x; + ulong mod, x; slong m, n, k, deg; mod = n_randtest_prime(state, 0); @@ -87,7 +87,7 @@ TEST_FUNCTION_START(nmod_poly_mat_mul_interpolate, state) { nmod_poly_mat_t A, B, C; slong m, n, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 20); @@ -134,7 +134,7 @@ TEST_FUNCTION_START(nmod_poly_mat_mul_interpolate, state) { nmod_poly_mat_t A, B, C; slong m, n, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 20); diff --git a/src/nmod_poly_mat/test/t-neg.c b/src/nmod_poly_mat/test/t-neg.c index 80d135d2a7..fef16a1732 100644 --- a/src/nmod_poly_mat/test/t-neg.c +++ b/src/nmod_poly_mat/test/t-neg.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_mat_neg, state) { nmod_poly_mat_t A, B; nmod_mat_t a, b, c; - mp_limb_t x, mod; + ulong x, mod; slong m, n, deg; mod = n_randtest_prime(state, 0); @@ -72,7 +72,7 @@ 
TEST_FUNCTION_START(nmod_poly_mat_neg, state) { nmod_poly_mat_t A, B; slong m, n, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 20); diff --git a/src/nmod_poly_mat/test/t-nullspace.c b/src/nmod_poly_mat/test/t-nullspace.c index bb20719f0b..488718fe8a 100644 --- a/src/nmod_poly_mat/test/t-nullspace.c +++ b/src/nmod_poly_mat/test/t-nullspace.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_mat_nullspace, state) nmod_poly_mat_t A, N, AN; slong n, m, deg, rank, nullity; float density; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 13); diff --git a/src/nmod_poly_mat/test/t-one.c b/src/nmod_poly_mat/test/t-one.c index d674853f0a..4c74e86fc6 100644 --- a/src/nmod_poly_mat/test/t-one.c +++ b/src/nmod_poly_mat/test/t-one.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_mat_one, state) { nmod_poly_mat_t A; slong m, n; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 10); diff --git a/src/nmod_poly_mat/test/t-pow.c b/src/nmod_poly_mat/test/t-pow.c index d06449af66..2d7eb3a60d 100644 --- a/src/nmod_poly_mat/test/t-pow.c +++ b/src/nmod_poly_mat/test/t-pow.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_mat_pow, state) { nmod_poly_mat_t A, B, C; slong m, j, exp, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 6); @@ -65,7 +65,7 @@ TEST_FUNCTION_START(nmod_poly_mat_pow, state) { nmod_poly_mat_t A, B; slong m, exp, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 6); diff --git a/src/nmod_poly_mat/test/t-rank.c b/src/nmod_poly_mat/test/t-rank.c index c5a86d97c1..c777906b7a 100644 --- a/src/nmod_poly_mat/test/t-rank.c +++ b/src/nmod_poly_mat/test/t-rank.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_mat_rank, state) { nmod_poly_mat_t A; nmod_mat_t Ax; - mp_limb_t mod, x; + ulong mod, x; slong j, m, n, deg, rank, zrank; float density; diff --git a/src/nmod_poly_mat/test/t-rref.c b/src/nmod_poly_mat/test/t-rref.c index 4fb92654b6..1fee281387 100644 --- a/src/nmod_poly_mat/test/t-rref.c +++ b/src/nmod_poly_mat/test/t-rref.c @@ -67,7 +67,7 @@ TEST_FUNCTION_START(nmod_poly_mat_rref, state) slong *perm; float density; int equal; - mp_limb_t p; + ulong p; m = n_randint(state, 10); n = n_randint(state, 10); diff --git a/src/nmod_poly_mat/test/t-set_nmod_mat.c b/src/nmod_poly_mat/test/t-set_nmod_mat.c index 4eb0432403..a1b64127bb 100644 --- a/src/nmod_poly_mat/test/t-set_nmod_mat.c +++ b/src/nmod_poly_mat/test/t-set_nmod_mat.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_mat_set_nmod_mat, state) { nmod_poly_mat_t pmat; nmod_mat_t cmat; - mp_limb_t mod; + ulong mod; slong m, n, deg; mod = n_randtest_prime(state, 0); diff --git a/src/nmod_poly_mat/test/t-shift_left_right.c b/src/nmod_poly_mat/test/t-shift_left_right.c index 9fd3cd1a4d..968755962f 100644 --- a/src/nmod_poly_mat/test/t-shift_left_right.c +++ b/src/nmod_poly_mat/test/t-shift_left_right.c @@ -18,7 +18,7 @@ void test_with_dimensions1(ulong rdim, ulong cdim, flint_rand_t state) int result; nmod_poly_mat_t a, b; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong shift = n_randint(state, 100); nmod_poly_mat_init(a, rdim, cdim, n); @@ -50,7 +50,7 @@ void test_with_dimensions2(ulong rdim, ulong cdim, flint_rand_t state) int result; nmod_poly_mat_t a, b, c; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); slong shift = n_randint(state, 100); nmod_poly_mat_init(a, rdim, cdim, n); 
diff --git a/src/nmod_poly_mat/test/t-solve_fflu.c b/src/nmod_poly_mat/test/t-solve_fflu.c index 861dc26bb1..d9f1f1098f 100644 --- a/src/nmod_poly_mat/test/t-solve_fflu.c +++ b/src/nmod_poly_mat/test/t-solve_fflu.c @@ -24,7 +24,7 @@ TEST_FUNCTION_START(nmod_poly_mat_solve_fflu, state) slong n, m, deg; float density; int solved; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); n = n_randint(state, 15); diff --git a/src/nmod_poly_mat/test/t-sqr.c b/src/nmod_poly_mat/test/t-sqr.c index 9ca9166938..64d40c57ff 100644 --- a/src/nmod_poly_mat/test/t-sqr.c +++ b/src/nmod_poly_mat/test/t-sqr.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_mat_sqr, state) { nmod_poly_mat_t A, C; nmod_mat_t a, c, d; - mp_limb_t x, mod; + ulong x, mod; slong m, deg; mod = n_randtest_prime(state, 0); @@ -73,7 +73,7 @@ TEST_FUNCTION_START(nmod_poly_mat_sqr, state) { nmod_poly_mat_t A, B; slong m, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 20); diff --git a/src/nmod_poly_mat/test/t-sqr_KS.c b/src/nmod_poly_mat/test/t-sqr_KS.c index 133f45211a..fe0df810e3 100644 --- a/src/nmod_poly_mat/test/t-sqr_KS.c +++ b/src/nmod_poly_mat/test/t-sqr_KS.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_mat_sqr_KS, state) { nmod_poly_mat_t A, B, C; slong n, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); n = n_randint(state, 15); @@ -63,7 +63,7 @@ TEST_FUNCTION_START(nmod_poly_mat_sqr_KS, state) { nmod_poly_mat_t A, B; slong m, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 20); diff --git a/src/nmod_poly_mat/test/t-sqr_interpolate.c b/src/nmod_poly_mat/test/t-sqr_interpolate.c index 768c5a10ae..1c7d619e07 100644 --- a/src/nmod_poly_mat/test/t-sqr_interpolate.c +++ b/src/nmod_poly_mat/test/t-sqr_interpolate.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_mat_sqr_interpolate, state) { nmod_poly_mat_t A, C; nmod_mat_t a, c, d; - mp_limb_t x, mod; + ulong x, mod; slong m, deg; mod = n_randtest_prime(state, 0); @@ -76,7 +76,7 @@ TEST_FUNCTION_START(nmod_poly_mat_sqr_interpolate, state) { nmod_poly_mat_t A, B; slong m, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 20); diff --git a/src/nmod_poly_mat/test/t-sub.c b/src/nmod_poly_mat/test/t-sub.c index caaaebaa94..ea95042daf 100644 --- a/src/nmod_poly_mat/test/t-sub.c +++ b/src/nmod_poly_mat/test/t-sub.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_mat_sub, state) { nmod_poly_mat_t A, B, C; nmod_mat_t a, b, c, d; - mp_limb_t mod, x; + ulong mod, x; slong m, n, deg; m = n_randint(state, 20); @@ -80,7 +80,7 @@ TEST_FUNCTION_START(nmod_poly_mat_sub, state) { nmod_poly_mat_t A, B, C; slong m, n, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 20); @@ -121,7 +121,7 @@ TEST_FUNCTION_START(nmod_poly_mat_sub, state) { nmod_poly_mat_t A, B, C; slong m, n, deg; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 20); diff --git a/src/nmod_poly_mat/test/t-trace.c b/src/nmod_poly_mat/test/t-trace.c index 82b80837ac..9ae68ad8d7 100644 --- a/src/nmod_poly_mat/test/t-trace.c +++ b/src/nmod_poly_mat/test/t-trace.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(nmod_poly_mat_trace, state) { nmod_poly_mat_t A, B, AB, BA; nmod_poly_t trab, trba; - mp_limb_t mod; + ulong mod; slong m, n; mod = n_randtest_prime(state, 0); diff --git a/src/nmod_poly_mat/test/t-window_init_clear.c b/src/nmod_poly_mat/test/t-window_init_clear.c index 34c8e13712..0f79da33d5 100644 --- 
a/src/nmod_poly_mat/test/t-window_init_clear.c +++ b/src/nmod_poly_mat/test/t-window_init_clear.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_poly_mat_window_init_clear, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_poly_mat_t a, w; - mp_limb_t mod; + ulong mod; slong j, k, r1, r2, c1, c2; slong rows = n_randint(state, 10); slong cols = n_randint(state, 10); diff --git a/src/nmod_poly_mat/test/t-zero.c b/src/nmod_poly_mat/test/t-zero.c index cab5bcb01b..23a5489525 100644 --- a/src/nmod_poly_mat/test/t-zero.c +++ b/src/nmod_poly_mat/test/t-zero.c @@ -21,7 +21,7 @@ TEST_FUNCTION_START(nmod_poly_mat_zero, state) { nmod_poly_mat_t A; slong m, n; - mp_limb_t mod; + ulong mod; mod = n_randtest_prime(state, 0); m = n_randint(state, 10); diff --git a/src/nmod_types.h b/src/nmod_types.h index 7eee3ae71a..5e203eb207 100644 --- a/src/nmod_types.h +++ b/src/nmod_types.h @@ -20,10 +20,10 @@ extern "C" { typedef struct { - mp_limb_t * entries; + ulong * entries; slong r; slong c; - mp_limb_t ** rows; + ulong ** rows; nmod_t mod; } nmod_mat_struct; @@ -32,7 +32,7 @@ typedef nmod_mat_struct nmod_mat_t[1]; typedef struct { - mp_ptr coeffs; + nn_ptr coeffs; slong alloc; slong length; nmod_t mod; @@ -58,7 +58,7 @@ typedef struct slong r; slong c; nmod_poly_struct ** rows; - mp_limb_t modulus; + ulong modulus; } nmod_poly_mat_struct; @@ -66,11 +66,11 @@ typedef nmod_poly_mat_struct nmod_poly_mat_t[1]; typedef struct { - mp_limb_t * coeffs; + ulong * coeffs; ulong * exps; slong length; flint_bitcnt_t bits; /* number of bits per exponent */ - slong coeffs_alloc; /* abs size in mp_limb_t units */ + slong coeffs_alloc; /* abs size in ulong units */ slong exps_alloc; /* abs size in ulong units */ } nmod_mpoly_struct; @@ -78,7 +78,7 @@ typedef nmod_mpoly_struct nmod_mpoly_t[1]; typedef struct { - mp_limb_t constant; + ulong constant; nmod_mpoly_struct * poly; fmpz * exp; slong num; diff --git a/src/nmod_vec.h b/src/nmod_vec.h index dd1826c7f1..b908f30eea 100644 --- a/src/nmod_vec.h +++ b/src/nmod_vec.h @@ -32,31 +32,31 @@ do { \ } while (0) NMOD_VEC_INLINE -mp_ptr _nmod_vec_init(slong len) +nn_ptr _nmod_vec_init(slong len) { - return (mp_ptr) flint_malloc(len * sizeof(mp_limb_t)); + return (nn_ptr) flint_malloc(len * sizeof(ulong)); } NMOD_VEC_INLINE -void _nmod_vec_clear(mp_ptr vec) +void _nmod_vec_clear(nn_ptr vec) { flint_free(vec); } -void _nmod_vec_randtest(mp_ptr vec, flint_rand_t state, slong len, nmod_t mod); +void _nmod_vec_randtest(nn_ptr vec, flint_rand_t state, slong len, nmod_t mod); NMOD_VEC_INLINE -void _nmod_vec_zero(mp_ptr vec, slong len) +void _nmod_vec_zero(nn_ptr vec, slong len) { slong i; for (i = 0; i < len; i++) vec[i] = 0; } -flint_bitcnt_t _nmod_vec_max_bits(mp_srcptr vec, slong len); +flint_bitcnt_t _nmod_vec_max_bits(nn_srcptr vec, slong len); NMOD_VEC_INLINE -void _nmod_vec_set(mp_ptr res, mp_srcptr vec, slong len) +void _nmod_vec_set(nn_ptr res, nn_srcptr vec, slong len) { slong i; for (i = 0; i < len; i++) @@ -64,19 +64,19 @@ void _nmod_vec_set(mp_ptr res, mp_srcptr vec, slong len) } NMOD_VEC_INLINE -void _nmod_vec_swap(mp_ptr a, mp_ptr b, slong length) +void _nmod_vec_swap(nn_ptr a, nn_ptr b, slong length) { slong i; for (i = 0; i < length; i++) { - mp_limb_t t = a[i]; + ulong t = a[i]; a[i] = b[i]; b[i] = t; } } NMOD_VEC_INLINE -int _nmod_vec_equal(mp_srcptr vec, mp_srcptr vec2, slong len) +int _nmod_vec_equal(nn_srcptr vec, nn_srcptr vec2, slong len) { slong i; @@ -87,7 +87,7 @@ int _nmod_vec_equal(mp_srcptr vec, mp_srcptr vec2, slong len) } NMOD_VEC_INLINE -int 
_nmod_vec_is_zero(mp_srcptr vec, slong len) +int _nmod_vec_is_zero(nn_srcptr vec, slong len) { slong i; @@ -97,26 +97,26 @@ int _nmod_vec_is_zero(mp_srcptr vec, slong len) return 1; } -void _nmod_vec_reduce(mp_ptr res, mp_srcptr vec, +void _nmod_vec_reduce(nn_ptr res, nn_srcptr vec, slong len, nmod_t mod); -void _nmod_vec_add(mp_ptr res, mp_srcptr vec1, - mp_srcptr vec2, slong len, nmod_t mod); +void _nmod_vec_add(nn_ptr res, nn_srcptr vec1, + nn_srcptr vec2, slong len, nmod_t mod); -void _nmod_vec_sub(mp_ptr res, mp_srcptr vec1, - mp_srcptr vec2, slong len, nmod_t mod); +void _nmod_vec_sub(nn_ptr res, nn_srcptr vec1, + nn_srcptr vec2, slong len, nmod_t mod); -void _nmod_vec_neg(mp_ptr res, mp_srcptr vec, +void _nmod_vec_neg(nn_ptr res, nn_srcptr vec, slong len, nmod_t mod); -void _nmod_vec_scalar_mul_nmod(mp_ptr res, mp_srcptr vec, - slong len, mp_limb_t c, nmod_t mod); +void _nmod_vec_scalar_mul_nmod(nn_ptr res, nn_srcptr vec, + slong len, ulong c, nmod_t mod); -void _nmod_vec_scalar_mul_nmod_shoup(mp_ptr res, mp_srcptr vec, - slong len, mp_limb_t c, nmod_t mod); +void _nmod_vec_scalar_mul_nmod_shoup(nn_ptr res, nn_srcptr vec, + slong len, ulong c, nmod_t mod); -void _nmod_vec_scalar_addmul_nmod(mp_ptr res, mp_srcptr vec, - slong len, mp_limb_t c, nmod_t mod); +void _nmod_vec_scalar_addmul_nmod(nn_ptr res, nn_srcptr vec, + slong len, ulong c, nmod_t mod); int _nmod_vec_dot_bound_limbs(slong len, nmod_t mod); @@ -124,7 +124,7 @@ int _nmod_vec_dot_bound_limbs(slong len, nmod_t mod); #define NMOD_VEC_DOT(res, i, len, expr1, expr2, mod, nlimbs) \ do \ { \ - mp_limb_t s0, s1, s2, t0, t1; \ + ulong s0, s1, s2, t0, t1; \ s0 = s1 = s2 = UWORD(0); \ switch (nlimbs) \ { \ @@ -154,7 +154,7 @@ int _nmod_vec_dot_bound_limbs(slong len, nmod_t mod); } \ else \ { \ - mp_limb_t v0, v1, u0, u1; \ + ulong v0, v1, u0, u1; \ i = 0; \ if ((len) & 1) \ umul_ppmm(v1, v0, (expr1), (expr2)); \ @@ -185,23 +185,23 @@ int _nmod_vec_dot_bound_limbs(slong len, nmod_t mod); res = s0; \ } while (0); -mp_limb_t _nmod_vec_dot(mp_srcptr vec1, mp_srcptr vec2, +ulong _nmod_vec_dot(nn_srcptr vec1, nn_srcptr vec2, slong len, nmod_t mod, int nlimbs); -mp_limb_t _nmod_vec_dot_rev(mp_srcptr vec1, mp_srcptr vec2, +ulong _nmod_vec_dot_rev(nn_srcptr vec1, nn_srcptr vec2, slong len, nmod_t mod, int nlimbs); -mp_limb_t _nmod_vec_dot_ptr(mp_srcptr vec1, const mp_ptr * vec2, slong offset, +ulong _nmod_vec_dot_ptr(nn_srcptr vec1, const nn_ptr * vec2, slong offset, slong len, nmod_t mod, int nlimbs); /* some IO functions */ #ifdef FLINT_HAVE_FILE -int _nmod_vec_fprint_pretty(FILE * file, mp_srcptr vec, slong len, nmod_t mod); -int _nmod_vec_fprint(FILE * f, mp_srcptr vec, slong len, nmod_t mod); +int _nmod_vec_fprint_pretty(FILE * file, nn_srcptr vec, slong len, nmod_t mod); +int _nmod_vec_fprint(FILE * f, nn_srcptr vec, slong len, nmod_t mod); #endif -void _nmod_vec_print_pretty(mp_srcptr vec, slong len, nmod_t mod); -int _nmod_vec_print(mp_srcptr vec, slong len, nmod_t mod); +void _nmod_vec_print_pretty(nn_srcptr vec, slong len, nmod_t mod); +int _nmod_vec_print(nn_srcptr vec, slong len, nmod_t mod); diff --git a/src/nmod_vec/add.c b/src/nmod_vec/add.c index 1d1e0897ed..8a37a394ff 100644 --- a/src/nmod_vec/add.c +++ b/src/nmod_vec/add.c @@ -12,8 +12,8 @@ #include "nmod.h" #include "nmod_vec.h" -void _nmod_vec_add(mp_ptr res, mp_srcptr vec1, - mp_srcptr vec2, slong len, nmod_t mod) +void _nmod_vec_add(nn_ptr res, nn_srcptr vec1, + nn_srcptr vec2, slong len, nmod_t mod) { slong i; diff --git a/src/nmod_vec/discrete_log_pohlig_hellman.c 
b/src/nmod_vec/discrete_log_pohlig_hellman.c index 278ffe9665..1bdaa64caa 100644 --- a/src/nmod_vec/discrete_log_pohlig_hellman.c +++ b/src/nmod_vec/discrete_log_pohlig_hellman.c @@ -63,7 +63,7 @@ static slong _pow_ui_cost(ulong pow) Assume that p is prime, don't check. Return an estimate on the number of multiplications need for one run. */ -double nmod_discrete_log_pohlig_hellman_precompute_prime(nmod_discrete_log_pohlig_hellman_t L, mp_limb_t p) +double nmod_discrete_log_pohlig_hellman_precompute_prime(nmod_discrete_log_pohlig_hellman_t L, ulong p) { slong i; ulong c; @@ -198,12 +198,12 @@ double nmod_discrete_log_pohlig_hellman_precompute_prime(nmod_discrete_log_pohli } /* return x such that y = alpha^x mod p, alpha is the p.r. L->alpha*/ -ulong nmod_discrete_log_pohlig_hellman_run(const nmod_discrete_log_pohlig_hellman_t L, mp_limb_t y) +ulong nmod_discrete_log_pohlig_hellman_run(const nmod_discrete_log_pohlig_hellman_t L, ulong y) { slong i, j; ulong x, q, r, e, x0 = 0, x1 = 0, x2 = 0, pp0, pp1, acc, g, pipow; ulong lo, mid, hi, d; - mp_limb_t beta, z, w; + ulong beta, z, w; nmod_discrete_log_pohlig_hellman_entry_struct * Li; FLINT_ASSERT(y != 0); diff --git a/src/nmod_vec/dot.c b/src/nmod_vec/dot.c index a1239bef0a..9317b172a8 100644 --- a/src/nmod_vec/dot.c +++ b/src/nmod_vec/dot.c @@ -12,10 +12,10 @@ #include "nmod.h" #include "nmod_vec.h" -mp_limb_t -_nmod_vec_dot(mp_srcptr vec1, mp_srcptr vec2, slong len, nmod_t mod, int nlimbs) +ulong +_nmod_vec_dot(nn_srcptr vec1, nn_srcptr vec2, slong len, nmod_t mod, int nlimbs) { - mp_limb_t res; + ulong res; slong i; NMOD_VEC_DOT(res, i, len, vec1[i], vec2[i], mod, nlimbs); return res; @@ -24,7 +24,7 @@ _nmod_vec_dot(mp_srcptr vec1, mp_srcptr vec2, slong len, nmod_t mod, int nlimbs) int _nmod_vec_dot_bound_limbs(slong len, nmod_t mod) { - mp_limb_t t2, t1, t0, u1, u0; + ulong t2, t1, t0, u1, u0; umul_ppmm(t1, t0, mod.n - 1, mod.n - 1); umul_ppmm(t2, t1, t1, len); @@ -36,28 +36,28 @@ _nmod_vec_dot_bound_limbs(slong len, nmod_t mod) return (u0 != 0); } -mp_limb_t -_nmod_vec_dot_ptr(mp_srcptr vec1, const mp_ptr * vec2, slong offset, +ulong +_nmod_vec_dot_ptr(nn_srcptr vec1, const nn_ptr * vec2, slong offset, slong len, nmod_t mod, int nlimbs) { - mp_limb_t res; + ulong res; slong i; NMOD_VEC_DOT(res, i, len, vec1[i], vec2[i][offset], mod, nlimbs); return res; } -static mp_limb_t -nmod_fmma(mp_limb_t a, mp_limb_t b, mp_limb_t c, mp_limb_t d, nmod_t mod) +static ulong +nmod_fmma(ulong a, ulong b, ulong c, ulong d, nmod_t mod) { a = nmod_mul(a, b, mod); NMOD_ADDMUL(a, c, d, mod); return a; } -mp_limb_t -_nmod_vec_dot_rev(mp_srcptr vec1, mp_srcptr vec2, slong len, nmod_t mod, int nlimbs) +ulong +_nmod_vec_dot_rev(nn_srcptr vec1, nn_srcptr vec2, slong len, nmod_t mod, int nlimbs) { - mp_limb_t res; + ulong res; slong i; if (len <= 2 && nlimbs >= 2) diff --git a/src/nmod_vec/io.c b/src/nmod_vec/io.c index 9b783bd6f9..e9af9cf23c 100644 --- a/src/nmod_vec/io.c +++ b/src/nmod_vec/io.c @@ -13,7 +13,7 @@ #include "ulong_extras.h" #include "nmod_vec.h" -int _nmod_vec_fprint_pretty(FILE * file, mp_srcptr vec, slong len, nmod_t mod) +int _nmod_vec_fprint_pretty(FILE * file, nn_srcptr vec, slong len, nmod_t mod) { slong j; int z, width; @@ -55,17 +55,17 @@ int _nmod_vec_fprint_pretty(FILE * file, mp_srcptr vec, slong len, nmod_t mod) return z; } -void _nmod_vec_print_pretty(mp_srcptr vec, slong len, nmod_t mod) +void _nmod_vec_print_pretty(nn_srcptr vec, slong len, nmod_t mod) { _nmod_vec_fprint_pretty(stdout, vec, len, mod); } -int 
_nmod_vec_print(mp_srcptr vec, slong len, nmod_t mod) +int _nmod_vec_print(nn_srcptr vec, slong len, nmod_t mod) { return _nmod_vec_fprint_pretty(stdout, vec, len, mod); } -int _nmod_vec_fprint(FILE * f, mp_srcptr vec, slong len, nmod_t mod) +int _nmod_vec_fprint(FILE * f, nn_srcptr vec, slong len, nmod_t mod) { return _nmod_vec_fprint_pretty(f, vec, len, mod); } diff --git a/src/nmod_vec/max_bits.c b/src/nmod_vec/max_bits.c index 869eef9f1b..b9fed615c0 100644 --- a/src/nmod_vec/max_bits.c +++ b/src/nmod_vec/max_bits.c @@ -12,10 +12,10 @@ #include "nmod_vec.h" -flint_bitcnt_t _nmod_vec_max_bits(mp_srcptr vec, slong len) +flint_bitcnt_t _nmod_vec_max_bits(nn_srcptr vec, slong len) { slong i; - mp_limb_t mask = 0; + ulong mask = 0; for (i = 0; i < len; i++) { diff --git a/src/nmod_vec/neg.c b/src/nmod_vec/neg.c index 96b5be9f51..7c27d8d7c4 100644 --- a/src/nmod_vec/neg.c +++ b/src/nmod_vec/neg.c @@ -12,7 +12,7 @@ #include "nmod.h" #include "nmod_vec.h" -void _nmod_vec_neg(mp_ptr res, mp_srcptr vec, slong len, nmod_t mod) +void _nmod_vec_neg(nn_ptr res, nn_srcptr vec, slong len, nmod_t mod) { slong i; for (i = 0 ; i < len; i++) diff --git a/src/nmod_vec/profile/p-add.c b/src/nmod_vec/profile/p-add.c index 1455e13fa6..a3540c4264 100644 --- a/src/nmod_vec/profile/p-add.c +++ b/src/nmod_vec/profile/p-add.c @@ -15,11 +15,11 @@ #include "nmod.h" #include "nmod_vec.h" -void _nmod_vec_add_fast(mp_ptr res, mp_srcptr vec1, mp_srcptr vec2, slong len, nmod_t mod); +void _nmod_vec_add_fast(nn_ptr res, nn_srcptr vec1, nn_srcptr vec2, slong len, nmod_t mod); #define NUMTYPES 1 -void (* funcs[])(mp_ptr, mp_srcptr, mp_srcptr, slong, nmod_t) = {_nmod_vec_add /*, _nmod_vec_add_fast */}; +void (* funcs[])(nn_ptr, nn_srcptr, nn_srcptr, slong, nmod_t) = {_nmod_vec_add /*, _nmod_vec_add_fast */}; char * str[] = {"_nmod_vec_add" /*, "_nmod_vec_add_fast" */}; @@ -35,14 +35,14 @@ info_t; void sample(void * arg, ulong unused) { - mp_limb_t n; + ulong n; nmod_t mod; info_t * info = (info_t *) arg; flint_bitcnt_t mod_bits = info->mod_bits; flint_bitcnt_t len = info->len; slong type; slong ix; - mp_ptr vec1, vec2, res; + nn_ptr vec1, vec2, res; double * timers = info->timers; double start; FLINT_TEST_INIT(state); diff --git a/src/nmod_vec/profile/p-add_sub_neg.c b/src/nmod_vec/profile/p-add_sub_neg.c index 3c818679f7..98408f563a 100644 --- a/src/nmod_vec/profile/p-add_sub_neg.c +++ b/src/nmod_vec/profile/p-add_sub_neg.c @@ -23,14 +23,14 @@ typedef struct void sample(void * arg, ulong count) { - mp_limb_t n; + ulong n; nmod_t mod; info_t * info = (info_t *) arg; flint_bitcnt_t bits = info->bits; int type = info->type; - mp_size_t j; + slong j; slong i; - mp_ptr vec1, vec2, res; + nn_ptr vec1, vec2, res; FLINT_TEST_INIT(state); diff --git a/src/nmod_vec/profile/p-mul.c b/src/nmod_vec/profile/p-mul.c index 97f8d53a7f..4132a52856 100644 --- a/src/nmod_vec/profile/p-mul.c +++ b/src/nmod_vec/profile/p-mul.c @@ -23,14 +23,14 @@ typedef struct void sample(void * arg, ulong count) { - mp_limb_t n; + ulong n; nmod_t mod; info_t * info = (info_t *) arg; flint_bitcnt_t bits = info->bits; int fullword = info->fullword; - mp_size_t j; + slong j; slong i; - mp_ptr vec1, vec2, res; + nn_ptr vec1, vec2, res; FLINT_TEST_INIT(state); n = n_randbits(state, bits); diff --git a/src/nmod_vec/profile/p-reduce.c b/src/nmod_vec/profile/p-reduce.c index a47b87a9e0..5fd57267cd 100644 --- a/src/nmod_vec/profile/p-reduce.c +++ b/src/nmod_vec/profile/p-reduce.c @@ -22,13 +22,13 @@ typedef struct void sample(void * arg, ulong count) { - mp_limb_t n; + 
ulong n; nmod_t mod; info_t * info = (info_t *) arg; flint_bitcnt_t bits = info->bits; - mp_ptr vec = _nmod_vec_init(1000); - mp_ptr vec2 = _nmod_vec_init(1000); - mp_size_t j; + nn_ptr vec = _nmod_vec_init(1000); + nn_ptr vec2 = _nmod_vec_init(1000); + slong j; slong i; FLINT_TEST_INIT(state); diff --git a/src/nmod_vec/profile/p-scalar_addmul.c b/src/nmod_vec/profile/p-scalar_addmul.c index df8446267d..fd61a7a24b 100644 --- a/src/nmod_vec/profile/p-scalar_addmul.c +++ b/src/nmod_vec/profile/p-scalar_addmul.c @@ -23,14 +23,14 @@ typedef struct void sample(void * arg, ulong count) { - mp_limb_t n, c; + ulong n, c; nmod_t mod; info_t * info = (info_t *) arg; flint_bitcnt_t bits = info->bits; slong length = info->length; slong i, j; - mp_ptr vec = _nmod_vec_init(length); - mp_ptr vec2 = _nmod_vec_init(length); + nn_ptr vec = _nmod_vec_init(length); + nn_ptr vec2 = _nmod_vec_init(length); FLINT_TEST_INIT(state); diff --git a/src/nmod_vec/profile/p-scalar_mul.c b/src/nmod_vec/profile/p-scalar_mul.c index 4a1e5c0abb..49d24db385 100644 --- a/src/nmod_vec/profile/p-scalar_mul.c +++ b/src/nmod_vec/profile/p-scalar_mul.c @@ -23,14 +23,14 @@ typedef struct void sample(void * arg, ulong count) { - mp_limb_t n, c; + ulong n, c; nmod_t mod; info_t * info = (info_t *) arg; flint_bitcnt_t bits = info->bits; slong length = info->length; slong i, j; - mp_ptr vec = _nmod_vec_init(length); - mp_ptr vec2 = _nmod_vec_init(length); + nn_ptr vec = _nmod_vec_init(length); + nn_ptr vec2 = _nmod_vec_init(length); FLINT_TEST_INIT(state); diff --git a/src/nmod_vec/randtest.c b/src/nmod_vec/randtest.c index 6b5d4e6549..2359d49015 100644 --- a/src/nmod_vec/randtest.c +++ b/src/nmod_vec/randtest.c @@ -12,7 +12,7 @@ #include "ulong_extras.h" #include "nmod_vec.h" -void _nmod_vec_randtest(mp_ptr vec, flint_rand_t state, slong len, nmod_t mod) +void _nmod_vec_randtest(nn_ptr vec, flint_rand_t state, slong len, nmod_t mod) { slong i, sparseness; diff --git a/src/nmod_vec/reduce.c b/src/nmod_vec/reduce.c index e21ec487d8..59c4998eb5 100644 --- a/src/nmod_vec/reduce.c +++ b/src/nmod_vec/reduce.c @@ -12,7 +12,7 @@ #include "nmod.h" #include "nmod_vec.h" -void _nmod_vec_reduce(mp_ptr res, mp_srcptr vec, slong len, nmod_t mod) +void _nmod_vec_reduce(nn_ptr res, nn_srcptr vec, slong len, nmod_t mod) { slong i; for (i = 0 ; i < len; i++) diff --git a/src/nmod_vec/scalar.c b/src/nmod_vec/scalar.c index 9d42947c7b..8ccb8b9978 100644 --- a/src/nmod_vec/scalar.c +++ b/src/nmod_vec/scalar.c @@ -14,11 +14,11 @@ #include "nmod.h" #include "nmod_vec.h" -void _nmod_vec_scalar_addmul_nmod_fullword(mp_ptr res, mp_srcptr vec, - slong len, mp_limb_t c, nmod_t mod) +void _nmod_vec_scalar_addmul_nmod_fullword(nn_ptr res, nn_srcptr vec, + slong len, ulong c, nmod_t mod) { slong i; - mp_limb_t t; + ulong t; for (i = 0; i < len; i++) { @@ -27,11 +27,11 @@ void _nmod_vec_scalar_addmul_nmod_fullword(mp_ptr res, mp_srcptr vec, } } -void _nmod_vec_scalar_addmul_nmod_generic(mp_ptr res, mp_srcptr vec, - slong len, mp_limb_t c, nmod_t mod) +void _nmod_vec_scalar_addmul_nmod_generic(nn_ptr res, nn_srcptr vec, + slong len, ulong c, nmod_t mod) { slong i; - mp_limb_t t; + ulong t; for (i = 0; i < len; i++) { @@ -40,11 +40,11 @@ void _nmod_vec_scalar_addmul_nmod_generic(mp_ptr res, mp_srcptr vec, } } -void _nmod_vec_scalar_addmul_nmod_shoup(mp_ptr res, mp_srcptr vec, - slong len, mp_limb_t c, nmod_t mod) +void _nmod_vec_scalar_addmul_nmod_shoup(nn_ptr res, nn_srcptr vec, + slong len, ulong c, nmod_t mod) { slong i; - mp_limb_t t, cinv; + ulong t, cinv; cinv = 
n_mulmod_precomp_shoup(c, mod.n); @@ -55,8 +55,8 @@ void _nmod_vec_scalar_addmul_nmod_shoup(mp_ptr res, mp_srcptr vec, } } -void _nmod_vec_scalar_addmul_nmod(mp_ptr res, mp_srcptr vec, - slong len, mp_limb_t c, nmod_t mod) +void _nmod_vec_scalar_addmul_nmod(nn_ptr res, nn_srcptr vec, + slong len, ulong c, nmod_t mod) { if (NMOD_BITS(mod) == FLINT_BITS) _nmod_vec_scalar_addmul_nmod_fullword(res, vec, len, c, mod); @@ -66,8 +66,8 @@ void _nmod_vec_scalar_addmul_nmod(mp_ptr res, mp_srcptr vec, _nmod_vec_scalar_addmul_nmod_generic(res, vec, len, c, mod); } -void _nmod_vec_scalar_mul_nmod_fullword(mp_ptr res, mp_srcptr vec, - slong len, mp_limb_t c, nmod_t mod) +void _nmod_vec_scalar_mul_nmod_fullword(nn_ptr res, nn_srcptr vec, + slong len, ulong c, nmod_t mod) { slong i; @@ -75,8 +75,8 @@ void _nmod_vec_scalar_mul_nmod_fullword(mp_ptr res, mp_srcptr vec, NMOD_MUL_FULLWORD(res[i], vec[i], c, mod); } -void _nmod_vec_scalar_mul_nmod_generic(mp_ptr res, mp_srcptr vec, - slong len, mp_limb_t c, nmod_t mod) +void _nmod_vec_scalar_mul_nmod_generic(nn_ptr res, nn_srcptr vec, + slong len, ulong c, nmod_t mod) { slong i; @@ -84,8 +84,8 @@ void _nmod_vec_scalar_mul_nmod_generic(mp_ptr res, mp_srcptr vec, NMOD_MUL_PRENORM(res[i], vec[i], c << mod.norm, mod); } -void _nmod_vec_scalar_mul_nmod(mp_ptr res, mp_srcptr vec, - slong len, mp_limb_t c, nmod_t mod) +void _nmod_vec_scalar_mul_nmod(nn_ptr res, nn_srcptr vec, + slong len, ulong c, nmod_t mod) { if (NMOD_BITS(mod) == FLINT_BITS) _nmod_vec_scalar_mul_nmod_fullword(res, vec, len, c, mod); @@ -95,11 +95,11 @@ void _nmod_vec_scalar_mul_nmod(mp_ptr res, mp_srcptr vec, _nmod_vec_scalar_mul_nmod_generic(res, vec, len, c, mod); } -void _nmod_vec_scalar_mul_nmod_shoup(mp_ptr res, mp_srcptr vec, - slong len, mp_limb_t c, nmod_t mod) +void _nmod_vec_scalar_mul_nmod_shoup(nn_ptr res, nn_srcptr vec, + slong len, ulong c, nmod_t mod) { slong i; - mp_limb_t w_pr; + ulong w_pr; w_pr = n_mulmod_precomp_shoup(c, mod.n); for (i = 0; i < len; i++) res[i] = n_mulmod_shoup(c, vec[i], w_pr, mod.n); diff --git a/src/nmod_vec/sub.c b/src/nmod_vec/sub.c index e8cbfa78f9..88f79059bd 100644 --- a/src/nmod_vec/sub.c +++ b/src/nmod_vec/sub.c @@ -12,8 +12,8 @@ #include "nmod.h" #include "nmod_vec.h" -void _nmod_vec_sub(mp_ptr res, mp_srcptr vec1, - mp_srcptr vec2, slong len, nmod_t mod) +void _nmod_vec_sub(nn_ptr res, nn_srcptr vec1, + nn_srcptr vec2, slong len, nmod_t mod) { slong i; if (mod.norm) diff --git a/src/nmod_vec/test/main.c b/src/nmod_vec/test/main.c index ba27bbd755..71ad164969 100644 --- a/src/nmod_vec/test/main.c +++ b/src/nmod_vec/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-add_sub_neg.c" diff --git a/src/nmod_vec/test/t-add_sub_neg.c b/src/nmod_vec/test/t-add_sub_neg.c index 522343f6c5..77912e465a 100644 --- a/src/nmod_vec/test/t-add_sub_neg.c +++ b/src/nmod_vec/test/t-add_sub_neg.c @@ -22,11 +22,11 @@ TEST_FUNCTION_START(nmod_vec_add_sub_neg, state) { slong len = n_randint(state, 100) + 1; nmod_t mod; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); - mp_ptr vec = _nmod_vec_init(len); - mp_ptr vec2 = _nmod_vec_init(len); - mp_ptr vec3 = _nmod_vec_init(len); + nn_ptr vec = _nmod_vec_init(len); + nn_ptr vec2 = _nmod_vec_init(len); + nn_ptr vec3 = _nmod_vec_init(len); nmod_init(&mod, n); @@ -49,12 +49,12 @@ TEST_FUNCTION_START(nmod_vec_add_sub_neg, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { slong len = n_randint(state, 100) + 1; - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_t mod; - mp_ptr vec = _nmod_vec_init(len); - mp_ptr vec2 = _nmod_vec_init(len); - mp_ptr vec3 = _nmod_vec_init(len); + nn_ptr vec = _nmod_vec_init(len); + nn_ptr vec2 = _nmod_vec_init(len); + nn_ptr vec3 = _nmod_vec_init(len); nmod_init(&mod, n); diff --git a/src/nmod_vec/test/t-discrete_log_pohlig_hellman.c b/src/nmod_vec/test/t-discrete_log_pohlig_hellman.c index f76eaad14a..5e8f9ba232 100644 --- a/src/nmod_vec/test/t-discrete_log_pohlig_hellman.c +++ b/src/nmod_vec/test/t-discrete_log_pohlig_hellman.c @@ -25,7 +25,7 @@ TEST_FUNCTION_START(nmod_vec_discrete_log_pohlig_hellman, state) { double score; nmod_t fpctx; - mp_limb_t p; + ulong p; p = n_randtest_prime(state, 1); nmod_init(&fpctx, p); @@ -38,7 +38,7 @@ TEST_FUNCTION_START(nmod_vec_discrete_log_pohlig_hellman, state) for (k = 0; k < 10; k++) { ulong x; - mp_limb_t y, alpha = nmod_discrete_log_pohlig_hellman_primitive_root(L); + ulong y, alpha = nmod_discrete_log_pohlig_hellman_primitive_root(L); x = n_urandint(state, p - 1); y = nmod_pow_ui(alpha, x, fpctx); diff --git a/src/nmod_vec/test/t-dot.c b/src/nmod_vec/test/t-dot.c index 430eb12d06..e1215a57c3 100644 --- a/src/nmod_vec/test/t-dot.c +++ b/src/nmod_vec/test/t-dot.c @@ -22,8 +22,8 @@ TEST_FUNCTION_START(nmod_vec_dot, state) { slong len; nmod_t mod; - mp_limb_t m, res; - mp_ptr x, y; + ulong m, res; + nn_ptr x, y; int limbs1; mpz_t s, t; slong j; diff --git a/src/nmod_vec/test/t-dot_bound_limbs.c b/src/nmod_vec/test/t-dot_bound_limbs.c index b45b789bfe..ac050d18e3 100644 --- a/src/nmod_vec/test/t-dot_bound_limbs.c +++ b/src/nmod_vec/test/t-dot_bound_limbs.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_vec_dot_bound_limbs, state) { slong len; nmod_t mod; - mp_limb_t m; + ulong m; int limbs1, limbs2; mpz_t t; diff --git a/src/nmod_vec/test/t-dot_ptr.c b/src/nmod_vec/test/t-dot_ptr.c index 3f0bbfe765..cc8485e71a 100644 --- a/src/nmod_vec/test/t-dot_ptr.c +++ b/src/nmod_vec/test/t-dot_ptr.c @@ -21,9 +21,9 @@ TEST_FUNCTION_START(nmod_vec_dot_ptr, state) { slong len; nmod_t mod; - mp_limb_t m, res, res2; - mp_ptr x, y; - mp_ptr * z; + ulong m, res, res2; + nn_ptr x, y; + nn_ptr * z; int limbs1; slong j, offset; @@ -35,7 +35,7 @@ TEST_FUNCTION_START(nmod_vec_dot_ptr, state) x = _nmod_vec_init(len); y = _nmod_vec_init(len); - z = flint_malloc(sizeof(mp_ptr) * len); + z = flint_malloc(sizeof(nn_ptr) * len); _nmod_vec_randtest(x, state, len, mod); _nmod_vec_randtest(y, state, len, mod); diff --git a/src/nmod_vec/test/t-nmod.c b/src/nmod_vec/test/t-nmod.c index 
1df2874a32..ff738575f6 100644 --- a/src/nmod_vec/test/t-nmod.c +++ b/src/nmod_vec/test/t-nmod.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_vec_nmod, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_t mod; - mp_limb_t m, a, b, c; + ulong m, a, b, c; mpz_t x, y, z; m = n_randtest_not_zero(state); @@ -54,7 +54,7 @@ TEST_FUNCTION_START(nmod_vec_nmod, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_t mod; - mp_limb_t m, a, b, c; + ulong m, a, b, c; mpz_t x, y, z; m = n_randtest_not_zero(state); @@ -86,7 +86,7 @@ TEST_FUNCTION_START(nmod_vec_nmod, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_t mod; - mp_limb_t m, a, b, c; + ulong m, a, b, c; mpz_t x, y, z; m = n_randtest_not_zero(state); @@ -118,7 +118,7 @@ TEST_FUNCTION_START(nmod_vec_nmod, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_t mod; - mp_limb_t m, a, b, c; + ulong m, a, b, c; mpz_t x, y, z; m = n_randtest_prime(state, 0); @@ -153,7 +153,7 @@ TEST_FUNCTION_START(nmod_vec_nmod, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_t mod; - mp_limb_t m, b, c; + ulong m, b, c; mpz_t y, z; m = n_randtest_prime(state, 0); @@ -182,7 +182,7 @@ TEST_FUNCTION_START(nmod_vec_nmod, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_t mod; - mp_limb_t m, b, c; + ulong m, b, c; mpz_t y, z; ulong exp; diff --git a/src/nmod_vec/test/t-nmod_pow_fmpz.c b/src/nmod_vec/test/t-nmod_pow_fmpz.c index 2a822d54ea..55533336c5 100644 --- a/src/nmod_vec/test/t-nmod_pow_fmpz.c +++ b/src/nmod_vec/test/t-nmod_pow_fmpz.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(nmod_vec_nmod_pow_fmpz, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_t mod; - mp_limb_t b, c1, c2; + ulong b, c1, c2; ulong exp1; fmpz_t exp2; @@ -48,7 +48,7 @@ TEST_FUNCTION_START(nmod_vec_nmod_pow_fmpz, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { nmod_t mod; - mp_limb_t b, c1, c2, c3; + ulong b, c1, c2, c3; fmpz_t exp1, exp2, exp3; nmod_init(&mod, n_randtest_not_zero(state)); diff --git a/src/nmod_vec/test/t-reduce.c b/src/nmod_vec/test/t-reduce.c index 823767bd83..863d29c1d1 100644 --- a/src/nmod_vec/test/t-reduce.c +++ b/src/nmod_vec/test/t-reduce.c @@ -20,10 +20,10 @@ TEST_FUNCTION_START(nmod_vec_reduce, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { slong j, len = n_randint(state, 100) + 1; - mp_ptr vec = _nmod_vec_init(len); - mp_ptr vec2 = _nmod_vec_init(len); + nn_ptr vec = _nmod_vec_init(len); + nn_ptr vec2 = _nmod_vec_init(len); - mp_limb_t n = n_randtest_not_zero(state); + ulong n = n_randtest_not_zero(state); nmod_t mod; nmod_init(&mod, n); diff --git a/src/nmod_vec/test/t-scalar_addmul_nmod.c b/src/nmod_vec/test/t-scalar_addmul_nmod.c index 23c042ed6d..2a3438ba69 100644 --- a/src/nmod_vec/test/t-scalar_addmul_nmod.c +++ b/src/nmod_vec/test/t-scalar_addmul_nmod.c @@ -22,13 +22,13 @@ TEST_FUNCTION_START(nmod_vec_scalar_addmul_nmod, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { slong len = n_randint(state, 100) + 1; - mp_limb_t n = n_randtest_not_zero(state); - mp_limb_t c = n_randint(state, n); + ulong n = n_randtest_not_zero(state); + ulong c = n_randint(state, n); nmod_t mod; - mp_ptr vec = _nmod_vec_init(len); - mp_ptr vec2 = _nmod_vec_init(len); - mp_ptr vec3 = _nmod_vec_init(len); + nn_ptr vec = _nmod_vec_init(len); + nn_ptr vec2 = _nmod_vec_init(len); + nn_ptr vec3 = _nmod_vec_init(len); nmod_init(&mod, n); diff --git a/src/nmod_vec/test/t-scalar_mul_nmod.c b/src/nmod_vec/test/t-scalar_mul_nmod.c index 
d7d8743de5..38d091c850 100644 --- a/src/nmod_vec/test/t-scalar_mul_nmod.c +++ b/src/nmod_vec/test/t-scalar_mul_nmod.c @@ -27,13 +27,13 @@ TEST_FUNCTION_START(nmod_vec_scalar_mul_nmod, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { slong len = n_randint(state, 100) + 1; - mp_limb_t n = n_randtest_not_zero(state); - mp_limb_t c = n_randint(state, n); + ulong n = n_randtest_not_zero(state); + ulong c = n_randint(state, n); nmod_t mod; - mp_ptr vec = _nmod_vec_init(len); - mp_ptr vec2 = _nmod_vec_init(len); - mp_ptr vec3 = _nmod_vec_init(len); + nn_ptr vec = _nmod_vec_init(len); + nn_ptr vec2 = _nmod_vec_init(len); + nn_ptr vec3 = _nmod_vec_init(len); nmod_init(&mod, n); diff --git a/src/nmod_vec/test/t-scalar_mul_nmod_shoup.c b/src/nmod_vec/test/t-scalar_mul_nmod_shoup.c index 73a8637fc8..4611ff1a53 100644 --- a/src/nmod_vec/test/t-scalar_mul_nmod_shoup.c +++ b/src/nmod_vec/test/t-scalar_mul_nmod_shoup.c @@ -22,13 +22,13 @@ TEST_FUNCTION_START(nmod_vec_scalar_mul_nmod_shoup, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { slong len = n_randint(state, 100) + 1; - mp_limb_t n = n_randtest_not_zero(state) / 2 + 1; - mp_limb_t c = n_randint(state, n) / 2; + ulong n = n_randtest_not_zero(state) / 2 + 1; + ulong c = n_randint(state, n) / 2; nmod_t mod; - mp_ptr vec = _nmod_vec_init(len); - mp_ptr vec2 = _nmod_vec_init(len); - mp_ptr vec3 = _nmod_vec_init(len); + nn_ptr vec = _nmod_vec_init(len); + nn_ptr vec2 = _nmod_vec_init(len); + nn_ptr vec3 = _nmod_vec_init(len); nmod_init(&mod, n); diff --git a/src/padic/test/main.c b/src/padic/test/main.c index d56a8d7cf4..59560c2f8a 100644 --- a/src/padic/test/main.c +++ b/src/padic/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/padic_mat/test/main.c b/src/padic_mat/test/main.c index 9ce48c8e4d..aa7c535765 100644 --- a/src/padic_mat/test/main.c +++ b/src/padic_mat/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/padic_poly/fit_length.c b/src/padic_poly/fit_length.c index 8bf09031c4..c69bb89966 100644 --- a/src/padic_poly/fit_length.c +++ b/src/padic_poly/fit_length.c @@ -24,7 +24,7 @@ void padic_poly_fit_length(padic_poly_t poly, slong len) if (poly->alloc) /* Realloc */ { poly->coeffs = (fmpz *) flint_realloc(poly->coeffs, len * sizeof(fmpz)); - mpn_zero((mp_ptr) (poly->coeffs + poly->alloc), len - poly->alloc); + mpn_zero((nn_ptr) (poly->coeffs + poly->alloc), len - poly->alloc); } else /* Nothing allocated already so do it now */ { diff --git a/src/padic_poly/inv_series.c b/src/padic_poly/inv_series.c index 95c4b5b08a..5058696b56 100644 --- a/src/padic_poly/inv_series.c +++ b/src/padic_poly/inv_series.c @@ -99,7 +99,7 @@ void padic_poly_inv_series(padic_poly_t Qinv, const padic_poly_t Q, slong n, Qcopy = (fmpz *) flint_malloc(n * sizeof(fmpz)); for (i = 0; i < Q->length; i++) Qcopy[i] = Q->coeffs[i]; - mpn_zero((mp_ptr) Qcopy + i, n - i); + mpn_zero((nn_ptr) Qcopy + i, n - i); Qalloc = 1; } diff --git a/src/padic_poly/realloc.c b/src/padic_poly/realloc.c index e2f61b5ba9..00ac62aa62 100644 --- a/src/padic_poly/realloc.c +++ b/src/padic_poly/realloc.c @@ -29,7 +29,7 @@ void padic_poly_realloc(padic_poly_t poly, slong alloc, const fmpz_t p) poly->coeffs = (fmpz *) flint_realloc(poly->coeffs, alloc * sizeof(fmpz)); if (alloc > poly->alloc) - mpn_zero((mp_ptr) (poly->coeffs + poly->alloc), + mpn_zero((nn_ptr) (poly->coeffs + poly->alloc), alloc - poly->alloc); } else /* Nothing allocated already so do it now */ diff --git a/src/padic_poly/set_coeff_padic.c b/src/padic_poly/set_coeff_padic.c index 0806be5e50..f161efeff7 100644 --- a/src/padic_poly/set_coeff_padic.c +++ b/src/padic_poly/set_coeff_padic.c @@ -30,7 +30,7 @@ void padic_poly_set_coeff_padic(padic_poly_t poly, slong n, const padic_t x, if (n + 1 > poly->length) { - mpn_zero((mp_ptr) (poly->coeffs + poly->length), n - poly->length); + mpn_zero((nn_ptr) (poly->coeffs + poly->length), n - poly->length); poly->length = n + 1; } diff --git a/src/padic_poly/test/main.c b/src/padic_poly/test/main.c index 0e546a4c59..9ea2abb3cb 100644 --- a/src/padic_poly/test/main.c +++ b/src/padic_poly/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/partitions/fmpz_fmpz.c b/src/partitions/fmpz_fmpz.c index 8c98c49bee..4ef3059973 100644 --- a/src/partitions/fmpz_fmpz.c +++ b/src/partitions/fmpz_fmpz.c @@ -73,10 +73,10 @@ partitions_fmpz_fmpz_hrr(fmpz_t p, const fmpz_t n, int use_doubles) /* To compute p(n) mod 2^64. 
*/ static void -partitions_vec(mp_ptr v, slong len) +partitions_vec(nn_ptr v, slong len) { slong i, j, n; - mp_limb_t p; + ulong p; for (n = 0; n < FLINT_MIN(len, NUMBER_OF_SMALL_PARTITIONS); n++) v[n] = partitions_lookup[n]; @@ -108,7 +108,7 @@ _partitions_fmpz_ui(fmpz_t res, ulong n, int use_doubles) } else if (FLINT_BITS == 64 && (n < 500 || (!use_doubles && n < 1200))) { - mp_ptr tmp = flint_malloc((n + 1) * sizeof(mp_limb_t)); + nn_ptr tmp = flint_malloc((n + 1) * sizeof(ulong)); if (n < 417) /* p(n) < 2^64 */ { diff --git a/src/partitions/hrr_sum_arb.c b/src/partitions/hrr_sum_arb.c index 2cad273f1d..28296f4026 100644 --- a/src/partitions/hrr_sum_arb.c +++ b/src/partitions/hrr_sum_arb.c @@ -81,7 +81,7 @@ partitions_term_bound(double n, double k) } /* Bound number of prime factors in k */ -static mp_limb_t primorial_tab[] = { +static ulong primorial_tab[] = { 1, 2, 6, 30, 210, 2310, 30030, 510510, 9699690, 223092870, #if FLINT64 UWORD(6469693230), UWORD(200560490130), UWORD(7420738134810), @@ -94,7 +94,7 @@ bound_primes(ulong k) { int i; - for (i = 0; i < sizeof(primorial_tab) / sizeof(mp_limb_t); i++) + for (i = 0; i < sizeof(primorial_tab) / sizeof(ulong); i++) if (k <= primorial_tab[i]) return i; @@ -125,7 +125,7 @@ static void eval_trig_prod(arb_t sum, trig_prod_t prod, slong prec) { int i; - mp_limb_t v; + ulong v; arb_t t; if (prod->prefactor == 0) diff --git a/src/partitions/test/main.c b/src/partitions/test/main.c index 19df11c1a9..c89f9e3596 100644 --- a/src/partitions/test/main.c +++ b/src/partitions/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-partitions_fmpz_ui.c" diff --git a/src/perm/test/main.c b/src/perm/test/main.c index 030db94920..9fffbd3ada 100644 --- a/src/perm/test/main.c +++ b/src/perm/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
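The partitions_vec hunk above fills v with the partition numbers p(0), ..., p(len-1) reduced mod 2^64 (per the surrounding comment), seeding the first NUMBER_OF_SMALL_PARTITIONS entries from a lookup table. One natural way to continue such a table is Euler's pentagonal-number recurrence p(n) = sum_{k>=1} (-1)^(k+1) [p(n - k(3k-1)/2) + p(n - k(3k+1)/2)], letting unsigned wraparound do the modular reduction. The following is a plausible self-contained reconstruction of that step, not code lifted from the patch:

    #include "flint.h"

    /* Extend v[0..len) with p(n) mod 2^FLINT_BITS, assuming v[0..start) is
       already filled (e.g. from partitions_lookup).  Signs alternate with k;
       the negated terms rely on ulong wraparound. */
    void partitions_vec_sketch(ulong * v, slong start, slong len)
    {
        slong n, k, g1, g2;
        ulong s;

        for (n = start; n < len; n++)
        {
            s = 0;
            for (k = 1; ; k++)
            {
                g1 = n - k * (3 * k - 1) / 2;  /* generalized pentagonal indices */
                g2 = n - k * (3 * k + 1) / 2;

                if (g1 < 0)
                    break;
                s += (k & 1) ? v[g1] : -v[g1];
                if (g2 >= 0)
                    s += (k & 1) ? v[g2] : -v[g2];
            }
            v[n] = s;
        }
    }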
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-compose.c" diff --git a/src/profile/p-invert_limb.c b/src/profile/p-invert_limb.c index 2fe6a10bfb..4098c97933 100644 --- a/src/profile/p-invert_limb.c +++ b/src/profile/p-invert_limb.c @@ -20,7 +20,7 @@ typedef struct #define invert_limb_naive(ninv, n) \ do { \ - mp_limb_t dummy; \ + ulong dummy; \ udiv_qrnnd (ninv, dummy, ~(n), ~(WORD(0)), n); \ } while (0) diff --git a/src/profile/p-udiv_qrnnd.c b/src/profile/p-udiv_qrnnd.c index 2fc23bf7e8..8779855c85 100644 --- a/src/profile/p-udiv_qrnnd.c +++ b/src/profile/p-udiv_qrnnd.c @@ -15,8 +15,8 @@ void sample(void * arg, ulong count) { - mp_limb_t d; - mp_ptr array = (mp_ptr) flint_malloc(200 * sizeof(mp_limb_t)); + ulong d; + nn_ptr array = (nn_ptr) flint_malloc(200 * sizeof(ulong)); ulong i; int j; diff --git a/src/profile/p-udiv_qrnnd_preinv.c b/src/profile/p-udiv_qrnnd_preinv.c index 035a39d64f..7872209107 100644 --- a/src/profile/p-udiv_qrnnd_preinv.c +++ b/src/profile/p-udiv_qrnnd_preinv.c @@ -15,8 +15,8 @@ void sample(void * arg, ulong count) { - mp_limb_t d, q, r, dinv, norm; - mp_ptr array = (mp_ptr) flint_malloc(200 * sizeof(mp_limb_t)); + ulong d, q, r, dinv, norm; + nn_ptr array = (nn_ptr) flint_malloc(200 * sizeof(ulong)); ulong i; int j; diff --git a/src/profiler.h b/src/profiler.h index 5f75f84a80..bbc26d9307 100644 --- a/src/profiler.h +++ b/src/profiler.h @@ -116,8 +116,8 @@ void timeit_stop_us(timeit_t t) ******************************************************************************/ -#if (defined( _MSC_VER ) || (GMP_LIMB_BITS == 64 && defined (__amd64__)) || \ - (GMP_LIMB_BITS == 32 && (defined (__i386__) || \ +#if (defined( _MSC_VER ) || (FLINT_BITS == 64 && defined (__amd64__)) || \ + (FLINT_BITS == 32 && (defined (__i386__) || \ defined (__i486__) || defined(__amd64__)))) #define FLINT_NUM_CLOCKS 20 diff --git a/src/qadic/ctx_init.c b/src/qadic/ctx_init.c index 07ec252927..4174dc64d0 100644 --- a/src/qadic/ctx_init.c +++ b/src/qadic/ctx_init.c @@ -27,7 +27,7 @@ int _qadic_ctx_init_conway_ui(qadic_ctx_t ctx, ulong p, slong d, ulong tmp[410]; /* Largest degree is 409 */ slong num_nzcoeffs; slong * idx_nzcoeffs; - mp_ptr nzcoeffs; + nn_ptr nzcoeffs; slong ix, jx; result = _nmod_poly_conway(tmp, p, d); diff --git a/src/qadic/sqrt.c b/src/qadic/sqrt.c index 0cf14bbed6..d99cb97df7 100644 --- a/src/qadic/sqrt.c +++ b/src/qadic/sqrt.c @@ -159,7 +159,7 @@ _artin_schreier_preimage(fmpz *rop, const fmpz *op, slong len, for (k = 0; k < d; k++) { - nmod_mat_entry(A, k, i) = (mp_limb_t) f[k]; + nmod_mat_entry(A, k, i) = (ulong) f[k]; } fmpz_zero(e + i); } diff --git a/src/qadic/test/main.c b/src/qadic/test/main.c index afccc1c5c0..be86b019b3 100644 --- a/src/qadic/test/main.c +++ b/src/qadic/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
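One remark on the invert_limb_naive macro in the p-invert_limb.c hunk above: writing B = 2^FLINT_BITS, the call udiv_qrnnd(ninv, dummy, ~n, ~WORD(0), n) divides the two-limb value (B - 1 - n)*B + (B - 1) = B^2 - n*B - 1 by n, so it computes ninv = floor((B^2 - 1)/n) - B. This is the usual precomputed reciprocal for a normalised divisor n (top bit set), the same quantity consumed by the routine profiled in the p-udiv_qrnnd_preinv.c hunk above.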
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-add.c" diff --git a/src/qfb/exponent_element.c b/src/qfb/exponent_element.c index 615931f3e7..5fce2da6bd 100644 --- a/src/qfb/exponent_element.c +++ b/src/qfb/exponent_element.c @@ -131,7 +131,7 @@ int qfb_exponent_element(fmpz_t exponent, qfb_t f, fmpz_t n, ulong B1, ulong B2_ n_primes_t iter; ulong hi, lo; double quot; - mp_bitcnt_t bits0; + flint_bitcnt_t bits0; n_primes_init(iter); diff --git a/src/qfb/reduced_forms.c b/src/qfb/reduced_forms.c index 7c6fd59ab6..e97493c395 100644 --- a/src/qfb/reduced_forms.c +++ b/src/qfb/reduced_forms.c @@ -36,9 +36,9 @@ int pow_incr(int * pows, int * exp, int n) slong qfb_reduced_forms_large(qfb ** forms, slong d) { slong a, j, k, p, alim, alloc, num, roots, sqrt, i, prod, prime_i; - mp_srcptr primes; + nn_srcptr primes; const double * prime_inverses; - mp_limb_t a2; + ulong a2; n_factor_t * fac; if (d >= 0) @@ -106,13 +106,13 @@ slong qfb_reduced_forms_large(qfb ** forms, slong d) for (a = 1; a <= alim; a++) /* loop through possible a's */ { - mp_limb_t * s; + ulong * s; roots = n_sqrtmodn(&s, n_negmod((-d)%(4*a), 4*a), fac + a); for (j = 0; j < roots; j++) /* loop through all square roots of d mod 4a */ { - mp_limb_signed_t b = s[j]; + slong b = s[j]; if (b > 2*a) b -= 4*a; @@ -123,11 +123,11 @@ slong qfb_reduced_forms_large(qfb ** forms, slong d) -sqrt(2^(B-1)/3) < b < sqrt(2^(B-1)/3) 0 < -d < 2^(B-1) */ - mp_limb_t c = ((mp_limb_t) (b*b) + (mp_limb_t) (-d))/(4*(mp_limb_t) a); + ulong c = ((ulong) (b*b) + (ulong) (-d))/(4*(ulong) a); - if (c >= (mp_limb_t) a && (b >= 0 || a != c)) /* we have a form */ + if (c >= (ulong) a && (b >= 0 || a != c)) /* we have a form */ { - mp_limb_t g; + ulong g; if (b) { @@ -165,11 +165,11 @@ slong qfb_reduced_forms_large(qfb ** forms, slong d) slong qfb_reduced_forms(qfb ** forms, slong d) { slong a, b, k, c, p, blim, alloc, num, sqrt, i, prod, prime_i; - mp_srcptr primes; + nn_srcptr primes; const double * prime_inverses; - mp_limb_t b2, exp, primes_cutoff = 0; + ulong b2, exp, primes_cutoff = 0; n_factor_t * fac; - mp_limb_t * s; + ulong * s; if (d >= 0) flint_throw(FLINT_ERROR, "%s not implemented for positive discriminant\n", __func__); @@ -199,10 +199,10 @@ slong qfb_reduced_forms(qfb ** forms, slong d) for (i = 0; i < num; i++) /* sieve with each sqrt mod p */ { - mp_limb_t off = s[i]; + ulong off = s[i]; while (off <= blim) { - b2 = (off*off - (mp_limb_t) d)/4; + b2 = (off*off - (ulong) d)/4; fac[off].p[fac[off].num] = p; fac[off].exp[fac[off].num] = n_remove2_precomp(&b2, p, prime_inverses[prime_i]); @@ -219,7 +219,7 @@ slong qfb_reduced_forms(qfb ** forms, slong d) for (b = (d & 1); b <= blim; b += 2) /* write any remaining factors, including 2^exp */ { - b2 = ((mp_limb_t)(b*b - d))/4; + b2 = ((ulong)(b*b - d))/4; exp = flint_ctz(b2); /* powers of 2 */ if (exp) @@ -249,7 +249,7 @@ slong qfb_reduced_forms(qfb ** forms, slong d) int pows[FLINT_MAX_FACTORS_IN_LIMB]; int n = fac[b].num; - b2 = ((mp_limb_t)(b*b - d))/4; + b2 = ((ulong)(b*b - d))/4; for (i = 0; i < n; i++) pows[i] = 0; @@ -265,7 +265,7 @@ slong qfb_reduced_forms(qfb ** forms, slong d) if (a <= c && b <= a) /* we have a form */ { - mp_limb_t g; + ulong g; if (b) { diff --git a/src/qfb/test/main.c b/src/qfb/test/main.c index 18f0142851..e6e99b0b70 100644 --- a/src/qfb/test/main.c +++ b/src/qfb/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
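A short note on the qfb_reduced_forms hunks above: a binary quadratic form ax^2 + bxy + cy^2 has discriminant d = b^2 - 4ac, so once a and b are fixed the remaining coefficient is c = (b^2 - d)/(4a). The discriminant is negative here (the positive case is rejected at the top of both functions), so b^2 - d = b^2 + (-d) is a sum of nonnegative terms and the whole computation stays in unsigned range, which is what the rewritten ulong casts make explicit.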
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-exponent.c" diff --git a/src/qfb/test/t-prime_form.c b/src/qfb/test/t-prime_form.c index fde492d286..91bbedaedd 100644 --- a/src/qfb/test/t-prime_form.c +++ b/src/qfb/test/t-prime_form.c @@ -47,7 +47,7 @@ TEST_FUNCTION_START(qfb_prime_form, state) n = n_randprime(state, n_randint(state, FLINT_BITS - 1) + 2, 0); fmpz_set_ui(p, n); Dmodp = fmpz_fdiv_ui(D, n); - } while ((mp_limb_signed_t) Dmodp < 0 /* Jacobi can't handle this */ + } while ((slong) Dmodp < 0 /* Jacobi can't handle this */ || (n == 2 && ((q = fmpz_fdiv_ui(D, 8)) == 2 || q == 3 || q == 5)) || (n != 2 && Dmodp != 0 && n_jacobi(Dmodp, n) < 0)); diff --git a/src/qqbar/test/main.c b/src/qqbar/test/main.c index a7aea93e31..f02c7a10e1 100644 --- a/src/qqbar/test/main.c +++ b/src/qqbar/test/main.c @@ -9,8 +9,6 @@ (at your option) any later version. See . */ -#include -#include #include "fexpr.h" /* Include functions *********************************************************/ diff --git a/src/qsieve.h b/src/qsieve.h index b5e7891d97..1797653a9d 100644 --- a/src/qsieve.h +++ b/src/qsieve.h @@ -32,7 +32,7 @@ extern "C" { typedef struct { - mp_limb_t pinv; /* precomputed inverse */ + ulong pinv; /* precomputed inverse */ int p; /* prime */ char size; } prime_t; @@ -53,14 +53,14 @@ typedef struct /* matrix column */ typedef struct /* entry in hash table */ { - mp_limb_t prime; /* value of prime */ - mp_limb_t next; /* next prime which have same hash value as 'prime' */ - mp_limb_t count; /* number of occurrence of 'prime' */ + ulong prime; /* value of prime */ + ulong next; /* next prime which have same hash value as 'prime' */ + ulong count; /* number of occurrence of 'prime' */ } hash_t; typedef struct /* format for relation */ { - mp_limb_t lp; /* large prime, is 1, if relation is full */ + ulong lp; /* large prime, is 1, if relation is full */ slong num_factors; /* number of factors, excluding small factor */ slong small_primes; /* number of small factors */ slong * small; /* exponent of small factors */ @@ -97,7 +97,7 @@ typedef struct ulong ks_primes; /* number of Knuth-Schroeppel primes */ - mp_limb_t k; /* Multiplier */ + ulong k; /* Multiplier */ fmpz_t kn; /* kn as a multiprecision integer */ slong num_primes; /* number of factor base primes including k and 2 */ @@ -120,18 +120,18 @@ typedef struct fmpz_t A; /* current value of coeff A of poly Ax^2 + Bx + C */ fmpz_t B; /* B value of poly */ - mp_limb_t * A_ind; /* indices of factor base primes dividing A */ + ulong * A_ind; /* indices of factor base primes dividing A */ fmpz_t * A_divp; /* A_divp[i] = (A/p_i), where the p_i are the prime factors of A */ - mp_limb_t * B0_terms; /* B0_terms[i] = min(gamma_i, p - gamma_i) where + ulong * B0_terms; /* B0_terms[i] = min(gamma_i, p - gamma_i) where gamma_i = (sqrt(kn)*(A_divp[i])^(-1)) mod p_i, where the p_i are the prime factors of A */ fmpz_t * B_terms; /* B_terms[i] = A_divp[i]*B0_terms[i] (multprec) */ - mp_limb_t * A_inv; /* A_inv[k] = A^(-1) mod p_k, for FB prime p_k */ - mp_limb_t ** A_inv2B; /* A_inv2B[i][k] = 2 * B_terms[i] * A^(-1) mod p_k + ulong * A_inv; /* A_inv[k] = A^(-1) mod p_k, for FB prime p_k */ + ulong ** A_inv2B; /* A_inv2B[i][k] = 2 * B_terms[i] * A^(-1) mod p_k for FB prime p_k */ int * soln1; /* soln1[k] = first poly root mod FB prime p_k */ @@ -156,9 +156,9 @@ typedef struct slong h; /* tuple entry we just set, numbered from 1 at end of tuple */ slong m; /* last value we just set a tuple entry 
to */ slong A_ind_diff; /* diff. between indices of (s-1) and (s-2)-th A-factor */ - mp_limb_t * curr_subset; /* current tuple */ - mp_limb_t * first_subset; /* first tuple, in case of restart */ - mp_limb_t j; /* index of s-th factor of first A, if s > 3 */ + ulong * curr_subset; /* current tuple */ + ulong * first_subset; /* first tuple, in case of restart */ + ulong j; /* index of s-th factor of first A, if s > 3 */ #if QS_DEBUG slong poly_count; /* keep track of the number of polynomials used */ @@ -182,7 +182,7 @@ typedef struct slong table_size; /* size of table */ hash_t * table; /* store 'prime' occurring in partial */ - mp_limb_t * hash_table; /* to keep track of location of primes in 'table' */ + ulong * hash_table; /* to keep track of location of primes in 'table' */ slong extra_rels; /* number of extra relations beyond num_primes */ slong max_factors; /* maximum number of factors a relation can have */ @@ -228,7 +228,7 @@ typedef qs_s qs_t[1]; #if 0 /* TODO have the tuning values taken from here if multithreaded */ -static const mp_limb_t qsieve_tune[][6] = +static const ulong qsieve_tune[][6] = { {10, 50, 100, 5, 2 * 2000, 30}, /* */ {20, 50, 120, 6, 2 * 2500, 30}, /* */ @@ -261,7 +261,7 @@ static const mp_limb_t qsieve_tune[][6] = #else /* currently tuned for four threads */ -static const mp_limb_t qsieve_tune[][6] = +static const ulong qsieve_tune[][6] = { {10, 50, 90, 5, 2 * 1500, 18}, /* */ {20, 50, 90, 6, 2 * 1600, 18}, /* */ @@ -298,24 +298,24 @@ static const mp_limb_t qsieve_tune[][6] = #endif /* number of entries in the tuning table */ -#define QS_TUNE_SIZE (sizeof(qsieve_tune)/(6*sizeof(mp_limb_t))) +#define QS_TUNE_SIZE (sizeof(qsieve_tune)/(6*sizeof(ulong))) void qsieve_init(qs_t qs_inf, const fmpz_t n); -mp_limb_t qsieve_knuth_schroeppel(qs_t qs_inf); +ulong qsieve_knuth_schroeppel(qs_t qs_inf); void qsieve_clear(qs_t qs_inf); void qsieve_factor(fmpz_factor_t factors, const fmpz_t n); -prime_t * compute_factor_base(mp_limb_t * small_factor, qs_t qs_inf, +prime_t * compute_factor_base(ulong * small_factor, qs_t qs_inf, slong num_primes); -mp_limb_t qsieve_primes_init(qs_t qs_inf); +ulong qsieve_primes_init(qs_t qs_inf); -mp_limb_t qsieve_primes_increment(qs_t qs_inf, mp_limb_t delta); +ulong qsieve_primes_increment(qs_t qs_inf, ulong delta); -mp_limb_t qsieve_poly_init(qs_t qs_inf); +ulong qsieve_poly_init(qs_t qs_inf); int qsieve_init_A(qs_t qs_inf); @@ -353,12 +353,12 @@ int qsieve_relations_cmp(const void * a, const void * b); slong qsieve_merge_relations(qs_t qs_inf); -void qsieve_write_to_file(qs_t qs_inf, mp_limb_t prime, +void qsieve_write_to_file(qs_t qs_inf, ulong prime, const fmpz_t Y, const qs_poly_t poly); -hash_t * qsieve_get_table_entry(qs_t qs_inf, mp_limb_t prime); +hash_t * qsieve_get_table_entry(qs_t qs_inf, ulong prime); -void qsieve_add_to_hashtable(qs_t qs_inf, mp_limb_t prime); +void qsieve_add_to_hashtable(qs_t qs_inf, ulong prime); relation_t qsieve_parse_relation(qs_t qs_inf); diff --git a/src/qsieve/collect_relations.c b/src/qsieve/collect_relations.c index 0d91eeec18..6de4bf1ab8 100644 --- a/src/qsieve/collect_relations.c +++ b/src/qsieve/collect_relations.c @@ -32,7 +32,7 @@ void qsieve_do_sieving(qs_t qs_inf, unsigned char * sieve, qs_poly_t poly) int * soln1 = poly->soln1; int * soln2 = poly->soln2; prime_t * factor_base = qs_inf->factor_base; - mp_limb_t p; + ulong p; unsigned char * end = sieve + qs_inf->sieve_size; register unsigned char * pos1; @@ -92,7 +92,7 @@ void qsieve_do_sieving2(qs_t qs_inf, unsigned char * sieve, qs_poly_t 
poly) { slong b, d1, d2, i; slong pind, size; - mp_limb_t p; + ulong p; slong num_primes = qs_inf->num_primes; int * soln1 = poly->soln1; int * soln2 = poly->soln2; @@ -210,15 +210,15 @@ void qsieve_do_sieving2(qs_t qs_inf, unsigned char * sieve, qs_poly_t poly) slong qsieve_evaluate_candidate(qs_t qs_inf, ulong i, unsigned char * sieve, qs_poly_t poly) { slong bits, exp, extra_bits; - mp_limb_t modp, prime; + ulong modp, prime; slong num_primes = qs_inf->num_primes; prime_t * factor_base = qs_inf->factor_base; slong * small = poly->small; /* exponents of small primes and mult. */ fac_t * factor = poly->factor; int * soln1 = poly->soln1; int * soln2 = poly->soln2; - mp_limb_t * A_ind = qs_inf->A_ind; - mp_limb_t pinv; + ulong * A_ind = qs_inf->A_ind; + ulong pinv; slong num_factors = 0; slong relations = 0; slong j, k; diff --git a/src/qsieve/compute_poly_data.c b/src/qsieve/compute_poly_data.c index 8805ad8ee4..e85365a323 100644 --- a/src/qsieve/compute_poly_data.c +++ b/src/qsieve/compute_poly_data.c @@ -51,10 +51,10 @@ int qsieve_init_A(qs_t qs_inf) { slong i, j; slong s, low, high, span, m, h; - mp_limb_t bits, num_factors, rem, mid; - mp_limb_t factor_bound[40]; - mp_limb_t * A_ind; - mp_limb_t * curr_subset, * first_subset; + ulong bits, num_factors, rem, mid; + ulong factor_bound[40]; + ulong * A_ind; + ulong * curr_subset, * first_subset; prime_t * factor_base = qs_inf->factor_base; fmpz_t prod, temp, upper_bound, lower_bound; int ret = 1, found_j; @@ -349,9 +349,9 @@ int qsieve_init_A(qs_t qs_inf) void qsieve_reinit_A(qs_t qs_inf) { slong low, s, j; - mp_limb_t * A_ind = qs_inf->A_ind; - mp_limb_t * curr_subset = qs_inf->curr_subset; - mp_limb_t * first_subset = qs_inf->first_subset; + ulong * A_ind = qs_inf->A_ind; + ulong * curr_subset = qs_inf->curr_subset; + ulong * first_subset = qs_inf->first_subset; prime_t * factor_base = qs_inf->factor_base; low = qs_inf->low; @@ -399,9 +399,9 @@ int qsieve_next_A(qs_t qs_inf) slong span = qs_inf->span; slong h = qs_inf->h; slong m = qs_inf->m; - mp_limb_t ret = 1; - mp_limb_t * curr_subset = qs_inf->curr_subset; - mp_limb_t * A_ind = qs_inf->A_ind; + ulong ret = 1; + ulong * curr_subset = qs_inf->curr_subset; + ulong * A_ind = qs_inf->A_ind; prime_t * factor_base = qs_inf->factor_base; fmpz_t prod, temp; int found_j, inc_diff; @@ -538,17 +538,17 @@ void qsieve_init_poly_first(qs_t qs_inf) { slong i, k; slong s = qs_inf->s; - mp_limb_t * A_ind = qs_inf->A_ind; - mp_limb_t * A_inv = qs_inf->A_inv; - mp_limb_t * B0_terms = qs_inf->B0_terms; - mp_limb_t ** A_inv2B = qs_inf->A_inv2B; + ulong * A_ind = qs_inf->A_ind; + ulong * A_inv = qs_inf->A_inv; + ulong * B0_terms = qs_inf->B0_terms; + ulong ** A_inv2B = qs_inf->A_inv2B; fmpz_t * B_terms = qs_inf->B_terms; fmpz_t * A_divp = qs_inf->A_divp; prime_t * factor_base = qs_inf->factor_base; int * sqrts = qs_inf->sqrts; int * soln1 = qs_inf->soln1; int * soln2 = qs_inf->soln2; - mp_limb_t p, pinv, temp, temp2; + ulong p, pinv, temp, temp2; #if QS_DEBUG qs_inf->poly_count += 1; @@ -667,8 +667,8 @@ void qsieve_init_poly_next(qs_t qs_inf, slong i) prime_t * factor_base = qs_inf->factor_base; int * soln1 = qs_inf->soln1; int * soln2 = qs_inf->soln2; - mp_limb_t ** A_inv2B = qs_inf->A_inv2B; - mp_limb_t sign, p, r1, r2; + ulong ** A_inv2B = qs_inf->A_inv2B; + ulong sign, p, r1, r2; fmpz_t temp; fmpz_init(temp); diff --git a/src/qsieve/factor.c b/src/qsieve/factor.c index 04c756122d..b859ff5f11 100644 --- a/src/qsieve/factor.c +++ b/src/qsieve/factor.c @@ -54,7 +54,7 @@ int compare_facs(const void * a, 
const void * b) void qsieve_factor(fmpz_factor_t factors, const fmpz_t n) { qs_t qs_inf; - mp_limb_t small_factor, delta; + ulong small_factor, delta; ulong expt = 0; unsigned char * sieve; slong ncols, nrows, i, j = 0, count, num_primes; diff --git a/src/qsieve/knuth_schroeppel.c b/src/qsieve/knuth_schroeppel.c index 3c57fe498f..9e188ef21f 100644 --- a/src/qsieve/knuth_schroeppel.c +++ b/src/qsieve/knuth_schroeppel.c @@ -21,26 +21,26 @@ #endif /* Array of possible Knuth-Schroeppel multipliers */ -static const mp_limb_t multipliers[] = {1, 2, 3, 5, 6, 7, 10, 11, 13, 14, 15, +static const ulong multipliers[] = {1, 2, 3, 5, 6, 7, 10, 11, 13, 14, 15, 17, 19, 21, 22, 23, 26, 29, 30, 31, 33, 34, 35, 37, 38, 41, 42, 43, 47}; /* Number of possible Knuth-Schroeppel multipliers */ -#define KS_MULTIPLIERS (sizeof(multipliers)/sizeof(mp_limb_t)) +#define KS_MULTIPLIERS (sizeof(multipliers)/sizeof(ulong)) /* Try to compute a multiplier k such that there are a lot of small primes which are quadratic residues modulo kn. If a small weight of n is found during this process it is returned. */ -mp_limb_t qsieve_knuth_schroeppel(qs_t qs_inf) +ulong qsieve_knuth_schroeppel(qs_t qs_inf) { float weights[KS_MULTIPLIERS]; /* array of Knuth-Schroeppel weights */ float best_weight = -10.0f; /* best weight so far */ ulong i, num_primes, max; float logpdivp; - mp_limb_t nmod8, mod8, p, nmod, pinv, mult; + ulong nmod8, mod8, p, nmod, pinv, mult; int kron, jac; n_primes_t iter; diff --git a/src/qsieve/large_prime_variant.c b/src/qsieve/large_prime_variant.c index 15ebc84589..94fb4d7627 100644 --- a/src/qsieve/large_prime_variant.c +++ b/src/qsieve/large_prime_variant.c @@ -102,15 +102,15 @@ int qsieve_is_relation(qs_t qs_inf, relation_t a) The layout is as follows: total write size of relation (including this write size) - large prime (1 * mp_limb_t) + large prime (1 * ulong) number of small primes (1 * slong) small primes (number of small primes * slong) number of factors (1 * slong) (factor, exponent) (number of factors * fac_t) Y->_mp_size (1 * slong) - Y->_mp_d (Y->_mp_size * mp_limb_t) + Y->_mp_d (Y->_mp_size * ulong) */ -void qsieve_write_to_file(qs_t qs_inf, mp_limb_t prime, const fmpz_t Y, const qs_poly_t poly) +void qsieve_write_to_file(qs_t qs_inf, ulong prime, const fmpz_t Y, const qs_poly_t poly) { slong num_factors = poly->num_factors; slong * small = poly->small; @@ -124,17 +124,17 @@ void qsieve_write_to_file(qs_t qs_inf, mp_limb_t prime, const fmpz_t Y, const qs /* Write size of relation */ write_size = sizeof(slong) /* total write size */ - + sizeof(mp_limb_t) /* large prime */ + + sizeof(ulong) /* large prime */ + sizeof(slong) /* number of small primes */ + sizeof(slong) * qs_inf->small_primes /* small primes */ + sizeof(slong) /* number of factors */ + sizeof(fac_t) * num_factors /* factors */ + sizeof(slong) /* Y->_mp_size */ - + sizeof(mp_limb_t) * (Ysz != 0 ? FLINT_ABS(Ysz) : 1); /* Y->_mp_d */ + + sizeof(ulong) * (Ysz != 0 ? FLINT_ABS(Ysz) : 1); /* Y->_mp_d */ fwrite(&write_size, sizeof(slong), 1, (FILE *) qs_inf->siqs); /* Write large prime */ - fwrite(&prime, sizeof(mp_limb_t), 1, (FILE *) qs_inf->siqs); + fwrite(&prime, sizeof(ulong), 1, (FILE *) qs_inf->siqs); /* NOTE: We do not have to write small primes. 
*/ /* Write number of small primes */ @@ -158,14 +158,14 @@ void qsieve_write_to_file(qs_t qs_inf, mp_limb_t prime, const fmpz_t Y, const qs slong abslimb = FLINT_ABS(*Y); /* Write mock Y->_mp_d */ - fwrite(&abslimb, sizeof(mp_limb_t), 1, (FILE *) qs_inf->siqs); + fwrite(&abslimb, sizeof(ulong), 1, (FILE *) qs_inf->siqs); } else { - mp_srcptr Yd = COEFF_TO_PTR(*Y)->_mp_d; + nn_srcptr Yd = COEFF_TO_PTR(*Y)->_mp_d; /* Write Y->_mp_d */ - fwrite(Yd, sizeof(mp_limb_t), FLINT_ABS(Ysz), (FILE *) qs_inf->siqs); + fwrite(Yd, sizeof(ulong), FLINT_ABS(Ysz), (FILE *) qs_inf->siqs); } } @@ -185,11 +185,11 @@ void qsieve_write_to_file(qs_t qs_inf, mp_limb_t prime, const fmpz_t Y, const qs return a pointer to location of 'prime' in table if it exists else create an entry for it and return pointer to that */ -hash_t * qsieve_get_table_entry(qs_t qs_inf, mp_limb_t prime) +hash_t * qsieve_get_table_entry(qs_t qs_inf, ulong prime) { - mp_limb_t offset, first_offset; + ulong offset, first_offset; hash_t * entry; - mp_limb_t * hash_table = qs_inf->hash_table; + ulong * hash_table = qs_inf->hash_table; hash_t * table = qs_inf->table; slong table_size = qs_inf->table_size; @@ -233,7 +233,7 @@ hash_t * qsieve_get_table_entry(qs_t qs_inf, mp_limb_t prime) add prime to hashtable, increase size of table if necessary and increment count for the added prime */ -void qsieve_add_to_hashtable(qs_t qs_inf, mp_limb_t prime) +void qsieve_add_to_hashtable(qs_t qs_inf, ulong prime) { hash_t * entry; @@ -285,9 +285,9 @@ relation_t qsieve_parse_relation(qs_t qs_inf) fmpz_init(rel.Y); if (FLINT_ABS(Ysz) <= 1) { - mp_limb_t abslimb = 0; + ulong abslimb = 0; - fread(&abslimb, sizeof(mp_limb_t), 1, (FILE *) qs_inf->siqs); + fread(&abslimb, sizeof(ulong), 1, (FILE *) qs_inf->siqs); #if COEFF_MAX != -COEFF_MIN # error @@ -305,7 +305,7 @@ relation_t qsieve_parse_relation(qs_t qs_inf) if (mY->_mp_alloc < FLINT_ABS(Ysz)) _mpz_realloc(mY, FLINT_ABS(Ysz)); - fread(mY->_mp_d, sizeof(mp_limb_t), FLINT_ABS(Ysz), (FILE *) qs_inf->siqs); + fread(mY->_mp_d, sizeof(ulong), FLINT_ABS(Ysz), (FILE *) qs_inf->siqs); *rel.Y = PTR_TO_COEFF(mY); } @@ -545,9 +545,9 @@ int qsieve_process_relation(qs_t qs_inf) slong i, num_relations = 0, num_relations2; slong rel_list_length; slong rlist_length; - mp_limb_t prime; + ulong prime; hash_t * entry; - mp_limb_t * hash_table = qs_inf->hash_table; + ulong * hash_table = qs_inf->hash_table; slong rel_size = 50000; relation_t * rel_list = (relation_t *) flint_malloc(rel_size * sizeof(relation_t)); relation_t * rlist; @@ -573,7 +573,7 @@ int qsieve_process_relation(qs_t qs_inf) if (siqs_eof) break; - fread(&prime, sizeof(mp_limb_t), 1, (FILE *) qs_inf->siqs); + fread(&prime, sizeof(ulong), 1, (FILE *) qs_inf->siqs); entry = qsieve_get_table_entry(qs_inf, prime); if (num_relations == rel_size) @@ -591,8 +591,8 @@ int qsieve_process_relation(qs_t qs_inf) else { /* We have to get to the next relation in the file. 
We have already - * read write_size (is a slong) and large prime (is an mp_limb_t).*/ - fseek((FILE *) qs_inf->siqs, write_size - sizeof(slong) - sizeof(mp_limb_t), SEEK_CUR); + * read write_size (is a slong) and large prime (is an ulong).*/ + fseek((FILE *) qs_inf->siqs, write_size - sizeof(slong) - sizeof(ulong), SEEK_CUR); } } @@ -612,7 +612,7 @@ int qsieve_process_relation(qs_t qs_inf) #endif rlist = flint_malloc(num_relations * sizeof(relation_t)); - memset(hash_table, 0, (1 << 20) * sizeof(mp_limb_t)); + memset(hash_table, 0, (1 << 20) * sizeof(ulong)); qs_inf->vertices = 0; rlist_length = 0; diff --git a/src/qsieve/linalg.c b/src/qsieve/linalg.c index f0cd0aa18a..c7e0f297a1 100644 --- a/src/qsieve/linalg.c +++ b/src/qsieve/linalg.c @@ -57,7 +57,7 @@ void qsieve_linalg_init(qs_t qs_inf) qs_inf->num_cycles = 0; qs_inf->table_size = 10000; - qs_inf->hash_table = flint_calloc((1 << 20), sizeof(mp_limb_t)); + qs_inf->hash_table = flint_calloc((1 << 20), sizeof(ulong)); qs_inf->table = flint_malloc(qs_inf->table_size * sizeof(hash_t)); } @@ -111,7 +111,7 @@ void qsieve_linalg_realloc(qs_t qs_inf) qs_inf->components = 1; qs_inf->num_cycles = 0; - memset(qs_inf->hash_table, 0, (1 << 20)*sizeof(mp_limb_t)); + memset(qs_inf->hash_table, 0, (1 << 20)*sizeof(ulong)); } void qsieve_linalg_clear(qs_t qs_inf) diff --git a/src/qsieve/poly.c b/src/qsieve/poly.c index a8b171fb86..b4ac259288 100644 --- a/src/qsieve/poly.c +++ b/src/qsieve/poly.c @@ -13,11 +13,11 @@ #include "fmpz.h" #include "qsieve.h" -mp_limb_t qsieve_poly_init(qs_t qs_inf) +ulong qsieve_poly_init(qs_t qs_inf) { ulong num_primes = qs_inf->num_primes; ulong s = qs_inf->s; /* number of prime factors in A coeff */ - mp_limb_t ** A_inv2B; + ulong ** A_inv2B; slong i; fmpz_init(qs_inf->A); @@ -25,36 +25,36 @@ mp_limb_t qsieve_poly_init(qs_t qs_inf) fmpz_init(qs_inf->upp_bound); fmpz_init(qs_inf->low_bound); - qs_inf->curr_subset = flint_malloc(s * sizeof(mp_limb_t)); - qs_inf->first_subset = flint_malloc(s * sizeof(mp_limb_t)); - qs_inf->B_terms = flint_malloc(s * sizeof(mp_limb_t)); - qs_inf->A_ind = flint_malloc(s * sizeof(mp_limb_t)); - qs_inf->A_divp = flint_malloc(s * sizeof(mp_limb_t)); - qs_inf->B0_terms = flint_malloc(s * sizeof(mp_limb_t)); + qs_inf->curr_subset = flint_malloc(s * sizeof(ulong)); + qs_inf->first_subset = flint_malloc(s * sizeof(ulong)); + qs_inf->B_terms = flint_malloc(s * sizeof(ulong)); + qs_inf->A_ind = flint_malloc(s * sizeof(ulong)); + qs_inf->A_divp = flint_malloc(s * sizeof(ulong)); + qs_inf->B0_terms = flint_malloc(s * sizeof(ulong)); - qs_inf->A_inv2B = flint_malloc(s * sizeof(mp_limb_t *)); + qs_inf->A_inv2B = flint_malloc(s * sizeof(ulong *)); - qs_inf->A_inv = flint_malloc(num_primes * sizeof(mp_limb_t)); - qs_inf->soln1 = flint_malloc(num_primes * sizeof(mp_limb_t)); - qs_inf->soln2 = flint_malloc(num_primes * sizeof(mp_limb_t)); + qs_inf->A_inv = flint_malloc(num_primes * sizeof(ulong)); + qs_inf->soln1 = flint_malloc(num_primes * sizeof(ulong)); + qs_inf->soln2 = flint_malloc(num_primes * sizeof(ulong)); qs_inf->poly = flint_malloc((qs_inf->num_handles + 1)* sizeof(qs_poly_s)); for (i = 0; i <= qs_inf->num_handles ; i++) { fmpz_init(qs_inf->poly[i].B); - qs_inf->poly[i].posn1 = flint_malloc((num_primes + 16)*sizeof(mp_limb_t)); - qs_inf->poly[i].posn2 = flint_malloc((num_primes + 16)*sizeof(mp_limb_t)); - qs_inf->poly[i].soln1 = flint_malloc((num_primes + 16)*sizeof(mp_limb_t)); - qs_inf->poly[i].soln2 = flint_malloc((num_primes + 16)*sizeof(mp_limb_t)); - qs_inf->poly[i].small = 
flint_malloc(qs_inf->small_primes*sizeof(mp_limb_t)); + qs_inf->poly[i].posn1 = flint_malloc((num_primes + 16)*sizeof(ulong)); + qs_inf->poly[i].posn2 = flint_malloc((num_primes + 16)*sizeof(ulong)); + qs_inf->poly[i].soln1 = flint_malloc((num_primes + 16)*sizeof(ulong)); + qs_inf->poly[i].soln2 = flint_malloc((num_primes + 16)*sizeof(ulong)); + qs_inf->poly[i].small = flint_malloc(qs_inf->small_primes*sizeof(ulong)); qs_inf->poly[i].factor = flint_malloc(qs_inf->max_factors*sizeof(fac_t)); } A_inv2B = qs_inf->A_inv2B; for (i = 0; i < s; i++) - A_inv2B[i] = flint_malloc(num_primes * sizeof(mp_limb_t)); + A_inv2B[i] = flint_malloc(num_primes * sizeof(ulong)); for (i = 0; i < s; i++) { diff --git a/src/qsieve/primes_init.c b/src/qsieve/primes_init.c index fcd3fcbb2b..369b8130d7 100644 --- a/src/qsieve/primes_init.c +++ b/src/qsieve/primes_init.c @@ -15,11 +15,11 @@ #include "qsieve.h" prime_t * -compute_factor_base(mp_limb_t * small_factor, qs_t qs_inf, slong num_primes) +compute_factor_base(ulong * small_factor, qs_t qs_inf, slong num_primes) { - mp_limb_t p, nmod, nmod2; - mp_limb_t pinv; - mp_limb_t k = qs_inf->k; + ulong p, nmod, nmod2; + ulong pinv; + ulong k = qs_inf->k; slong num = qs_inf->num_primes; slong fb_prime = 2; prime_t * factor_base = NULL; @@ -93,12 +93,12 @@ compute_factor_base(mp_limb_t * small_factor, qs_t qs_inf, slong num_primes) return factor_base; } -mp_limb_t qsieve_primes_init(qs_t qs_inf) +ulong qsieve_primes_init(qs_t qs_inf) { slong num_primes; slong i; - mp_limb_t k = qs_inf->k; - mp_limb_t small_factor = 0; + ulong k = qs_inf->k; + ulong small_factor = 0; slong bits; prime_t * factor_base; @@ -163,10 +163,10 @@ mp_limb_t qsieve_primes_init(qs_t qs_inf) /* function to call for incrementing number of factor base prime by 'delta' */ -mp_limb_t qsieve_primes_increment(qs_t qs_inf, mp_limb_t delta) +ulong qsieve_primes_increment(qs_t qs_inf, ulong delta) { slong num_primes = qs_inf->num_primes + delta; - mp_limb_t small_factor = 0; + ulong small_factor = 0; compute_factor_base(&small_factor, qs_inf, num_primes + qs_inf->ks_primes); diff --git a/src/qsieve/test/main.c b/src/qsieve/test/main.c index b6333fd050..551516cf2c 100644 --- a/src/qsieve/test/main.c +++ b/src/qsieve/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-factor.c" diff --git a/src/qsieve/test/t-primes_init.c b/src/qsieve/test/t-primes_init.c index 58c4b5eb3f..3761302be6 100644 --- a/src/qsieve/test/t-primes_init.c +++ b/src/qsieve/test/t-primes_init.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(qsieve_primes_init, state) { int i; slong j, k; - mp_limb_t small_factor, pmod; + ulong small_factor, pmod; qs_t qs_inf; fmpz_t n, x, y; diff --git a/src/test/main.c b/src/test/main.c index 8ca0860156..b4e844489c 100644 --- a/src/test/main.c +++ b/src/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-add_ssaaaa.c" diff --git a/src/test/t-add_ssaaaa.c b/src/test/t-add_ssaaaa.c index 3298203c83..09816ff6b1 100644 --- a/src/test/t-add_ssaaaa.c +++ b/src/test/t-add_ssaaaa.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(add_ssaaaa, state) for (i = 0; i < 100000 * flint_test_multiplier(); i++) { - mp_limb_t sh1, sl1, sh2, sl2, ah1, al1, ah2, al2; + ulong sh1, sl1, sh2, sl2, ah1, al1, ah2, al2; ah1 = n_randtest(state); al1 = n_randtest(state); diff --git a/src/test/t-add_sssaaaaaa.c b/src/test/t-add_sssaaaaaa.c index a634bb01df..e697d9529b 100644 --- a/src/test/t-add_sssaaaaaa.c +++ b/src/test/t-add_sssaaaaaa.c @@ -10,6 +10,7 @@ (at your option) any later version. See . */ +#include #include "ulong_extras.h" #include "test_helpers.h" @@ -19,7 +20,7 @@ TEST_FUNCTION_START(add_sssaaaaaa, state) for (i = 0; i < 100000 * flint_test_multiplier(); i++) { - mp_limb_t s[3], t[3], a[3], b[3]; + ulong s[3], t[3], a[3], b[3]; for (j = 0; j < 3; j++) { diff --git a/src/test/t-add_ssssaaaaaaaa.c b/src/test/t-add_ssssaaaaaaaa.c index 01aae471af..bcda402d31 100644 --- a/src/test/t-add_ssssaaaaaaaa.c +++ b/src/test/t-add_ssssaaaaaaaa.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(add_ssssaaaaaaaa, state) for (i = 0; i < 100000 * flint_test_multiplier(); i++) { - mp_limb_t s[4], t[4], a[4], b[4]; + ulong s[4], t[4], a[4], b[4]; int aliasing; for (j = 0; j < 4; j++) diff --git a/src/test/t-flint_clz.c b/src/test/t-flint_clz.c index fa621d746e..23af381085 100644 --- a/src/test/t-flint_clz.c +++ b/src/test/t-flint_clz.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(flint_clz, state) for (i = 0; i < 100000 * flint_test_multiplier(); i++) { - mp_limb_t n; + ulong n; unsigned int count = 0; n = n_randtest(state); diff --git a/src/test/t-flint_ctz.c b/src/test/t-flint_ctz.c index c7f9904ae3..472165ee34 100644 --- a/src/test/t-flint_ctz.c +++ b/src/test/t-flint_ctz.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(flint_ctz, state) for (i = 0; i < 100000 * flint_test_multiplier(); i++) { - mp_limb_t n; + ulong n; unsigned int count = 0; n = n_randtest(state); diff --git a/src/test/t-io.c b/src/test/t-io.c index 7e0bfcc9e3..0b8210c73c 100644 --- a/src/test/t-io.c +++ b/src/test/t-io.c @@ -468,7 +468,7 @@ TEST_FUNCTION_START(flint_fprintf, state) mpq_t xmpq1, xmpq2; slong xslong_vec[SLONG_VEC_LEN]; - mp_ptr xnmod_vec; + nn_ptr xnmod_vec; fmpz * xfmpz_vec; fmpq * xfmpq_vec; arb_ptr xarb_vec; @@ -595,7 +595,7 @@ TEST_FUNCTION_START(flint_fprintf, state) str1 = flint_calloc(STR_SIZE, sizeof(char)); str2 = flint_calloc(STR_SIZE, sizeof(char)); -#if defined(_LONG_LONG_LIMB) +#if FLINT_LONG_LONG # define ULONG_SLONG_STR \ "Here we print a ulong: %020llu\n" \ "Here we print a slong: %20lld\n" diff --git a/src/test/t-sdiv_qrnnd.c b/src/test/t-sdiv_qrnnd.c index 6de4770773..74d1676324 100644 --- a/src/test/t-sdiv_qrnnd.c +++ b/src/test/t-sdiv_qrnnd.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(sdiv_qrnnd, state) for (i = 0; i < 100000 * flint_test_multiplier(); i++) { int nsgn; - mp_limb_signed_t d, nh, nl, q, r, ph, pl; + slong d, nh, nl, q, r, ph, pl; do { diff --git a/src/test/t-smul_ppmm.c b/src/test/t-smul_ppmm.c index 1764b07a67..802156da9f 100644 --- a/src/test/t-smul_ppmm.c +++ b/src/test/t-smul_ppmm.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(smul_ppmm, state) for (i = 0; i < 100000 * flint_test_multiplier(); i++) { - mp_limb_t ph1, pl1, ph2, pl2, pl2old, n1, n2, m1, m2, bit; + ulong ph1, pl1, ph2, pl2, pl2old, n1, n2, m1, m2, bit; int j, sign; 
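The t-io.c hunk above switches the format-string selection from GMP's _LONG_LONG_LIMB to the FLINT_LONG_LONG configuration macro: when limbs are unsigned long long, printing a ulong/slong needs the ll length modifier, otherwise l. A minimal self-contained illustration of the same idea (the WORD_FMT helper is an invented name, not part of FLINT):

    #include <stdio.h>
    #include "flint.h"   /* assumed to provide FLINT_LONG_LONG, ulong, slong, UWORD, WORD */

    #if FLINT_LONG_LONG
    # define WORD_FMT "ll"   /* limbs are unsigned long long */
    #else
    # define WORD_FMT "l"    /* limbs are unsigned long */
    #endif

    int main(void)
    {
        ulong u = UWORD(123);
        slong s = WORD(-45);

        printf("ulong: %" WORD_FMT "u, slong: %" WORD_FMT "d\n", u, s);
        return 0;
    }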
n1 = n_randtest(state); @@ -30,13 +30,13 @@ TEST_FUNCTION_START(smul_ppmm, state) m2 = n2; sign = 1; - if ((mp_limb_signed_t) m1 < WORD(0)) + if ((slong) m1 < WORD(0)) { sign = -1; m1 = -m1; } - if ((mp_limb_signed_t) m2 < WORD(0)) + if ((slong) m2 < WORD(0)) { sign = -sign; m2 = -m2; diff --git a/src/test/t-sub_dddmmmsss.c b/src/test/t-sub_dddmmmsss.c index badb6291a8..61bc3838e3 100644 --- a/src/test/t-sub_dddmmmsss.c +++ b/src/test/t-sub_dddmmmsss.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(sub_dddmmmsss, state) for (i = 0; i < 100000 * flint_test_multiplier(); i++) { - mp_limb_t s[3], t[3], a[3], b[3]; + ulong s[3], t[3], a[3], b[3]; for (j = 0; j < 3; j++) { diff --git a/src/test/t-sub_ddmmss.c b/src/test/t-sub_ddmmss.c index 05515aa93b..80f4a199b9 100644 --- a/src/test/t-sub_ddmmss.c +++ b/src/test/t-sub_ddmmss.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(sub_ddmmss, state) for (i = 0; i < 100000 * flint_test_multiplier(); i++) { - mp_limb_t dh1, dl1, dh2, dl2, mh, ml, sh, sl; + ulong dh1, dl1, dh2, dl2, mh, ml, sh, sl; mh = n_randtest(state); ml = n_randtest(state); diff --git a/src/test/t-udiv_qrnnd.c b/src/test/t-udiv_qrnnd.c index 59d39f405c..ad78bcfd71 100644 --- a/src/test/t-udiv_qrnnd.c +++ b/src/test/t-udiv_qrnnd.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(udiv_qrnnd, state) for (i = 0; i < 100000 * flint_test_multiplier(); i++) { - mp_limb_t d, nh, nl, q, r, ph, pl; + ulong d, nh, nl, q, r, ph, pl; do { diff --git a/src/test/t-udiv_qrnnd_preinv.c b/src/test/t-udiv_qrnnd_preinv.c index 481a4e61c2..c4a05a7d1d 100644 --- a/src/test/t-udiv_qrnnd_preinv.c +++ b/src/test/t-udiv_qrnnd_preinv.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(udiv_qrnnd_preinv, state) for (i = 0; i < 100000 * flint_test_multiplier(); i++) { - mp_limb_t d, dinv, nh, nl, q1, r1, q2, r2, norm; + ulong d, dinv, nh, nl, q1, r1, q2, r2, norm; do { diff --git a/src/test/t-umul_ppmm.c b/src/test/t-umul_ppmm.c index 91016907cd..5deafb90ca 100644 --- a/src/test/t-umul_ppmm.c +++ b/src/test/t-umul_ppmm.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(umul_ppmm, state) for (i = 0; i < 100000 * flint_test_multiplier(); i++) { - mp_limb_t ph1, pl1, ph2, pl2, pl2old, m1, m2, bit; + ulong ph1, pl1, ph2, pl2, pl2old, m1, m2, bit; m1 = n_randtest(state); m2 = n_randtest(state); diff --git a/src/test_helpers.h b/src/test_helpers.h index 1ad598f461..d21a7a6632 100644 --- a/src/test_helpers.h +++ b/src/test_helpers.h @@ -13,8 +13,10 @@ #ifndef TEST_HELPERS_H #define TEST_HELPERS_H -#include -#include +#include /* clock */ +#include /* strlen, strcmp, strncmp */ +#include /* strtol */ +#include /* LONG_MIN */ #include "templates.h" #include "flint.h" diff --git a/src/thread_pool/test/main.c b/src/thread_pool/test/main.c index 8e4ebb7d16..1ac99e3ffd 100644 --- a/src/thread_pool/test/main.c +++ b/src/thread_pool/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . */ -#include -#include - /* Include functions *********************************************************/ #include "t-thread_pool.c" diff --git a/src/thread_support/test/main.c b/src/thread_support/test/main.c index 31a9511714..85ea65b6e0 100644 --- a/src/thread_support/test/main.c +++ b/src/thread_support/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-parallel_binary_splitting.c" diff --git a/src/thread_support/test/t-parallel_binary_splitting.c b/src/thread_support/test/t-parallel_binary_splitting.c index 0ca9b2c744..55f9c60b3e 100644 --- a/src/thread_support/test/t-parallel_binary_splitting.c +++ b/src/thread_support/test/t-parallel_binary_splitting.c @@ -21,7 +21,7 @@ product_res_t; typedef struct { - mp_srcptr factors; + nn_srcptr factors; int left_inplace; } product_args_t; @@ -61,7 +61,7 @@ product_basecase(product_res_t * res, slong a, slong b, product_args_t * args) } static void -bsplit_product(fmpz_t r, mp_srcptr factors, slong len, slong thread_limit, int flags) +bsplit_product(fmpz_t r, nn_srcptr factors, slong len, slong thread_limit, int flags) { product_res_t res; product_args_t args; @@ -89,7 +89,7 @@ TEST_FUNCTION_START(thread_support_parallel_binary_splitting, state) for (iter = 0; iter < 100 * flint_test_multiplier(); iter++) { fmpz_t r, s; - mp_ptr factors; + nn_ptr factors; slong i, n; int flags; @@ -97,7 +97,7 @@ TEST_FUNCTION_START(thread_support_parallel_binary_splitting, state) flint_set_num_threads(n_randint(state, 10) + 1); - factors = flint_malloc(n * sizeof(mp_limb_t)); + factors = flint_malloc(n * sizeof(ulong)); fmpz_init(r); fmpz_init(s); diff --git a/src/ulong_extras.h b/src/ulong_extras.h index 24bb3a9854..b3d392e443 100644 --- a/src/ulong_extras.h +++ b/src/ulong_extras.h @@ -44,7 +44,7 @@ ulong n_randtest_prime(flint_rand_t state, int proved); ulong n_revbin(ulong in, ulong bits); -int n_divides(mp_limb_t * q, mp_limb_t n, mp_limb_t p); +int n_divides(ulong * q, ulong n, ulong p); ulong n_divrem2_precomp(ulong * q, ulong a, ulong n, double npre); ulong n_divrem2_preinv(ulong * q, ulong a, ulong n, ulong ninv); ulong n_div2_preinv(ulong a, ulong n, ulong ninv); @@ -77,6 +77,15 @@ ulong n_flog(ulong n, ulong b); ulong n_clog(ulong n, ulong b); ulong n_clog_2exp(ulong n, ulong b); +#ifdef _MSC_VER +# define DECLSPEC_IMPORT __declspec(dllimport) +#else +# define DECLSPEC_IMPORT +#endif +DECLSPEC_IMPORT ulong __gmpn_gcd_11(ulong, ulong); +DECLSPEC_IMPORT ulong __gmpn_gcd_1(nn_srcptr, long int, ulong); +#undef DECLSPEC_IMPORT + ULONG_EXTRAS_INLINE ulong n_gcd(ulong x, ulong y) { @@ -88,11 +97,11 @@ ulong n_gcd(ulong x, ulong y) my = flint_ctz(y); x >>= mx; y >>= my; - res = (x != 1 && y != 1) ? mpn_gcd_11(x, y) : 1; + res = (x != 1 && y != 1) ? 
__gmpn_gcd_11(x, y) : 1; res <<= FLINT_MIN(mx, my); return res; #else - return mpn_gcd_1(&x, 1, y); + return __gmpn_gcd_1(&x, 1, y); #endif } else @@ -147,12 +156,12 @@ ulong n_lll_mod_preinv(ulong a_hi, ulong a_mi, ulong a_lo, ulong n, ulong ninv); ulong n_mulmod_precomp(ulong a, ulong b, ulong n, double ninv); ulong n_mulmod_preinv(ulong a, ulong b, ulong n, ulong ninv, ulong norm); -mp_limb_t n_mulmod_precomp_shoup(mp_limb_t w, mp_limb_t p); +ulong n_mulmod_precomp_shoup(ulong w, ulong p); ULONG_EXTRAS_INLINE -mp_limb_t n_mulmod_shoup(mp_limb_t w, mp_limb_t t, mp_limb_t w_precomp, mp_limb_t p) +ulong n_mulmod_shoup(ulong w, ulong t, ulong w_precomp, ulong p) { - mp_limb_t q, r, p_hi, p_lo; + ulong q, r, p_hi, p_lo; umul_ppmm(p_hi, p_lo, w_precomp, t); q = p_hi; diff --git a/src/ulong_extras/cbrt.c b/src/ulong_extras/cbrt.c index 7fe4f0c558..42b9aaa203 100644 --- a/src/ulong_extras/cbrt.c +++ b/src/ulong_extras/cbrt.c @@ -13,12 +13,12 @@ #include "ulong_extras.h" -mp_limb_t -n_cbrt(mp_limb_t n) +ulong +n_cbrt(ulong n) { int bits; double val, x, xcub, num, den; - mp_limb_t ret, upper_limit; + ulong ret, upper_limit; /* Taking care of smaller roots */ @@ -108,11 +108,11 @@ static const float coeff[16][3] = {{0.445434042, 0.864136635, -0.335205926}, {0.540672371, 0.586548233, -0.127254189}, /* [0.90625, 0.93750] */ {0.546715310, 0.573654340, -0.120376066}, /* [0.93750, 0.96875] */ {0.552627494, 0.561446514, -0.114074068}}; /* [0.96875, 1.00000] */ -mp_limb_t -n_cbrt_chebyshev_approx(mp_limb_t n) +ulong +n_cbrt_chebyshev_approx(ulong n) { typedef union { - mp_limb_t uword_val; + ulong uword_val; #ifdef FLINT64 double double_val; #else @@ -122,27 +122,27 @@ n_cbrt_chebyshev_approx(mp_limb_t n) int rem, mul; double factor, root, dec, dec2; - mp_limb_t ret, expo, table_index; + ulong ret, expo, table_index; uni alias; /* upper_limit is the max cube root possible for one word */ #ifdef FLINT64 - const mp_limb_t upper_limit = 2642245; /* 2642245 < (2^64)^(1/3) */ - const mp_limb_t expo_mask = 0x7FF0000000000000; /* exponent bits in double */ - const mp_limb_t mantissa_mask = 0x000FFFFFFFFFFFFF; /* mantissa bits in float */ - const mp_limb_t table_mask = 0x000F000000000000; /* first 4 bits of mantissa */ + const ulong upper_limit = 2642245; /* 2642245 < (2^64)^(1/3) */ + const ulong expo_mask = 0x7FF0000000000000; /* exponent bits in double */ + const ulong mantissa_mask = 0x000FFFFFFFFFFFFF; /* mantissa bits in float */ + const ulong table_mask = 0x000F000000000000; /* first 4 bits of mantissa */ const int mantissa_bits = 52; - const mp_limb_t bias_hex = 0x3FE0000000000000; + const ulong bias_hex = 0x3FE0000000000000; const int bias = 1022; alias.double_val = (double)n; #else - const mp_limb_t upper_limit = 1625; /* 1625 < (2^32)^(1/3) */ - const mp_limb_t expo_mask = 0x7F800000; /* exponent bits in float */ - const mp_limb_t mantissa_mask = 0x007FFFFF; /* mantissa bits in float */ - const mp_limb_t table_mask = 0x00780000; /* first 4 bits of mantissa */ + const ulong upper_limit = 1625; /* 1625 < (2^32)^(1/3) */ + const ulong expo_mask = 0x7F800000; /* exponent bits in float */ + const ulong mantissa_mask = 0x007FFFFF; /* mantissa bits in float */ + const ulong table_mask = 0x00780000; /* first 4 bits of mantissa */ const int mantissa_bits = 23; - const mp_limb_t bias_hex = 0x3F000000; + const ulong bias_hex = 0x3F000000; const int bias = 126; alias.double_val = (float)n; #endif @@ -200,10 +200,10 @@ n_cbrt_chebyshev_approx(mp_limb_t n) return ret; } -mp_limb_t -n_cbrt_binary_search(mp_limb_t x) 
+ulong +n_cbrt_binary_search(ulong x) { - mp_limb_t low, high, mid, p, upper_limit; + ulong low, high, mid, p, upper_limit; /* upper_limit is the max cube root possible for one word */ @@ -252,10 +252,10 @@ n_cbrt_estimate(double a) ulong n, hi, lo; #ifdef FLINT64 - const mp_limb_t mul_factor = UWORD(6148914691236517205); + const ulong mul_factor = UWORD(6148914691236517205); slong s = UWORD(4607182418800017408); /* ((1 << 10) - 1) << 52 */ #else - const mp_limb_t mul_factor = UWORD(1431655765); + const ulong mul_factor = UWORD(1431655765); slong s = UWORD(1065353216); /* ((1 << 7) - 1 << 23) */ #endif @@ -269,10 +269,10 @@ n_cbrt_estimate(double a) return alias.double_val; } -mp_limb_t -n_cbrtrem(mp_limb_t* remainder, mp_limb_t n) +ulong +n_cbrtrem(ulong* remainder, ulong n) { - mp_limb_t base; + ulong base; if (!n) { diff --git a/src/ulong_extras/clog.c b/src/ulong_extras/clog.c index 17a7aa6fe6..ddcd5fb5dd 100644 --- a/src/ulong_extras/clog.c +++ b/src/ulong_extras/clog.c @@ -15,9 +15,9 @@ #include "gmpcompat.h" #include "ulong_extras.h" -mp_limb_t n_clog(mp_limb_t n, mp_limb_t b) +ulong n_clog(ulong n, ulong b) { - mp_limb_t r, p, t, phi; + ulong r, p, t, phi; r = 0; p = 1; diff --git a/src/ulong_extras/compute_primes.c b/src/ulong_extras/compute_primes.c index e9c073a118..cfdd0ed873 100644 --- a/src/ulong_extras/compute_primes.c +++ b/src/ulong_extras/compute_primes.c @@ -36,7 +36,7 @@ const unsigned int flint_primes_small[] = /* _flint_primes[i] holds an array of 2^i primes */ -FLINT_TLS_PREFIX mp_limb_t * _flint_primes[FLINT_BITS]; +FLINT_TLS_PREFIX ulong * _flint_primes[FLINT_BITS]; FLINT_TLS_PREFIX double * _flint_prime_inverses[FLINT_BITS]; FLINT_TLS_PREFIX slong _flint_primes_used = 0; @@ -68,7 +68,7 @@ n_compute_primes(ulong num_primes) n_primes_t iter; num_computed = UWORD(1) << m; - _flint_primes[m] = flint_malloc(sizeof(mp_limb_t) * num_computed); + _flint_primes[m] = flint_malloc(sizeof(ulong) * num_computed); _flint_prime_inverses[m] = flint_malloc(sizeof(double) * num_computed); n_primes_init(iter); diff --git a/src/ulong_extras/divides.c b/src/ulong_extras/divides.c index 624a2308f9..8d770f952c 100644 --- a/src/ulong_extras/divides.c +++ b/src/ulong_extras/divides.c @@ -12,9 +12,9 @@ #include "flint.h" #include "ulong_extras.h" -int n_divides(mp_limb_t * q, mp_limb_t n, mp_limb_t p) +int n_divides(ulong * q, ulong n, ulong p) { - mp_limb_t quo, rem; + ulong quo, rem; if (p == 0) { diff --git a/src/ulong_extras/divrem2_precomp.c b/src/ulong_extras/divrem2_precomp.c index b6f9297910..a2d3444062 100644 --- a/src/ulong_extras/divrem2_precomp.c +++ b/src/ulong_extras/divrem2_precomp.c @@ -12,10 +12,10 @@ #include "flint.h" #include "ulong_extras.h" -mp_limb_t -n_divrem2_precomp(mp_limb_t * q, mp_limb_t a, mp_limb_t n, double npre) +ulong +n_divrem2_precomp(ulong * q, ulong a, ulong n, double npre) { - mp_limb_t quot; + ulong quot; slong rem; if (a < n) @@ -24,7 +24,7 @@ n_divrem2_precomp(mp_limb_t * q, mp_limb_t a, mp_limb_t n, double npre) return a; } - if ((mp_limb_signed_t) n < WORD(0)) + if ((slong) n < WORD(0)) { (*q) = UWORD(1); return a - n; @@ -40,10 +40,10 @@ n_divrem2_precomp(mp_limb_t * q, mp_limb_t a, mp_limb_t n, double npre) rem = a - quot * n; } - if (rem < (mp_limb_signed_t) (-n)) - quot -= (mp_limb_t) ((double) (-rem) * npre); + if (rem < (slong) (-n)) + quot -= (ulong) ((double) (-rem) * npre); else if (rem >= (slong) n) - quot += (mp_limb_t) ((double) rem * npre); + quot += (ulong) ((double) rem * npre); else if (rem < WORD(0)) { (*q) = quot - 1; diff --git 
a/src/ulong_extras/euler_phi.c b/src/ulong_extras/euler_phi.c index 465a7c96b5..bce0e9896e 100644 --- a/src/ulong_extras/euler_phi.c +++ b/src/ulong_extras/euler_phi.c @@ -12,10 +12,10 @@ #include "flint.h" #include "ulong_extras.h" -mp_limb_t n_euler_phi(mp_limb_t n) +ulong n_euler_phi(ulong n) { int i; - mp_limb_t phi; + ulong phi; n_factor_t fac; if (n < 2) diff --git a/src/ulong_extras/factor.c b/src/ulong_extras/factor.c index 17c97ef883..95b8767516 100644 --- a/src/ulong_extras/factor.c +++ b/src/ulong_extras/factor.c @@ -12,18 +12,18 @@ #include "flint.h" #include "ulong_extras.h" -static int is_prime(mp_limb_t n, int proved) +static int is_prime(ulong n, int proved) { return proved ? n_is_prime(n) : n_is_probabprime(n); } -void n_factor(n_factor_t * factors, mp_limb_t n, int proved) +void n_factor(n_factor_t * factors, ulong n, int proved) { ulong factor_arr[FLINT_MAX_FACTORS_IN_LIMB]; ulong exp_arr[FLINT_MAX_FACTORS_IN_LIMB]; ulong factors_left; ulong exp; - mp_limb_t cofactor, factor, cutoff; + ulong cofactor, factor, cutoff; factors->num = 0; diff --git a/src/ulong_extras/factor_SQUFOF.c b/src/ulong_extras/factor_SQUFOF.c index 5aa275238e..69cf67718a 100644 --- a/src/ulong_extras/factor_SQUFOF.c +++ b/src/ulong_extras/factor_SQUFOF.c @@ -9,21 +9,21 @@ (at your option) any later version. See . */ -#include "flint.h" +#include #include "ulong_extras.h" -mp_limb_t _ll_factor_SQUFOF(mp_limb_t n_hi, mp_limb_t n_lo, ulong max_iters) +ulong _ll_factor_SQUFOF(ulong n_hi, ulong n_lo, ulong max_iters) { - mp_limb_t n[2]; - mp_limb_t sqrt[2]; - mp_limb_t rem[2]; + ulong n[2]; + ulong sqrt[2]; + ulong rem[2]; slong num, sqroot; ulong p, q; - mp_limb_t l, l2, iq, pnext; - mp_limb_t qarr[50]; - mp_limb_t qupto, qlast, t, r = 0; + ulong l, l2, iq, pnext; + ulong qarr[50]; + ulong qupto, qlast, t, r = 0; ulong i, j; n[0] = n_lo; @@ -117,16 +117,16 @@ cont: ; return q; } -mp_limb_t n_factor_SQUFOF(mp_limb_t n, ulong iters) +ulong n_factor_SQUFOF(ulong n, ulong iters) { - mp_limb_t factor = _ll_factor_SQUFOF(UWORD(0), n, iters); - mp_limb_t multiplier; - mp_limb_t quot, rem; + ulong factor = _ll_factor_SQUFOF(UWORD(0), n, iters); + ulong multiplier; + ulong quot, rem; ulong i; for (i = 1; (i < FLINT_NUM_PRIMES_SMALL) && !factor; i++) { - mp_limb_t multn[2]; + ulong multn[2]; multiplier = flint_primes_small[i]; umul_ppmm(multn[1], multn[0], multiplier, n); factor = _ll_factor_SQUFOF(multn[1], multn[0], iters); diff --git a/src/ulong_extras/factor_ecm.c b/src/ulong_extras/factor_ecm.c index fdc4d7075f..f805c9b37b 100644 --- a/src/ulong_extras/factor_ecm.c +++ b/src/ulong_extras/factor_ecm.c @@ -9,43 +9,38 @@ (at your option) any later version. See . 
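The euler_phi hunk above only retypes the signature, but the underlying formula is worth recalling: for n = prod p_i^e_i one has phi(n) = prod p_i^(e_i - 1) * (p_i - 1). A small sketch of evaluating this through the n_factor interface visible elsewhere in this diff; the function name and the n < 2 convention are illustrative assumptions rather than details taken from the patch:

    #include "ulong_extras.h"

    /* phi(n) from the prime factorisation of n; each partial product is at
       most n, so the ulong arithmetic cannot overflow. */
    ulong euler_phi_sketch(ulong n)
    {
        int i;
        ulong phi = 1;
        n_factor_t fac;

        if (n < 2)
            return n;          /* assumed convention: phi(0) = 0, phi(1) = 1 */

        n_factor_init(&fac);
        n_factor(&fac, n, 1);  /* proved = 1: use proven primality tests */

        for (i = 0; i < fac.num; i++)
            phi *= n_pow(fac.p[i], fac.exp[i] - 1) * (fac.p[i] - 1);

        return phi;
    }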
*/ +#include #include "ulong_extras.h" #include "nmod_vec.h" static ulong n_ecm_primorial[] = { -#ifdef FLINT64 - - UWORD(2), UWORD(6), UWORD(30), UWORD(210), UWORD(2310), UWORD(30030), - UWORD(510510), UWORD(9699690), UWORD(223092870), UWORD(6469693230), - UWORD(200560490130), UWORD(7420738134810), UWORD(304250263527210), - UWORD(13082761331670030), UWORD(614889782588491410) - -#else - UWORD(2), UWORD(6), UWORD(30), UWORD(210), UWORD(2310), UWORD(30030), - UWORD(510510), UWORD(9699690) - + UWORD(510510), UWORD(9699690), +#if FLINT64 + UWORD(223092870), UWORD(6469693230), UWORD(200560490130), + UWORD(7420738134810), UWORD(304250263527210), UWORD(13082761331670030), + UWORD(614889782588491410) #endif }; -#ifdef FLINT64 +#if FLINT64 #define num_n_ecm_primorials 15 #else #define num_n_ecm_primorials 9 #endif int -n_factor_ecm(mp_limb_t *f, mp_limb_t curves, mp_limb_t B1, mp_limb_t B2, - flint_rand_t state, mp_limb_t n) +n_factor_ecm(ulong *f, ulong curves, ulong B1, ulong B2, + flint_rand_t state, ulong n) { - mp_limb_t P, num, maxD, mmin, mmax, mdiff, prod, maxj, sig; + ulong P, num, maxD, mmin, mmax, mdiff, prod, maxj, sig; ulong i, j; int ret; n_ecm_t n_ecm_inf; - const mp_limb_t *prime_array; + const ulong *prime_array; n_ecm_inf->normbits = flint_clz(n); n <<= n_ecm_inf->normbits; @@ -185,11 +180,11 @@ n_factor_ecm(mp_limb_t *f, mp_limb_t curves, mp_limb_t B1, mp_limb_t B2, */ void -n_factor_ecm_add(mp_limb_t *x, mp_limb_t *z, mp_limb_t x1, mp_limb_t z1, - mp_limb_t x2, mp_limb_t z2, mp_limb_t x0, mp_limb_t z0, - mp_limb_t n, n_ecm_t n_ecm_inf) +n_factor_ecm_add(ulong *x, ulong *z, ulong x1, ulong z1, + ulong x2, ulong z2, ulong x0, ulong z0, + ulong n, n_ecm_t n_ecm_inf) { - mp_limb_t u, v, w; + ulong u, v, w; if (z1 == 0) { @@ -251,10 +246,10 @@ n_factor_ecm_add(mp_limb_t *x, mp_limb_t *z, mp_limb_t x1, mp_limb_t z1, /* a24 = (a + 2) / 4 mod n */ void -n_factor_ecm_double(mp_limb_t *x, mp_limb_t *z, mp_limb_t x0, mp_limb_t z0, - mp_limb_t n, n_ecm_t n_ecm_inf) +n_factor_ecm_double(ulong *x, ulong *z, ulong x0, ulong z0, + ulong n, n_ecm_t n_ecm_inf) { - mp_limb_t u, v, w; + ulong u, v, w; if (z0 == 0) { @@ -277,10 +272,10 @@ n_factor_ecm_double(mp_limb_t *x, mp_limb_t *z, mp_limb_t x0, mp_limb_t z0, /* P (x0 : z0) <- kP using Montgomery ladder algorithm */ void -n_factor_ecm_mul_montgomery_ladder(mp_limb_t *x, mp_limb_t *z, mp_limb_t x0, mp_limb_t z0, - mp_limb_t k, mp_limb_t n, n_ecm_t n_ecm_inf) +n_factor_ecm_mul_montgomery_ladder(ulong *x, ulong *z, ulong x0, ulong z0, + ulong k, ulong n, n_ecm_t n_ecm_inf) { - mp_limb_t x1, z1, x2, z2, len; /* Q (x1 : z1), P (x2 : z2) */ + ulong x1, z1, x2, z2, len; /* Q (x1 : z1), P (x2 : z2) */ if (k == 0) { @@ -334,15 +329,15 @@ n_factor_ecm_mul_montgomery_ladder(mp_limb_t *x, mp_limb_t *z, mp_limb_t x0, mp_ } int -n_factor_ecm_select_curve(mp_limb_t *f, mp_limb_t sig, mp_limb_t n, n_ecm_t n_ecm_inf) +n_factor_ecm_select_curve(ulong *f, ulong sig, ulong n, n_ecm_t n_ecm_inf) { - mp_limb_t u, v, w, t, hi, lo; - mp_ptr a; + ulong u, v, w, t, hi, lo; + nn_ptr a; int ret = 0; TMP_INIT; TMP_START; - a = TMP_ALLOC(2 * sizeof(mp_limb_t)); + a = TMP_ALLOC(2 * sizeof(ulong)); u = sig; @@ -419,10 +414,10 @@ n_factor_ecm_select_curve(mp_limb_t *f, mp_limb_t sig, mp_limb_t n, n_ecm_t n_ec } int -n_factor_ecm_stage_I(mp_limb_t *f, const mp_limb_t *prime_array, mp_limb_t num, - mp_limb_t B1, mp_limb_t n, n_ecm_t n_ecm_inf) +n_factor_ecm_stage_I(ulong *f, const ulong *prime_array, ulong num, + ulong B1, ulong n, n_ecm_t n_ecm_inf) { - mp_limb_t times; + ulong 
times; ulong i, j, p; for (i = 0; i < num; i++) @@ -450,15 +445,15 @@ n_factor_ecm_stage_I(mp_limb_t *f, const mp_limb_t *prime_array, mp_limb_t num, } int -n_factor_ecm_stage_II(mp_limb_t *f, mp_limb_t B1, mp_limb_t B2, mp_limb_t P, - mp_limb_t n, n_ecm_t n_ecm_inf) +n_factor_ecm_stage_II(ulong *f, ulong B1, ulong B2, ulong P, + ulong n, n_ecm_t n_ecm_inf) { - mp_limb_t g, Qx, Qz, Rx, Rz, Qdx, Qdz, a, b; - mp_limb_t mmin, mmax, maxj, Q0x2, Q0z2; + ulong g, Qx, Qz, Rx, Rz, Qdx, Qdz, a, b; + ulong mmin, mmax, maxj, Q0x2, Q0z2; ulong i, j; int ret; - mp_ptr arrx, arrz; + nn_ptr arrx, arrz; mmin = (B1 + (P/2)) / P; mmax = ((B2 - P/2) + P - 1)/P; /* ceil */ diff --git a/src/ulong_extras/factor_insert.c b/src/ulong_extras/factor_insert.c index 7d69addabc..d26253f219 100644 --- a/src/ulong_extras/factor_insert.c +++ b/src/ulong_extras/factor_insert.c @@ -11,7 +11,7 @@ #include "ulong_extras.h" -void n_factor_insert(n_factor_t * factors, mp_limb_t p, ulong exp) +void n_factor_insert(n_factor_t * factors, ulong p, ulong exp) { slong i; diff --git a/src/ulong_extras/factor_lehman.c b/src/ulong_extras/factor_lehman.c index 464f6bb5a6..bf5646e2ed 100644 --- a/src/ulong_extras/factor_lehman.c +++ b/src/ulong_extras/factor_lehman.c @@ -13,10 +13,10 @@ #include "flint.h" #include "ulong_extras.h" -mp_limb_t n_factor_lehman(mp_limb_t n) +ulong n_factor_lehman(ulong n) { double limit; - mp_limb_t cuberoot, k; + ulong cuberoot, k; n_factor_t factors; slong bound; @@ -27,7 +27,7 @@ mp_limb_t n_factor_lehman(mp_limb_t n) if ((n & 1) == 0) return 2; limit = pow(n, 1.0/3.0); - cuberoot = (mp_limb_t) ceil(limit); + cuberoot = (ulong) ceil(limit); bound = n_prime_pi(cuberoot); n_factor_init(&factors); @@ -41,13 +41,13 @@ mp_limb_t n_factor_lehman(mp_limb_t n) for (k = 1; k <= cuberoot + 1; k++) { double low = 2.0*sqrt((double) k)*sqrt((double) n); - mp_limb_t x = (mp_limb_t) ceil(low - 0.0001); - mp_limb_t end = (mp_limb_t) floor(0.0001 + low + pow(n, 1.0/6.0)/((double) 4.0*sqrt((double) k))); - mp_limb_t sub = k*n*4; + ulong x = (ulong) ceil(low - 0.0001); + ulong end = (ulong) floor(0.0001 + low + pow(n, 1.0/6.0)/((double) 4.0*sqrt((double) k))); + ulong sub = k*n*4; for ( ; x <= end; x++) { - mp_limb_t p, sq = x*x - sub; + ulong p, sq = x*x - sub; if (n_is_square(sq)) { sq = sqrt((double) sq); diff --git a/src/ulong_extras/factor_one_line.c b/src/ulong_extras/factor_one_line.c index 2fb01bf746..79c0f91786 100644 --- a/src/ulong_extras/factor_one_line.c +++ b/src/ulong_extras/factor_one_line.c @@ -15,9 +15,9 @@ #define FLINT_ONE_LINE_MULTIPLIER 480 -mp_limb_t n_factor_one_line(mp_limb_t n, ulong iters) +ulong n_factor_one_line(ulong n, ulong iters) { - mp_limb_t orig_n = n, in, square, sqrti, mod, factor, factoring = iters, iin; + ulong orig_n = n, in, square, sqrti, mod, factor, factoring = iters, iin; n *= FLINT_ONE_LINE_MULTIPLIER; iin = 0; diff --git a/src/ulong_extras/factor_partial.c b/src/ulong_extras/factor_partial.c index 8313378025..39a9a9c906 100644 --- a/src/ulong_extras/factor_partial.c +++ b/src/ulong_extras/factor_partial.c @@ -12,19 +12,19 @@ #include "flint.h" #include "ulong_extras.h" -int is_prime2(mp_limb_t n, int proved) +int is_prime2(ulong n, int proved) { if (proved) return n_is_prime(n); else return n_is_probabprime(n); } -mp_limb_t n_factor_partial(n_factor_t * factors, mp_limb_t n, mp_limb_t limit, int proved) +ulong n_factor_partial(n_factor_t * factors, ulong n, ulong limit, int proved) { ulong factor_arr[FLINT_MAX_FACTORS_IN_LIMB]; ulong exp_arr[FLINT_MAX_FACTORS_IN_LIMB]; ulong 
factors_left; ulong exp; - mp_limb_t cofactor, factor, cutoff, prod; + ulong cofactor, factor, cutoff, prod; cofactor = n_factor_trial_partial(factors, n, &prod, FLINT_FACTOR_TRIAL_PRIMES, limit); if (prod > limit) return cofactor; diff --git a/src/ulong_extras/factor_pollard_brent.c b/src/ulong_extras/factor_pollard_brent.c index 642b9e1439..4ee44bae8d 100644 --- a/src/ulong_extras/factor_pollard_brent.c +++ b/src/ulong_extras/factor_pollard_brent.c @@ -12,11 +12,11 @@ #include "flint.h" #include "ulong_extras.h" -mp_limb_t -n_sqr_and_add_a(mp_limb_t y, mp_limb_t a, mp_limb_t n, mp_limb_t ninv, - mp_limb_t normbits) +ulong +n_sqr_and_add_a(ulong y, ulong a, ulong n, ulong ninv, + ulong normbits) { - mp_limb_t hi, lo; + ulong hi, lo; y = n_mulmod_preinv(y, y, n, ninv, normbits); add_ssaaaa(hi, lo, UWORD(0), y, UWORD(0), a); @@ -34,11 +34,11 @@ n_sqr_and_add_a(mp_limb_t y, mp_limb_t a, mp_limb_t n, mp_limb_t ninv, } int -n_factor_pollard_brent_single(mp_limb_t *factor, mp_limb_t n, mp_limb_t ninv, - mp_limb_t ai, mp_limb_t xi, mp_limb_t normbits, - mp_limb_t max_iters) +n_factor_pollard_brent_single(ulong *factor, ulong n, ulong ninv, + ulong ai, ulong xi, ulong normbits, + ulong max_iters) { - mp_limb_t iter, i, k, j, minval, m, one_shift_norm, x, y, a, q, ys, subval; + ulong iter, i, k, j, minval, m, one_shift_norm, x, y, a, q, ys, subval; int ret; if (n < 4) @@ -121,10 +121,10 @@ n_factor_pollard_brent_single(mp_limb_t *factor, mp_limb_t n, mp_limb_t ninv, } int -n_factor_pollard_brent(mp_limb_t *factor, flint_rand_t state, mp_limb_t n_in, - mp_limb_t max_tries, mp_limb_t max_iters) +n_factor_pollard_brent(ulong *factor, flint_rand_t state, ulong n_in, + ulong max_tries, ulong max_iters) { - mp_limb_t normbits, a, x, n, ninv, max; + ulong normbits, a, x, n, ninv, max; int ret; ret = 0; diff --git a/src/ulong_extras/factor_power235.c b/src/ulong_extras/factor_power235.c index 9b921e0c0a..9f1aa22034 100644 --- a/src/ulong_extras/factor_power235.c +++ b/src/ulong_extras/factor_power235.c @@ -14,7 +14,7 @@ #include "flint.h" #include "ulong_extras.h" -mp_limb_t n_factor_power235(ulong * exp, mp_limb_t n) +ulong n_factor_power235(ulong * exp, ulong n) { static char mod63[63] = {7,7,4,0,5,4,0,5,6,5,4,4,0,4,4,0,5,4,5,4,4,0, 5,4,0,5,4,6,7,4,0,4,4,0,4,6,7,5,4,0,4,4,0,5, @@ -42,7 +42,7 @@ mp_limb_t n_factor_power235(ulong * exp, mp_limb_t n) if (t & 1) { double x = sqrt((double) n); - mp_limb_t y = (mp_limb_t) (x + 0.5); + ulong y = (ulong) (x + 0.5); if (n == n_pow(y, 2)) { *exp = 2; @@ -53,7 +53,7 @@ mp_limb_t n_factor_power235(ulong * exp, mp_limb_t n) if (t & 2) { double x = pow((double) n, 1.0 / 3.0); - mp_limb_t y = (mp_limb_t) (x + 0.5); + ulong y = (ulong) (x + 0.5); if (n == n_pow(y, 3)) { *exp = 3; @@ -64,7 +64,7 @@ mp_limb_t n_factor_power235(ulong * exp, mp_limb_t n) if (t & 4) { double x = pow((double) n, 1.0 / 5.0); - mp_limb_t y = (mp_limb_t) (x + 0.5); + ulong y = (ulong) (x + 0.5); if (n == n_pow(y, 5)) { *exp = 5; diff --git a/src/ulong_extras/factor_pp1.c b/src/ulong_extras/factor_pp1.c index 8e2220480d..776cfda5cc 100644 --- a/src/ulong_extras/factor_pp1.c +++ b/src/ulong_extras/factor_pp1.c @@ -36,7 +36,7 @@ slong n_factor_pp1_table[][2] = { #if 0 /* For debugging */ -void n_pp1_print(mp_limb_t x, mp_limb_t y, ulong norm) +void n_pp1_print(ulong x, ulong y, ulong norm) { if (norm) { @@ -50,7 +50,7 @@ void n_pp1_print(mp_limb_t x, mp_limb_t y, ulong norm) #define n_pp1_2k(x, y, n, ninv, x0, norm) \ do { \ - const mp_limb_t two = (UWORD(2) << norm); \ + const ulong two = 
(UWORD(2) << norm); \ y = n_mulmod_preinv(y, x, n, ninv, norm); \ y = n_submod(y, x0, n); \ x = n_mulmod_preinv(x, x, n, ninv, norm); \ @@ -59,18 +59,18 @@ void n_pp1_print(mp_limb_t x, mp_limb_t y, ulong norm) #define n_pp1_2kp1(x, y, n, ninv, x0, norm) \ do { \ - const mp_limb_t two = (UWORD(2) << norm); \ + const ulong two = (UWORD(2) << norm); \ x = n_mulmod_preinv(x, y, n, ninv, norm); \ x = n_submod(x, x0, n); \ y = n_mulmod_preinv(y, y, n, ninv, norm); \ y = n_submod(y, two, n); \ } while (0) -void n_pp1_pow_ui(mp_limb_t * x, mp_limb_t * y, ulong exp, - mp_limb_t n, mp_limb_t ninv, ulong norm) +void n_pp1_pow_ui(ulong * x, ulong * y, ulong exp, + ulong n, ulong ninv, ulong norm) { - const mp_limb_t x0 = *x; - const mp_limb_t two = (UWORD(2) << norm); + const ulong x0 = *x; + const ulong two = (UWORD(2) << norm); ulong bit = ((UWORD(1) << FLINT_BIT_COUNT(exp)) >> 2); (*y) = n_mulmod_preinv(*x, *x, n, ninv, norm); @@ -87,7 +87,7 @@ void n_pp1_pow_ui(mp_limb_t * x, mp_limb_t * y, ulong exp, } } -mp_limb_t n_pp1_factor(mp_limb_t n, mp_limb_t x, ulong norm) +ulong n_pp1_factor(ulong n, ulong x, ulong norm) { if (norm) { @@ -102,10 +102,10 @@ mp_limb_t n_pp1_factor(mp_limb_t n, mp_limb_t x, ulong norm) return n_gcd(n, x); } -mp_limb_t n_pp1_find_power(mp_limb_t * x, mp_limb_t * y, - ulong p, mp_limb_t n, mp_limb_t ninv, ulong norm) +ulong n_pp1_find_power(ulong * x, ulong * y, + ulong p, ulong n, ulong ninv, ulong norm) { - mp_limb_t factor; + ulong factor; do { @@ -116,11 +116,11 @@ mp_limb_t n_pp1_find_power(mp_limb_t * x, mp_limb_t * y, return factor; } -mp_limb_t n_factor_pp1(mp_limb_t n, ulong B1, ulong c) +ulong n_factor_pp1(ulong n, ulong B1, ulong c) { slong i, j; - mp_limb_t factor = 0; - mp_limb_t x, y = 0, oldx, oldy, ninv; + ulong factor = 0; + ulong x, y = 0, oldx, oldy, ninv; ulong pr, oldpr, sqrt, bits0, norm; n_primes_t iter; @@ -207,7 +207,7 @@ mp_limb_t n_factor_pp1(mp_limb_t n, ulong B1, ulong c) return factor; } -mp_limb_t n_factor_pp1_wrapper(mp_limb_t n) +ulong n_factor_pp1_wrapper(ulong n) { slong bits = FLINT_BIT_COUNT(n); ulong B1; diff --git a/src/ulong_extras/factor_trial.c b/src/ulong_extras/factor_trial.c index 6cc710b7c5..aee16d2ddb 100644 --- a/src/ulong_extras/factor_trial.c +++ b/src/ulong_extras/factor_trial.c @@ -11,18 +11,18 @@ #include "ulong_extras.h" -mp_limb_t n_factor_trial(n_factor_t * factors, mp_limb_t n, ulong num_primes) +ulong n_factor_trial(n_factor_t * factors, ulong n, ulong num_primes) { return n_factor_trial_range(factors, n, UWORD(0), num_primes); } -mp_limb_t n_factor_trial_partial(n_factor_t * factors, mp_limb_t n, mp_limb_t * prod, ulong num_primes, mp_limb_t limit) +ulong n_factor_trial_partial(n_factor_t * factors, ulong n, ulong * prod, ulong num_primes, ulong limit) { unsigned int exp; - mp_limb_t p; + ulong p; double ppre; ulong i; - const mp_limb_t * primes; + const ulong * primes; const double * inverses; (*prod) = 1; @@ -46,13 +46,13 @@ mp_limb_t n_factor_trial_partial(n_factor_t * factors, mp_limb_t n, mp_limb_t * return n; } -mp_limb_t n_factor_trial_range(n_factor_t * factors, mp_limb_t n, ulong start, ulong num_primes) +ulong n_factor_trial_range(n_factor_t * factors, ulong n, ulong start, ulong num_primes) { unsigned int exp; - mp_limb_t p; + ulong p; double ppre; ulong i; - const mp_limb_t * primes; + const ulong * primes; const double * inverses; primes = n_primes_arr_readonly(num_primes); diff --git a/src/ulong_extras/factorial_fast_mod2_preinv.c b/src/ulong_extras/factorial_fast_mod2_preinv.c index 
53467681a6..8c0650e59b 100644 --- a/src/ulong_extras/factorial_fast_mod2_preinv.c +++ b/src/ulong_extras/factorial_fast_mod2_preinv.c @@ -15,13 +15,13 @@ #include "nmod_vec.h" #include "nmod_poly.h" -mp_limb_t -n_factorial_fast_mod2_preinv(ulong n, mp_limb_t p, mp_limb_t pinv) +ulong +n_factorial_fast_mod2_preinv(ulong n, ulong p, ulong pinv) { slong i, m; nmod_t mod; - mp_ptr t, u, v; - mp_limb_t r, s; + nn_ptr t, u, v; + ulong r, s; if (p == UWORD(1) || n >= p) return UWORD(0); diff --git a/src/ulong_extras/factorial_mod2_preinv.c b/src/ulong_extras/factorial_mod2_preinv.c index 667c7ee059..0b8e9d3a53 100644 --- a/src/ulong_extras/factorial_mod2_preinv.c +++ b/src/ulong_extras/factorial_mod2_preinv.c @@ -13,7 +13,7 @@ #include "ulong_extras.h" -static const mp_limb_t small_factorials[] = +static const ulong small_factorials[] = { UWORD(1), UWORD(1), UWORD(2), UWORD(6), UWORD(24), UWORD(120), UWORD(720), UWORD(5040), UWORD(40320), UWORD(362880), UWORD(3628800), UWORD(39916800), UWORD(479001600), @@ -31,9 +31,9 @@ static const mp_limb_t small_factorials[] = #endif -mp_limb_t n_factorial_mod2_preinv(ulong n, mp_limb_t p, mp_limb_t pinv) +ulong n_factorial_mod2_preinv(ulong n, ulong p, ulong pinv) { - mp_limb_t prod, hi, lo; + ulong prod, hi, lo; if (n <= MAX_SMALL_FACTORIAL) return n_mod2_preinv(small_factorials[n], p, pinv); diff --git a/src/ulong_extras/flog.c b/src/ulong_extras/flog.c index d1f2fc47d6..3f9b09b6c7 100644 --- a/src/ulong_extras/flog.c +++ b/src/ulong_extras/flog.c @@ -13,9 +13,9 @@ #include "flint.h" #include "ulong_extras.h" -mp_limb_t n_flog(mp_limb_t n, mp_limb_t b) +ulong n_flog(ulong n, ulong b) { - mp_limb_t r, p, phi; + ulong r, p, phi; r = 0; p = 1; diff --git a/src/ulong_extras/is_oddprime.c b/src/ulong_extras/is_oddprime.c index 183fb3985d..16e5244e42 100644 --- a/src/ulong_extras/is_oddprime.c +++ b/src/ulong_extras/is_oddprime.c @@ -15,7 +15,7 @@ #if FLINT64 -mp_limb_t FLINT_ODD_PRIME_LOOKUP[] = +ulong FLINT_ODD_PRIME_LOOKUP[] = { 0x816d129a64b4cb6eUL, UWORD(0x2196820d864a4c32), UWORD(0xa48961205a0434c9), UWORD(0x4a2882d129861144), UWORD(0x834992132424030), 0x148a48844225064bUL, @@ -32,7 +32,7 @@ mp_limb_t FLINT_ODD_PRIME_LOOKUP[] = #else -mp_limb_t FLINT_ODD_PRIME_LOOKUP[] = +ulong FLINT_ODD_PRIME_LOOKUP[] = { 0x64b4cb6eUL, 0x816d129aUL, UWORD(0x864a4c32), 0x2196820dUL, UWORD(0x5a0434c9), UWORD(0xa4896120), UWORD(0x29861144), UWORD(0x4a2882d1), @@ -54,17 +54,17 @@ mp_limb_t FLINT_ODD_PRIME_LOOKUP[] = #endif -int n_is_oddprime_small(mp_limb_t n) +int n_is_oddprime_small(ulong n) { - mp_limb_t q = n / 2; - mp_limb_t x = (q & (FLINT_BITS - UWORD(1))); + ulong q = n / 2; + ulong x = (q & (FLINT_BITS - UWORD(1))); return (FLINT_ODD_PRIME_LOOKUP[q / FLINT_BITS] & (UWORD(1) << x)) >> x; } -int n_is_oddprime_binary(mp_limb_t n) +int n_is_oddprime_binary(ulong n) { ulong diff, prime_lo, prime_hi; - const mp_limb_t * primes; + const ulong * primes; n_prime_pi_bounds(&prime_lo, &prime_hi, n); primes = n_primes_arr_readonly(prime_hi + 1); diff --git a/src/ulong_extras/is_perfect_power235.c b/src/ulong_extras/is_perfect_power235.c index 37e9cb87ac..2c9d4a5991 100644 --- a/src/ulong_extras/is_perfect_power235.c +++ b/src/ulong_extras/is_perfect_power235.c @@ -14,7 +14,7 @@ #include "flint.h" #include "ulong_extras.h" -int n_is_perfect_power235(mp_limb_t n) +int n_is_perfect_power235(ulong n) { static unsigned char mod63[63] = {7,7,4,0,5,4,0,5,6,5,4,4,0,4,4,0,5,4,5,4, 4,0,5,4,0,5,4,6,7,4,0,4,4,0,4,6,7,5,4,0,4,4,0,5, @@ -43,21 +43,21 @@ int n_is_perfect_power235(mp_limb_t n) 
if (t & 1) { double x = sqrt((double) n); - mp_limb_t y = (mp_limb_t) (x + 0.5); + ulong y = (ulong) (x + 0.5); if (n == n_pow(y, 2)) return 1; } if (t & 2) { double x = pow((double) n, 1.0 / 3.0); - mp_limb_t y = (mp_limb_t) (x + 0.5); + ulong y = (ulong) (x + 0.5); if (n == n_pow(y, 3)) return 1; } if (t & 4) { double x = pow((double) n, 1.0 / 5.0); - mp_limb_t y = (mp_limb_t) (x + 0.5); + ulong y = (ulong) (x + 0.5); if (n == n_pow(y, 5)) return 1; } diff --git a/src/ulong_extras/is_prime.c b/src/ulong_extras/is_prime.c index 1501a1466b..b38cfea209 100644 --- a/src/ulong_extras/is_prime.c +++ b/src/ulong_extras/is_prime.c @@ -15,7 +15,7 @@ #include #include "ulong_extras.h" -int n_is_prime(mp_limb_t n) +int n_is_prime(ulong n) { /* flint's "BPSW" checked against Feitsma and Galway's database [1, 2] up to 2^64 by Dana Jacobsen. @@ -43,12 +43,12 @@ int n_is_prime(mp_limb_t n) } int -n_is_prime_pocklington(mp_limb_t n, ulong iterations) +n_is_prime_pocklington(ulong n, ulong iterations) { int pass; slong i; ulong j; - mp_limb_t n1, cofactor, b, c, ninv, limit, F, Fsq, det, rootn, val, c1, c2, upper_limit; + ulong n1, cofactor, b, c, ninv, limit, F, Fsq, det, rootn, val, c1, c2, upper_limit; n_factor_t factors; c = 0; @@ -70,7 +70,7 @@ n_is_prime_pocklington(mp_limb_t n, ulong iterations) n1 = n - 1; n_factor_init(&factors); - limit = (mp_limb_t) pow((double)n1, 1.0/3); + limit = (ulong) pow((double)n1, 1.0/3); val = n_pow(limit, 3); @@ -112,7 +112,7 @@ n_is_prime_pocklington(mp_limb_t n, ulong iterations) c = 1; for (i = factors.num - 1; i >= 0; i--) { - mp_limb_t exp = n1 / factors.p[i]; + ulong exp = n1 / factors.p[i]; pass = 0; for (j = 2; j < iterations && pass == 0; j++) @@ -136,7 +136,7 @@ n_is_prime_pocklington(mp_limb_t n, ulong iterations) return (n_gcd(n, c) == UWORD(1)); } -mp_limb_t flint_pseudosquares[] = {17, 73, 241, 1009, 2641, 8089, 18001, +ulong flint_pseudosquares[] = {17, 73, 241, 1009, 2641, 8089, 18001, 53881, 87481, 117049, 515761, 1083289, 3206641, 3818929, 9257329, 22000801, 48473881, 48473881, 175244281, 427733329, 427733329, 898716289u, 2805544681u, 2805544681u, 2805544681u @@ -159,11 +159,11 @@ mp_limb_t flint_pseudosquares[] = {17, 73, 241, 1009, 2641, 8089, 18001, #define FLINT_NUM_PSEUDOSQUARES 25 #endif -int n_is_prime_pseudosquare(mp_limb_t n) +int n_is_prime_pseudosquare(ulong n) { unsigned int i, j, m1; - mp_limb_t p, B, NB, exp, mod8; - const mp_limb_t * primes; + ulong p, B, NB, exp, mod8; + const ulong * primes; const double * inverses; if (n < UWORD(2)) @@ -197,7 +197,7 @@ int n_is_prime_pseudosquare(mp_limb_t n) for (j = 0; j <= i; j++) { - mp_limb_t mod = n_powmod2(primes[j], exp, n); + ulong mod = n_powmod2(primes[j], exp, n); if ((mod != UWORD(1)) && (mod != n - 1)) return 0; else if (mod == n - 1) @@ -210,7 +210,7 @@ int n_is_prime_pseudosquare(mp_limb_t n) return 1; else if (mod8 == 5) { - mp_limb_t mod = n_powmod2(UWORD(2), exp, n); + ulong mod = n_powmod2(UWORD(2), exp, n); if (mod == n - 1) return 1; else @@ -221,7 +221,7 @@ int n_is_prime_pseudosquare(mp_limb_t n) if (m1) return 1; for (j = i + 1; j < FLINT_NUM_PSEUDOSQUARES + 1; j++) { - mp_limb_t mod = n_powmod2(primes[j], exp, n); + ulong mod = n_powmod2(primes[j], exp, n); if (mod == n - 1) return 1; else if (mod != 1) diff --git a/src/ulong_extras/is_probabprime.c b/src/ulong_extras/is_probabprime.c index c969a09aea..bd4e7f9b61 100644 --- a/src/ulong_extras/is_probabprime.c +++ b/src/ulong_extras/is_probabprime.c @@ -27,15 +27,15 @@ n_pair_t; Currently it acts as such all the way up to 
2^64. */ -int n_is_probabprime(mp_limb_t n) +int n_is_probabprime(ulong n) { - mp_limb_t d; + ulong d; unsigned int norm; int isprime; #if FLINT64 double npre; #else - mp_limb_t ninv; + ulong ninv; #endif if (n <= UWORD(1)) return 0; @@ -108,7 +108,7 @@ int n_is_probabprime(mp_limb_t n) } int -n_is_probabprime_BPSW(mp_limb_t n) +n_is_probabprime_BPSW(ulong n) { if (n <= UWORD(1)) return 0; @@ -129,7 +129,7 @@ n_is_probabprime_BPSW(mp_limb_t n) } else { - mp_limb_t d; + ulong d; d = n - UWORD(1); while ((d & UWORD(1)) == UWORD(0)) @@ -143,7 +143,7 @@ n_is_probabprime_BPSW(mp_limb_t n) } else { - mp_limb_t ninv = n_preinvert_limb(n); + ulong ninv = n_preinvert_limb(n); if (n_is_strong_probabprime2_preinv(n, ninv, WORD(2), d) == 0) return 0; } @@ -153,7 +153,7 @@ n_is_probabprime_BPSW(mp_limb_t n) } int -n_is_probabprime_fermat(mp_limb_t n, mp_limb_t i) +n_is_probabprime_fermat(ulong n, ulong i) { if (FLINT_BIT_COUNT(n) <= FLINT_D_BITS) return (n_powmod(i, n - 1, n) == UWORD(1)); @@ -162,11 +162,11 @@ n_is_probabprime_fermat(mp_limb_t n, mp_limb_t i) } n_pair_t -fchain_precomp(mp_limb_t m, mp_limb_t n, double npre) +fchain_precomp(ulong m, ulong n, double npre) { n_pair_t current = {0, 0}, old; int length; - mp_limb_t power, xy; + ulong power, xy; old.x = UWORD(2); old.y = n - UWORD(3); @@ -201,11 +201,11 @@ fchain_precomp(mp_limb_t m, mp_limb_t n, double npre) } n_pair_t -fchain2_preinv(mp_limb_t m, mp_limb_t n, mp_limb_t ninv) +fchain2_preinv(ulong m, ulong n, ulong ninv) { n_pair_t current = {0, 0}, old; int length; - mp_limb_t power, xy; + ulong power, xy; old.x = UWORD(2); old.y = n - UWORD(3); @@ -240,9 +240,9 @@ fchain2_preinv(mp_limb_t m, mp_limb_t n, mp_limb_t ninv) } int -n_is_probabprime_fibonacci(mp_limb_t n) +n_is_probabprime_fibonacci(ulong n) { - mp_limb_t m; + ulong m; n_pair_t V; if ((ulong) FLINT_ABS((slong) n) <= UWORD(3)) @@ -265,7 +265,7 @@ n_is_probabprime_fibonacci(mp_limb_t n) } else { - mp_limb_t ninv = n_preinvert_limb(n); + ulong ninv = n_preinvert_limb(n); V = fchain2_preinv(m, n, ninv); return (n_mulmod2_preinv(n - UWORD(3), V.x, n, ninv) == @@ -274,11 +274,11 @@ n_is_probabprime_fibonacci(mp_limb_t n) } n_pair_t -lchain_precomp(mp_limb_t m, mp_limb_t a, mp_limb_t n, double npre) +lchain_precomp(ulong m, ulong a, ulong n, double npre) { n_pair_t current = {0, 0}, old; int length, i; - mp_limb_t power, xy, xx, yy; + ulong power, xy, xx, yy; old.x = UWORD(2); old.y = a; @@ -311,11 +311,11 @@ lchain_precomp(mp_limb_t m, mp_limb_t a, mp_limb_t n, double npre) } n_pair_t -lchain2_preinv(mp_limb_t m, mp_limb_t a, mp_limb_t n, mp_limb_t ninv) +lchain2_preinv(ulong m, ulong a, ulong n, ulong ninv) { n_pair_t current = {0, 0}, old; int length, i; - mp_limb_t power, xy, xx, yy; + ulong power, xy, xx, yy; old.x = UWORD(2); old.y = a; @@ -348,18 +348,18 @@ lchain2_preinv(mp_limb_t m, mp_limb_t a, mp_limb_t n, mp_limb_t ninv) } int -n_is_probabprime_lucas(mp_limb_t n) +n_is_probabprime_lucas(ulong n) { int i; slong D, Q; - mp_limb_t A; - mp_limb_t left, right; + ulong A; + ulong left, right; n_pair_t V; D = 0; Q = 0; - if (((n % 2) == 0) || (FLINT_ABS((mp_limb_signed_t) n) <= 2)) + if (((n % 2) == 0) || (FLINT_ABS((slong) n) <= 2)) { return (n == UWORD(2)); } @@ -419,7 +419,7 @@ n_is_probabprime_lucas(mp_limb_t n) } else { - mp_limb_t ninv = n_preinvert_limb(n); + ulong ninv = n_preinvert_limb(n); V = lchain2_preinv(n + 1, A, n, ninv); left = n_mulmod_precomp(A, V.x, n, ninv); diff --git a/src/ulong_extras/is_square.c b/src/ulong_extras/is_square.c index 3485573182..0b0444ca4b 
100644 --- a/src/ulong_extras/is_square.c +++ b/src/ulong_extras/is_square.c @@ -25,15 +25,15 @@ int mod63[63] = {1,1,0,0,1,0,0,1,0,1,0,0,0,0,1,0,1,0,1,0,0, 0,1,0,0,1,0,0,1,0,0,0,0,0,0,1,1,1,0,0,0,0, 0,1,0,0,1,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0}; -int n_is_square(mp_limb_t x) +int n_is_square(ulong x) { - mp_limb_t sq; + ulong sq; if (!mod64[x % UWORD(64)]) return 0; if (!mod63[x % UWORD(63)]) return 0; if (!mod65[x % UWORD(65)]) return 0; - sq = (mp_limb_t) (sqrt((double) x) + 0.5); + sq = (ulong) (sqrt((double) x) + 0.5); return (x == sq*sq); } diff --git a/src/ulong_extras/is_squarefree.c b/src/ulong_extras/is_squarefree.c index 2dd9d03ddb..f4a4e0a59a 100644 --- a/src/ulong_extras/is_squarefree.c +++ b/src/ulong_extras/is_squarefree.c @@ -12,7 +12,7 @@ #include "flint.h" #include "ulong_extras.h" -int n_is_squarefree(mp_limb_t n) +int n_is_squarefree(ulong n) { return n_moebius_mu(n) != 0; } diff --git a/src/ulong_extras/is_strong_probabprime2_preinv.c b/src/ulong_extras/is_strong_probabprime2_preinv.c index d20b4854e6..611c12c41f 100644 --- a/src/ulong_extras/is_strong_probabprime2_preinv.c +++ b/src/ulong_extras/is_strong_probabprime2_preinv.c @@ -14,11 +14,11 @@ #include "ulong_extras.h" int -n_is_strong_probabprime2_preinv(mp_limb_t n, mp_limb_t ninv, mp_limb_t a, - mp_limb_t d) +n_is_strong_probabprime2_preinv(ulong n, ulong ninv, ulong a, + ulong d) { - mp_limb_t t = d; - mp_limb_t y; + ulong t = d; + ulong y; FLINT_ASSERT(a < n); diff --git a/src/ulong_extras/is_strong_probabprime_precomp.c b/src/ulong_extras/is_strong_probabprime_precomp.c index c28541368e..ea7b6001fa 100644 --- a/src/ulong_extras/is_strong_probabprime_precomp.c +++ b/src/ulong_extras/is_strong_probabprime_precomp.c @@ -15,11 +15,11 @@ #include "ulong_extras.h" int -n_is_strong_probabprime_precomp(mp_limb_t n, double npre, mp_limb_t a, - mp_limb_t d) +n_is_strong_probabprime_precomp(ulong n, double npre, ulong a, + ulong d) { - mp_limb_t t = d; - mp_limb_t y; + ulong t = d; + ulong y; /* Map large base to range 2 ... 
n - 1 */ if (a >= n) diff --git a/src/ulong_extras/jacobi.c b/src/ulong_extras/jacobi.c index a7f3159e1b..976c635c30 100644 --- a/src/ulong_extras/jacobi.c +++ b/src/ulong_extras/jacobi.c @@ -14,9 +14,9 @@ #include "ulong_extras.h" /* return (x|y)*(-1)^tstbit(r,1) */ -int _n_jacobi_unsigned(mp_limb_t x, mp_limb_t y, unsigned int r) +int _n_jacobi_unsigned(ulong x, ulong y, unsigned int r) { - mp_limb_t t, st; + ulong t, st; int e; FLINT_ASSERT(y & 1); @@ -43,12 +43,12 @@ int _n_jacobi_unsigned(mp_limb_t x, mp_limb_t y, unsigned int r) return (int)(r & 2) - 1; } -int n_jacobi_unsigned(mp_limb_t x, mp_limb_t y) +int n_jacobi_unsigned(ulong x, ulong y) { return _n_jacobi_unsigned(x, y, 0); } -int n_jacobi(mp_limb_signed_t x, mp_limb_t y) +int n_jacobi(slong x, ulong y) { return _n_jacobi_unsigned(FLINT_ABS(x), y, FLINT_SIGN_EXT(x) & y); } diff --git a/src/ulong_extras/mod2_precomp.c b/src/ulong_extras/mod2_precomp.c index 0544e17b34..f07dca26b4 100644 --- a/src/ulong_extras/mod2_precomp.c +++ b/src/ulong_extras/mod2_precomp.c @@ -12,15 +12,15 @@ #include "flint.h" #include "ulong_extras.h" -mp_limb_t -n_mod2_precomp(mp_limb_t a, mp_limb_t n, double npre) +ulong +n_mod2_precomp(ulong a, ulong n, double npre) { - mp_limb_t quot; + ulong quot; slong rem; if (a < n) return a; - if ((mp_limb_signed_t) n < WORD(0)) + if ((slong) n < WORD(0)) return a - n; if (n == 1) @@ -29,14 +29,14 @@ n_mod2_precomp(mp_limb_t a, mp_limb_t n, double npre) rem = 0; } else { - quot = (mp_limb_t) ((double) a * npre); + quot = (ulong) ((double) a * npre); rem = a - quot * n; } - if (rem < (mp_limb_signed_t) (-n)) - quot -= (mp_limb_t) ((double) (-rem) * npre); + if (rem < (slong) (-n)) + quot -= (ulong) ((double) (-rem) * npre); else if (rem >= (slong) n) - quot += (mp_limb_t) ((double) rem * npre); + quot += (ulong) ((double) rem * npre); else if (rem < WORD(0)) return rem + n; else diff --git a/src/ulong_extras/mod_precomp.c b/src/ulong_extras/mod_precomp.c index b21cc75a40..6d80277d35 100644 --- a/src/ulong_extras/mod_precomp.c +++ b/src/ulong_extras/mod_precomp.c @@ -12,13 +12,13 @@ #include "flint.h" #include "ulong_extras.h" -mp_limb_t n_mod_precomp(mp_limb_t a, mp_limb_t n, double npre) +ulong n_mod_precomp(ulong a, ulong n, double npre) { - mp_limb_t quot, rem; + ulong quot, rem; - quot = (mp_limb_t) ((double) a * npre); + quot = (ulong) ((double) a * npre); rem = a - quot*n; if ((slong) rem < 0) /* unlikely */ rem += n; - return rem - (n & (((mp_limb_signed_t) (n - rem - 1)) >> (FLINT_BITS-1))); + return rem - (n & (((slong) (n - rem - 1)) >> (FLINT_BITS-1))); } diff --git a/src/ulong_extras/moebius_mu.c b/src/ulong_extras/moebius_mu.c index e735cdcd24..27d706b408 100644 --- a/src/ulong_extras/moebius_mu.c +++ b/src/ulong_extras/moebius_mu.c @@ -15,7 +15,7 @@ #define FLINT_MU_LOOKUP_CUTOFF 1024 #if FLINT64 -const mp_limb_t FLINT_MOEBIUS_ODD[] = +const ulong FLINT_MOEBIUS_ODD[] = { UWORD(0x4289108a05208102), UWORD(0x19988004a8a12422), UWORD(0x1a8245028906a062), UWORD(0x229428012aa26a00), UWORD(0x8422a98980440a18), 0x224925084068929aUL, @@ -25,7 +25,7 @@ const mp_limb_t FLINT_MOEBIUS_ODD[] = UWORD(0x0108884a22186025) }; #else -const mp_limb_t FLINT_MOEBIUS_ODD[] = +const ulong FLINT_MOEBIUS_ODD[] = { UWORD(0x05208102), 0x4289108aUL, UWORD(0xa8a12422), UWORD(0x19988004), UWORD(0x8906a062), UWORD(0x1a824502), UWORD(0x2aa26a00), UWORD(0x22942801), UWORD(0x80440a18), UWORD(0x8422a989), @@ -41,8 +41,8 @@ void n_moebius_mu_vec(int * mu, ulong len) { ulong k; ulong pi; - const mp_limb_t * primes; - mp_limb_t p, q; + 
const ulong * primes; + ulong p, q; pi = n_prime_pi(len); primes = n_primes_arr_readonly(pi); @@ -62,7 +62,7 @@ void n_moebius_mu_vec(int * mu, ulong len) } } -int n_moebius_mu(mp_limb_t n) +int n_moebius_mu(ulong n) { int i; n_factor_t fac; @@ -76,7 +76,7 @@ int n_moebius_mu(mp_limb_t n) if (n < FLINT_MU_LOOKUP_CUTOFF) { - mp_limb_t m; + ulong m; n -= 1; m = FLINT_MOEBIUS_ODD[n / FLINT_BITS]; m &= (UWORD(3) << (n % FLINT_BITS)); diff --git a/src/ulong_extras/mulmod_precomp.c b/src/ulong_extras/mulmod_precomp.c index 79e29e94ea..bcabf5dfdc 100644 --- a/src/ulong_extras/mulmod_precomp.c +++ b/src/ulong_extras/mulmod_precomp.c @@ -11,12 +11,12 @@ #include "ulong_extras.h" -mp_limb_t n_mulmod_precomp(mp_limb_t a, mp_limb_t b, mp_limb_t n, double npre) +ulong n_mulmod_precomp(ulong a, ulong b, ulong n, double npre) { - mp_limb_t quot; + ulong quot; slong rem; - quot = (mp_limb_t) ((double) a * (double) b * npre); + quot = (ulong) ((double) a * (double) b * npre); rem = a * b - quot * n; if (rem < 0) { diff --git a/src/ulong_extras/mulmod_precomp_shoup.c b/src/ulong_extras/mulmod_precomp_shoup.c index b0d2fd6ac3..cd6d5daf95 100644 --- a/src/ulong_extras/mulmod_precomp_shoup.c +++ b/src/ulong_extras/mulmod_precomp_shoup.c @@ -13,11 +13,11 @@ #include "flint.h" #include "ulong_extras.h" -/* Computes the W' = [w * b / p] (b = mp_limb_t power) */ -mp_limb_t -n_mulmod_precomp_shoup(mp_limb_t w, mp_limb_t p) +/* Computes the W' = [w * b / p] (b = ulong power) */ +ulong +n_mulmod_precomp_shoup(ulong w, ulong p) { - mp_limb_t q, r; + ulong q, r; udiv_qrnnd(q, r, w, UWORD(0), p); return q; } diff --git a/src/ulong_extras/nextprime.c b/src/ulong_extras/nextprime.c index 8e02741ff1..89a1dad4f4 100644 --- a/src/ulong_extras/nextprime.c +++ b/src/ulong_extras/nextprime.c @@ -47,7 +47,7 @@ static const unsigned short n_modular_primes_tab[N_MOD_TAB] = { }; -static mp_limb_t bsearch_uint(mp_limb_t n, const unsigned int *t, int tlen) +static ulong bsearch_uint(ulong n, const unsigned int *t, int tlen) { int lo = 0; int hi = tlen-1; @@ -59,7 +59,7 @@ static mp_limb_t bsearch_uint(mp_limb_t n, const unsigned int *t, int tlen) return t[lo]; } -mp_limb_t n_nextprime(mp_limb_t n, int FLINT_UNUSED(proved)) +ulong n_nextprime(ulong n, int FLINT_UNUSED(proved)) { ulong i, index; diff --git a/src/ulong_extras/nth_prime.c b/src/ulong_extras/nth_prime.c index d42521f217..cb5b463793 100644 --- a/src/ulong_extras/nth_prime.c +++ b/src/ulong_extras/nth_prime.c @@ -11,7 +11,7 @@ #include "ulong_extras.h" -mp_limb_t n_nth_prime(ulong n) +ulong n_nth_prime(ulong n) { if (n == 0) { @@ -21,7 +21,7 @@ mp_limb_t n_nth_prime(ulong n) return n_primes_arr_readonly(n)[n-1]; } -void n_nth_prime_bounds(mp_limb_t *lo, mp_limb_t *hi, ulong n) +void n_nth_prime_bounds(ulong *lo, ulong *hi, ulong n) { int bits, ll; double llo, lhi; @@ -37,6 +37,6 @@ void n_nth_prime_bounds(mp_limb_t *lo, mp_limb_t *hi, ulong n) else if (n < 528491312) ll = 2; else ll = 3; - *lo = (mp_limb_t) (n * (llo + ll - 1)); - *hi = (mp_limb_t) (n * (lhi + (ll+1) - (n >= 15985 ? 0.9427 : 0.0))); + *lo = (ulong) (n * (llo + ll - 1)); + *hi = (ulong) (n * (lhi + (ll+1) - (n >= 15985 ? 
0.9427 : 0.0))); } diff --git a/src/ulong_extras/powmod_precomp.c b/src/ulong_extras/powmod_precomp.c index 0d08bab3ac..09a50711a8 100644 --- a/src/ulong_extras/powmod_precomp.c +++ b/src/ulong_extras/powmod_precomp.c @@ -12,10 +12,10 @@ #include "flint.h" #include "ulong_extras.h" -mp_limb_t -n_powmod_ui_precomp(mp_limb_t a, mp_limb_t exp, mp_limb_t n, double npre) +ulong +n_powmod_ui_precomp(ulong a, ulong exp, ulong n, double npre) { - mp_limb_t x, y; + ulong x, y; if (n == UWORD(1)) return WORD(0); @@ -35,8 +35,8 @@ n_powmod_ui_precomp(mp_limb_t a, mp_limb_t exp, mp_limb_t n, double npre) return x; } -mp_limb_t -n_powmod_precomp(mp_limb_t a, mp_limb_signed_t exp, mp_limb_t n, double npre) +ulong +n_powmod_precomp(ulong a, slong exp, ulong n, double npre) { if (exp < 0) { diff --git a/src/ulong_extras/preinvert_limb.c b/src/ulong_extras/preinvert_limb.c index 6d3ac50896..16ecf27c33 100644 --- a/src/ulong_extras/preinvert_limb.c +++ b/src/ulong_extras/preinvert_limb.c @@ -55,7 +55,7 @@ static const unsigned short rec_word_tab[512] = { #define _invert_limb(dinv, d) \ do { \ - mp_limb_t _v0, _v1, _v2, _d21, _e, _m0; \ + ulong _v0, _v1, _v2, _d21, _e, _m0; \ FLINT_ASSERT(((d) & (UWORD(1)<<(FLINT_BITS - 1))) != 0); \ _v0 = rec_word_tab[((d) >> 22) & 0x1FF]; \ _d21 = ((d) >> 11) + 1; \ @@ -63,12 +63,12 @@ static const unsigned short rec_word_tab[512] = { umul_ppmm(_v1, _e, _m0, _d21); \ _v1 = (_v0 << 4) - _v1 - 1; \ _e = -_v1*((d) >> 1); \ - _m0 = -((d) & (mp_limb_t) 1); \ + _m0 = -((d) & (ulong) 1); \ _e -= ((_v1 - (_v1 >> 1)) & _m0); \ umul_ppmm(_v2, _m0, _v1, _e); \ _v2 = (_v1 << 15) + (_v2 >> 1); \ umul_ppmm(_v0, _d21, _v2, (d)); \ - add_ssaaaa(_v0, _d21, _v0, _d21, (mp_limb_t) 0, (d)); \ + add_ssaaaa(_v0, _d21, _v0, _d21, (ulong) 0, (d)); \ (dinv) = _v2 - (_v0 + (d)); \ } while(0) @@ -96,20 +96,20 @@ static const unsigned short rec_word_tab[256] = { #define _invert_limb(dinv, d) \ do { \ - mp_limb_t _v0, _v2, _d40, _e, _m0; \ + ulong _v0, _v2, _d40, _e, _m0; \ FLINT_ASSERT(((d) & (UWORD(1)<<(FLINT_BITS - 1))) != 0); \ _d40 = ((d) >> 24) + 1; \ _v0 = rec_word_tab[((d) >> 55) & 0xFF]; \ _v0 = (_v0 << 11) - ((_v0*_v0*_d40) >> 40) - 1; \ - _v2 = ((_v0*((((mp_limb_t) 1) << 60) - _v0*_d40)) >> 47); \ + _v2 = ((_v0*((((ulong) 1) << 60) - _v0*_d40)) >> 47); \ _v2 += (_v0 << 13); \ _e = -_v2*((d) >> 1); \ - _m0 = -((d) & (mp_limb_t) 1); \ + _m0 = -((d) & (ulong) 1); \ _e -= ((_v2 - (_v2 >> 1)) & _m0); \ umul_ppmm(_v0, _d40, _v2, _e); \ _v2 = (_v2 << 31) + (_v0 >> 1); \ umul_ppmm(_v0, _d40, _v2, (d)); \ - add_ssaaaa(_v0, _d40, _v0, _d40, (mp_limb_t) 0, (d)); \ + add_ssaaaa(_v0, _d40, _v0, _d40, (ulong) 0, (d)); \ (dinv) = _v2 - (_v0 + (d)); \ } while (0) diff --git a/src/ulong_extras/prime_pi.c b/src/ulong_extras/prime_pi.c index f1f9a5146d..c77cbf2397 100644 --- a/src/ulong_extras/prime_pi.c +++ b/src/ulong_extras/prime_pi.c @@ -23,10 +23,10 @@ const unsigned char FLINT_PRIME_PI_ODD_LOOKUP[] = }; -ulong n_prime_pi(mp_limb_t n) +ulong n_prime_pi(ulong n) { ulong low, mid, high; - const mp_limb_t * primes; + const ulong * primes; if (n < FLINT_PRIME_PI_ODD_LOOKUP_CUTOFF) { diff --git a/src/ulong_extras/prime_pi_bounds.c b/src/ulong_extras/prime_pi_bounds.c index 212b51cf08..d9434af7c3 100644 --- a/src/ulong_extras/prime_pi_bounds.c +++ b/src/ulong_extras/prime_pi_bounds.c @@ -15,7 +15,7 @@ FLINT_DLL extern const unsigned char FLINT_PRIME_PI_ODD_LOOKUP[]; -void n_prime_pi_bounds(ulong *lo, ulong *hi, mp_limb_t n) +void n_prime_pi_bounds(ulong *lo, ulong *hi, ulong n) { if (n < 
FLINT_PRIME_PI_ODD_LOOKUP_CUTOFF) { diff --git a/src/ulong_extras/primes_arr_readonly.c b/src/ulong_extras/primes_arr_readonly.c index 14a6ccbb67..f54b5a55eb 100644 --- a/src/ulong_extras/primes_arr_readonly.c +++ b/src/ulong_extras/primes_arr_readonly.c @@ -11,7 +11,7 @@ #include "ulong_extras.h" -const mp_limb_t * n_primes_arr_readonly(ulong num_primes) +const ulong * n_primes_arr_readonly(ulong num_primes) { slong m; diff --git a/src/ulong_extras/primes_extend_small.c b/src/ulong_extras/primes_extend_small.c index a5a42417fb..d68706a709 100644 --- a/src/ulong_extras/primes_extend_small.c +++ b/src/ulong_extras/primes_extend_small.c @@ -13,7 +13,7 @@ #include "ulong_extras.h" void -n_primes_extend_small(n_primes_t iter, mp_limb_t bound) +n_primes_extend_small(n_primes_t iter, ulong bound) { while (iter->small_primes[iter->small_num - 2] < bound) { diff --git a/src/ulong_extras/primes_jump_after.c b/src/ulong_extras/primes_jump_after.c index dd66a636ea..8468dd4a0f 100644 --- a/src/ulong_extras/primes_jump_after.c +++ b/src/ulong_extras/primes_jump_after.c @@ -12,7 +12,7 @@ #include "ulong_extras.h" void -n_primes_jump_after(n_primes_t iter, mp_limb_t n) +n_primes_jump_after(n_primes_t iter, ulong n) { if (n < iter->small_primes[iter->small_num - 1]) { diff --git a/src/ulong_extras/primes_sieve_range.c b/src/ulong_extras/primes_sieve_range.c index 4bf43604ce..af7e7dbada 100644 --- a/src/ulong_extras/primes_sieve_range.c +++ b/src/ulong_extras/primes_sieve_range.c @@ -12,9 +12,9 @@ #include "ulong_extras.h" static void -mark(char * sieve, mp_limb_t a, ulong len, mp_limb_t p) +mark(char * sieve, ulong a, ulong len, ulong p) { - mp_limb_t t; + ulong t; t = p * p; if (t >= a) @@ -36,11 +36,11 @@ mark(char * sieve, mp_limb_t a, ulong len, mp_limb_t p) } static void -n_sieve_odd(char * sieve, ulong n, mp_limb_t a, - unsigned int * sieve_primes, mp_limb_t bound) +n_sieve_odd(char * sieve, ulong n, ulong a, + unsigned int * sieve_primes, ulong bound) { ulong i; - mp_limb_t p; + ulong p; for (i = 0; i < n / 2; i++) sieve[i] = 1; @@ -57,9 +57,9 @@ n_sieve_odd(char * sieve, ulong n, mp_limb_t a, } void -n_primes_sieve_range(n_primes_t iter, mp_limb_t a, mp_limb_t b) +n_primes_sieve_range(n_primes_t iter, ulong a, ulong b) { - mp_limb_t bound; + ulong bound; ulong len, odd_len; /* a and b must be odd */ diff --git a/src/ulong_extras/primitive_root_prime.c b/src/ulong_extras/primitive_root_prime.c index b0cae780d3..22137b20f1 100644 --- a/src/ulong_extras/primitive_root_prime.c +++ b/src/ulong_extras/primitive_root_prime.c @@ -19,7 +19,7 @@ ulong n_primitive_root_prime_prefactor(ulong p, n_factor_t * factors) return 1; // compute the divisions "(p-1) / factors" once for all - mp_limb_signed_t exps[FLINT_MAX_FACTORS_IN_LIMB]; + slong exps[FLINT_MAX_FACTORS_IN_LIMB]; for (slong i = 0; i < factors->num; i++) exps[i] = (p-1) / factors->p[i]; diff --git a/src/ulong_extras/profile/p-factor.c b/src/ulong_extras/profile/p-factor.c index eefe3a583e..f1d9df02fc 100644 --- a/src/ulong_extras/profile/p-factor.c +++ b/src/ulong_extras/profile/p-factor.c @@ -25,7 +25,7 @@ void sample(void * arg, ulong count) { fac_one_line_t * params = (fac_one_line_t *) arg; ulong i, j; - mp_limb_t n2; + ulong n2; for (i = 0; i < count; i++) { diff --git a/src/ulong_extras/profile/p-factor_pp1.c b/src/ulong_extras/profile/p-factor_pp1.c index beb8275756..20cdcdae8f 100644 --- a/src/ulong_extras/profile/p-factor_pp1.c +++ b/src/ulong_extras/profile/p-factor_pp1.c @@ -17,11 +17,11 @@ int main(int argc, char** argv) { double tbest = 
1.0e300; - mp_limb_t nums[1000]; + ulong nums[1000]; slong i; slong bits, B1, count; - mp_limb_t n, cofactor; + ulong n, cofactor; n_factor_t fac; FLINT_TEST_INIT(state); diff --git a/src/ulong_extras/profile/p-is_probabprime_BPSW.c b/src/ulong_extras/profile/p-is_probabprime_BPSW.c index 8e20248fcd..1face901d0 100644 --- a/src/ulong_extras/profile/p-is_probabprime_BPSW.c +++ b/src/ulong_extras/profile/p-is_probabprime_BPSW.c @@ -23,7 +23,7 @@ void sample(void * arg, ulong count) BPSW_t * params = (BPSW_t *) arg; ulong bits = params->bits; ulong i; - mp_limb_t d; + ulong d; FLINT_TEST_INIT(state); diff --git a/src/ulong_extras/profile/p-lll_mod_preinv.c b/src/ulong_extras/profile/p-lll_mod_preinv.c index 43ec31aca0..466d5b33e5 100644 --- a/src/ulong_extras/profile/p-lll_mod_preinv.c +++ b/src/ulong_extras/profile/p-lll_mod_preinv.c @@ -21,17 +21,17 @@ typedef struct void sample(void * arg, ulong count) { - mp_limb_t d, dinv, r = 0; + ulong d, dinv, r = 0; info_t * info = (info_t *) arg; flint_bitcnt_t bits = info->bits; ulong type = info->type; ulong i; - mp_ptr arr, arr2; + nn_ptr arr, arr2; FLINT_TEST_INIT(state); - arr = (mp_ptr) flint_malloc(1024*sizeof(mp_limb_t)); - arr2 = (mp_ptr) flint_malloc(1024*sizeof(mp_limb_t)); + arr = (nn_ptr) flint_malloc(1024*sizeof(ulong)); + arr2 = (nn_ptr) flint_malloc(1024*sizeof(ulong)); for (i = 0; i < count; i++) { diff --git a/src/ulong_extras/profile/p-mod2_precomp.c b/src/ulong_extras/profile/p-mod2_precomp.c index e64a77c970..970bca6f00 100644 --- a/src/ulong_extras/profile/p-mod2_precomp.c +++ b/src/ulong_extras/profile/p-mod2_precomp.c @@ -15,10 +15,10 @@ void sample(void * arg, ulong count) { - mp_limb_t d, r = 0; + ulong d, r = 0; double dpre; ulong i; - mp_ptr array = (mp_ptr) flint_malloc(1024*sizeof(mp_limb_t)); + nn_ptr array = (nn_ptr) flint_malloc(1024*sizeof(ulong)); FLINT_TEST_INIT(state); diff --git a/src/ulong_extras/profile/p-mod2_preinv.c b/src/ulong_extras/profile/p-mod2_preinv.c index 6e3d0850f4..d6e512b88f 100644 --- a/src/ulong_extras/profile/p-mod2_preinv.c +++ b/src/ulong_extras/profile/p-mod2_preinv.c @@ -21,13 +21,13 @@ typedef struct void sample(void * arg, ulong count) { - mp_limb_t d, dinv, r = 0; + ulong d, dinv, r = 0; double dpre; info_t * info = (info_t *) arg; flint_bitcnt_t bits = info->bits; ulong type = info->type; ulong i; - mp_ptr arr = (mp_ptr) flint_malloc(1024*sizeof(mp_limb_t)); + nn_ptr arr = (nn_ptr) flint_malloc(1024*sizeof(ulong)); FLINT_TEST_INIT(state); @@ -50,7 +50,7 @@ void sample(void * arg, ulong count) /*case 1: prof_start(); - for (mp_size_t j = 0; j < UWORD(10000); j++) + for (slong j = 0; j < UWORD(10000); j++) { r += n_empty(arr[j&1023], d, dinv); } @@ -72,7 +72,7 @@ void sample(void * arg, ulong count) /*case 3: prof_start(); - for (mp_size_t j = 0; j < UWORD(10000); j++) + for (slong j = 0; j < UWORD(10000); j++) { r += n_mod3_preinv(arr[j&1023], d, dinv); } diff --git a/src/ulong_extras/profile/p-mod_precomp.c b/src/ulong_extras/profile/p-mod_precomp.c index eec615c8d1..dcde871780 100644 --- a/src/ulong_extras/profile/p-mod_precomp.c +++ b/src/ulong_extras/profile/p-mod_precomp.c @@ -15,10 +15,10 @@ void sample(void * arg, ulong count) { - mp_limb_t d, bits; + ulong d, bits; double dpre; ulong i; - mp_ptr array = (mp_ptr) flint_malloc(1000*sizeof(mp_limb_t)); + nn_ptr array = (nn_ptr) flint_malloc(1000*sizeof(ulong)); FLINT_TEST_INIT(state); diff --git a/src/ulong_extras/profile/p-mulmod2_preinv.c b/src/ulong_extras/profile/p-mulmod2_preinv.c index 1739e7fb7f..1752dd0b22 100644 --- 
a/src/ulong_extras/profile/p-mulmod2_preinv.c +++ b/src/ulong_extras/profile/p-mulmod2_preinv.c @@ -15,8 +15,8 @@ void sample(void * arg, ulong count) { - mp_limb_t a, d, dinv; - mp_ptr array = (mp_ptr) flint_malloc(1000*sizeof(mp_limb_t)); + ulong a, d, dinv; + nn_ptr array = (nn_ptr) flint_malloc(1000*sizeof(ulong)); ulong i; FLINT_TEST_INIT(state); @@ -24,7 +24,7 @@ void sample(void * arg, ulong count) for (i = 0; i < count; i++) { int j; - mp_limb_t bits = n_randint(state, 53) + 1; + ulong bits = n_randint(state, 53) + 1; d = n_randbits(state, bits); a = n_randint(state, d); dinv = n_preinvert_limb(d); diff --git a/src/ulong_extras/profile/p-mulmod_precomp.c b/src/ulong_extras/profile/p-mulmod_precomp.c index d6d8d28c40..9e77104685 100644 --- a/src/ulong_extras/profile/p-mulmod_precomp.c +++ b/src/ulong_extras/profile/p-mulmod_precomp.c @@ -16,16 +16,16 @@ void sample(void * arg, ulong count) { ulong i; - mp_limb_t a, d; + ulong a, d; double dpre; - mp_ptr array = (mp_ptr) flint_malloc(1000*sizeof(mp_limb_t)); + nn_ptr array = (nn_ptr) flint_malloc(1000*sizeof(ulong)); FLINT_TEST_INIT(state); for (i = 0; i < count; i++) { int j; - mp_limb_t bits = n_randint(state, 53) + 1; + ulong bits = n_randint(state, 53) + 1; d = n_randbits(state, bits); a = n_randint(state, d); dpre = n_precompute_inverse(d); diff --git a/src/ulong_extras/randomisation.c b/src/ulong_extras/randomisation.c index dbe3c223b6..f9a5bc1ea5 100644 --- a/src/ulong_extras/randomisation.c +++ b/src/ulong_extras/randomisation.c @@ -16,13 +16,13 @@ #include "ulong_extras.h" #include "fmpz.h" -mp_limb_t n_randbits(flint_rand_t state, unsigned int bits) +ulong n_randbits(flint_rand_t state, unsigned int bits) { if (bits == 0) return UWORD(0); else return (UWORD(1) << (bits - 1)) | n_randint(state, l_shift(UWORD(1), bits)); } -mp_limb_t n_urandint(flint_rand_t state, mp_limb_t limit) +ulong n_urandint(flint_rand_t state, ulong limit) { if ((limit & (limit - 1)) == 0) { @@ -30,8 +30,8 @@ mp_limb_t n_urandint(flint_rand_t state, mp_limb_t limit) } else { - const mp_limb_t rand_max = UWORD_MAX; - mp_limb_t bucket_size, num_of_buckets, rand_within_range; + const ulong rand_max = UWORD_MAX; + ulong bucket_size, num_of_buckets, rand_within_range; bucket_size = 1 + (rand_max - limit + 1)/limit; num_of_buckets = bucket_size*limit; @@ -46,7 +46,7 @@ mp_limb_t n_urandint(flint_rand_t state, mp_limb_t limit) } #if FLINT64 -mp_limb_t n_randlimb(flint_rand_t state) +ulong n_randlimb(flint_rand_t state) { state->__randval = (state->__randval*UWORD(13282407956253574709) + UWORD(286824421)); state->__randval2 = (state->__randval2*UWORD(7557322358563246341) + UWORD(286824421)); @@ -54,7 +54,7 @@ mp_limb_t n_randlimb(flint_rand_t state) return (state->__randval>>32) + ((state->__randval2>>32) << 32); } #else -mp_limb_t n_randlimb(flint_rand_t state) +ulong n_randlimb(flint_rand_t state) { state->__randval = (state->__randval*UWORD(1543932465) + UWORD(1626832771)); state->__randval2 = (state->__randval2*UWORD(2495927737) + UWORD(1626832771)); @@ -63,10 +63,10 @@ mp_limb_t n_randlimb(flint_rand_t state) } #endif -mp_limb_t n_randtest_bits(flint_rand_t state, int bits) +ulong n_randtest_bits(flint_rand_t state, int bits) { - mp_limb_t m; - mp_limb_t n; + ulong m; + ulong n; m = n_randlimb(state); @@ -107,22 +107,22 @@ mp_limb_t n_randtest_bits(flint_rand_t state, int bits) return n; } -mp_limb_t n_randtest(flint_rand_t state) +ulong n_randtest(flint_rand_t state) { return n_randtest_bits(state, n_randint(state, FLINT_BITS + 1)); } -mp_limb_t 
n_randtest_not_zero(flint_rand_t state) +ulong n_randtest_not_zero(flint_rand_t state) { - mp_limb_t n; + ulong n; while ((n = n_randtest(state)) == 0) ; return n; } -mp_limb_t n_randprime(flint_rand_t state, ulong bits, int proved) +ulong n_randprime(flint_rand_t state, ulong bits, int proved) { - mp_limb_t rand; + ulong rand; if (bits < 2) { @@ -153,7 +153,7 @@ mp_limb_t n_randprime(flint_rand_t state, ulong bits, int proved) return rand; } -mp_limb_t n_randtest_prime(flint_rand_t state, int proved) +ulong n_randtest_prime(flint_rand_t state, int proved) { return n_randprime(state, 2 + n_randint(state, FLINT_BITS - 1), proved); } diff --git a/src/ulong_extras/remove.c b/src/ulong_extras/remove.c index 2aac65d418..de4d863026 100644 --- a/src/ulong_extras/remove.c +++ b/src/ulong_extras/remove.c @@ -13,11 +13,11 @@ #include "ulong_extras.h" int -n_remove(mp_limb_t * n, mp_limb_t p) +n_remove(ulong * n, ulong p) { int exp, i; - mp_limb_t powp[6]; - mp_limb_t quot, rem; + ulong powp[6]; + ulong quot, rem; if (p == 2) { diff --git a/src/ulong_extras/remove2_precomp.c b/src/ulong_extras/remove2_precomp.c index 21e39beab5..20580c8603 100644 --- a/src/ulong_extras/remove2_precomp.c +++ b/src/ulong_extras/remove2_precomp.c @@ -13,10 +13,10 @@ #include "ulong_extras.h" int -n_remove2_precomp(mp_limb_t * n, mp_limb_t p, double ppre) +n_remove2_precomp(ulong * n, ulong p, double ppre) { int exp = 0; - mp_limb_t quot, rem = UWORD(0); + ulong quot, rem = UWORD(0); if (p == 2) { diff --git a/src/ulong_extras/root.c b/src/ulong_extras/root.c index 5fa5ccff2d..543de4ff70 100644 --- a/src/ulong_extras/root.c +++ b/src/ulong_extras/root.c @@ -49,7 +49,7 @@ static const double inv_table[] = { max_base[n] = UWORD_MAX^(1/n) for n in range [1, FLINT_BITS] max_base[0] is set to 0, although it will never be called */ -static const mp_limb_t max_base[] = { +static const ulong max_base[] = { #ifdef FLINT64 UWORD(0), UWORD_MAX, UWORD(4294967296), UWORD(2642245), UWORD(65536), UWORD(7131), UWORD(1625), UWORD(565), UWORD(256), UWORD(138), UWORD(84), @@ -73,10 +73,10 @@ static const mp_limb_t max_base[] = { /* this table consists of 65 values in case of FLINT64, otherwise 33 */ -mp_limb_t -n_root(mp_limb_t n, mp_limb_t root) +ulong +n_root(ulong n, ulong root) { - mp_limb_t x, currval, base, upper_limit; + ulong x, currval, base, upper_limit; double dx; if (!n || !root) diff --git a/src/ulong_extras/root_estimate.c b/src/ulong_extras/root_estimate.c index f1b5deae09..a19736234c 100644 --- a/src/ulong_extras/root_estimate.c +++ b/src/ulong_extras/root_estimate.c @@ -18,7 +18,7 @@ /* this table contains the value of UWORD_MAX / n, for n in range [1, FLINT_BITS] */ -static const mp_limb_t mul_factor[] = { +static const ulong mul_factor[] = { #ifdef FLINT64 UWORD(0), UWORD_MAX, UWORD(9223372036854775807), UWORD(6148914691236517205), UWORD(4611686018427387903), @@ -74,7 +74,7 @@ static const mp_limb_t mul_factor[] = { /* https://en.wikipedia.org/wiki/Fast_inverse_square_root */ /* Instead of the inverse square root, we calculate the nth root */ -mp_limb_t +ulong n_root_estimate(double a, int n) { typedef union { @@ -104,5 +104,5 @@ n_root_estimate(double a, int n) i = hi; i += s; alias.uword_val = i; - return (mp_limb_t)alias.double_val; + return (ulong)alias.double_val; } diff --git a/src/ulong_extras/rootrem.c b/src/ulong_extras/rootrem.c index 91aaf639fc..470c530b36 100644 --- a/src/ulong_extras/rootrem.c +++ b/src/ulong_extras/rootrem.c @@ -50,7 +50,7 @@ static const double inv_table[] = { max_base[n] = UWORD_MAX^(1/n) 
for n in range [1, FLINT_BITS] max_base[0] is set to 0, although it will never be called */ -static const mp_limb_t max_base[] = { +static const ulong max_base[] = { #ifdef FLINT64 UWORD(0), UWORD_MAX, UWORD(4294967296), UWORD(2642245), UWORD(65536), UWORD(7131), UWORD(1625), UWORD(565), UWORD(256), UWORD(138), UWORD(84), @@ -74,10 +74,10 @@ static const mp_limb_t max_base[] = { /* this table consists of 65 values in case of FLINT64, otherwise 33 */ -mp_limb_t -n_rootrem(mp_limb_t* remainder, mp_limb_t n, mp_limb_t root) +ulong +n_rootrem(ulong* remainder, ulong n, ulong root) { - mp_limb_t x, currval, base, upper_limit; + ulong x, currval, base, upper_limit; double dx; if (!root) diff --git a/src/ulong_extras/sizeinbase.c b/src/ulong_extras/sizeinbase.c index 42c60edd46..39a739682b 100644 --- a/src/ulong_extras/sizeinbase.c +++ b/src/ulong_extras/sizeinbase.c @@ -13,7 +13,7 @@ #include "flint.h" #include "ulong_extras.h" -int n_sizeinbase(mp_limb_t n, int base) +int n_sizeinbase(ulong n, int base) { if (n == 0) return 1; diff --git a/src/ulong_extras/sqrt.c b/src/ulong_extras/sqrt.c index 71b50538bf..3083feac37 100644 --- a/src/ulong_extras/sqrt.c +++ b/src/ulong_extras/sqrt.c @@ -13,11 +13,11 @@ #include "flint.h" #include "ulong_extras.h" -mp_limb_t n_sqrt(mp_limb_t a) +ulong n_sqrt(ulong a) { - mp_limb_t is; + ulong is; - is = (mp_limb_t) sqrt((double) a); + is = (ulong) sqrt((double) a); is -= (is*is > a); #if FLINT64 diff --git a/src/ulong_extras/sqrtmod.c b/src/ulong_extras/sqrtmod.c index d993dcc2fe..3a61751bdd 100644 --- a/src/ulong_extras/sqrtmod.c +++ b/src/ulong_extras/sqrtmod.c @@ -13,11 +13,11 @@ #include "flint.h" #include "ulong_extras.h" -mp_limb_t n_sqrtmod(mp_limb_t a, mp_limb_t p) +ulong n_sqrtmod(ulong a, ulong p) { slong i, r, m, iter; - mp_limb_t p1, k, b, g, bpow, gpow, res; - mp_limb_t pinv; + ulong p1, k, b, g, bpow, gpow, res; + ulong pinv; if (a <= 1) { @@ -27,7 +27,7 @@ mp_limb_t n_sqrtmod(mp_limb_t a, mp_limb_t p) /* just do a brute force search */ if (p < 600) { - mp_limb_t t, t2; + ulong t, t2; if (p > 50 && n_jacobi_unsigned(a, p) == -1) return 0; diff --git a/src/ulong_extras/sqrtmod_primepow.c b/src/ulong_extras/sqrtmod_primepow.c index 101c921ed6..d2470bdc85 100644 --- a/src/ulong_extras/sqrtmod_primepow.c +++ b/src/ulong_extras/sqrtmod_primepow.c @@ -11,14 +11,14 @@ #include "ulong_extras.h" -slong n_sqrtmod_2pow(mp_limb_t ** sqrt, mp_limb_t a, slong exp) +slong n_sqrtmod_2pow(ulong ** sqrt, ulong a, slong exp) { - mp_limb_t r = (a & 1); - mp_limb_t * s; + ulong r = (a & 1); + ulong * s; if (exp == 0) /* special case for sqrt of 0 mod 1 */ { - *sqrt = flint_malloc(sizeof(mp_limb_t)); + *sqrt = flint_malloc(sizeof(ulong)); (*sqrt)[0] = 0; return 1; @@ -26,7 +26,7 @@ slong n_sqrtmod_2pow(mp_limb_t ** sqrt, mp_limb_t a, slong exp) if (exp == 1) /* special case mod 2 */ { - *sqrt = flint_malloc(sizeof(mp_limb_t)); + *sqrt = flint_malloc(sizeof(ulong)); if (r) (*sqrt)[0] = 1; else (*sqrt)[0] = 0; @@ -40,7 +40,7 @@ slong n_sqrtmod_2pow(mp_limb_t ** sqrt, mp_limb_t a, slong exp) if (r < 2) /* 0, 1 mod 4 */ { - *sqrt = flint_malloc(sizeof(mp_limb_t)*2); + *sqrt = flint_malloc(sizeof(ulong)*2); (*sqrt)[0] = r; (*sqrt)[1] = r + 2; @@ -55,7 +55,7 @@ slong n_sqrtmod_2pow(mp_limb_t ** sqrt, mp_limb_t a, slong exp) if (r) /* a is odd */ { - mp_limb_t roots[2]; + ulong roots[2]; slong i, ex, pow; if ((a & 7) != 1) /* check square root exists */ @@ -94,7 +94,7 @@ slong n_sqrtmod_2pow(mp_limb_t ** sqrt, mp_limb_t a, slong exp) roots[i] = r; } - *sqrt = 
flint_malloc(sizeof(mp_limb_t)*4); + *sqrt = flint_malloc(sizeof(ulong)*4); (*sqrt)[0] = roots[0]; /* write out both pairs of roots */ (*sqrt)[1] = pow - roots[0]; @@ -117,7 +117,7 @@ slong n_sqrtmod_2pow(mp_limb_t ** sqrt, mp_limb_t a, slong exp) { a = (UWORD(1)<<(exp - k/2)); num = (UWORD(1)<<(k/2)); - s = flint_malloc(num*sizeof(mp_limb_t)); + s = flint_malloc(num*sizeof(ulong)); for (i = 0; i < num; i++) s[i] = i*a; @@ -152,14 +152,14 @@ slong n_sqrtmod_2pow(mp_limb_t ** sqrt, mp_limb_t a, slong exp) if (num == 1) /* one root */ { - s = flint_realloc(s, a*sizeof(mp_limb_t)); + s = flint_realloc(s, a*sizeof(ulong)); for (i = 1; (ulong) i < a; i++) s[i] = s[i - 1] + r; } else if (num == 2) /* two roots */ { - s = flint_realloc(s, 2*a*sizeof(mp_limb_t)); + s = flint_realloc(s, 2*a*sizeof(ulong)); for (i = 1; (ulong) i < a; i++) { @@ -168,7 +168,7 @@ slong n_sqrtmod_2pow(mp_limb_t ** sqrt, mp_limb_t a, slong exp) } } else /* num == 4, i.e. four roots */ { - s = flint_realloc(s, 4*a*sizeof(mp_limb_t)); + s = flint_realloc(s, 4*a*sizeof(ulong)); for (i = 1; (ulong) i < a; i++) { @@ -185,10 +185,10 @@ slong n_sqrtmod_2pow(mp_limb_t ** sqrt, mp_limb_t a, slong exp) } } -slong n_sqrtmod_primepow(mp_limb_t ** sqrt, mp_limb_t a, mp_limb_t p, slong exp) +slong n_sqrtmod_primepow(ulong ** sqrt, ulong a, ulong p, slong exp) { - mp_limb_t r, pow, a1, pinv, powinv; - mp_limb_t * s; + ulong r, pow, a1, pinv, powinv; + ulong * s; slong i, ex, k, num; if (exp < 0) @@ -198,7 +198,7 @@ slong n_sqrtmod_primepow(mp_limb_t ** sqrt, mp_limb_t a, mp_limb_t p, slong exp) if (exp == 0) /* special case, sqrt of 0 mod 1 */ { - *sqrt = flint_malloc(sizeof(mp_limb_t)); + *sqrt = flint_malloc(sizeof(ulong)); (*sqrt)[0] = 0; return 1; @@ -217,7 +217,7 @@ slong n_sqrtmod_primepow(mp_limb_t ** sqrt, mp_limb_t a, mp_limb_t p, slong exp) return 0; } - *sqrt = flint_malloc(sizeof(mp_limb_t)*(1 + (r != 0))); + *sqrt = flint_malloc(sizeof(ulong)*(1 + (r != 0))); (*sqrt)[0] = r; if (r) (*sqrt)[1] = p - r; @@ -255,7 +255,7 @@ slong n_sqrtmod_primepow(mp_limb_t ** sqrt, mp_limb_t a, mp_limb_t p, slong exp) r += k*pow; } - *sqrt = flint_malloc(sizeof(mp_limb_t)*2); + *sqrt = flint_malloc(sizeof(ulong)*2); (*sqrt)[0] = r; (*sqrt)[1] = pow - r; @@ -264,7 +264,7 @@ slong n_sqrtmod_primepow(mp_limb_t ** sqrt, mp_limb_t a, mp_limb_t p, slong exp) { for (k = 1, pow = p; k < exp; k++) /* find highest power of p dividing a */ { - mp_limb_t pow2 = pow * p; + ulong pow2 = pow * p; if (a % pow2 != 0) break; @@ -276,7 +276,7 @@ slong n_sqrtmod_primepow(mp_limb_t ** sqrt, mp_limb_t a, mp_limb_t p, slong exp) { a = n_pow(p, exp - k/2); num = n_pow(p, k/2); - s = flint_malloc(num*sizeof(mp_limb_t)); + s = flint_malloc(num*sizeof(ulong)); for (i = 0; i < num; i++) s[i] = i*a; @@ -307,7 +307,7 @@ slong n_sqrtmod_primepow(mp_limb_t ** sqrt, mp_limb_t a, mp_limb_t p, slong exp) s[0] *= a; /* multiply roots by p^(k/2) */ s[1] *= a; - s = flint_realloc(s, 2*a*sizeof(mp_limb_t)); + s = flint_realloc(s, 2*a*sizeof(ulong)); for (i = 1; (ulong) i < a; i++) { diff --git a/src/ulong_extras/sqrtmodn.c b/src/ulong_extras/sqrtmodn.c index 234af6e548..73cc2d1c73 100644 --- a/src/ulong_extras/sqrtmodn.c +++ b/src/ulong_extras/sqrtmodn.c @@ -12,24 +12,24 @@ #include "ulong_extras.h" /* compute square roots of a modulo m given factorisation of m */ -slong n_sqrtmodn(mp_limb_t ** sqrt, mp_limb_t a, n_factor_t * fac) +slong n_sqrtmodn(ulong ** sqrt, ulong a, n_factor_t * fac) { - mp_limb_t m = 1, minv = 1; + ulong m = 1, minv = 1; slong i, j, num; - mp_limb_t * x, 
* sn, * ind, ** s; + ulong * x, * sn, * ind, ** s; /* Check if modulus is one, that is, it has a trivial representation */ if (fac->num == 0) { - *sqrt = flint_malloc(sizeof(mp_limb_t)); + *sqrt = flint_malloc(sizeof(ulong)); (*sqrt)[0] = 0; return 1; } - x = flint_malloc(sizeof(mp_limb_t)*fac->num); - sn = flint_malloc(sizeof(mp_limb_t)*fac->num); - ind = flint_malloc(sizeof(mp_limb_t)*fac->num); - s = flint_malloc(sizeof(mp_limb_t *)*fac->num); + x = flint_malloc(sizeof(ulong)*fac->num); + sn = flint_malloc(sizeof(ulong)*fac->num); + ind = flint_malloc(sizeof(ulong)*fac->num); + s = flint_malloc(sizeof(ulong *)*fac->num); /* compute prime powers and square roots of a mod x_i = p_i^r_i*/ num = 1; @@ -53,7 +53,7 @@ slong n_sqrtmodn(mp_limb_t ** sqrt, mp_limb_t a, n_factor_t * fac) } } - *sqrt = flint_malloc(num*sizeof(mp_limb_t)); + *sqrt = flint_malloc(num*sizeof(ulong)); /* compute values s_i = 1 mod x_i and s_i = 0 mod x_j for j != i @@ -61,7 +61,7 @@ slong n_sqrtmodn(mp_limb_t ** sqrt, mp_limb_t a, n_factor_t * fac) */ for (i = 0; i < fac->num; i++) { - mp_limb_t xp = 1, si; + ulong xp = 1, si; /* compute product of x_j for j != i */ for (j = 0; j < i; j++) diff --git a/src/ulong_extras/sqrtrem.c b/src/ulong_extras/sqrtrem.c index 1326a3dbc9..854f5f37c9 100644 --- a/src/ulong_extras/sqrtrem.c +++ b/src/ulong_extras/sqrtrem.c @@ -13,11 +13,11 @@ #include "flint.h" #include "ulong_extras.h" -mp_limb_t n_sqrtrem(mp_limb_t * r, mp_limb_t a) +ulong n_sqrtrem(ulong * r, ulong a) { - mp_limb_t is; + ulong is; - is = (mp_limb_t) sqrt((double) a); + is = (ulong) sqrt((double) a); is -= (is*is > a); #if FLINT64 diff --git a/src/ulong_extras/test/main.c b/src/ulong_extras/test/main.c index fbc2ce09cd..0fd9ca4180 100644 --- a/src/ulong_extras/test/main.c +++ b/src/ulong_extras/test/main.c @@ -9,9 +9,6 @@ (at your option) any later version. See . 
*/ -#include -#include - /* Include functions *********************************************************/ #include "t-addmod.c" diff --git a/src/ulong_extras/test/t-cbrt.c b/src/ulong_extras/test/t-cbrt.c index 025e19cec9..1c1057786a 100644 --- a/src/ulong_extras/test/t-cbrt.c +++ b/src/ulong_extras/test/t-cbrt.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(n_cbrt, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t n, val, ans; + ulong n, val, ans; mpz_t mpz_n, mpz_val; mpz_init(mpz_n); @@ -47,7 +47,7 @@ TEST_FUNCTION_START(n_cbrt, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t n, val, ans, bits; + ulong n, val, ans, bits; mpz_t mpz_n, mpz_val; mpz_init(mpz_n); @@ -75,7 +75,7 @@ TEST_FUNCTION_START(n_cbrt, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t n, val, ans, bits; + ulong n, val, ans, bits; mpz_t mpz_n, mpz_val; mpz_init(mpz_n); @@ -103,7 +103,7 @@ TEST_FUNCTION_START(n_cbrt, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t n, val, ans, bits; + ulong n, val, ans, bits; mpz_t mpz_n, mpz_val; mpz_init(mpz_n); diff --git a/src/ulong_extras/test/t-cbrt_binary_search.c b/src/ulong_extras/test/t-cbrt_binary_search.c index 232bb9858d..70f89950b6 100644 --- a/src/ulong_extras/test/t-cbrt_binary_search.c +++ b/src/ulong_extras/test/t-cbrt_binary_search.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(n_cbrt_binary_search, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { - mp_limb_t n, val, ans; + ulong n, val, ans; mpz_t mpz_n, mpz_val; mpz_init(mpz_n); @@ -47,7 +47,7 @@ TEST_FUNCTION_START(n_cbrt_binary_search, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { - mp_limb_t n, val, ans, bits; + ulong n, val, ans, bits; mpz_t mpz_n, mpz_val; mpz_init(mpz_n); @@ -75,7 +75,7 @@ TEST_FUNCTION_START(n_cbrt_binary_search, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { - mp_limb_t n, val, ans, bits; + ulong n, val, ans, bits; mpz_t mpz_n, mpz_val; mpz_init(mpz_n); @@ -103,7 +103,7 @@ TEST_FUNCTION_START(n_cbrt_binary_search, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { - mp_limb_t n, val, ans, bits; + ulong n, val, ans, bits; mpz_t mpz_n, mpz_val; mpz_init(mpz_n); diff --git a/src/ulong_extras/test/t-cbrt_chebyshev_approx.c b/src/ulong_extras/test/t-cbrt_chebyshev_approx.c index b3b5ab38d9..246c0625d6 100644 --- a/src/ulong_extras/test/t-cbrt_chebyshev_approx.c +++ b/src/ulong_extras/test/t-cbrt_chebyshev_approx.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(n_cbrt_chebyshev_approx, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { - mp_limb_t n, val, ans; + ulong n, val, ans; mpz_t mpz_n, mpz_val; mpz_init(mpz_n); @@ -46,7 +46,7 @@ TEST_FUNCTION_START(n_cbrt_chebyshev_approx, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { - mp_limb_t n, val, ans, bits; + ulong n, val, ans, bits; mpz_t mpz_n, mpz_val; mpz_init(mpz_n); @@ -74,7 +74,7 @@ TEST_FUNCTION_START(n_cbrt_chebyshev_approx, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { - mp_limb_t n, val, ans, bits; + ulong n, val, ans, bits; mpz_t mpz_n, mpz_val; mpz_init(mpz_n); @@ -102,7 +102,7 @@ TEST_FUNCTION_START(n_cbrt_chebyshev_approx, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) { - mp_limb_t n, val, ans, bits; + ulong n, val, ans, bits; mpz_t mpz_n, mpz_val; mpz_init(mpz_n); diff --git a/src/ulong_extras/test/t-cbrtrem.c b/src/ulong_extras/test/t-cbrtrem.c index a821da4e7a..dd1c726acd 100644 --- a/src/ulong_extras/test/t-cbrtrem.c +++ 
b/src/ulong_extras/test/t-cbrtrem.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(n_cbrtrem, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t a, b, c, i, j; + ulong a, b, c, i, j; mpz_t e, f, g; mpz_init(e); diff --git a/src/ulong_extras/test/t-clog.c b/src/ulong_extras/test/t-clog.c index b3139f1b3e..0546015b90 100644 --- a/src/ulong_extras/test/t-clog.c +++ b/src/ulong_extras/test/t-clog.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(n_clog, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t a = 0, b = 0, k, x; + ulong a = 0, b = 0, k, x; while (a < 1) a = n_randtest(state); diff --git a/src/ulong_extras/test/t-compute_primes.c b/src/ulong_extras/test/t-compute_primes.c index 782d3e606f..9449dcc463 100644 --- a/src/ulong_extras/test/t-compute_primes.c +++ b/src/ulong_extras/test/t-compute_primes.c @@ -17,10 +17,10 @@ TEST_FUNCTION_START(compute_primes, state) { slong i, lim = 1000000; n_primes_t pg; - mp_limb_t * ref_primes; + ulong * ref_primes; double * ref_inverses; - ref_primes = flint_malloc(sizeof(mp_limb_t) * lim); + ref_primes = flint_malloc(sizeof(ulong) * lim); ref_inverses = flint_malloc(sizeof(double) * lim); n_primes_init(pg); @@ -34,7 +34,7 @@ TEST_FUNCTION_START(compute_primes, state) for (i = 0; i < 100 * flint_test_multiplier(); i++) { slong n; - const mp_limb_t * primes; + const ulong * primes; const double * inverses; n = n_randint(state, lim); diff --git a/src/ulong_extras/test/t-divides.c b/src/ulong_extras/test/t-divides.c index 4ce146e5ae..688b54336d 100644 --- a/src/ulong_extras/test/t-divides.c +++ b/src/ulong_extras/test/t-divides.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(n_divides, state) for (i = 0; i < 2000 * flint_test_multiplier(); i++) { - mp_limb_t n, p, q; + ulong n, p, q; int nbits, pbits; int flag, type; diff --git a/src/ulong_extras/test/t-divrem2_precomp.c b/src/ulong_extras/test/t-divrem2_precomp.c index e06b987d5b..352c9ffaaa 100644 --- a/src/ulong_extras/test/t-divrem2_precomp.c +++ b/src/ulong_extras/test/t-divrem2_precomp.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(n_divrem2_precomp, state) for (i = 0; i < 100000 * flint_test_multiplier(); i++) { - mp_limb_t d, n, r1, r2, q1, q2; + ulong d, n, r1, r2, q1, q2; double dpre; d = n_randtest_not_zero(state); diff --git a/src/ulong_extras/test/t-factor.c b/src/ulong_extras/test/t-factor.c index bd4092ee18..64a61fb575 100644 --- a/src/ulong_extras/test/t-factor.c +++ b/src/ulong_extras/test/t-factor.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(n_factor, state) for (ix = 0; ix < 1000 * flint_test_multiplier(); ix++) { - mp_limb_t n1, n2; + ulong n1, n2; n_factor_t factors; int type; diff --git a/src/ulong_extras/test/t-factor_SQUFOF.c b/src/ulong_extras/test/t-factor_SQUFOF.c index f1f90dced0..e14ee6aa12 100644 --- a/src/ulong_extras/test/t-factor_SQUFOF.c +++ b/src/ulong_extras/test/t-factor_SQUFOF.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(n_factor_SQUFOF, state) for (i = 0; i < 300 * flint_test_multiplier(); i++) /* Test random numbers */ { - mp_limb_t n1, n2; + ulong n1, n2; do { diff --git a/src/ulong_extras/test/t-factor_ecm.c b/src/ulong_extras/test/t-factor_ecm.c index 68b0a943e5..13b13f3e54 100644 --- a/src/ulong_extras/test/t-factor_ecm.c +++ b/src/ulong_extras/test/t-factor_ecm.c @@ -16,7 +16,7 @@ TEST_FUNCTION_START(n_factor_ecm, state) { int i, j, k, result, fails; - mp_limb_t prime1, prime2, prod, f, mod; + ulong prime1, prime2, prod, f, mod; fails = 0; diff --git a/src/ulong_extras/test/t-factor_one_line.c b/src/ulong_extras/test/t-factor_one_line.c index 
26f967d772..3fbadabbff 100644 --- a/src/ulong_extras/test/t-factor_one_line.c +++ b/src/ulong_extras/test/t-factor_one_line.c @@ -22,7 +22,7 @@ TEST_FUNCTION_START(n_factor_one_line, state) for (i = 0; i < num_iter; i++) /* Test random numbers */ { - mp_limb_t n1, n2, bits; + ulong n1, n2, bits; do { diff --git a/src/ulong_extras/test/t-factor_partial.c b/src/ulong_extras/test/t-factor_partial.c index 8d88366522..b0c3ee555f 100644 --- a/src/ulong_extras/test/t-factor_partial.c +++ b/src/ulong_extras/test/t-factor_partial.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(n_factor_partial, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) /* Test random numbers */ { - mp_limb_t n1, n2, prod, limit; + ulong n1, n2, prod, limit; n_factor_t factors; n_factor_init(&factors); diff --git a/src/ulong_extras/test/t-factor_trial_partial.c b/src/ulong_extras/test/t-factor_trial_partial.c index fb0aa4f999..87332f5e1a 100644 --- a/src/ulong_extras/test/t-factor_trial_partial.c +++ b/src/ulong_extras/test/t-factor_trial_partial.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(n_factor_trial_partial, state) for (ix = 0; ix < 1000 * flint_test_multiplier(); ix++) /* Test random numbers */ { - mp_limb_t n1, n2, prod, limit; + ulong n1, n2, prod, limit; n_factor_t factors; n_factor_init(&factors); diff --git a/src/ulong_extras/test/t-factor_trial_range.c b/src/ulong_extras/test/t-factor_trial_range.c index 5c10309844..c376690625 100644 --- a/src/ulong_extras/test/t-factor_trial_range.c +++ b/src/ulong_extras/test/t-factor_trial_range.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(n_factor_trial_range, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) /* Test random numbers */ { - mp_limb_t n1, n2; + ulong n1, n2; n_factor_t factors; n_factor_init(&factors); diff --git a/src/ulong_extras/test/t-factorial_fast_mod2_preinv.c b/src/ulong_extras/test/t-factorial_fast_mod2_preinv.c index 63a18ad906..3ca2de02d2 100644 --- a/src/ulong_extras/test/t-factorial_fast_mod2_preinv.c +++ b/src/ulong_extras/test/t-factorial_fast_mod2_preinv.c @@ -14,10 +14,10 @@ #ifndef n_factorial_mod2_foolproof #define n_factorial_mod2_foolproof n_factorial_mod2_foolproof -static mp_limb_t -n_factorial_mod2_foolproof(ulong n, mp_limb_t p, mp_limb_t pinv) +static ulong +n_factorial_mod2_foolproof(ulong n, ulong p, ulong pinv) { - mp_limb_t prod = UWORD(1) % p; + ulong prod = UWORD(1) % p; while (n) { diff --git a/src/ulong_extras/test/t-is_perfect_power.c b/src/ulong_extras/test/t-is_perfect_power.c index 6c395f3c42..c71140d905 100644 --- a/src/ulong_extras/test/t-is_perfect_power.c +++ b/src/ulong_extras/test/t-is_perfect_power.c @@ -17,7 +17,7 @@ TEST_FUNCTION_START(n_is_perfect_power, state) { int i, result; ulong bits, root, hi, n; - mp_limb_t d; + ulong d; for (i = 0; i < 1000 * flint_test_multiplier(); i++) /* Test that squares pass the test */ { diff --git a/src/ulong_extras/test/t-is_perfect_power235.c b/src/ulong_extras/test/t-is_perfect_power235.c index 0f902bda3d..ec23b949ba 100644 --- a/src/ulong_extras/test/t-is_perfect_power235.c +++ b/src/ulong_extras/test/t-is_perfect_power235.c @@ -17,7 +17,7 @@ TEST_FUNCTION_START(n_is_perfect_power235, state) { int i, result; ulong bits; - mp_limb_t d; + ulong d; for (i = 0; i < 1000 * flint_test_multiplier(); i++) /* Test that square pass the test */ { diff --git a/src/ulong_extras/test/t-is_prime.c b/src/ulong_extras/test/t-is_prime.c index c832431ab0..d20ed25fda 100644 --- a/src/ulong_extras/test/t-is_prime.c +++ b/src/ulong_extras/test/t-is_prime.c @@ -15,7 +15,7 @@ #if FLINT64 /* n < 
10^16 that pass base 2, 3, 7, 61 and 24251 sprp test */ -mp_limb_t composites[] = { +ulong composites[] = { UWORD(669094855201), UWORD(1052516956501), UWORD(2007193456621), UWORD(2744715551581), UWORD(9542968210729), UWORD(17699592963781), UWORD(19671510288601), UWORD(24983920772821), UWORD(24984938689453), @@ -63,7 +63,7 @@ mp_limb_t composites[] = { TEST_FUNCTION_START(n_is_prime, state) { int i, result; - mp_limb_t d; + ulong d; mpz_t d_m; slong pow; ulong bits; @@ -123,7 +123,7 @@ TEST_FUNCTION_START(n_is_prime, state) } #if FLINT64 - for (i = 0; i < sizeof(composites) / sizeof(mp_limb_t); i++) + for (i = 0; i < sizeof(composites) / sizeof(ulong); i++) { d = composites[i]; diff --git a/src/ulong_extras/test/t-is_prime_pocklington.c b/src/ulong_extras/test/t-is_prime_pocklington.c index e8f9af30be..987fa15426 100644 --- a/src/ulong_extras/test/t-is_prime_pocklington.c +++ b/src/ulong_extras/test/t-is_prime_pocklington.c @@ -17,7 +17,7 @@ TEST_FUNCTION_START(n_is_prime_pocklington, state) { int i, result; ulong count = 0; - mp_limb_t d; + ulong d; mpz_t d_m; for (i = 0; i < 1000 * flint_test_multiplier(); i++) /* Test that primes pass the test */ diff --git a/src/ulong_extras/test/t-is_prime_pseudosquare.c b/src/ulong_extras/test/t-is_prime_pseudosquare.c index baeaacdb91..da53c37654 100644 --- a/src/ulong_extras/test/t-is_prime_pseudosquare.c +++ b/src/ulong_extras/test/t-is_prime_pseudosquare.c @@ -16,7 +16,7 @@ TEST_FUNCTION_START(n_is_prime_pseudosquare, state) { int i, result; - mp_limb_t d; + ulong d; mpz_t d_m; for (i = 0; i < 1000 * flint_test_multiplier(); i++) /* Test that primes pass the test */ diff --git a/src/ulong_extras/test/t-is_probabprime.c b/src/ulong_extras/test/t-is_probabprime.c index ddab8ce7d9..e1a720206e 100644 --- a/src/ulong_extras/test/t-is_probabprime.c +++ b/src/ulong_extras/test/t-is_probabprime.c @@ -16,7 +16,7 @@ TEST_FUNCTION_START(n_is_probabprime, state) { int i, result; - mp_limb_t d; + ulong d; mpz_t d_m; slong pow; ulong bits; diff --git a/src/ulong_extras/test/t-is_probabprime_BPSW.c b/src/ulong_extras/test/t-is_probabprime_BPSW.c index aacff57ac7..98396622ca 100644 --- a/src/ulong_extras/test/t-is_probabprime_BPSW.c +++ b/src/ulong_extras/test/t-is_probabprime_BPSW.c @@ -16,7 +16,7 @@ TEST_FUNCTION_START(n_is_probabprime_BPSW, state) { int i, result; - mp_limb_t d; + ulong d; mpz_t d_m; for (i = 0; i < 10000 * flint_test_multiplier(); i++) /* Test that primes pass the test */ diff --git a/src/ulong_extras/test/t-is_probabprime_fermat.c b/src/ulong_extras/test/t-is_probabprime_fermat.c index a975bfa53b..f4119edb1b 100644 --- a/src/ulong_extras/test/t-is_probabprime_fermat.c +++ b/src/ulong_extras/test/t-is_probabprime_fermat.c @@ -17,7 +17,7 @@ TEST_FUNCTION_START(n_is_probabprime_fermat, state) { int i, result; ulong count = UWORD(0); - mp_limb_t d, j; + ulong d, j; mpz_t d_m; for (i = 0; i < 10000 * flint_test_multiplier(); i++) /* Test that primes pass the test */ diff --git a/src/ulong_extras/test/t-is_probabprime_fibonacci.c b/src/ulong_extras/test/t-is_probabprime_fibonacci.c index d5a0dd5192..737bf315b6 100644 --- a/src/ulong_extras/test/t-is_probabprime_fibonacci.c +++ b/src/ulong_extras/test/t-is_probabprime_fibonacci.c @@ -17,7 +17,7 @@ TEST_FUNCTION_START(n_is_probabprime_fibonacci, state) { int i, result; ulong count = UWORD(0); - mp_limb_t d; + ulong d; mpz_t d_m; slong test_multiplier; diff --git a/src/ulong_extras/test/t-is_probabprime_lucas.c b/src/ulong_extras/test/t-is_probabprime_lucas.c index 90f313ba52..1573e60819 100644 --- 
a/src/ulong_extras/test/t-is_probabprime_lucas.c +++ b/src/ulong_extras/test/t-is_probabprime_lucas.c @@ -17,7 +17,7 @@ TEST_FUNCTION_START(n_is_probabprime_lucas, state) { int i, result; ulong count = UWORD(0); - mp_limb_t d; + ulong d; mpz_t d_m; slong test_multiplier; diff --git a/src/ulong_extras/test/t-is_square.c b/src/ulong_extras/test/t-is_square.c index c16d3b3c7f..1ae40f1e39 100644 --- a/src/ulong_extras/test/t-is_square.c +++ b/src/ulong_extras/test/t-is_square.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(n_is_square, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) /* Test that non-squares pass */ { - mp_limb_t a, s, bits; + ulong a, s, bits; bits = n_randint(state, FLINT_BITS/2) + 1; a = n_randtest_bits(state, bits); @@ -31,7 +31,7 @@ TEST_FUNCTION_START(n_is_square, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) /* Test that squares pass */ { - mp_limb_t a, s, bits; + ulong a, s, bits; bits = n_randint(state, FLINT_BITS/2); a = n_randtest_bits(state, bits); diff --git a/src/ulong_extras/test/t-is_strong_probabprime2_preinv.c b/src/ulong_extras/test/t-is_strong_probabprime2_preinv.c index 5f6cc31eee..8ce469693d 100644 --- a/src/ulong_extras/test/t-is_strong_probabprime2_preinv.c +++ b/src/ulong_extras/test/t-is_strong_probabprime2_preinv.c @@ -23,7 +23,7 @@ TEST_FUNCTION_START(n_is_strong_probabprime2_preinv, state) for (i = 0; i < 100 * test_multiplier; i++) /* Test that primes pass the test */ { - mp_limb_t a, d, dinv, norm; + ulong a, d, dinv, norm; mpz_t d_m; ulong j; @@ -56,7 +56,7 @@ TEST_FUNCTION_START(n_is_strong_probabprime2_preinv, state) for (i = 0; i < 100 * test_multiplier; i++) /* Test that not too many composites pass */ { - mp_limb_t a, d, dinv, norm; + ulong a, d, dinv, norm; mpz_t d_m; ulong j; diff --git a/src/ulong_extras/test/t-is_strong_probabprime_precomp.c b/src/ulong_extras/test/t-is_strong_probabprime_precomp.c index 6db8775180..5fef501700 100644 --- a/src/ulong_extras/test/t-is_strong_probabprime_precomp.c +++ b/src/ulong_extras/test/t-is_strong_probabprime_precomp.c @@ -23,10 +23,10 @@ TEST_FUNCTION_START(n_is_strong_probabprime_precomp, state) for (i = 0; i < 100 * test_multiplier; i++) /* Test that primes pass the test */ { - mp_limb_t a, d, norm; + ulong a, d, norm; mpz_t d_m; double dpre; - mp_limb_t bits = n_randint(state, FLINT_D_BITS-1) + 2; + ulong bits = n_randint(state, FLINT_D_BITS-1) + 2; mpz_init(d_m); @@ -57,10 +57,10 @@ TEST_FUNCTION_START(n_is_strong_probabprime_precomp, state) for (i = 0; i < 100 * test_multiplier; i++) /* Test that not too many composites pass */ { - mp_limb_t a, d, norm; + ulong a, d, norm; mpz_t d_m; double dpre; - mp_limb_t bits = n_randint(state, FLINT_D_BITS-3) + 4; + ulong bits = n_randint(state, FLINT_D_BITS-3) + 4; mpz_init(d_m); diff --git a/src/ulong_extras/test/t-jacobi.c b/src/ulong_extras/test/t-jacobi.c index a8a890267f..6a8674980a 100644 --- a/src/ulong_extras/test/t-jacobi.c +++ b/src/ulong_extras/test/t-jacobi.c @@ -19,9 +19,9 @@ TEST_FUNCTION_START(n_jacobi, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t d; + ulong d; mpz_t a_m, d_m; - mp_limb_signed_t a; + slong a; int r1, r2; mpz_init(a_m); diff --git a/src/ulong_extras/test/t-mod2_precomp.c b/src/ulong_extras/test/t-mod2_precomp.c index b15e5b169a..4cbf87357f 100644 --- a/src/ulong_extras/test/t-mod2_precomp.c +++ b/src/ulong_extras/test/t-mod2_precomp.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(n_mod2_precomp, state) for (i = 0; i < 100000 * flint_test_multiplier(); i++) { - mp_limb_t d, n, r1, r2; 
+ ulong d, n, r1, r2; double dpre; d = n_randtest_not_zero(state); diff --git a/src/ulong_extras/test/t-mod_precomp.c b/src/ulong_extras/test/t-mod_precomp.c index d0a64f5280..936a2736e7 100644 --- a/src/ulong_extras/test/t-mod_precomp.c +++ b/src/ulong_extras/test/t-mod_precomp.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(n_mod_precomp, state) for (i = 0; i < 100000 * flint_test_multiplier(); i++) { - mp_limb_t bits, d, n, r1, r2; + ulong bits, d, n, r1, r2; double dpre; bits = n_randint(state, FLINT_D_BITS) + 1; diff --git a/src/ulong_extras/test/t-mulmod_precomp.c b/src/ulong_extras/test/t-mulmod_precomp.c index 46b47121a0..2a6d995373 100644 --- a/src/ulong_extras/test/t-mulmod_precomp.c +++ b/src/ulong_extras/test/t-mulmod_precomp.c @@ -18,10 +18,10 @@ TEST_FUNCTION_START(n_mulmod_precomp, state) for (i = 0; i < 100000 * flint_test_multiplier(); i++) { - mp_limb_t a, b, d, r1, r2, p1, p2, dinv; + ulong a, b, d, r1, r2, p1, p2, dinv; double dpre; - mp_limb_t bits = n_randint(state, FLINT_D_BITS) + 1; + ulong bits = n_randint(state, FLINT_D_BITS) + 1; d = n_randtest_bits(state, bits); a = n_randtest(state) % d; b = n_randtest(state) % d; diff --git a/src/ulong_extras/test/t-mulmod_shoup.c b/src/ulong_extras/test/t-mulmod_shoup.c index ec4f1b4d04..e47d4373d9 100644 --- a/src/ulong_extras/test/t-mulmod_shoup.c +++ b/src/ulong_extras/test/t-mulmod_shoup.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(n_mulmod_shoup, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t a, b, d, r1, r2, q, p1, p2, w_pr; + ulong a, b, d, r1, r2, q, p1, p2, w_pr; d = n_randtest_not_zero(state) / 2 + 1; a = n_randtest(state) % d; diff --git a/src/ulong_extras/test/t-nextprime.c b/src/ulong_extras/test/t-nextprime.c index 82d81c369e..aea5121691 100644 --- a/src/ulong_extras/test/t-nextprime.c +++ b/src/ulong_extras/test/t-nextprime.c @@ -17,8 +17,8 @@ TEST_FUNCTION_START(n_nextprime, state) { - mp_limb_t n; - mp_limb_t res1, res2; + ulong n; + ulong res1, res2; slong rep; mpz_t mpz_n; diff --git a/src/ulong_extras/test/t-nth_prime_bounds.c b/src/ulong_extras/test/t-nth_prime_bounds.c index aad13b89ce..9bb413be34 100644 --- a/src/ulong_extras/test/t-nth_prime_bounds.c +++ b/src/ulong_extras/test/t-nth_prime_bounds.c @@ -15,7 +15,7 @@ #define check_prime_bounds(n, ans) \ do { \ int ok, reasonable; \ - mp_limb_t lo, hi; \ + ulong lo, hi; \ n_nth_prime_bounds(&lo, &hi, n); \ \ ok = lo <= ans && ans <= hi; \ diff --git a/src/ulong_extras/test/t-pow.c b/src/ulong_extras/test/t-pow.c index d3896caa94..c97ba2f64e 100644 --- a/src/ulong_extras/test/t-pow.c +++ b/src/ulong_extras/test/t-pow.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(n_pow, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) /* Test a^e1 * a^e2 = a^(e1 + e2) */ { - mp_limb_t exp1, exp2, n, bits, r1, r2; + ulong exp1, exp2, n, bits, r1, r2; bits = n_randint(state, 55) + 10; exp1 = n_randint(state, 5); diff --git a/src/ulong_extras/test/t-powmod.c b/src/ulong_extras/test/t-powmod.c index 5a0dfa841c..259d24d958 100644 --- a/src/ulong_extras/test/t-powmod.c +++ b/src/ulong_extras/test/t-powmod.c @@ -19,9 +19,9 @@ TEST_FUNCTION_START(n_powmod, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t bits, a, d, r1, r2; + ulong bits, a, d, r1, r2; mpz_t a_m, d_m, r2_m; - mp_limb_signed_t exp; + slong exp; mpz_init(a_m); mpz_init(d_m); @@ -62,7 +62,7 @@ TEST_FUNCTION_START(n_powmod, state) /* check 0^0 = 1 */ for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t bits, d, r; + ulong bits, d, r; bits = n_randint(state, 
FLINT_D_BITS) + 1; d = n_randtest_bits(state, bits); diff --git a/src/ulong_extras/test/t-powmod_precomp.c b/src/ulong_extras/test/t-powmod_precomp.c index d5b64e83c3..b7eb28594a 100644 --- a/src/ulong_extras/test/t-powmod_precomp.c +++ b/src/ulong_extras/test/t-powmod_precomp.c @@ -19,9 +19,9 @@ TEST_FUNCTION_START(n_powmod_precomp, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t a, d, r1, r2, bits; + ulong a, d, r1, r2, bits; mpz_t a_m, d_m, r2_m; - mp_limb_signed_t exp; + slong exp; double dpre; mpz_init(a_m); @@ -64,7 +64,7 @@ TEST_FUNCTION_START(n_powmod_precomp, state) /* check 0^0 = 1 */ for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t bits, d, r; + ulong bits, d, r; double dpre; bits = n_randint(state, FLINT_D_BITS) + 1; diff --git a/src/ulong_extras/test/t-powmod_ui_precomp.c b/src/ulong_extras/test/t-powmod_ui_precomp.c index ce71fb3b9a..13802ac90a 100644 --- a/src/ulong_extras/test/t-powmod_ui_precomp.c +++ b/src/ulong_extras/test/t-powmod_ui_precomp.c @@ -19,9 +19,9 @@ TEST_FUNCTION_START(n_powmod_ui_precomp, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t a, d, r1, r2, bits; + ulong a, d, r1, r2, bits; mpz_t a_m, d_m, r2_m; - mp_limb_t exp; + ulong exp; double dpre; mpz_init(a_m); @@ -59,7 +59,7 @@ TEST_FUNCTION_START(n_powmod_ui_precomp, state) /* check 0^0 = 1 */ for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t bits, d, r; + ulong bits, d, r; double dpre; bits = n_randint(state, FLINT_D_BITS) + 1; diff --git a/src/ulong_extras/test/t-preinvert_limb_prenorm.c b/src/ulong_extras/test/t-preinvert_limb_prenorm.c index 24e4feb61e..d015499797 100644 --- a/src/ulong_extras/test/t-preinvert_limb_prenorm.c +++ b/src/ulong_extras/test/t-preinvert_limb_prenorm.c @@ -14,7 +14,7 @@ #define invert_limb_naive(ninv, n) \ do { \ - mp_limb_t dummy; \ + ulong dummy; \ udiv_qrnnd (ninv, dummy, ~(n), ~(WORD(0)), n); \ } while (0) @@ -24,7 +24,7 @@ TEST_FUNCTION_START(n_preinvert_limb_prenorm, state) for (i = 0; i < 100000 * flint_test_multiplier(); i++) { - mp_limb_t n, ninv1, ninv2; + ulong n, ninv1, ninv2; n = n_randtest(state); n |= (UWORD(1) << (FLINT_BITS - 1)); diff --git a/src/ulong_extras/test/t-primes.c b/src/ulong_extras/test/t-primes.c index 13a3d0809a..da0d34a3b2 100644 --- a/src/ulong_extras/test/t-primes.c +++ b/src/ulong_extras/test/t-primes.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(n_primes, state) /* compare with n_nextprime */ { n_primes_t iter; - mp_limb_t p, q; + ulong p, q; n_primes_init(iter); q = 0; diff --git a/src/ulong_extras/test/t-primes_jump_after.c b/src/ulong_extras/test/t-primes_jump_after.c index b9f66530e1..c36acff26b 100644 --- a/src/ulong_extras/test/t-primes_jump_after.c +++ b/src/ulong_extras/test/t-primes_jump_after.c @@ -24,7 +24,7 @@ TEST_FUNCTION_START(n_primes_jump_after, state) for (k = 0; k < 10 * flint_test_multiplier(); k++) { - mp_limb_t p, q; + ulong p, q; q = n_randint(state, UWORD(1000000000)); diff --git a/src/ulong_extras/test/t-remove.c b/src/ulong_extras/test/t-remove.c index 78ca4aa5d0..189244b177 100644 --- a/src/ulong_extras/test/t-remove.c +++ b/src/ulong_extras/test/t-remove.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(n_remove, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) /* Test random numbers */ { - mp_limb_t n1, n2, orig_n; + ulong n1, n2, orig_n; mpz_t d_n2, d_n1, d_p; int exp1, exp2; ulong j; @@ -51,7 +51,7 @@ TEST_FUNCTION_START(n_remove, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) /* Test perfect powers */ { - 
mp_limb_t n1, n2, orig_n, base; + ulong n1, n2, orig_n, base; mpz_t d_n2, d_n1, d_p; int exp1, exp2, exp; ulong j; diff --git a/src/ulong_extras/test/t-remove2_precomp.c b/src/ulong_extras/test/t-remove2_precomp.c index 5f9e4352b0..152868ca96 100644 --- a/src/ulong_extras/test/t-remove2_precomp.c +++ b/src/ulong_extras/test/t-remove2_precomp.c @@ -16,7 +16,7 @@ TEST_FUNCTION_START(n_remove2_precomp, state) { int i, result; - const mp_limb_t * primes; + const ulong * primes; const double * inverses; primes = n_primes_arr_readonly(10000); @@ -24,7 +24,7 @@ TEST_FUNCTION_START(n_remove2_precomp, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) /* Test random numbers */ { - mp_limb_t n1, n2, orig_n; + ulong n1, n2, orig_n; mpz_t d_n2, d_n1, d_p; int exp1, exp2; ulong j; @@ -56,7 +56,7 @@ TEST_FUNCTION_START(n_remove2_precomp, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) /* Test perfect powers */ { - mp_limb_t n1, n2, orig_n, base; + ulong n1, n2, orig_n, base; mpz_t d_n2, d_n1, d_p; int exp1, exp2, exp; ulong j; diff --git a/src/ulong_extras/test/t-root.c b/src/ulong_extras/test/t-root.c index 096fb3672a..98a7049b97 100644 --- a/src/ulong_extras/test/t-root.c +++ b/src/ulong_extras/test/t-root.c @@ -16,7 +16,7 @@ TEST_FUNCTION_START(n_root, state) { int i, result; - mp_limb_t upper_limit; + ulong upper_limit; #if FLINT64 upper_limit = 2642245; @@ -28,7 +28,7 @@ TEST_FUNCTION_START(n_root, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t a, c, d, val; + ulong a, c, d, val; mpz_t e, f, g; mpz_init(e); @@ -64,7 +64,7 @@ TEST_FUNCTION_START(n_root, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t a, c, d, max_pow, base; + ulong a, c, d, max_pow, base; base = n_randint(state, upper_limit - 2) + 2; /* base form 2 to 2642245*/ max_pow = n_flog(UWORD_MAX, base); @@ -88,7 +88,7 @@ TEST_FUNCTION_START(n_root, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t a, c, d, max_pow, base; + ulong a, c, d, max_pow, base; base = n_randint(state, upper_limit - 2) + 2; /* base between 2 to 2642245*/ max_pow = n_flog(UWORD_MAX, base); @@ -112,7 +112,7 @@ TEST_FUNCTION_START(n_root, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t a, c, d, max_pow, base, val; + ulong a, c, d, max_pow, base, val; mpz_t e, g, h; mpz_init(e); diff --git a/src/ulong_extras/test/t-rootrem.c b/src/ulong_extras/test/t-rootrem.c index 9564b2b326..b699c6a32d 100644 --- a/src/ulong_extras/test/t-rootrem.c +++ b/src/ulong_extras/test/t-rootrem.c @@ -16,7 +16,7 @@ TEST_FUNCTION_START(n_rootrem, state) { int i, result; - mp_limb_t upper_limit; + ulong upper_limit; #if FLINT64 upper_limit = 2642245; @@ -28,7 +28,7 @@ TEST_FUNCTION_START(n_rootrem, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t a, b, c, d, val, j; + ulong a, b, c, d, val, j; mpz_t e, f, g, h; mpz_init(e); @@ -68,7 +68,7 @@ TEST_FUNCTION_START(n_rootrem, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t a, b, c, d, max_pow, base; + ulong a, b, c, d, max_pow, base; base = n_randint(state, upper_limit - 2) + 2; /* base form 2 to 2642245*/ max_pow = n_flog(UWORD_MAX, base); @@ -92,7 +92,7 @@ TEST_FUNCTION_START(n_rootrem, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t a, b, c, d, max_pow, base; + ulong a, b, c, d, max_pow, base; base = n_randint(state, upper_limit - 2) + 2; /* base between 2 to 2642245*/ max_pow = n_flog(UWORD_MAX, base); @@ -116,7 +116,7 @@ 
TEST_FUNCTION_START(n_rootrem, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t a, b, c, d, j, val, max_pow, base; + ulong a, b, c, d, j, val, max_pow, base; mpz_t e, f, g, h; mpz_init(e); diff --git a/src/ulong_extras/test/t-sizeinbase.c b/src/ulong_extras/test/t-sizeinbase.c index ea2ae9927e..308e8623b8 100644 --- a/src/ulong_extras/test/t-sizeinbase.c +++ b/src/ulong_extras/test/t-sizeinbase.c @@ -16,7 +16,7 @@ TEST_FUNCTION_START(n_sizeinbase, state) { - mp_limb_t n; + ulong n; int base, size1, size2; slong rep; mpz_t t; diff --git a/src/ulong_extras/test/t-sqrt.c b/src/ulong_extras/test/t-sqrt.c index 73f0c8077d..3b4f5fbf9f 100644 --- a/src/ulong_extras/test/t-sqrt.c +++ b/src/ulong_extras/test/t-sqrt.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(n_sqrt, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t a, s1, s2; + ulong a, s1, s2; mpz_t a_m, s2_m; mpz_init(a_m); @@ -43,7 +43,7 @@ TEST_FUNCTION_START(n_sqrt, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t a, s1, s2, bits; + ulong a, s1, s2, bits; mpz_t a_m, s2_m; mpz_init(a_m); diff --git a/src/ulong_extras/test/t-sqrtmod.c b/src/ulong_extras/test/t-sqrtmod.c index 5fa56c70d3..bcd510f2a9 100644 --- a/src/ulong_extras/test/t-sqrtmod.c +++ b/src/ulong_extras/test/t-sqrtmod.c @@ -18,7 +18,7 @@ TEST_FUNCTION_START(n_sqrtmod, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) /* Test random integers */ { - mp_limb_t a, b, p, pinv; + ulong a, b, p, pinv; p = n_randtest_prime(state, 0); a = n_randtest(state) % p; @@ -37,7 +37,7 @@ TEST_FUNCTION_START(n_sqrtmod, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) /* Test random squares */ { - mp_limb_t a, b, p, pinv; + ulong a, b, p, pinv; p = n_randtest_prime(state, 0); diff --git a/src/ulong_extras/test/t-sqrtmod_primepow.c b/src/ulong_extras/test/t-sqrtmod_primepow.c index 73e8112eb2..5536657cc4 100644 --- a/src/ulong_extras/test/t-sqrtmod_primepow.c +++ b/src/ulong_extras/test/t-sqrtmod_primepow.c @@ -18,9 +18,9 @@ TEST_FUNCTION_START(n_sqrtmod_primepow, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) /* Test random squares mod a power of 2 */ { - mp_limb_t a, b, p, pow, pow2, pinv; + ulong a, b, p, pow, pow2, pinv; slong exp, num, i; - mp_limb_t * sqrt; + ulong * sqrt; int btest; p = 2; @@ -69,10 +69,10 @@ TEST_FUNCTION_START(n_sqrtmod_primepow, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) /* Test random squares mod other prime powers */ { - mp_limb_t a, b, p, pow, pow2, pinv; + ulong a, b, p, pow, pow2, pinv; slong exp, maxexp, num, i; flint_bitcnt_t bits; - mp_limb_t * sqrt; + ulong * sqrt; int btest; bits = n_randint(state, 18) + 2; @@ -125,10 +125,10 @@ TEST_FUNCTION_START(n_sqrtmod_primepow, state) for (i = 0; i < 500 * flint_test_multiplier(); i++) /* Test random nonsquares */ { - mp_limb_t a, b, p, pow, pinv; + ulong a, b, p, pow, pinv; slong exp, maxexp; flint_bitcnt_t bits; - mp_limb_t * sqrt; + ulong * sqrt; bits = n_randint(state, 18) + 2; p = n_randprime(state, bits, 0); diff --git a/src/ulong_extras/test/t-sqrtmodn.c b/src/ulong_extras/test/t-sqrtmodn.c index 094eaa0001..c79d3507d4 100644 --- a/src/ulong_extras/test/t-sqrtmodn.c +++ b/src/ulong_extras/test/t-sqrtmodn.c @@ -18,10 +18,10 @@ TEST_FUNCTION_START(n_sqrtmodn, state) for (i = 0; i < 1000 * flint_test_multiplier(); i++) /* Test random squares mod n */ { - mp_limb_t a, b, n, ninv; + ulong a, b, n, ninv; slong num, i; flint_bitcnt_t bits; - mp_limb_t * sqrt; + ulong * sqrt; int btest; n_factor_t fac; @@ 
-62,9 +62,9 @@ TEST_FUNCTION_START(n_sqrtmodn, state) for (i = 0; i < 500 * flint_test_multiplier(); i++) /* test random nonsquares */ { - mp_limb_t a, b, n, ninv; + ulong a, b, n, ninv; flint_bitcnt_t bits; - mp_limb_t * sqrt; + ulong * sqrt; n_factor_t fac; bits = n_randint(state, 18) + 2; diff --git a/src/ulong_extras/test/t-sqrtrem.c b/src/ulong_extras/test/t-sqrtrem.c index 59eb47968e..ca74be5ebb 100644 --- a/src/ulong_extras/test/t-sqrtrem.c +++ b/src/ulong_extras/test/t-sqrtrem.c @@ -19,7 +19,7 @@ TEST_FUNCTION_START(n_sqrtrem, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t a, r1, r2, s1, s2; + ulong a, r1, r2, s1, s2; mpz_t a_m, r2_m, s2_m; mpz_init(a_m); @@ -46,7 +46,7 @@ TEST_FUNCTION_START(n_sqrtrem, state) for (i = 0; i < 10000 * flint_test_multiplier(); i++) { - mp_limb_t a, r1, r2, s1, s2, bits; + ulong a, r1, r2, s1, s2, bits; mpz_t a_m, r2_m, s2_m; mpz_init(a_m);